From 595d86f0cbd077b5bd78bf144b02f4818e7d284a Mon Sep 17 00:00:00 2001 From: clonemycode Date: Thu, 2 May 2024 01:10:28 +0800 Subject: [PATCH 0001/1400] chore: remove repetitive words Signed-off-by: clonemycode --- clarity/src/vm/contexts.rs | 4 ++-- clarity/src/vm/tests/principals.rs | 2 +- stackslib/src/chainstate/stacks/index/storage.rs | 2 +- stackslib/src/chainstate/stacks/miner.rs | 4 ++-- testnet/stacks-node/src/tests/epoch_23.rs | 6 +++--- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 6e610e85d9..87322d18e8 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -198,7 +198,7 @@ pub struct GlobalContext<'a, 'hooks> { read_only: Vec, pub cost_track: LimitedCostTracker, pub mainnet: bool, - /// This is the epoch of the the block that this transaction is executing within. + /// This is the epoch of the block that this transaction is executing within. pub epoch_id: StacksEpochId, /// This is the chain ID of the transaction pub chain_id: u32, @@ -1053,7 +1053,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { result } - /// This is the epoch of the the block that this transaction is executing within. + /// This is the epoch of the block that this transaction is executing within. /// Note: in the current plans for 2.1, there is also a contract-specific **Clarity version** /// which governs which native functions are available / defined. That is separate from this /// epoch identifier, and most Clarity VM changes should consult that value instead. This diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs index 78fcf17659..44f3447bad 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -657,7 +657,7 @@ fn test_principal_destruct_bad_version_byte() { // Standard case where construction should work. We compare the output of the // Clarity function to hand-built principals. fn test_principal_construct_good() { - // We always use the the same bytes buffer. + // We always use the same bytes buffer. let mut transfer_buffer = [0u8; 20]; transfer_buffer .copy_from_slice(&hex_bytes("fa6bf38ed557fe417333710d6033e9419391a320").unwrap()); diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 97f7ca999a..8f01117153 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -1656,7 +1656,7 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { // save the currently-buffered Trie to disk, and atomically put it into place (possibly to // a different block than the one opened, as indicated by final_bhh). // Runs once -- subsequent calls are no-ops. - // Panics on a failure to rename the Trie file into place (i.e. if the the actual commitment + // Panics on a failure to rename the Trie file into place (i.e. if the actual commitment // fails). self.clear_cached_ancestor_hashes_bytes(); if self.data.readonly { diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index f718a9fb36..7bb1ea7ca7 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -337,7 +337,7 @@ pub enum TransactionResult { Success(TransactionSuccess), /// Transaction failed when processed. ProcessingError(TransactionError), - /// Transaction wasn't ready to be be processed, but might succeed later. + /// Transaction wasn't ready to be processed, but might succeed later. Skipped(TransactionSkipped), /// Transaction is problematic (e.g. a DDoS vector) and should be dropped. /// This error variant is a placeholder for fixing Clarity VM quirks in the next network @@ -353,7 +353,7 @@ pub enum TransactionEvent { Success(TransactionSuccessEvent), /// Transaction failed. It may succeed later depending on the error. ProcessingError(TransactionErrorEvent), - /// Transaction wasn't ready to be be processed, but might succeed later. + /// Transaction wasn't ready to be processed, but might succeed later. /// The bool represents whether mempool propagation should halt or continue Skipped(TransactionSkippedEvent), /// Transaction is problematic and will be dropped diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 470eda9672..0452be8476 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -268,7 +268,7 @@ fn trait_invocation_behavior() { submit_tx(&http_origin, &tx_1); submit_tx(&http_origin, &tx_2); - // this mines bitcoin block epoch_2_1 - 2, and causes the the + // this mines bitcoin block epoch_2_1 - 2, and causes the // stacks node to mine the stacks block which will be included in // epoch_2_1 - 1, so these are the last transactions processed pre-2.1. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -339,7 +339,7 @@ fn trait_invocation_behavior() { submit_tx(&http_origin, &tx_1); submit_tx(&http_origin, &tx_2); - // this mines bitcoin block epoch_2_2 - 2, and causes the the + // this mines bitcoin block epoch_2_2 - 2, and causes the // stacks node to mine the stacks block which will be included in // epoch_2_2 - 1, so these are the last transactions processed pre-2.2. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -424,7 +424,7 @@ fn trait_invocation_behavior() { submit_tx(&http_origin, &tx_1); submit_tx(&http_origin, &tx_2); - // this mines bitcoin block epoch_2_3 - 2, and causes the the + // this mines bitcoin block epoch_2_3 - 2, and causes the // stacks node to mine the stacks block which will be included in // epoch_2_3 - 1, so these are the last transactions processed pre-2.3. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); From bd86f0d2fa38837f311f8e9fbdbd1214b5c714df Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 13 May 2024 16:26:51 +0300 Subject: [PATCH 0002/1400] warn for not using local stacks node with the signer --- stacks-signer/src/main.rs | 11 +++++++++-- stacks-signer/src/runloop.rs | 1 + 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 9d6f9c2931..14bea4d8f5 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -35,10 +35,10 @@ use clap::Parser; use clarity::vm::types::QualifiedContractIdentifier; use libsigner::{RunningSigner, Signer, SignerEventReceiver, SignerSession, StackerDBSession}; use libstackerdb::StackerDBChunkData; -use slog::{slog_debug, slog_info}; +use slog::{slog_debug, slog_info, slog_warn}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; -use stacks_common::{debug, info}; +use stacks_common::{debug, info, warn}; use stacks_signer::cli::{ Cli, Command, GenerateStackingSignatureArgs, GetChunkArgs, GetLatestChunkArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, @@ -81,6 +81,13 @@ fn write_chunk_to_stdout(chunk_opt: Option>) { fn spawn_running_signer(path: &PathBuf) -> SpawnedSigner { let config = GlobalConfig::try_from(path).unwrap(); let endpoint = config.endpoint; + // TODO: check if config.node_host is in a given list of possible local hosts and only if it now, then display the message? + warn!( + "The signer is primarily designed for use with a local stacks node. + It's important to exercise caution if you are communicating with an external node, + as this could potentially expose sensitive data or functionalities to security risks + if additional proper security checks are not in place." + ); info!("Starting signer with config: {}", config); let (cmd_send, cmd_recv) = channel(); let (res_send, res_recv) = channel(); diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 905b4f307c..bc544337a6 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -121,6 +121,7 @@ impl From for RunLoop { /// Creates new runloop from a config fn from(config: GlobalConfig) -> Self { let stacks_client = StacksClient::from(&config); + // TODO: or insert the warn related to stacks-node used by the stacks-signer here? Self { config, stacks_client, From 26e5570f343990703fea69c9977a154642653588 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 14 May 2024 01:38:20 +0300 Subject: [PATCH 0003/1400] removes annotations and keep warn as 1 displayed line --- stacks-signer/src/main.rs | 9 ++++----- stacks-signer/src/runloop.rs | 1 - 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 14bea4d8f5..4ecbc12ae6 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -81,12 +81,11 @@ fn write_chunk_to_stdout(chunk_opt: Option>) { fn spawn_running_signer(path: &PathBuf) -> SpawnedSigner { let config = GlobalConfig::try_from(path).unwrap(); let endpoint = config.endpoint; - // TODO: check if config.node_host is in a given list of possible local hosts and only if it now, then display the message? warn!( - "The signer is primarily designed for use with a local stacks node. - It's important to exercise caution if you are communicating with an external node, - as this could potentially expose sensitive data or functionalities to security risks - if additional proper security checks are not in place." + "The signer is primarily designed for use with a local stacks node. \ + It's important to exercise caution if you are communicating with an external node, \ + as this could potentially expose sensitive data or functionalities to security risks \ + if additional proper security checks are not integrated in place." ); info!("Starting signer with config: {}", config); let (cmd_send, cmd_recv) = channel(); diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index bc544337a6..905b4f307c 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -121,7 +121,6 @@ impl From for RunLoop { /// Creates new runloop from a config fn from(config: GlobalConfig) -> Self { let stacks_client = StacksClient::from(&config); - // TODO: or insert the warn related to stacks-node used by the stacks-signer here? Self { config, stacks_client, From 6a62c982fefe60890a8281f66d14c15ec73dafe3 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 13 May 2024 15:52:42 -0700 Subject: [PATCH 0004/1400] Add v0 signer. Still needs tests Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 12 +- stacks-signer/src/client/mod.rs | 14 +- stacks-signer/src/lib.rs | 2 + stacks-signer/src/runloop.rs | 2 +- stacks-signer/src/{v1 => }/signerdb.rs | 51 ++- stacks-signer/src/traits.rs | 4 +- stacks-signer/src/v0/mod.rs | 5 + stacks-signer/src/v0/signer.rs | 318 ++++++++++++++++++ stacks-signer/src/v0/stackerdb.rs | 268 +++++++++++++++ stacks-signer/src/v1/mod.rs | 4 +- stacks-signer/src/v1/signer.rs | 56 +-- stacks-signer/src/{client => v1}/stackerdb.rs | 13 +- testnet/stacks-node/src/tests/signer.rs | 3 +- 13 files changed, 677 insertions(+), 75 deletions(-) rename stacks-signer/src/{v1 => }/signerdb.rs (86%) create mode 100644 stacks-signer/src/v0/signer.rs create mode 100644 stacks-signer/src/v0/stackerdb.rs rename stacks-signer/src/{client => v1}/stackerdb.rs (97%) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 25336b798e..402b3b4c60 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -204,7 +204,8 @@ impl StacksMessageCodecExtensions for HashSet { define_u8_enum!(RejectCodeTypePrefix{ ValidationFailed = 0, - ConnectivityIssues = 1 + ConnectivityIssues = 1, + RejectedInPriorRound = 2 }); impl TryFrom for RejectCodeTypePrefix { @@ -221,6 +222,7 @@ impl From<&RejectCode> for RejectCodeTypePrefix { match reject_code { RejectCode::ValidationFailed(_) => RejectCodeTypePrefix::ValidationFailed, RejectCode::ConnectivityIssues => RejectCodeTypePrefix::ConnectivityIssues, + RejectCode::RejectedInPriorRound => RejectCodeTypePrefix::RejectedInPriorRound, } } } @@ -232,6 +234,8 @@ pub enum RejectCode { ValidationFailed(ValidateRejectCode), /// The block was rejected due to connectivity issues with the signer ConnectivityIssues, + /// The block was rejected in a prior round + RejectedInPriorRound, } /// The response that a signer sends back to observing miners @@ -376,6 +380,7 @@ impl StacksMessageCodec for RejectCode { match self { RejectCode::ValidationFailed(code) => write_next(fd, &(*code as u8))?, RejectCode::ConnectivityIssues => write_next(fd, &1u8)?, + RejectCode::RejectedInPriorRound => write_next(fd, &2u8)?, }; Ok(()) } @@ -393,6 +398,7 @@ impl StacksMessageCodec for RejectCode { })?, ), RejectCodeTypePrefix::ConnectivityIssues => RejectCode::ConnectivityIssues, + RejectCodeTypePrefix::RejectedInPriorRound => RejectCode::RejectedInPriorRound, }; Ok(code) } @@ -406,6 +412,10 @@ impl std::fmt::Display for RejectCode { f, "The block was rejected due to connectivity issues with the signer." ), + RejectCode::RejectedInPriorRound => write!( + f, + "The block was proposed before and rejected by the signer." + ), } } } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 9c4fc652a5..21a9a2f0b3 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -13,9 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . - -/// The stacker db module for communicating with the stackerdb contract -mod stackerdb; /// The stacks node client module for communicating with the stacks node pub(crate) mod stacks_client; @@ -25,7 +22,6 @@ use clarity::vm::errors::Error as ClarityError; use clarity::vm::types::serialization::SerializationError; use libstackerdb::Error as StackerDBError; use slog::slog_debug; -pub use stackerdb::*; pub use stacks_client::*; use stacks_common::codec::Error as CodecError; use stacks_common::debug; @@ -35,6 +31,16 @@ const BACKOFF_INITIAL_INTERVAL: u64 = 128; /// Backoff timer max interval in milliseconds const BACKOFF_MAX_INTERVAL: u64 = 16384; +/// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID +#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)] +pub struct SignerSlotID(pub u32); + +impl std::fmt::Display for SignerSlotID { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + #[derive(thiserror::Error, Debug)] /// Client error type pub enum ClientError { diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index b83964ac0d..ec89e0b007 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -30,6 +30,8 @@ pub mod config; pub mod monitoring; /// The primary runloop for the signer pub mod runloop; +/// The signer state module +pub mod signerdb; /// The traits module pub mod traits; /// The v0 implementation of the signer. This does not include WSTS support diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 275ec79e9a..7af3f38f88 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -263,7 +263,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo if signer.reward_cycle() == prior_reward_cycle { // The signers have been calculated for the next reward cycle. Update the current one debug!("{signer}: Next reward cycle ({reward_cycle}) signer set calculated. Reconfiguring current reward cycle signer."); - signer.update_next_signer_data(&new_signer_config); + signer.update_signer(&new_signer_config); } } } diff --git a/stacks-signer/src/v1/signerdb.rs b/stacks-signer/src/signerdb.rs similarity index 86% rename from stacks-signer/src/v1/signerdb.rs rename to stacks-signer/src/signerdb.rs index f6a2d93dae..4e45903097 100644 --- a/stacks-signer/src/v1/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -16,15 +16,64 @@ use std::path::Path; +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote}; use blockstack_lib::util_lib::db::{ query_row, sqlite_open, table_exists, u64_to_sql, Error as DBError, }; +use libsigner::BlockProposal; use rusqlite::{params, Connection, Error as SqliteError, OpenFlags, NO_PARAMS}; +use serde::{Deserialize, Serialize}; use slog::slog_debug; use stacks_common::debug; use stacks_common::util::hash::Sha512Trunc256Sum; +use wsts::net::NonceRequest; + +/// Additional Info about a proposed block +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct BlockInfo { + /// The block we are considering + pub block: NakamotoBlock, + /// The burn block height at which the block was proposed + pub burn_block_height: u64, + /// The reward cycle the block belongs to + pub reward_cycle: u64, + /// Our vote on the block if we have one yet + pub vote: Option, + /// Whether the block contents are valid + pub valid: Option, + /// The associated packet nonce request if we have one + pub nonce_request: Option, + /// Whether this block is already being signed over + pub signed_over: bool, +} + +impl From for BlockInfo { + fn from(value: BlockProposal) -> Self { + Self { + block: value.block, + burn_block_height: value.burn_height, + reward_cycle: value.reward_cycle, + vote: None, + valid: None, + nonce_request: None, + signed_over: false, + } + } +} +impl BlockInfo { + /// Create a new BlockInfo with an associated nonce request packet + pub fn new_with_request(block_proposal: BlockProposal, nonce_request: NonceRequest) -> Self { + let mut block_info = BlockInfo::from(block_proposal); + block_info.nonce_request = Some(nonce_request); + block_info.signed_over = true; + block_info + } -use crate::v1::signer::BlockInfo; + /// Return the block's signer signature hash + pub fn signer_signature_hash(&self) -> Sha512Trunc256Sum { + self.block.header.signer_signature_hash() + } +} /// This struct manages a SQLite database connection /// for the signer. diff --git a/stacks-signer/src/traits.rs b/stacks-signer/src/traits.rs index d398ee5c3d..0966c44a63 100644 --- a/stacks-signer/src/traits.rs +++ b/stacks-signer/src/traits.rs @@ -13,8 +13,8 @@ use crate::runloop::RunLoopCommand; pub trait Signer: Debug + Display { /// Create a new `Signer` instance fn new(config: SignerConfig) -> Self; - /// Update the `Signer` instance's next reward cycle data with the latest `SignerConfig` - fn update_next_signer_data(&mut self, next_signer_config: &SignerConfig); + /// Update the `Signer` instance's with the next reward cycle data `SignerConfig` + fn update_signer(&mut self, next_signer_config: &SignerConfig); /// Get the reward cycle of the signer fn reward_cycle(&self) -> u64; /// Process an event diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs index e891573df3..fa98082251 100644 --- a/stacks-signer/src/v0/mod.rs +++ b/stacks-signer/src/v0/mod.rs @@ -13,3 +13,8 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . + +/// The signer module for processing events +pub mod signer; +/// The stackerdb module for sending messages between signers and miners +pub mod stackerdb; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs new file mode 100644 index 0000000000..a2fd9c6122 --- /dev/null +++ b/stacks-signer/src/v0/signer.rs @@ -0,0 +1,318 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +use std::fmt::Debug; +use std::sync::mpsc::Sender; + +use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; +use clarity::types::chainstate::StacksPrivateKey; +use clarity::types::PrivateKey; +use libsigner::v0::messages::{BlockResponse, RejectCode, SignerMessage}; +use libsigner::{BlockProposal, SignerEvent}; +use slog::{slog_debug, slog_error, slog_warn}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::{debug, error, warn}; +use wsts::state_machine::OperationResult; + +use super::stackerdb::StackerDB; +use crate::client::{SignerSlotID, StacksClient}; +use crate::config::SignerConfig; +use crate::runloop::RunLoopCommand; +use crate::signerdb::{BlockInfo, SignerDb}; +use crate::traits::Signer as SignerTrait; + +/// The stacks signer registered for the reward cycle +#[derive(Debug)] +pub struct Signer { + /// The private key of the signer + private_key: StacksPrivateKey, + /// The stackerdb client + pub stackerdb: StackerDB, + /// Whether the signer is a mainnet signer or not + pub mainnet: bool, + /// The signer id + pub signer_id: u32, + /// The signer slot ids for the signers in the reward cycle + pub signer_slot_ids: Vec, + /// The addresses of other signers + pub signer_addresses: Vec, + /// The reward cycle this signer belongs to + pub reward_cycle: u64, + /// SignerDB for state management + pub signer_db: SignerDb, +} + +impl std::fmt::Display for Signer { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Cycle #{} Signer #{}", self.reward_cycle, self.signer_id,) + } +} + +impl SignerTrait for Signer { + /// Create a new signer from the given configuration + fn new(config: SignerConfig) -> Self { + Self::from(config) + } + + /// Refresh the next signer data from the given configuration data + fn update_signer(&mut self, _new_signer_config: &SignerConfig) { + // do nothing + } + /// Return the reward cycle of the signer + fn reward_cycle(&self) -> u64 { + self.reward_cycle + } + + /// Process the event + fn process_event( + &mut self, + stacks_client: &StacksClient, + event: Option<&SignerEvent>, + _res: Sender>, + current_reward_cycle: u64, + ) { + let event_parity = match event { + Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2), + // Block proposal events do have reward cycles, but each proposal has its own cycle, + // and the vec could be heterogenous, so, don't differentiate. + Some(SignerEvent::MinerMessages(..)) + | Some(SignerEvent::NewBurnBlock(_)) + | Some(SignerEvent::StatusCheck) + | None => None, + Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), + }; + let other_signer_parity = (self.reward_cycle + 1) % 2; + if event_parity == Some(other_signer_parity) { + return; + } + debug!("{self}: Processing event: {event:?}"); + match event { + Some(SignerEvent::BlockValidationResponse(block_validate_response)) => { + debug!("{self}: Received a block proposal result from the stacks node..."); + self.handle_block_validate_response(block_validate_response) + } + Some(SignerEvent::SignerMessages(_signer_set, messages)) => { + debug!( + "{self}: Received {} messages from the other signers. Ignoring...", + messages.len() + ); + } + Some(SignerEvent::MinerMessages(messages, _)) => { + if current_reward_cycle != self.reward_cycle { + // There is not point in processing blocks if we are not the current reward cycle (we can never actually contribute to signing these blocks) + debug!("{self}: Received a proposed block, but this signer's reward cycle is not the current one ({current_reward_cycle}). Ignoring..."); + return; + } + debug!( + "{self}: Received {} messages from the miner", + messages.len(); + ); + for message in messages { + if let SignerMessage::BlockProposal(block_proposal) = message { + self.handle_block_proposal(stacks_client, block_proposal); + } + } + } + Some(SignerEvent::StatusCheck) => { + debug!("{self}: Received a status check event.") + } + Some(SignerEvent::NewBurnBlock(height)) => { + debug!("{self}: Receved a new burn block event for block height {height}") + } + None => { + // No event. Do nothing. + debug!("{self}: No event received") + } + } + } + + fn process_command( + &mut self, + _stacks_client: &StacksClient, + _current_reward_cycle: u64, + command: Option, + ) { + if let Some(command) = command { + warn!("{self}: Received a command: {command:?}. V0 Signers do not support commands. Ignoring...") + } + } +} + +impl From for Signer { + fn from(signer_config: SignerConfig) -> Self { + let stackerdb = StackerDB::from(&signer_config); + debug!( + "Reward cycle #{} Signer #{}", + signer_config.reward_cycle, signer_config.signer_id, + ); + let signer_db = + SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); + + Self { + private_key: signer_config.stacks_private_key, + stackerdb, + mainnet: signer_config.mainnet, + signer_id: signer_config.signer_id, + signer_addresses: signer_config + .signer_entries + .signer_ids + .into_keys() + .collect(), + signer_slot_ids: signer_config.signer_slot_ids.clone(), + reward_cycle: signer_config.reward_cycle, + signer_db, + } + } +} + +impl Signer { + /// Determine this signers response to a proposed block + /// Returns a BlockResponse if we have already validated the block + /// Returns None otherwise + fn determine_response(&self, block_info: &mut BlockInfo) -> Option { + let valid = block_info.valid?; + let response = if valid { + debug!("{self}: Accepting block {}", block_info.block.block_id()); + let signature = self + .private_key + .sign(&block_info.signer_signature_hash().0) + .expect("Failed to sign block"); + BlockResponse::accepted(block_info.signer_signature_hash(), signature) + } else { + debug!("{self}: Rejecting block {}", block_info.block.block_id()); + BlockResponse::rejected( + block_info.signer_signature_hash(), + RejectCode::RejectedInPriorRound, + ) + }; + Some(response) + } + + /// Handle block proposal messages submitted to signers stackerdb + fn handle_block_proposal( + &mut self, + stacks_client: &StacksClient, + block_proposal: &BlockProposal, + ) { + debug!("{self}: Received a block proposal: {block_proposal:?}"); + if block_proposal.reward_cycle != self.reward_cycle { + // We are not signing for this reward cycle. Reject the block + warn!( + "{self}: Received a block proposal for a different reward cycle. Ignore it."; + "requested_reward_cycle" => block_proposal.reward_cycle, + ); + return; + } + // TODO: could add a check to ignore an old burn block height if we know its oudated. Would require us to store the burn block height we last saw on the side. + let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); + let Some(mut block_info) = self + .signer_db + .block_lookup(self.reward_cycle, &signer_signature_hash) + .expect("Failed to connect to signer DB") + else { + debug!( + "{self}: received a block proposal for a new block. Submit block for validation. "; + "signer_sighash" => %signer_signature_hash, + ); + let block_info = BlockInfo::from(block_proposal.clone()); + stacks_client + .submit_block_for_validation(block_info.block.clone()) + .unwrap_or_else(|e| { + warn!("{self}: Failed to submit block for validation: {e:?}",); + }); + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + return; + }; + if let Some(block_response) = self.determine_response(&mut block_info) { + // Submit a proposal response to the .signers contract for miners + warn!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); + if let Err(e) = self + .stackerdb + .send_message_with_retry(block_response.into()) + { + warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + } + } + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + } + + /// Handle the block validate response returned from our prior calls to submit a block for validation + fn handle_block_validate_response(&mut self, block_validate_response: &BlockValidateResponse) { + debug!("{self}: Received a block validate response: {block_validate_response:?}"); + let (message, block_info) = match block_validate_response { + BlockValidateResponse::Ok(block_validate_ok) => { + crate::monitoring::increment_block_validation_responses(true); + let signer_signature_hash = block_validate_ok.signer_signature_hash; + // For mutability reasons, we need to take the block_info out of the map and add it back after processing + let mut block_info = match self + .signer_db + .block_lookup(self.reward_cycle, &signer_signature_hash) + { + Ok(Some(block_info)) => block_info, + Ok(None) => { + // We have not seen this block before. Why are we getting a response for it? + debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); + return; + } + Err(e) => { + error!("{self}: Failed to lookup block in signer db: {e:?}",); + return; + } + }; + block_info.valid = Some(true); + let signature = self + .private_key + .sign(&signer_signature_hash.0) + .expect("Failed to sign block"); + ( + BlockResponse::accepted(signer_signature_hash, signature).into(), + block_info, + ) + } + BlockValidateResponse::Reject(block_validate_reject) => { + crate::monitoring::increment_block_validation_responses(false); + let signer_signature_hash = block_validate_reject.signer_signature_hash; + let mut block_info = match self + .signer_db + .block_lookup(self.reward_cycle, &signer_signature_hash) + { + Ok(Some(block_info)) => block_info, + Ok(None) => { + // We have not seen this block before. Why are we getting a response for it? + debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); + return; + } + Err(e) => { + error!("{self}: Failed to lookup block in signer db: {e:?}"); + return; + } + }; + block_info.valid = Some(false); + (block_validate_reject.clone().into(), block_info) + } + }; + // Submit a proposal response to the .signers contract for miners + warn!("{self}: Broadcasting a block response to stacks node: {message:?}"); + if let Err(e) = self.stackerdb.send_message_with_retry(message) { + warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + } + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + } +} diff --git a/stacks-signer/src/v0/stackerdb.rs b/stacks-signer/src/v0/stackerdb.rs new file mode 100644 index 0000000000..15c2c7add2 --- /dev/null +++ b/stacks-signer/src/v0/stackerdb.rs @@ -0,0 +1,268 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +// +use blockstack_lib::net::api::poststackerdbchunk::StackerDBErrorCodes; +use hashbrown::HashMap; +use libsigner::v0::messages::{MessageSlotID, SignerMessage}; +use libsigner::{SignerSession, StackerDBSession}; +use libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; +use slog::{slog_debug, slog_warn}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::{debug, warn}; + +use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID}; +use crate::config::SignerConfig; + +/// The StackerDB client for communicating with the .signers contract +#[derive(Debug)] +pub struct StackerDB { + /// The stacker-db sessions for each signer set and message type. + /// Maps message ID to the DB session. + signers_message_stackerdb_sessions: HashMap, + /// The private key used in all stacks node communications + stacks_private_key: StacksPrivateKey, + /// A map of a message ID to last chunk version for each session + slot_versions: HashMap>, + /// The signer slot ID -- the index into the signer list for this signer daemon's signing key. + signer_slot_id: SignerSlotID, + /// The reward cycle of the connecting signer + reward_cycle: u64, +} + +impl From<&SignerConfig> for StackerDB { + fn from(config: &SignerConfig) -> Self { + Self::new( + &config.node_host, + config.stacks_private_key, + config.mainnet, + config.reward_cycle, + config.signer_slot_id, + ) + } +} +impl StackerDB { + /// Create a new StackerDB client + pub fn new( + host: &str, + stacks_private_key: StacksPrivateKey, + is_mainnet: bool, + reward_cycle: u64, + signer_slot_id: SignerSlotID, + ) -> Self { + let mut signers_message_stackerdb_sessions = HashMap::new(); + for msg_id in MessageSlotID::ALL { + signers_message_stackerdb_sessions.insert( + *msg_id, + StackerDBSession::new(host, msg_id.stacker_db_contract(is_mainnet, reward_cycle)), + ); + } + + Self { + signers_message_stackerdb_sessions, + stacks_private_key, + slot_versions: HashMap::new(), + signer_slot_id, + reward_cycle, + } + } + + /// Sends messages to the .signers stacker-db with an exponential backoff retry + pub fn send_message_with_retry( + &mut self, + message: SignerMessage, + ) -> Result { + let msg_id = message.msg_id(); + let message_bytes = message.serialize_to_vec(); + self.send_message_bytes_with_retry(&msg_id, message_bytes) + } + + /// Sends message (as a raw msg ID and bytes) to the .signers stacker-db with an + /// exponential backoff retry + pub fn send_message_bytes_with_retry( + &mut self, + msg_id: &MessageSlotID, + message_bytes: Vec, + ) -> Result { + let slot_id = self.signer_slot_id; + loop { + let mut slot_version = if let Some(versions) = self.slot_versions.get_mut(msg_id) { + if let Some(version) = versions.get(&slot_id) { + *version + } else { + versions.insert(slot_id, 0); + 1 + } + } else { + let mut versions = HashMap::new(); + versions.insert(slot_id, 0); + self.slot_versions.insert(*msg_id, versions); + 1 + }; + + let mut chunk = StackerDBChunkData::new(slot_id.0, slot_version, message_bytes.clone()); + chunk.sign(&self.stacks_private_key)?; + + let Some(session) = self.signers_message_stackerdb_sessions.get_mut(msg_id) else { + panic!("FATAL: would loop forever trying to send a message with ID {}, for which we don't have a session", msg_id); + }; + + debug!( + "Sending a chunk to stackerdb slot ID {slot_id} with version {slot_version} and message ID {msg_id} to contract {:?}!\n{chunk:?}", + &session.stackerdb_contract_id + ); + + let send_request = || session.put_chunk(&chunk).map_err(backoff::Error::transient); + let chunk_ack: StackerDBChunkAckData = retry_with_exponential_backoff(send_request)?; + + if let Some(versions) = self.slot_versions.get_mut(msg_id) { + // NOTE: per the above, this is always executed + versions.insert(slot_id, slot_version.saturating_add(1)); + } else { + return Err(ClientError::NotConnected); + } + + if chunk_ack.accepted { + debug!("Chunk accepted by stackerdb: {chunk_ack:?}"); + return Ok(chunk_ack); + } else { + warn!("Chunk rejected by stackerdb: {chunk_ack:?}"); + } + if let Some(code) = chunk_ack.code { + match StackerDBErrorCodes::from_code(code) { + Some(StackerDBErrorCodes::DataAlreadyExists) => { + if let Some(slot_metadata) = chunk_ack.metadata { + warn!("Failed to send message to stackerdb due to wrong version number. Attempted {}. Expected {}. Retrying...", slot_version, slot_metadata.slot_version); + slot_version = slot_metadata.slot_version; + } else { + warn!("Failed to send message to stackerdb due to wrong version number. Attempted {}. Expected unknown version number. Incrementing and retrying...", slot_version); + } + if let Some(versions) = self.slot_versions.get_mut(msg_id) { + // NOTE: per the above, this is always executed + versions.insert(slot_id, slot_version.saturating_add(1)); + } else { + return Err(ClientError::NotConnected); + } + } + _ => { + warn!("Failed to send message to stackerdb: {:?}", chunk_ack); + return Err(ClientError::PutChunkRejected( + chunk_ack + .reason + .unwrap_or_else(|| "No reason given".to_string()), + )); + } + } + } + } + } + + /// Retrieve the signer set this stackerdb client is attached to + pub fn get_signer_set(&self) -> u32 { + u32::try_from(self.reward_cycle % 2).expect("FATAL: reward cycle % 2 exceeds u32::MAX") + } + + /// Retrieve the signer slot ID + pub fn get_signer_slot_id(&mut self) -> SignerSlotID { + self.signer_slot_id + } +} + +#[cfg(test)] +mod tests { + use std::thread::spawn; + use std::time::Duration; + + use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; + use blockstack_lib::chainstate::stacks::ThresholdSignature; + use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; + use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; + use clarity::util::secp256k1::MessageSignature; + use libsigner::BlockProposal; + use rand::{thread_rng, RngCore}; + use stacks_common::bitvec::BitVec; + + use super::*; + use crate::client::tests::{generate_signer_config, mock_server_from_config, write_response}; + use crate::config::{build_signer_config_tomls, GlobalConfig, Network}; + + #[test] + fn send_signer_message_should_succeed() { + let signer_config = build_signer_config_tomls( + &[StacksPrivateKey::new()], + "localhost:20443", + Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. + &Network::Testnet, + "1234", + 16, + 3000, + Some(100_000), + None, + Some(9000), + ); + let config = GlobalConfig::load_from_str(&signer_config[0]).unwrap(); + let signer_config = generate_signer_config(&config, 5, 20); + let mut stackerdb = StackerDB::from(&signer_config); + + let header = NakamotoBlockHeader { + version: 1, + chain_length: 2, + burn_spent: 3, + consensus_hash: ConsensusHash([0x04; 20]), + parent_block_id: StacksBlockId([0x05; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), + state_index_root: TrieHash([0x07; 32]), + miner_signature: MessageSignature::empty(), + signer_signature: ThresholdSignature::empty(), + signer_bitvec: BitVec::zeros(1).unwrap(), + }; + let mut block = NakamotoBlock { + header, + txs: vec![], + }; + let tx_merkle_root = { + let txid_vecs = block + .txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + MerkleTree::::new(&txid_vecs).root() + }; + block.header.tx_merkle_root = tx_merkle_root; + + let block_proposal = BlockProposal { + block, + burn_height: thread_rng().next_u64(), + reward_cycle: thread_rng().next_u64(), + }; + let signer_message = SignerMessage::BlockProposal(block_proposal); + let ack = StackerDBChunkAckData { + accepted: true, + reason: None, + metadata: None, + code: None, + }; + let mock_server = mock_server_from_config(&config); + let h = spawn(move || stackerdb.send_message_with_retry(signer_message)); + let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); + let payload = serde_json::to_string(&ack).expect("Failed to serialize ack"); + response_bytes.extend(payload.as_bytes()); + std::thread::sleep(Duration::from_millis(500)); + write_response(mock_server, response_bytes.as_slice()); + assert_eq!(ack, h.join().unwrap().unwrap()); + } +} diff --git a/stacks-signer/src/v1/mod.rs b/stacks-signer/src/v1/mod.rs index 7c2477cf2d..ac0a25f623 100644 --- a/stacks-signer/src/v1/mod.rs +++ b/stacks-signer/src/v1/mod.rs @@ -18,8 +18,8 @@ pub mod coordinator; /// The signer module for processing events pub mod signer; -/// The state module for the signer -pub mod signerdb; +/// The stackerdb module for sending messages between signers and miners +pub mod stackerdb; use std::sync::mpsc::{channel, Receiver, Sender}; diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index a2cb010382..eb1e0827f7 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -32,7 +32,6 @@ use libsigner::v1::messages::{ }; use libsigner::{BlockProposal, SignerEvent}; use rand_core::OsRng; -use serde_derive::{Deserialize, Serialize}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; @@ -53,59 +52,13 @@ use wsts::state_machine::{OperationResult, SignError}; use wsts::traits::Signer as _; use wsts::v2; -use crate::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; +use super::stackerdb::StackerDB; +use crate::client::{ClientError, SignerSlotID, StacksClient}; use crate::config::SignerConfig; use crate::runloop::{RunLoopCommand, SignerCommand}; +use crate::signerdb::{BlockInfo, SignerDb}; use crate::traits::Signer as SignerTrait; use crate::v1::coordinator::CoordinatorSelector; -use crate::v1::signerdb::SignerDb; - -/// Additional Info about a proposed block -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct BlockInfo { - /// The block we are considering - pub block: NakamotoBlock, - /// The burn block height at which the block was proposed - pub burn_block_height: u64, - /// The reward cycle the block belongs to - pub reward_cycle: u64, - /// Our vote on the block if we have one yet - pub vote: Option, - /// Whether the block contents are valid - valid: Option, - /// The associated packet nonce request if we have one - nonce_request: Option, - /// Whether this block is already being signed over - pub signed_over: bool, -} - -impl From for BlockInfo { - fn from(value: BlockProposal) -> Self { - Self { - block: value.block, - burn_block_height: value.burn_height, - reward_cycle: value.reward_cycle, - vote: None, - valid: None, - nonce_request: None, - signed_over: false, - } - } -} -impl BlockInfo { - /// Create a new BlockInfo with an associated nonce request packet - pub fn new_with_request(block_proposal: BlockProposal, nonce_request: NonceRequest) -> Self { - let mut block_info = BlockInfo::from(block_proposal); - block_info.nonce_request = Some(nonce_request); - block_info.signed_over = true; - block_info - } - - /// Return the block's signer signature hash - pub fn signer_signature_hash(&self) -> Sha512Trunc256Sum { - self.block.header.signer_signature_hash() - } -} /// The specific operations that a signer can perform #[derive(PartialEq, Eq, Debug, Clone)] @@ -188,8 +141,9 @@ impl SignerTrait for Signer { fn new(config: SignerConfig) -> Self { Self::from(config) } + /// Refresh the next signer data from the given configuration data - fn update_next_signer_data(&mut self, new_signer_config: &SignerConfig) { + fn update_signer(&mut self, new_signer_config: &SignerConfig) { self.next_signer_addresses = new_signer_config .signer_entries .signer_ids diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/v1/stackerdb.rs similarity index 97% rename from stacks-signer/src/client/stackerdb.rs rename to stacks-signer/src/v1/stackerdb.rs index f23679b099..69fcc1d829 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/v1/stackerdb.rs @@ -26,20 +26,9 @@ use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::{debug, error, warn}; use wsts::net::Packet; -use super::ClientError; -use crate::client::retry_with_exponential_backoff; +use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID}; use crate::config::SignerConfig; -/// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID -#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)] -pub struct SignerSlotID(pub u32); - -impl std::fmt::Display for SignerSlotID { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - /// The StackerDB client for communicating with the .signers contract #[derive(Debug)] pub struct StackerDB { diff --git a/testnet/stacks-node/src/tests/signer.rs b/testnet/stacks-node/src/tests/signer.rs index 08cb254ec8..8719324a55 100644 --- a/testnet/stacks-node/src/tests/signer.rs +++ b/testnet/stacks-node/src/tests/signer.rs @@ -37,10 +37,11 @@ use stacks_common::types::chainstate::{ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; -use stacks_signer::client::{SignerSlotID, StackerDB, StacksClient}; +use stacks_signer::client::{SignerSlotID, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{RunLoopCommand, SignerCommand}; use stacks_signer::v1::coordinator::CoordinatorSelector; +use stacks_signer::v1::stackerdb::StackerDB; use stacks_signer::v1::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; From 6e373a10603114d693f41d221ed5041a79de24de Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 14 May 2024 18:11:25 +0300 Subject: [PATCH 0005/1400] update message as reminder --- stacks-signer/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 4ecbc12ae6..8ce737308d 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -82,7 +82,7 @@ fn spawn_running_signer(path: &PathBuf) -> SpawnedSigner { let config = GlobalConfig::try_from(path).unwrap(); let endpoint = config.endpoint; warn!( - "The signer is primarily designed for use with a local stacks node. \ + "Reminder: The signer is primarily designed for use with a local or subnet network stacks node. \ It's important to exercise caution if you are communicating with an external node, \ as this could potentially expose sensitive data or functionalities to security risks \ if additional proper security checks are not integrated in place." From ba091656ab61c04d4617e541152be569711173aa Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 14 May 2024 18:54:47 +0300 Subject: [PATCH 0006/1400] add comments to functions for highlighting missing mutants --- Cargo.lock | 1 + clarity/src/vm/ast/parser/v1.rs | 10 ++++++++ clarity/src/vm/ast/parser/v2/mod.rs | 20 +++++++++++++++ clarity/src/vm/ast/sugar_expander/mod.rs | 10 ++++++++ clarity/src/vm/costs/mod.rs | 10 +++++--- clarity/src/vm/types/mod.rs | 12 ++++----- libsigner/Cargo.toml | 1 + libsigner/src/events.rs | 4 +++ pox-locking/Cargo.toml | 1 + pox-locking/src/events.rs | 3 +++ stackslib/Cargo.toml | 1 + stackslib/src/burnchains/db.rs | 3 +++ stackslib/src/burnchains/mod.rs | 3 +++ stackslib/src/chainstate/burn/db/sortdb.rs | 7 ++++++ .../chainstate/burn/operations/stack_stx.rs | 11 ++++++++ stackslib/src/chainstate/coordinator/mod.rs | 3 +++ stackslib/src/chainstate/stacks/boot/mod.rs | 15 +++++++++++ stackslib/src/chainstate/stacks/db/blocks.rs | 16 ++++++++++++ stackslib/src/chainstate/stacks/miner.rs | 25 +++++++++++++++++++ stackslib/src/main.rs | 2 ++ testnet/stacks-node/Cargo.toml | 1 + .../burnchains/bitcoin_regtest_controller.rs | 25 +++++++++++++++++++ testnet/stacks-node/src/config.rs | 7 ++++++ .../stacks-node/src/nakamoto_node/miner.rs | 12 +++++++++ testnet/stacks-node/src/neon_node.rs | 13 ++++++++++ testnet/stacks-node/src/tenure.rs | 4 +++ 26 files changed, 211 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fa89992f8e..86c318f3b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2449,6 +2449,7 @@ name = "pox-locking" version = "2.4.0" dependencies = [ "clarity", + "mutants", "slog", "stacks-common", ] diff --git a/clarity/src/vm/ast/parser/v1.rs b/clarity/src/vm/ast/parser/v1.rs index 75c5ea2df9..65f9b66b11 100644 --- a/clarity/src/vm/ast/parser/v1.rs +++ b/clarity/src/vm/ast/parser/v1.rs @@ -508,6 +508,16 @@ fn handle_expression( } } +// TODO: #4587 create default for `SymbolicExpression`, then check if mutation tests are caught for these cases: +// ParseResult::from(vec![Default::default()]) +// ParseResult::from(vec![]) +// ParseResult::new() +// ParseResult::from_iter([vec![Default::default()]]) +// ParseResult::new(vec![Default::default()]) +// ParseResult::new(vec![]) +// ParseResult::from_iter([vec![]]) +// Or keep the skip and remove the comment +#[cfg_attr(test, mutants::skip)] pub fn parse_lexed(input: Vec<(LexItem, u32, u32)>) -> ParseResult> { let mut parse_stack = Vec::new(); diff --git a/clarity/src/vm/ast/parser/v2/mod.rs b/clarity/src/vm/ast/parser/v2/mod.rs index a7ba4eb3c8..8f039eedb1 100644 --- a/clarity/src/vm/ast/parser/v2/mod.rs +++ b/clarity/src/vm/ast/parser/v2/mod.rs @@ -206,6 +206,16 @@ impl<'a> Parser<'a> { } } + // TODO: #4587 create default for `SymbolicExpression`, then check if mutation tests are caught for these cases: + // ParseResult::from_iter([Some(Default::default())]) + // ParseResult::new(None) + // ParseResult::from_iter([None]) + // ParseResult::new(Some(Default::default())) + // ParseResult::from(None) + // ParseResult::from(Some(Default::default())) + // ParseResult::new() + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Process a new child node for an AST expression that is open and waiting for children nodes. For example, /// a list or tuple expression that is waiting for child expressions. /// @@ -275,6 +285,16 @@ impl<'a> Parser<'a> { } } + // TODO: #4587 create default for `SymbolicExpression`, then check if mutation tests are caught for these cases: + // ParseResult::from_iter([Some(Default::default())]) + // ParseResult::new(None) + // ParseResult::from_iter([None]) + // ParseResult::new(Some(Default::default())) + // ParseResult::from(None) + // ParseResult::from(Some(Default::default())) + // ParseResult::new() + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] fn handle_open_tuple( &mut self, open_tuple: &mut OpenTuple, diff --git a/clarity/src/vm/ast/sugar_expander/mod.rs b/clarity/src/vm/ast/sugar_expander/mod.rs index 0f28093932..01528700fa 100644 --- a/clarity/src/vm/ast/sugar_expander/mod.rs +++ b/clarity/src/vm/ast/sugar_expander/mod.rs @@ -58,6 +58,16 @@ impl SugarExpander { Ok(()) } + // TODO: #4587 create default for `SymbolicExpression`, then check if mutation tests are caught for these cases: + // ParseResult::from(vec![Default::default()]) + // ParseResult::from(vec![]) + // ParseResult::new() + // ParseResult::from_iter([vec![Default::default()]]) + // ParseResult::new(vec![Default::default()]) + // ParseResult::new(vec![]) + // ParseResult::from_iter([vec![]]) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] pub fn transform( &self, pre_exprs_iter: PreExpressionsDrain, diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 744b605691..cca71cb55c 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -783,12 +783,12 @@ impl LimitedCostTracker { } impl TrackerData { + // TODO: #4587 add test for Err cases + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// `apply_updates` - tells this function to look for any changes in the cost voting contract /// which would need to be applied. if `false`, just load the last computed cost state in this /// fork. - /// TODO: #4587 add test for Err cases - /// Or keep the skip and remove the comment - #[cfg_attr(test, mutants::skip)] fn load_costs(&mut self, clarity_db: &mut ClarityDatabase, apply_updates: bool) -> Result<()> { clarity_db.begin(); let epoch_id = clarity_db @@ -958,6 +958,10 @@ fn parse_cost( } } +// TODO: #4587 create default for `ExecutionCost`, then check if mutation tests are caught for these case: +// Ok(Default::default()) +// Or keep the skip and remove the comment +#[cfg_attr(test, mutants::skip)] fn compute_cost( cost_tracker: &mut TrackerData, cost_function_reference: ClarityCostFunctionReference, diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 46734dcc51..e8f3be42b4 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -1529,9 +1529,9 @@ impl TupleData { self.data_map.is_empty() } - ///TODO: #4587 create default for TupleData, then check if the mutation tests are caught for the case: - /// Ok((Default::default())) - /// Or keep the skip and remove the comment + // TODO: #4587 create default for TupleData, then check if the mutation tests are caught for the case: + // Ok((Default::default())) + // Or keep the skip and remove the comment #[cfg_attr(test, mutants::skip)] pub fn from_data(data: Vec<(ClarityName, Value)>) -> Result { let mut type_map = BTreeMap::new(); @@ -1549,9 +1549,9 @@ impl TupleData { Self::new(TupleTypeSignature::try_from(type_map)?, data_map) } - ///TODO: #4587 create default for TupleData, then check if the mutation tests are caught for the case: - /// Ok((Default::default())) - /// Or keep the skip and remove the comment + // TODO: #4587 create default for TupleData, then check if the mutation tests are caught for the case: + // Ok((Default::default())) + // Or keep the skip and remove the comment #[cfg_attr(test, mutants::skip)] pub fn from_data_typed( epoch: &StacksEpochId, diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index 7da9801674..2f86d48cf8 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -33,6 +33,7 @@ stackslib = { path = "../stackslib"} thiserror = "1.0" tiny_http = "0.12" wsts = { workspace = true } +mutants = "0.0.3" [dev-dependencies] mutants = "0.0.3" diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 67eb970574..5de1c98f52 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -366,6 +366,10 @@ fn ack_dispatcher(request: HttpRequest) { }; } +// TODO: #4587 create default for `SignerEvent` and `EventError`, then check if mutation tests are caught for these case: +// Ok(Default::default()) +// Or keep the skip and remove the comment +#[cfg_attr(test, mutants::skip)] /// Process a stackerdb event from the node fn process_stackerdb_event( local_addr: Option, diff --git a/pox-locking/Cargo.toml b/pox-locking/Cargo.toml index fd2729048d..199a58d18e 100644 --- a/pox-locking/Cargo.toml +++ b/pox-locking/Cargo.toml @@ -22,6 +22,7 @@ path = "src/lib.rs" clarity = { package = "clarity", path = "../clarity" } stacks_common = { package = "stacks-common", path = "../stacks-common" } slog = { version = "2.5.2", features = [ "max_level_trace" ] } +mutants = "0.0.3" [features] slog_json = ["stacks_common/slog_json", "clarity/slog_json"] diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index 49d1ba90b2..2146a15688 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -105,6 +105,9 @@ fn create_event_info_aggregation_code(function_name: &str) -> String { ) } +// TODO: #4587 add tests for `String::new()` and `"xyzzy".into()` returning case. +// Or keep the skip and remove the comment +#[cfg_attr(test, mutants::skip)] /// Craft the code snippet to generate the method-specific `data` payload fn create_event_info_data_code( function_name: &str, diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index be75337115..7c254f0384 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -93,6 +93,7 @@ features = ["serde"] [dependencies.time] version = "0.2.23" features = ["std"] +mutants = "0.0.3" [dev-dependencies] assert-json-diff = "1.0.0" diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 3171ec3c98..1c6d0d2c61 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -946,6 +946,9 @@ impl<'a> BurnchainDBTransaction<'a> { BurnchainDB::inner_get_canonical_chain_tip(&self.sql_tx) } + // TODO: #4587 add tests for `Ok(())` returning case + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// You'd only do this in network emergencies, where node operators are expected to declare an /// anchor block missing (or present). Ideally there'd be a smart contract somewhere for this. pub fn set_override_affirmation_map( diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 23dc50f62c..5f47ddb5ed 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -468,6 +468,9 @@ impl PoxConstants { ) // total liquid supply is 40000000000000000 µSTX } + // TODO: #4587 create default for PoxConstants, then check if the mutation tests are caught: + // Default::default() + #[cfg_attr(test, mutants::skip)] pub fn regtest_default() -> PoxConstants { PoxConstants::new( 5, diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e3802d6ec1..64ed6b9772 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -307,6 +307,10 @@ impl FromRow for LeaderBlockCommitOp { } impl FromRow for StackStxOp { + // TODO: #4587 create default for `StackStxOp`, then check if mutation tests are caught for these case: + // Ok(Default::default()) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] fn from_row<'a>(row: &'a Row) -> Result { let txid = Txid::from_column(row, "txid")?; let vtxindex: u32 = row.get_unwrap("vtxindex"); @@ -5128,6 +5132,9 @@ impl SortitionDB { query_row(conn, sql, args) } + // TODO: #4587 add test for the `None` case returning Ok(false) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Are microblocks disabled by Epoch 2.5 at the height specified /// in `at_burn_height`? pub fn are_microblocks_disabled(conn: &DBConn, at_burn_height: u64) -> Result { diff --git a/stackslib/src/chainstate/burn/operations/stack_stx.rs b/stackslib/src/chainstate/burn/operations/stack_stx.rs index 20dca3187a..3e213e8811 100644 --- a/stackslib/src/chainstate/burn/operations/stack_stx.rs +++ b/stackslib/src/chainstate/burn/operations/stack_stx.rs @@ -181,6 +181,10 @@ impl StackStxOp { } } + // TODO: #4587 create default for `ParsedData`, then check if mutation tests are caught for these case: + // Some(Default::default()) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] fn parse_data(data: &Vec) -> Option { /* Wire format: @@ -271,6 +275,10 @@ impl StackStxOp { ) } + // TODO: #4587 create default for `StackStxOp` and `op_error`, then check if mutation tests are caught for these case: + // Ok(Default::default()) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// parse a StackStxOp /// `pox_sunset_ht` is the height at which PoX *disables* pub fn parse_from_tx( @@ -395,6 +403,9 @@ impl StacksMessageCodec for StackStxOp { } impl StackStxOp { + // TODO: #4587 add tests for `Ok(())` returning case. + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] pub fn check(&self) -> Result<(), op_error> { if self.stacked_ustx == 0 { warn!("Invalid StackStxOp, must have positive ustx"); diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 96eae44641..c34eaaafab 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -2414,6 +2414,9 @@ impl< return false; } + // TODO: #4587,create default for BlockHeaderHash, then check if mutation tests are caught for these cases: + // Ok(Some(Default::default)) + #[cfg_attr(test, mutants::skip)] /// Handle a new burnchain block, optionally rolling back the canonical PoX sortition history /// and setting it up to be replayed in the event the network affirms a different history. If /// this happens, *and* if re-processing the new affirmed history is *blocked on* the diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 06b448cd21..a5a3da7899 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -423,6 +423,9 @@ impl StacksChainState { result } + // TODO: #4587 create default for StacksTransactionEvent , then check if mutation tests are caught for these cases: + // Ok(vec![Default::default()]) + #[cfg_attr(test, mutants::skip)] /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. /// @@ -435,6 +438,9 @@ impl StacksChainState { Self::handle_pox_cycle_missed_unlocks(clarity, cycle_number, cycle_info, &PoxVersions::Pox2) } + // TODO: #4587 create default for StacksTransactionEvent , then check if mutation tests are caught for these cases: + // Ok(vec![Default::default()]) + #[cfg_attr(test, mutants::skip)] /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. /// @@ -447,6 +453,9 @@ impl StacksChainState { Self::handle_pox_cycle_missed_unlocks(clarity, cycle_number, cycle_info, &PoxVersions::Pox3) } + // TODO: #4587 create default for StacksTransactionEvent , then check if mutation tests are caught for these cases: + // Ok(vec![Default::default()]) + #[cfg_attr(test, mutants::skip)] /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. /// @@ -460,6 +469,9 @@ impl StacksChainState { Ok(vec![]) } + // TODO: #4587 create default for StacksTransactionEvent , then check if mutation tests are caught for these cases: + // Ok(vec![Default::default()]) + #[cfg_attr(test, mutants::skip)] /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. /// @@ -752,6 +764,9 @@ impl StacksChainState { Some(signer_set) } + // TODO: #4587 create default for RewardSet , then check if mutation tests are caught for these cases: + // Default::default() + #[cfg_attr(test, mutants::skip)] /// Given a threshold and set of registered addresses, return a reward set where /// every entry address has stacked more than the threshold, and addresses /// are repeated floor(stacked_amt / threshold) times. diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index dd70fcfb01..160fdcd74b 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -4132,6 +4132,10 @@ impl StacksChainState { Ok((applied, receipts)) } + // TODO: #4587 create default for `StacksTransactionReceipt`, then check if mutation tests are caught for these case: + // vec![Default::default()] + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Process any Stacking-related bitcoin operations /// that haven't been processed in this Stacks fork yet. pub fn process_stacking_ops( @@ -4237,6 +4241,11 @@ impl StacksChainState { all_receipts } + // TODO: #4587 add test for `Ok(vec![])` returning case. + // TODO: #4587 create default for `Value`, then check if mutation tests are caught for these case: + // Ok(vec![Default::default()]) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] pub fn collect_pox_4_stacking_args(op: &StackStxOp) -> Result, String> { let signer_key = match op.signer_key { Some(signer_key) => match Value::buff_from(signer_key.as_bytes().to_vec()) { @@ -6021,6 +6030,13 @@ impl StacksChainState { Ok(next_microblocks) } + // TODO: #4587 add default for `StacksEpochReceipt` and `TransactionPayload`, then check if mutation tests are caught for these cases: + // Ok((None, Some(Default::default()))) + // Ok((Some(Default::default()), None)) + // Ok((Some(Default::default()), Some(Default::default()))) + // This is caught: Ok(None, None) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Find and process the next staging block. /// Return the next chain tip if we processed this block, or None if we couldn't. /// Return a poison microblock transaction payload if the microblock stream contains a diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index f718a9fb36..516ceb8f34 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -170,6 +170,10 @@ pub struct BlockBuilderSettings { } impl BlockBuilderSettings { + // TODO: #4587 create default for BlockBuilderSettings, then check if mutation tests are caught for these case: + // Default::default() + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] pub fn limited() -> BlockBuilderSettings { BlockBuilderSettings { max_miner_time_ms: u64::MAX, @@ -179,6 +183,10 @@ impl BlockBuilderSettings { } } + // TODO: #4587 create default for BlockBuilderSettings, then check if mutation tests are caught for these case: + // Default::default() + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] pub fn max_value() -> BlockBuilderSettings { BlockBuilderSettings { max_miner_time_ms: u64::MAX, @@ -1793,6 +1801,10 @@ impl StacksBlockBuilder { } } + // TODO: #4587 create default for MinerEpochInfo, then check if mutation tests are caught for these case: + // Ok(Default::default()) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// This function should be called before `epoch_begin`. /// It loads the parent microblock stream, sets the parent microblock, and returns /// data necessary for `epoch_begin`. @@ -2047,6 +2059,10 @@ impl StacksBlockBuilder { Ok((block, size, cost, mblock_opt)) } + // TODO: #4587 create default for `StacksBlockBuilder`, then check if mutation tests are caught for these case: + // Ok(Default::default()) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Create a block builder for mining pub fn make_block_builder( burnchain: &Burnchain, @@ -2101,6 +2117,10 @@ impl StacksBlockBuilder { Ok(builder) } + // TODO: #4587 create default for `StacksBlockBuilder`, then check if mutation tests are caught for these case: + // Ok(Default::default()) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Create a block builder for regtest mining pub fn make_regtest_block_builder( burnchain: &Burnchain, @@ -2381,6 +2401,11 @@ impl StacksBlockBuilder { Ok((blocked, tx_events)) } + // TODO: #4587 create default for `StacksBlock` and `ExecutionCost`, then check if mutation tests are caught for these cases: + // Ok((Default::default(), Default::default(), 1)) + // Ok((Default::default(), Default::default(), 0)) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Given access to the mempool, mine an anchored block with no more than the given execution cost. /// returns the assembled block, and the consumed execution budget. pub fn build_anchored_block( diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index bd441cc023..243f91e7c6 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -91,6 +91,7 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::VRFProof; use stacks_common::util::{get_epoch_time_ms, log, sleep_ms}; +#[cfg_attr(test, mutants::skip)] fn main() { let mut argv: Vec = env::args().collect(); if argv.len() < 2 { @@ -1332,6 +1333,7 @@ simulating a miner. } } +#[cfg_attr(test, mutants::skip)] fn tip_mine() { let argv: Vec = env::args().collect(); if argv.len() < 6 { diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index bceb484cd7..6c20f79dcd 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -31,6 +31,7 @@ wsts = { workspace = true } rand = { workspace = true } rand_core = { workspace = true } hashbrown = { workspace = true } +mutants = "0.0.3" [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 8cf9ac82be..bb6a319651 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -125,6 +125,10 @@ pub fn addr2str(btc_addr: &BitcoinAddress) -> String { format!("{}", &btc_addr) } +// TODO: #4587 create default for `BurnchainParameters`, then check if mutation tests are caught for these case: +// Default::default() +// Or keep the skip and remove the comment +#[cfg_attr(test, mutants::skip)] pub fn burnchain_params_from_config(config: &BurnchainConfig) -> BurnchainParameters { let (network, _) = config.get_bitcoin_network(); let mut params = BurnchainParameters::from_params(&config.chain, &network) @@ -135,6 +139,10 @@ pub fn burnchain_params_from_config(config: &BurnchainConfig) -> BurnchainParame params } +// TODO: #4587 create default for `BitcoinIndexer`, then check if mutation tests are caught for these case: +// Default::default() +// Or keep the skip and remove the comment +#[cfg_attr(test, mutants::skip)] /// Helper method to create a BitcoinIndexer pub fn make_bitcoin_indexer( config: &Config, @@ -272,6 +280,10 @@ impl BitcoinRegtestController { BitcoinRegtestController::with_burnchain(config, coordinator_channel, None, None) } + // TODO: #4587 create default for `BitcoinRegtestController`, then check if mutation tests are caught for these case: + // Default::default() + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] pub fn with_burnchain( config: Config, coordinator_channel: Option, @@ -341,6 +353,10 @@ impl BitcoinRegtestController { } } + // TODO: #4587 create default for `BitcoinRegtestController`, then check if mutation tests are caught for these case: + // Default::default() + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// create a dummy bitcoin regtest controller. /// used just for submitting bitcoin ops. pub fn new_dummy(config: Config) -> Self { @@ -1242,6 +1258,7 @@ impl BitcoinRegtestController { Some(tx) } + #[cfg_attr(test, mutants::skip)] #[cfg(not(test))] fn build_stack_stx_tx( &mut self, @@ -1617,6 +1634,10 @@ impl BitcoinRegtestController { } } + // TODO: #4587 create default for `Transaction` and `UTXOSet`, then check if mutation tests are caught for these case: + // Some((Default::default(), Default::default())) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] fn prepare_tx( &mut self, epoch_id: StacksEpochId, @@ -1984,6 +2005,10 @@ impl BitcoinRegtestController { self.config.miner.segwit = segwit; } + // TODO: #4587 create default for `SerializedTx`, then check if mutation tests are caught for these case: + // Some(Default::default()) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] pub fn make_operation_tx( &mut self, epoch_id: StacksEpochId, diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 60b60cf9f7..3a33a11083 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -827,6 +827,10 @@ impl Config { } } + // TODO: #4587 create default for `StacksEpoch`, then check if mutation tests are caught for these case: + // Ok(vec![Default::default()]) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] fn make_epochs( conf_epochs: &[StacksEpochConfigFile], burn_mode: &str, @@ -1274,6 +1278,9 @@ impl Config { } } + // TODO: #4587 create default for BlockBuilderSettings, then check if mutation tests are caught for these case: + // Default::default() + #[cfg_attr(test, mutants::skip)] pub fn make_block_builder_settings( &self, attempt: u64, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 4b793d0106..1d9a2415d2 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -576,6 +576,10 @@ impl BlockMinerThread { tx_signer.get_tx().unwrap() } + // TODO: #4587 create default for `NakamotoNodeError` and `ParentStacksBlockInfo`, then check if mutation tests are caught for these case: + // Ok(Default::default()) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Load up the parent block info for mining. /// If there's no parent because this is the first block, then return the genesis block's info. /// If we can't find the parent in the DB but we expect one, return None. @@ -679,6 +683,10 @@ impl BlockMinerThread { Some(vrf_proof) } + // TODO: #4587 create default for `NakamotoBlock` and `NakamotoNodeError`, then check if mutation tests are caught for these case: + // Ok(Default::default()) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a /// burnchain block-commit transaction. If we succeed, then return the assembled block. fn mine_block(&mut self, stackerdbs: &StackerDBs) -> Result { @@ -832,6 +840,10 @@ impl BlockMinerThread { } impl ParentStacksBlockInfo { + // TODO: #4587 craete default for `NakamotoBlock` and `NakamotoNodeError`, then check if mutation tests are caught for these case: + // Ok(Default::default()) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Determine where in the set of forks to attempt to mine the next anchored block. /// `mine_tip_ch` and `mine_tip_bhh` identify the parent block on top of which to mine. /// `check_burn_block` identifies what we believe to be the burn chain's sortition history tip. diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 28e6552771..749fc9e1fc 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1507,6 +1507,11 @@ impl BlockMinerThread { Some((*best_tip).clone()) } + // TODO: #4587 create default for `ParentStacksBlockInfo`, then check if mutation tests are caught for these cases: + // (Some(Default::default()), true) + // (Some(Default::default()), false) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Load up the parent block info for mining. /// If there's no parent because this is the first block, then return the genesis block's info. /// If we can't find the parent in the DB but we expect one, return None. @@ -2222,6 +2227,10 @@ impl BlockMinerThread { return false; } + // TODO: #4587 create default for `MinerThreadResult`, then check if mutation tests are caught for these case: + // Some(Default::default()) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a /// burnchain block-commit transaction. If we succeed, then return the assembled block data as /// well as the microblock private key to use to produce microblocks. @@ -3096,6 +3105,8 @@ impl RelayerThread { (true, miner_tip) } + // TODO: #4587 add tests for `true` and `false` returning cases + #[cfg_attr(test, mutants::skip)] /// Process all new tenures that we're aware of. /// Clear out stale tenure artifacts as well. /// Update the miner tip if we won the highest tenure (or clear it if we didn't). @@ -3550,6 +3561,8 @@ impl RelayerThread { true } + // TODO: #4587 add tests for `true` and `false` returning cases + #[cfg_attr(test, mutants::skip)] /// See if we should run a microblock tenure now. /// Return true if so; false if not fn can_run_microblock_tenure(&mut self) -> bool { diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index fd7683f569..30f49e1773 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -72,6 +72,10 @@ impl<'a> Tenure { } } + // TODO: #4587 create default for `TenureArtifacts` , then check if mutation tests are caught for these case: + // Some(Default::default()) + // Or keep the skip and remove the comment + #[cfg_attr(test, mutants::skip)] pub fn run(&mut self, burn_dbconn: &SortitionDBConn) -> Option { info!("Node starting new tenure with VRF {:?}", self.vrf_seed); From b2ceb58a282374647051af9a289310e720874ada Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 14 May 2024 09:43:19 -0700 Subject: [PATCH 0007/1400] Replace custom NakamotoBlockHeaders throughout signer with empty() Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/stackerdb.rs | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/stacks-signer/src/v0/stackerdb.rs b/stacks-signer/src/v0/stackerdb.rs index 15c2c7add2..d254239dfd 100644 --- a/stacks-signer/src/v0/stackerdb.rs +++ b/stacks-signer/src/v0/stackerdb.rs @@ -187,13 +187,9 @@ mod tests { use std::time::Duration; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; - use blockstack_lib::chainstate::stacks::ThresholdSignature; - use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; - use clarity::util::secp256k1::MessageSignature; use libsigner::BlockProposal; use rand::{thread_rng, RngCore}; - use stacks_common::bitvec::BitVec; use super::*; use crate::client::tests::{generate_signer_config, mock_server_from_config, write_response}; @@ -217,18 +213,7 @@ mod tests { let signer_config = generate_signer_config(&config, 5, 20); let mut stackerdb = StackerDB::from(&signer_config); - let header = NakamotoBlockHeader { - version: 1, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([0x04; 20]), - parent_block_id: StacksBlockId([0x05; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), - }; + let header = NakamotoBlockHeader::empty(); let mut block = NakamotoBlock { header, txs: vec![], From 1a537e5d0f277b8b074128bc8da410379b1ffb4e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 14 May 2024 15:57:07 -0700 Subject: [PATCH 0008/1400] CRC: Remove unnecessary reward cycle check in process_event MinerMessage and fix logging and use bits() when signing Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 54 ++++++++++++++++++---------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index edc0bf7e89..ebe0778b11 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -18,6 +18,7 @@ use std::sync::mpsc::Sender; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::PrivateKey; +use clarity::util::hash::MerkleHashFunc; use libsigner::v0::messages::{BlockResponse, RejectCode, SignerMessage}; use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_warn}; @@ -109,11 +110,6 @@ impl SignerTrait for Signer { ); } Some(SignerEvent::MinerMessages(messages, _)) => { - if current_reward_cycle != self.reward_cycle { - // There is not point in processing blocks if we are not the current reward cycle (we can never actually contribute to signing these blocks) - debug!("{self}: Received a proposed block, but this signer's reward cycle is not the current one ({current_reward_cycle}). Ignoring..."); - return; - } debug!( "{self}: Received {} messages from the miner", messages.len(); @@ -180,13 +176,13 @@ impl Signer { /// Determine this signers response to a proposed block /// Returns a BlockResponse if we have already validated the block /// Returns None otherwise - fn determine_response(&self, block_info: &mut BlockInfo) -> Option { + fn determine_response(&self, block_info: &BlockInfo) -> Option { let valid = block_info.valid?; let response = if valid { debug!("{self}: Accepting block {}", block_info.block.block_id()); let signature = self .private_key - .sign(&block_info.signer_signature_hash().0) + .sign(block_info.signer_signature_hash().bits()) .expect("Failed to sign block"); BlockResponse::accepted(block_info.signer_signature_hash(), signature) } else { @@ -210,20 +206,39 @@ impl Signer { // We are not signing for this reward cycle. Reject the block warn!( "{self}: Received a block proposal for a different reward cycle. Ignore it."; - "requested_reward_cycle" => block_proposal.reward_cycle, + "requested_reward_cycle" => block_proposal.reward_cycle ); return; } - // TODO: could add a check to ignore an old burn block height if we know its oudated. Would require us to store the burn block height we last saw on the side. + // TODO: should add a check to ignore an old burn block height if we know its oudated. Would require us to store the burn block height we last saw on the side. + // the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); - let Some(mut block_info) = self + if let Some(block_info) = self .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) .expect("Failed to connect to signer DB") - else { + { + let Some(block_response) = self.determine_response(&block_info) else { + // We are still waiting for a response for this block. Do nothing. + debug!("{self}: Received a block proposal for a block we are already validating."; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id() + ); + return; + }; + // Submit a proposal response to the .signers contract for miners + debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); + if let Err(e) = self + .stackerdb + .send_message_with_retry(block_response.into()) + { + warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + } + } else { debug!( "{self}: received a block proposal for a new block. Submit block for validation. "; "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), ); let block_info = BlockInfo::from(block_proposal.clone()); stacks_client @@ -234,21 +249,7 @@ impl Signer { self.signer_db .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - return; - }; - if let Some(block_response) = self.determine_response(&mut block_info) { - // Submit a proposal response to the .signers contract for miners - warn!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); - if let Err(e) = self - .stackerdb - .send_message_with_retry(block_response.into()) - { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } } - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); } /// Handle the block validate response returned from our prior calls to submit a block for validation @@ -275,6 +276,7 @@ impl Signer { } }; block_info.valid = Some(true); + // TODO: do not sign the block if it fails signer state checks (forks, etc.) let signature = self .private_key .sign(&signer_signature_hash.0) @@ -307,7 +309,7 @@ impl Signer { } }; // Submit a proposal response to the .signers contract for miners - warn!("{self}: Broadcasting a block response to stacks node: {message:?}"); + debug!("{self}: Broadcasting a block response to stacks node: {message:?}"); if let Err(e) = self.stackerdb.send_message_with_retry(message) { warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); } From 2c46e6f90086940511b8340703056d6c232714c5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 15 May 2024 11:55:18 -0700 Subject: [PATCH 0009/1400] CRC: reuse stackerdb.rs code where possible Signed-off-by: Jacinta Ferrant --- libsigner/src/libsigner.rs | 21 ++ libsigner/src/v0/messages.rs | 30 ++- libsigner/src/v1/messages.rs | 17 +- stacks-signer/src/client/mod.rs | 4 + stacks-signer/src/{v0 => client}/stackerdb.rs | 70 +++++-- stacks-signer/src/v0/mod.rs | 2 - stacks-signer/src/v0/signer.rs | 25 ++- stacks-signer/src/v1/mod.rs | 2 +- stacks-signer/src/v1/signer.rs | 53 +++-- .../v1/{stackerdb.rs => stackerdb_manager.rs} | 197 ++++-------------- testnet/stacks-node/src/tests/signer.rs | 6 +- 11 files changed, 202 insertions(+), 225 deletions(-) rename stacks-signer/src/{v0 => client}/stackerdb.rs (82%) rename stacks-signer/src/v1/{stackerdb.rs => stackerdb_manager.rs} (60%) diff --git a/libsigner/src/libsigner.rs b/libsigner/src/libsigner.rs index 874ebad1f4..e08aa20daa 100644 --- a/libsigner/src/libsigner.rs +++ b/libsigner/src/libsigner.rs @@ -47,6 +47,13 @@ pub mod v0; /// v1 signer related code pub mod v1; +use std::cmp::Eq; +use std::fmt::Debug; +use std::hash::Hash; + +use clarity::codec::StacksMessageCodec; +use clarity::vm::types::QualifiedContractIdentifier; + pub use crate::error::{EventError, RPCError}; pub use crate::events::{ BlockProposal, EventReceiver, EventStopSignaler, SignerEvent, SignerEventReceiver, @@ -55,3 +62,17 @@ pub use crate::events::{ pub use crate::runloop::{RunningSigner, Signer, SignerRunLoop}; pub use crate::session::{SignerSession, StackerDBSession}; pub use crate::signer_set::{Error as ParseSignerEntriesError, SignerEntries}; + +/// A trait for message slots used for signer communication +pub trait MessageSlotID: Sized + Eq + Hash + Debug + Copy { + /// The contract identifier for the message slot in stacker db + fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier; + /// All possible Message Slot values + fn all() -> Vec; +} + +/// A trait for signer messages used in signer communciation +pub trait SignerMessage: StacksMessageCodec { + /// The contract identifier for the message slot in stacker db + fn msg_id(&self) -> T; +} diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 0542c8d2ef..177e293dd4 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -55,7 +55,10 @@ use tiny_http::{ }; use crate::http::{decode_http_body, decode_http_request}; -use crate::{BlockProposal, EventError}; +use crate::{ + BlockProposal, EventError, MessageSlotID as MessageSlotIDTrait, + SignerMessage as SignerMessageTrait, +}; define_u8_enum!( /// Enum representing the stackerdb message identifier: this is @@ -67,6 +70,21 @@ MessageSlotID { BlockResponse = 1 }); +impl MessageSlotIDTrait for MessageSlotID { + fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { + NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) + } + fn all() -> Vec { + MessageSlotID::ALL.to_vec() + } +} + +impl SignerMessageTrait for SignerMessage { + fn msg_id(&self) -> MessageSlotID { + self.msg_id() + } +} + define_u8_enum!( /// Enum representing the SignerMessage type prefix SignerMessageTypePrefix { @@ -444,15 +462,9 @@ impl From for SignerMessage { } } -impl From for SignerMessage { - fn from(block_rejection: BlockRejection) -> Self { - Self::BlockResponse(BlockResponse::Rejected(block_rejection)) - } -} - -impl From for SignerMessage { +impl From for BlockResponse { fn from(rejection: BlockValidateReject) -> Self { - Self::BlockResponse(BlockResponse::Rejected(rejection.into())) + Self::Rejected(rejection.into()) } } diff --git a/libsigner/src/v1/messages.rs b/libsigner/src/v1/messages.rs index a84ba98c64..0ec6ee7614 100644 --- a/libsigner/src/v1/messages.rs +++ b/libsigner/src/v1/messages.rs @@ -64,7 +64,7 @@ use wsts::schnorr::ID; use wsts::state_machine::{signer, SignError}; use crate::http::{decode_http_body, decode_http_request}; -use crate::EventError; +use crate::{EventError, MessageSlotID as MessageSlotIDTrait, SignerMessage as SignerMessageTrait}; define_u8_enum!( /// Enum representing the stackerdb message identifier: this is @@ -100,6 +100,21 @@ MessageSlotID { EncryptedSignerState = 13 }); +impl MessageSlotIDTrait for MessageSlotID { + fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { + NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) + } + fn all() -> Vec { + MessageSlotID::ALL.to_vec() + } +} + +impl SignerMessageTrait for SignerMessage { + fn msg_id(&self) -> MessageSlotID { + self.msg_id() + } +} + define_u8_enum!( /// Enum representing the signer message type prefix SignerMessageTypePrefix { diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 21a9a2f0b3..71eae4e6eb 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -13,6 +13,9 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . + +/// The stackerdb module for sending messages between signers and miners +pub(crate) mod stackerdb; /// The stacks node client module for communicating with the stacks node pub(crate) mod stacks_client; @@ -22,6 +25,7 @@ use clarity::vm::errors::Error as ClarityError; use clarity::vm::types::serialization::SerializationError; use libstackerdb::Error as StackerDBError; use slog::slog_debug; +pub use stackerdb::*; pub use stacks_client::*; use stacks_common::codec::Error as CodecError; use stacks_common::debug; diff --git a/stacks-signer/src/v0/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs similarity index 82% rename from stacks-signer/src/v0/stackerdb.rs rename to stacks-signer/src/client/stackerdb.rs index d254239dfd..34fc274738 100644 --- a/stacks-signer/src/v0/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -15,12 +15,11 @@ // along with this program. If not, see . // use blockstack_lib::net::api::poststackerdbchunk::StackerDBErrorCodes; +use clarity::codec::read_next; use hashbrown::HashMap; -use libsigner::v0::messages::{MessageSlotID, SignerMessage}; -use libsigner::{SignerSession, StackerDBSession}; +use libsigner::{MessageSlotID, SignerMessage, SignerSession, StackerDBSession}; use libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; use slog::{slog_debug, slog_warn}; -use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::{debug, warn}; @@ -29,21 +28,21 @@ use crate::config::SignerConfig; /// The StackerDB client for communicating with the .signers contract #[derive(Debug)] -pub struct StackerDB { +pub struct StackerDB { /// The stacker-db sessions for each signer set and message type. /// Maps message ID to the DB session. - signers_message_stackerdb_sessions: HashMap, + pub signers_message_stackerdb_sessions: HashMap, /// The private key used in all stacks node communications - stacks_private_key: StacksPrivateKey, + pub stacks_private_key: StacksPrivateKey, /// A map of a message ID to last chunk version for each session - slot_versions: HashMap>, + pub slot_versions: HashMap>, /// The signer slot ID -- the index into the signer list for this signer daemon's signing key. - signer_slot_id: SignerSlotID, + pub signer_slot_id: SignerSlotID, /// The reward cycle of the connecting signer - reward_cycle: u64, + pub reward_cycle: u64, } -impl From<&SignerConfig> for StackerDB { +impl From<&SignerConfig> for StackerDB { fn from(config: &SignerConfig) -> Self { Self::new( &config.node_host, @@ -54,7 +53,8 @@ impl From<&SignerConfig> for StackerDB { ) } } -impl StackerDB { + +impl StackerDB { /// Create a new StackerDB client pub fn new( host: &str, @@ -64,11 +64,10 @@ impl StackerDB { signer_slot_id: SignerSlotID, ) -> Self { let mut signers_message_stackerdb_sessions = HashMap::new(); - for msg_id in MessageSlotID::ALL { - signers_message_stackerdb_sessions.insert( - *msg_id, - StackerDBSession::new(host, msg_id.stacker_db_contract(is_mainnet, reward_cycle)), - ); + for msg_id in M::all() { + let session = + StackerDBSession::new(host, msg_id.stacker_db_contract(is_mainnet, reward_cycle)); + signers_message_stackerdb_sessions.insert(msg_id, session); } Self { @@ -81,9 +80,9 @@ impl StackerDB { } /// Sends messages to the .signers stacker-db with an exponential backoff retry - pub fn send_message_with_retry( + pub fn send_message_with_retry>( &mut self, - message: SignerMessage, + message: T, ) -> Result { let msg_id = message.msg_id(); let message_bytes = message.serialize_to_vec(); @@ -94,7 +93,7 @@ impl StackerDB { /// exponential backoff retry pub fn send_message_bytes_with_retry( &mut self, - msg_id: &MessageSlotID, + msg_id: &M, message_bytes: Vec, ) -> Result { let slot_id = self.signer_slot_id; @@ -117,11 +116,11 @@ impl StackerDB { chunk.sign(&self.stacks_private_key)?; let Some(session) = self.signers_message_stackerdb_sessions.get_mut(msg_id) else { - panic!("FATAL: would loop forever trying to send a message with ID {}, for which we don't have a session", msg_id); + panic!("FATAL: would loop forever trying to send a message with ID {msg_id:?}, for which we don't have a session"); }; debug!( - "Sending a chunk to stackerdb slot ID {slot_id} with version {slot_version} and message ID {msg_id} to contract {:?}!\n{chunk:?}", + "Sending a chunk to stackerdb slot ID {slot_id} with version {slot_version} and message ID {msg_id:?} to contract {:?}!\n{chunk:?}", &session.stackerdb_contract_id ); @@ -170,6 +169,34 @@ impl StackerDB { } } + /// Get all signer messages from stackerdb for the given slot IDs + pub fn get_messages>( + session: &mut StackerDBSession, + slot_ids: &[u32], + ) -> Result, ClientError> { + let mut messages = vec![]; + let send_request = || { + session + .get_latest_chunks(slot_ids) + .map_err(backoff::Error::transient) + }; + let chunk_ack = retry_with_exponential_backoff(send_request)?; + for (i, chunk) in chunk_ack.iter().enumerate() { + let Some(data) = chunk else { + continue; + }; + let Ok(message) = read_next::(&mut &data[..]) else { + if !data.is_empty() { + warn!("Failed to deserialize chunk data into a SignerMessage"); + debug!("slot #{i}: Failed chunk ({}): {data:?}", &data.len(),); + } + continue; + }; + messages.push(message); + } + Ok(messages) + } + /// Retrieve the signer set this stackerdb client is attached to pub fn get_signer_set(&self) -> u32 { u32::try_from(self.reward_cycle % 2).expect("FATAL: reward cycle % 2 exceeds u32::MAX") @@ -188,6 +215,7 @@ mod tests { use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; + use libsigner::v0::messages::SignerMessage; use libsigner::BlockProposal; use rand::{thread_rng, RngCore}; diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs index fa98082251..95dfb76e4b 100644 --- a/stacks-signer/src/v0/mod.rs +++ b/stacks-signer/src/v0/mod.rs @@ -16,5 +16,3 @@ /// The signer module for processing events pub mod signer; -/// The stackerdb module for sending messages between signers and miners -pub mod stackerdb; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index ebe0778b11..57fdf8a6bc 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -19,15 +19,14 @@ use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::PrivateKey; use clarity::util::hash::MerkleHashFunc; -use libsigner::v0::messages::{BlockResponse, RejectCode, SignerMessage}; +use libsigner::v0::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMessage}; use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::{debug, error, warn}; use wsts::state_machine::OperationResult; -use super::stackerdb::StackerDB; -use crate::client::{SignerSlotID, StacksClient}; +use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; use crate::runloop::RunLoopCommand; use crate::signerdb::{BlockInfo, SignerDb}; @@ -39,7 +38,7 @@ pub struct Signer { /// The private key of the signer private_key: StacksPrivateKey, /// The stackerdb client - pub stackerdb: StackerDB, + pub stackerdb: StackerDB, /// Whether the signer is a mainnet signer or not pub mainnet: bool, /// The signer id @@ -230,7 +229,7 @@ impl Signer { debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); if let Err(e) = self .stackerdb - .send_message_with_retry(block_response.into()) + .send_message_with_retry::(block_response.into()) { warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); } @@ -255,7 +254,7 @@ impl Signer { /// Handle the block validate response returned from our prior calls to submit a block for validation fn handle_block_validate_response(&mut self, block_validate_response: &BlockValidateResponse) { debug!("{self}: Received a block validate response: {block_validate_response:?}"); - let (message, block_info) = match block_validate_response { + let (response, block_info) = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { crate::monitoring::increment_block_validation_responses(true); let signer_signature_hash = block_validate_ok.signer_signature_hash; @@ -282,7 +281,7 @@ impl Signer { .sign(&signer_signature_hash.0) .expect("Failed to sign block"); ( - BlockResponse::accepted(signer_signature_hash, signature).into(), + BlockResponse::accepted(signer_signature_hash, signature), block_info, ) } @@ -305,12 +304,18 @@ impl Signer { } }; block_info.valid = Some(false); - (block_validate_reject.clone().into(), block_info) + ( + BlockResponse::from(block_validate_reject.clone()), + block_info, + ) } }; // Submit a proposal response to the .signers contract for miners - debug!("{self}: Broadcasting a block response to stacks node: {message:?}"); - if let Err(e) = self.stackerdb.send_message_with_retry(message) { + debug!("{self}: Broadcasting a block response to stacks node: {response:?}"); + if let Err(e) = self + .stackerdb + .send_message_with_retry::(response.into()) + { warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); } self.signer_db diff --git a/stacks-signer/src/v1/mod.rs b/stacks-signer/src/v1/mod.rs index ac0a25f623..eee9efeb14 100644 --- a/stacks-signer/src/v1/mod.rs +++ b/stacks-signer/src/v1/mod.rs @@ -19,7 +19,7 @@ pub mod coordinator; /// The signer module for processing events pub mod signer; /// The stackerdb module for sending messages between signers and miners -pub mod stackerdb; +pub mod stackerdb_manager; use std::sync::mpsc::{channel, Receiver, Sender}; diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 94636cb9a3..ee93448932 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -52,7 +52,7 @@ use wsts::state_machine::{OperationResult, SignError}; use wsts::traits::Signer as _; use wsts::v2; -use super::stackerdb::StackerDB; +use super::stackerdb_manager::StackerDBManager; use crate::client::{ClientError, SignerSlotID, StacksClient}; use crate::config::SignerConfig; use crate::runloop::{RunLoopCommand, SignerCommand}; @@ -91,8 +91,8 @@ pub struct Signer { pub state: State, /// Received Commands that need to be processed pub commands: VecDeque, - /// The stackerdb client - pub stackerdb: StackerDB, + /// The stackerdb client session manager + pub stackerdb_manager: StackerDBManager, /// Whether the signer is a mainnet signer or not pub mainnet: bool, /// The signer id @@ -203,7 +203,7 @@ impl SignerTrait for Signer { ) } Some(SignerEvent::SignerMessages(signer_set, messages)) => { - if *signer_set != self.stackerdb.get_signer_set() { + if *signer_set != self.stackerdb_manager.get_signer_set() { debug!("{self}: Received a signer message for a reward cycle that does not belong to this signer. Ignoring..."); return; } @@ -346,7 +346,7 @@ impl Signer { return Ok(()); } let ordered_packets = self - .stackerdb + .stackerdb_manager .get_dkg_packets(&self.signer_slot_ids)? .iter() .filter_map(|packet| { @@ -374,7 +374,7 @@ impl Signer { impl From for Signer { fn from(signer_config: SignerConfig) -> Self { - let mut stackerdb = StackerDB::from(&signer_config); + let mut stackerdb_manager = StackerDBManager::from(&signer_config); let num_signers = signer_config .signer_entries @@ -432,7 +432,7 @@ impl From for Signer { ); if let Some(state) = load_encrypted_signer_state( - &mut stackerdb, + &mut stackerdb_manager, signer_config.signer_slot_id, &state_machine.network_private_key, ).or_else(|err| { @@ -450,7 +450,7 @@ impl From for Signer { state_machine, state: State::Uninitialized, commands: VecDeque::new(), - stackerdb, + stackerdb_manager, mainnet: signer_config.mainnet, signer_id: signer_config.signer_id, signer_addresses: signer_config @@ -531,7 +531,7 @@ impl Signer { ); match self.coordinator.start_dkg_round() { Ok(msg) => { - let ack = self.stackerdb.send_message_with_retry(msg.into()); + let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); debug!("{self}: ACK: {ack:?}",); self.update_operation(Operation::Dkg); } @@ -573,7 +573,7 @@ impl Signer { *merkle_root, ) { Ok(msg) => { - let ack = self.stackerdb.send_message_with_retry(msg.into()); + let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); debug!("{self}: ACK: {ack:?}",); block_info.signed_over = true; self.signer_db @@ -656,7 +656,7 @@ impl Signer { // to observe so they know to send another block and to prove signers are doing work); warn!("{self}: Broadcasting a block rejection due to stacks node validation failure..."); if let Err(e) = self - .stackerdb + .stackerdb_manager .send_message_with_retry(block_validate_reject.clone().into()) { warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); @@ -949,7 +949,7 @@ impl Signer { ); // Submit signature result to miners to observe if let Err(e) = self - .stackerdb + .stackerdb_manager .send_message_with_retry(block_rejection.into()) { warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); @@ -965,10 +965,10 @@ impl Signer { ); // Submit signature result to miners to observe if let Err(e) = self - .stackerdb + .stackerdb_manager .send_message_with_retry(block_rejection.into()) { - warn!("{self}: Failed to send block submission to stacker-db: {e:?}",); + warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); } false } @@ -980,7 +980,7 @@ impl Signer { nonces: &std::collections::HashMap, ) -> Result, ClientError> { let transactions: Vec<_> = self - .stackerdb + .stackerdb_manager .get_current_transactions()? .into_iter() .filter_map(|tx| { @@ -1005,7 +1005,7 @@ impl Signer { // Get all the account nonces for the next signers let account_nonces = self.get_account_nonces(stacks_client, &self.next_signer_addresses); let transactions: Vec<_> = self - .stackerdb + .stackerdb_manager .get_next_transactions(&self.next_signer_slot_ids)?; let mut filtered_transactions = std::collections::HashMap::new(); NakamotoSigners::update_filtered_transactions( @@ -1137,7 +1137,7 @@ impl Signer { error!("{}: Failed to serialize DKGResults message for StackerDB, will continue operating.", self.signer_id; "error" => %e); } else if let Err(e) = self - .stackerdb + .stackerdb_manager .send_message_bytes_with_retry(&MessageSlotID::DkgResults, dkg_results_bytes) { error!("{}: Failed to send DKGResults message to StackerDB, will continue operating.", self.signer_id; @@ -1193,7 +1193,7 @@ impl Signer { dkg_public_key: Point, ) -> Result { let mut unsigned_tx = stacks_client.build_unsigned_vote_for_aggregate_public_key( - self.stackerdb.get_signer_slot_id().0, + self.stackerdb_manager.get_signer_slot_id().0, self.coordinator.current_dkg_id, dkg_public_key, self.reward_cycle, @@ -1268,7 +1268,8 @@ impl Signer { // For all Pox-4 epochs onwards, broadcast the results also to stackerDB for other signers/miners to observe signer_transactions.push(new_transaction); let signer_message = SignerMessage::Transactions(signer_transactions); - self.stackerdb.send_message_with_retry(signer_message)?; + self.stackerdb_manager + .send_message_with_retry(signer_message)?; crate::monitoring::increment_dkg_votes_submitted(); info!("{self}: Broadcasted DKG vote transaction ({txid}) to stacker DB"); Ok(()) @@ -1297,7 +1298,7 @@ impl Signer { // Submit signature result to miners to observe info!("{self}: Submit block response: {block_submission}"); if let Err(e) = self - .stackerdb + .stackerdb_manager .send_message_with_retry(block_submission.into()) { warn!("{self}: Failed to send block submission to stacker-db: {e:?}"); @@ -1337,7 +1338,7 @@ impl Signer { debug!("{self}: Broadcasting block rejection: {block_rejection:?}"); // Submit signature result to miners to observe if let Err(e) = self - .stackerdb + .stackerdb_manager .send_message_with_retry(block_rejection.into()) { warn!("{self}: Failed to send block rejection submission to stacker-db: {e:?}"); @@ -1390,9 +1391,7 @@ impl Signer { encrypted_state: Vec, ) -> Result<(), PersistenceError> { let message = SignerMessage::EncryptedSignerState(encrypted_state); - - self.stackerdb.send_message_with_retry(message)?; - + self.stackerdb_manager.send_message_with_retry(message)?; Ok(()) } @@ -1420,7 +1419,7 @@ impl Signer { outbound_messages.len() ); for msg in outbound_messages { - let ack = self.stackerdb.send_message_with_retry(msg.into()); + let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); if let Ok(ack) = ack { debug!("{self}: send outbound ACK: {ack:?}"); } else { @@ -1568,7 +1567,7 @@ impl Signer { // Have I already voted, but the vote is still pending in StackerDB? Check stackerdb for the same round number and reward cycle vote transaction // Only get the account nonce of THIS signer as we only care about our own votes, not other signer votes let account_nonce = stacks_client.get_account_nonce(signer_address).unwrap_or(0); - let old_transactions = self.stackerdb.get_current_transactions()?; + let old_transactions = self.stackerdb_manager.get_current_transactions()?; // Check if we have an existing vote transaction for the same round and reward cycle for transaction in old_transactions.iter() { // We should not consider other signer transactions and should ignore invalid transaction versions @@ -1635,7 +1634,7 @@ trait SignerStateStorage { ) -> Result>, PersistenceError>; } -impl SignerStateStorage for &mut StackerDB { +impl SignerStateStorage for &mut StackerDBManager { type IdType = SignerSlotID; fn get_encrypted_signer_state( diff --git a/stacks-signer/src/v1/stackerdb.rs b/stacks-signer/src/v1/stackerdb_manager.rs similarity index 60% rename from stacks-signer/src/v1/stackerdb.rs rename to stacks-signer/src/v1/stackerdb_manager.rs index 69fcc1d829..733d0bd936 100644 --- a/stacks-signer/src/v1/stackerdb.rs +++ b/stacks-signer/src/v1/stackerdb_manager.rs @@ -15,51 +15,44 @@ // along with this program. If not, see . // use blockstack_lib::chainstate::stacks::StacksTransaction; -use blockstack_lib::net::api::poststackerdbchunk::StackerDBErrorCodes; -use hashbrown::HashMap; +use clarity::types::chainstate::StacksPrivateKey; use libsigner::v1::messages::{MessageSlotID, SignerMessage}; use libsigner::{SignerSession, StackerDBSession}; -use libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; +use libstackerdb::StackerDBChunkAckData; use slog::{slog_debug, slog_error, slog_warn}; -use stacks_common::codec::{read_next, StacksMessageCodec}; -use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::codec::read_next; use stacks_common::{debug, error, warn}; use wsts::net::Packet; +use crate::client::stackerdb::StackerDB; use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID}; use crate::config::SignerConfig; -/// The StackerDB client for communicating with the .signers contract +/// The session manager for communicating with the .signers contracts for the current and next reward cycle #[derive(Debug)] -pub struct StackerDB { - /// The stacker-db sessions for each signer set and message type. - /// Maps message ID to the DB session. - signers_message_stackerdb_sessions: HashMap, - /// The private key used in all stacks node communications - stacks_private_key: StacksPrivateKey, - /// A map of a message ID to last chunk version for each session - slot_versions: HashMap>, - /// The signer slot ID -- the index into the signer list for this signer daemon's signing key. - signer_slot_id: SignerSlotID, - /// The reward cycle of the connecting signer - reward_cycle: u64, +pub struct StackerDBManager { /// The stacker-db transaction msg session for the NEXT reward cycle next_transaction_session: StackerDBSession, + /// The stacker-db sessions for each signer set and message type. + stackerdb: StackerDB, } -impl From<&SignerConfig> for StackerDB { +impl From<&SignerConfig> for StackerDBManager { fn from(config: &SignerConfig) -> Self { - Self::new( + let stackerdb = StackerDB::from(config); + let next_transaction_session = StackerDBSession::new( &config.node_host, - config.stacks_private_key, - config.mainnet, - config.reward_cycle, - config.signer_slot_id, - ) + MessageSlotID::Transactions + .stacker_db_contract(config.mainnet, config.reward_cycle.wrapping_add(1)), + ); + Self { + next_transaction_session, + stackerdb, + } } } -impl StackerDB { - /// Create a new StackerDB client +impl StackerDBManager { + /// Create a new StackerDB Manager pub fn new( host: &str, stacks_private_key: StacksPrivateKey, @@ -67,37 +60,30 @@ impl StackerDB { reward_cycle: u64, signer_slot_id: SignerSlotID, ) -> Self { - let mut signers_message_stackerdb_sessions = HashMap::new(); - for msg_id in MessageSlotID::ALL { - signers_message_stackerdb_sessions.insert( - *msg_id, - StackerDBSession::new(host, msg_id.stacker_db_contract(is_mainnet, reward_cycle)), - ); - } + let stackerdb = StackerDB::new( + host, + stacks_private_key, + is_mainnet, + reward_cycle, + signer_slot_id, + ); let next_transaction_session = StackerDBSession::new( host, MessageSlotID::Transactions .stacker_db_contract(is_mainnet, reward_cycle.wrapping_add(1)), ); - Self { - signers_message_stackerdb_sessions, - stacks_private_key, - slot_versions: HashMap::new(), - signer_slot_id, - reward_cycle, next_transaction_session, + stackerdb, } } - /// Sends messages to the .signers stacker-db with an exponential backoff retry + /// Send a message to the stackerdb with retry pub fn send_message_with_retry( &mut self, message: SignerMessage, ) -> Result { - let msg_id = message.msg_id(); - let message_bytes = message.serialize_to_vec(); - self.send_message_bytes_with_retry(&msg_id, message_bytes) + self.stackerdb.send_message_with_retry(message) } /// Sends message (as a raw msg ID and bytes) to the .signers stacker-db with an @@ -107,105 +93,8 @@ impl StackerDB { msg_id: &MessageSlotID, message_bytes: Vec, ) -> Result { - let slot_id = self.signer_slot_id; - loop { - let mut slot_version = if let Some(versions) = self.slot_versions.get_mut(msg_id) { - if let Some(version) = versions.get(&slot_id) { - *version - } else { - versions.insert(slot_id, 0); - 1 - } - } else { - let mut versions = HashMap::new(); - versions.insert(slot_id, 0); - self.slot_versions.insert(*msg_id, versions); - 1 - }; - - let mut chunk = StackerDBChunkData::new(slot_id.0, slot_version, message_bytes.clone()); - chunk.sign(&self.stacks_private_key)?; - - let Some(session) = self.signers_message_stackerdb_sessions.get_mut(msg_id) else { - panic!("FATAL: would loop forever trying to send a message with ID {}, for which we don't have a session", msg_id); - }; - - debug!( - "Sending a chunk to stackerdb slot ID {slot_id} with version {slot_version} and message ID {msg_id} to contract {:?}!\n{chunk:?}", - &session.stackerdb_contract_id - ); - - let send_request = || session.put_chunk(&chunk).map_err(backoff::Error::transient); - let chunk_ack: StackerDBChunkAckData = retry_with_exponential_backoff(send_request)?; - - if let Some(versions) = self.slot_versions.get_mut(msg_id) { - // NOTE: per the above, this is always executed - versions.insert(slot_id, slot_version.saturating_add(1)); - } else { - return Err(ClientError::NotConnected); - } - - if chunk_ack.accepted { - debug!("Chunk accepted by stackerdb: {chunk_ack:?}"); - return Ok(chunk_ack); - } else { - warn!("Chunk rejected by stackerdb: {chunk_ack:?}"); - } - if let Some(code) = chunk_ack.code { - match StackerDBErrorCodes::from_code(code) { - Some(StackerDBErrorCodes::DataAlreadyExists) => { - if let Some(slot_metadata) = chunk_ack.metadata { - warn!("Failed to send message to stackerdb due to wrong version number. Attempted {}. Expected {}. Retrying...", slot_version, slot_metadata.slot_version); - slot_version = slot_metadata.slot_version; - } else { - warn!("Failed to send message to stackerdb due to wrong version number. Attempted {}. Expected unknown version number. Incrementing and retrying...", slot_version); - } - if let Some(versions) = self.slot_versions.get_mut(msg_id) { - // NOTE: per the above, this is always executed - versions.insert(slot_id, slot_version.saturating_add(1)); - } else { - return Err(ClientError::NotConnected); - } - } - _ => { - warn!("Failed to send message to stackerdb: {:?}", chunk_ack); - return Err(ClientError::PutChunkRejected( - chunk_ack - .reason - .unwrap_or_else(|| "No reason given".to_string()), - )); - } - } - } - } - } - - /// Get all signer messages from stackerdb for the given slot IDs - fn get_messages( - session: &mut StackerDBSession, - slot_ids: &[u32], - ) -> Result, ClientError> { - let mut messages = vec![]; - let send_request = || { - session - .get_latest_chunks(slot_ids) - .map_err(backoff::Error::transient) - }; - let chunk_ack = retry_with_exponential_backoff(send_request)?; - for (i, chunk) in chunk_ack.iter().enumerate() { - let Some(data) = chunk else { - continue; - }; - let Ok(message) = read_next::(&mut &data[..]) else { - if !data.is_empty() { - warn!("Failed to deserialize chunk data into a SignerMessage"); - debug!("slot #{i}: Failed chunk ({}): {data:?}", &data.len(),); - } - continue; - }; - messages.push(message); - } - Ok(messages) + self.stackerdb + .send_message_bytes_with_retry(msg_id, message_bytes) } /// Get the ordered DKG packets from stackerdb for the signer slot IDs. @@ -225,10 +114,11 @@ impl StackerDB { let mut packets = vec![]; for packet_slot in packet_slots { let session = self + .stackerdb .signers_message_stackerdb_sessions .get_mut(packet_slot) .ok_or(ClientError::NotConnected)?; - let messages = Self::get_messages(session, &slot_ids)?; + let messages = StackerDB::get_messages(session, &slot_ids)?; for message in messages { let SignerMessage::Packet(packet) = message else { warn!("Found an unexpected type in a packet slot {packet_slot}"); @@ -246,7 +136,7 @@ impl StackerDB { signer_ids: &[SignerSlotID], ) -> Result, ClientError> { let slot_ids = signer_ids.iter().map(|id| id.0).collect::>(); - let messages = Self::get_messages(transactions_session, &slot_ids)?; + let messages = StackerDB::get_messages(transactions_session, &slot_ids)?; let mut transactions = vec![]; for message in messages { let SignerMessage::Transactions(chunk_transactions) = message else { @@ -261,12 +151,13 @@ impl StackerDB { /// Get this signer's latest transactions from stackerdb pub fn get_current_transactions(&mut self) -> Result, ClientError> { let Some(transactions_session) = self + .stackerdb .signers_message_stackerdb_sessions .get_mut(&MessageSlotID::Transactions) else { return Err(ClientError::NotConnected); }; - Self::get_transactions(transactions_session, &[self.signer_slot_id]) + Self::get_transactions(transactions_session, &[self.stackerdb.signer_slot_id]) } /// Get the latest signer transactions from signer ids for the next reward cycle @@ -285,6 +176,7 @@ impl StackerDB { ) -> Result>, ClientError> { debug!("Getting the persisted encrypted state for signer {signer_id}"); let Some(state_session) = self + .stackerdb .signers_message_stackerdb_sessions .get_mut(&MessageSlotID::EncryptedSignerState) else { @@ -325,12 +217,12 @@ impl StackerDB { /// Retrieve the signer set this stackerdb client is attached to pub fn get_signer_set(&self) -> u32 { - u32::try_from(self.reward_cycle % 2).expect("FATAL: reward cycle % 2 exceeds u32::MAX") + self.stackerdb.get_signer_set() } /// Retrieve the signer slot ID pub fn get_signer_slot_id(&mut self) -> SignerSlotID { - self.signer_slot_id + self.stackerdb.signer_slot_id } } @@ -344,6 +236,9 @@ mod tests { TransactionSmartContract, TransactionVersion, }; use blockstack_lib::util_lib::strings::StacksString; + use clarity::codec::StacksMessageCodec; + use clarity::types::chainstate::StacksPrivateKey; + use libstackerdb::StackerDBChunkAckData; use super::*; use crate::client::tests::{generate_signer_config, mock_server_from_config, write_response}; @@ -353,7 +248,7 @@ mod tests { fn get_signer_transactions_should_succeed() { let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); let signer_config = generate_signer_config(&config, 5, 20); - let mut stackerdb = StackerDB::from(&signer_config); + let mut manager = StackerDBManager::from(&signer_config); let sk = StacksPrivateKey::new(); let tx = StacksTransaction { version: TransactionVersion::Testnet, @@ -375,7 +270,7 @@ mod tests { let message = signer_message.serialize_to_vec(); let signer_slot_ids = vec![SignerSlotID(0), SignerSlotID(1)]; - let h = spawn(move || stackerdb.get_next_transactions(&signer_slot_ids)); + let h = spawn(move || manager.get_next_transactions(&signer_slot_ids)); let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); response_bytes.extend(message); let mock_server = mock_server_from_config(&config); @@ -396,7 +291,7 @@ mod tests { fn send_signer_message_should_succeed() { let config = GlobalConfig::load_from_file("./src/tests/conf/signer-1.toml").unwrap(); let signer_config = generate_signer_config(&config, 5, 20); - let mut stackerdb = StackerDB::from(&signer_config); + let mut stackerdb = StackerDBManager::from(&signer_config); let sk = StacksPrivateKey::new(); let tx = StacksTransaction { diff --git a/testnet/stacks-node/src/tests/signer.rs b/testnet/stacks-node/src/tests/signer.rs index 13ab2af301..7fef735de0 100644 --- a/testnet/stacks-node/src/tests/signer.rs +++ b/testnet/stacks-node/src/tests/signer.rs @@ -41,7 +41,7 @@ use stacks_signer::client::{SignerSlotID, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{RunLoopCommand, SignerCommand}; use stacks_signer::v1::coordinator::CoordinatorSelector; -use stacks_signer::v1::stackerdb::StackerDB; +use stacks_signer::v1::stackerdb_manager::StackerDBManager; use stacks_signer::v1::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -1143,7 +1143,7 @@ fn stackerdb_delayed_dkg() { let mut stackerdbs: Vec<_> = signer_slot_ids .iter() .map(|i| { - StackerDB::new( + StackerDBManager::new( &signer_test.running_nodes.conf.node.rpc_bind, StacksPrivateKey::new(), // Doesn't matter what key we use. We are just reading, not writing false, @@ -1469,7 +1469,7 @@ fn stackerdb_filter_bad_transactions() { let next_reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); // Must submit to the NEXT reward cycle slots as they are the ones looked at by the CURRENT miners let signer_index = signer_test.get_signer_index(next_reward_cycle); - let mut stackerdb = StackerDB::new( + let mut stackerdb = StackerDBManager::new( &signer_test.running_nodes.conf.node.rpc_bind, signer_private_key, false, From f8ef795b81f111d441a2411284d9c15e377e69fb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 15 May 2024 12:00:54 -0700 Subject: [PATCH 0010/1400] Move SignerSlotID to stackerdb.rs mod Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/mod.rs | 10 ---------- stacks-signer/src/client/stackerdb.rs | 12 +++++++++++- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 71eae4e6eb..3075015715 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -35,16 +35,6 @@ const BACKOFF_INITIAL_INTERVAL: u64 = 128; /// Backoff timer max interval in milliseconds const BACKOFF_MAX_INTERVAL: u64 = 16384; -/// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID -#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)] -pub struct SignerSlotID(pub u32); - -impl std::fmt::Display for SignerSlotID { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - #[derive(thiserror::Error, Debug)] /// Client error type pub enum ClientError { diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 34fc274738..ec1147ac90 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -23,9 +23,19 @@ use slog::{slog_debug, slog_warn}; use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::{debug, warn}; -use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID}; +use crate::client::{retry_with_exponential_backoff, ClientError}; use crate::config::SignerConfig; +/// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID +#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)] +pub struct SignerSlotID(pub u32); + +impl std::fmt::Display for SignerSlotID { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + /// The StackerDB client for communicating with the .signers contract #[derive(Debug)] pub struct StackerDB { From 07fd475bf29d01a398fa4924b9b16954d6b136fd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 15 May 2024 12:07:17 -0700 Subject: [PATCH 0011/1400] Cleanup stackerdb to not expose internals Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/mod.rs | 2 +- stacks-signer/src/client/stackerdb.rs | 17 +++++++++++------ stacks-signer/src/v1/stackerdb_manager.rs | 19 ++++++++----------- 3 files changed, 20 insertions(+), 18 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 3075015715..3ce771309e 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -/// The stackerdb module for sending messages between signers and miners +/// The stacker db module for communicating with the stackerdb contract pub(crate) mod stackerdb; /// The stacks node client module for communicating with the stacks node pub(crate) mod stacks_client; diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index ec1147ac90..9dd3ea5be7 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -41,15 +41,15 @@ impl std::fmt::Display for SignerSlotID { pub struct StackerDB { /// The stacker-db sessions for each signer set and message type. /// Maps message ID to the DB session. - pub signers_message_stackerdb_sessions: HashMap, + signers_message_stackerdb_sessions: HashMap, /// The private key used in all stacks node communications - pub stacks_private_key: StacksPrivateKey, + stacks_private_key: StacksPrivateKey, /// A map of a message ID to last chunk version for each session - pub slot_versions: HashMap>, + slot_versions: HashMap>, /// The signer slot ID -- the index into the signer list for this signer daemon's signing key. - pub signer_slot_id: SignerSlotID, + signer_slot_id: SignerSlotID, /// The reward cycle of the connecting signer - pub reward_cycle: u64, + reward_cycle: u64, } impl From<&SignerConfig> for StackerDB { @@ -213,9 +213,14 @@ impl StackerDB { } /// Retrieve the signer slot ID - pub fn get_signer_slot_id(&mut self) -> SignerSlotID { + pub fn get_signer_slot_id(&self) -> SignerSlotID { self.signer_slot_id } + + /// Get the session corresponding to the given message ID if it exists + pub fn get_session_mut(&mut self, msg_id: &M) -> Option<&mut StackerDBSession> { + self.signers_message_stackerdb_sessions.get_mut(msg_id) + } } #[cfg(test)] diff --git a/stacks-signer/src/v1/stackerdb_manager.rs b/stacks-signer/src/v1/stackerdb_manager.rs index 733d0bd936..cf5e484022 100644 --- a/stacks-signer/src/v1/stackerdb_manager.rs +++ b/stacks-signer/src/v1/stackerdb_manager.rs @@ -115,8 +115,7 @@ impl StackerDBManager { for packet_slot in packet_slots { let session = self .stackerdb - .signers_message_stackerdb_sessions - .get_mut(packet_slot) + .get_session_mut(packet_slot) .ok_or(ClientError::NotConnected)?; let messages = StackerDB::get_messages(session, &slot_ids)?; for message in messages { @@ -150,14 +149,13 @@ impl StackerDBManager { /// Get this signer's latest transactions from stackerdb pub fn get_current_transactions(&mut self) -> Result, ClientError> { - let Some(transactions_session) = self - .stackerdb - .signers_message_stackerdb_sessions - .get_mut(&MessageSlotID::Transactions) + let signer_slot_id = self.get_signer_slot_id(); + let Some(transactions_session) = + self.stackerdb.get_session_mut(&MessageSlotID::Transactions) else { return Err(ClientError::NotConnected); }; - Self::get_transactions(transactions_session, &[self.stackerdb.signer_slot_id]) + Self::get_transactions(transactions_session, &[signer_slot_id]) } /// Get the latest signer transactions from signer ids for the next reward cycle @@ -177,8 +175,7 @@ impl StackerDBManager { debug!("Getting the persisted encrypted state for signer {signer_id}"); let Some(state_session) = self .stackerdb - .signers_message_stackerdb_sessions - .get_mut(&MessageSlotID::EncryptedSignerState) + .get_session_mut(&MessageSlotID::EncryptedSignerState) else { return Err(ClientError::NotConnected); }; @@ -221,8 +218,8 @@ impl StackerDBManager { } /// Retrieve the signer slot ID - pub fn get_signer_slot_id(&mut self) -> SignerSlotID { - self.stackerdb.signer_slot_id + pub fn get_signer_slot_id(&self) -> SignerSlotID { + self.stackerdb.get_signer_slot_id() } } From 55addaf28f253388f5168c965a86fda5cccb4e35 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 15 May 2024 15:20:44 -0700 Subject: [PATCH 0012/1400] Make SpawnedSigner generic so we can have either a v1 or v2 signer test easily spun up with duplicate code minimized Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 14 +- stacks-signer/src/lib.rs | 73 ++- stacks-signer/src/main.rs | 4 +- stacks-signer/src/v0/mod.rs | 7 + stacks-signer/src/v1/mod.rs | 74 +-- testnet/stacks-node/src/tests/signer/mod.rs | 519 ++++++++++++++++++ testnet/stacks-node/src/tests/signer/v0.rs | 14 + .../src/tests/{signer.rs => signer/v1.rs} | 514 ++--------------- 8 files changed, 655 insertions(+), 564 deletions(-) create mode 100644 testnet/stacks-node/src/tests/signer/mod.rs create mode 100644 testnet/stacks-node/src/tests/signer/v0.rs rename testnet/stacks-node/src/tests/{signer.rs => signer/v1.rs} (69%) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index ac763b02ab..e117d61bdf 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -82,15 +82,15 @@ jobs: - tests::nakamoto_integrations::vote_for_aggregate_key_burn_op - tests::nakamoto_integrations::follower_bootup - tests::nakamoto_integrations::forked_tenure_is_ignored - - tests::signer::stackerdb_dkg - - tests::signer::stackerdb_sign_request_rejected - - tests::signer::stackerdb_block_proposal - - tests::signer::stackerdb_filter_bad_transactions + - tests::signer::v1::dkg + - tests::signer::v1::sign_request_rejected + - tests::signer::v1::block_proposal + - tests::signer::v1::filter_bad_transactions + - tests::signer::v1::delayed_dkg # TODO: enable these once v1 signer is fixed - # - tests::signer::stackerdb_mine_2_nakamoto_reward_cycles - # - tests::signer::stackerdb_sign_after_signer_reboot + # - tests::signer::v1::mine_2_nakamoto_reward_cycles + # - tests::signer::v1::sign_after_signer_reboot - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - - tests::signer::stackerdb_delayed_dkg # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 5ce8793296..af0e8781a0 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -37,16 +37,19 @@ pub mod v0; /// The v1 implementation of the singer. This includes WSTS support pub mod v1; use std::fmt::{Debug, Display}; -use std::sync::mpsc::Sender; +use std::sync::mpsc::{channel, Receiver, Sender}; -use libsigner::{SignerEvent, SignerEventTrait}; +use config::GlobalConfig; +use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait}; +use slog::slog_info; +use stacks_common::info; use wsts::state_machine::OperationResult; use crate::client::StacksClient; use crate::config::SignerConfig; -use crate::runloop::RunLoopCommand; +use crate::runloop::{RunLoop, RunLoopCommand}; -/// A trait which provides a common `Signer` interface for `v1` and `v2` +/// A trait which provides a common `Signer` interface for `v0` and `v1` pub trait Signer: Debug + Display { /// Create a new `Signer` instance fn new(config: SignerConfig) -> Self; @@ -70,3 +73,65 @@ pub trait Signer: Debug + Display { command: Option, ); } + +/// A wrapper around the running signer type for the signer +pub type RunningSigner = + libsigner::RunningSigner, Vec, T>; + +/// The wrapper for the runloop signer type +type RunLoopSigner = libsigner::Signer< + RunLoopCommand, + Vec, + RunLoop, + SignerEventReceiver, + T, +>; + +/// The spawned signer +pub struct SpawnedSigner + Send, T: SignerEventTrait> { + /// The underlying running signer thread handle + running_signer: RunningSigner, + /// The command sender for interacting with the running signer + pub cmd_send: Sender, + /// The result receiver for interacting with the running signer + pub res_recv: Receiver>, + /// Phantom data for the signer type + _phantom: std::marker::PhantomData, +} + +impl + Send, T: SignerEventTrait> SpawnedSigner { + /// Stop the signer thread and return the final state + pub fn stop(self) -> Option> { + self.running_signer.stop() + } + + /// Wait for the signer to terminate, and get the final state. WARNING: This will hang forever if the event receiver stop signal was never sent/no error occurred. + pub fn join(self) -> Option> { + self.running_signer.join() + } +} + +impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner { + /// Create a new spawned signer + pub fn new(config: GlobalConfig) -> Self { + let endpoint = config.endpoint; + info!("Starting signer with config: {:?}", config); + let (cmd_send, cmd_recv) = channel(); + let (res_send, res_recv) = channel(); + let ev = SignerEventReceiver::new(config.network.is_mainnet()); + #[cfg(feature = "monitoring_prom")] + { + crate::monitoring::start_serving_monitoring_metrics(config.clone()).ok(); + } + let runloop = RunLoop::new(config); + let mut signer: RunLoopSigner = + libsigner::Signer::new(runloop, ev, cmd_recv, res_send); + let running_signer = signer.spawn(endpoint).unwrap(); + SpawnedSigner { + running_signer, + cmd_send, + res_recv, + _phantom: std::marker::PhantomData, + } + } +} diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 75514fd2eb..b5a481a324 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -42,7 +42,7 @@ use stacks_signer::cli::{ RunSignerArgs, StackerDBArgs, }; use stacks_signer::config::GlobalConfig; -use stacks_signer::v1; +use stacks_signer::v1::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -103,7 +103,7 @@ fn handle_put_chunk(args: PutChunkArgs) { fn handle_run(args: RunSignerArgs) { debug!("Running signer..."); let config = GlobalConfig::try_from(&args.config).unwrap(); - let spawned_signer = v1::SpawnedSigner::from(config); + let spawned_signer = SpawnedSigner::new(config); println!("Signer spawned successfully. Waiting for messages to process..."); // Wait for the spawned signer to stop (will only occur if an error occurs) let _ = spawned_signer.join(); diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs index 95dfb76e4b..520fb36ca1 100644 --- a/stacks-signer/src/v0/mod.rs +++ b/stacks-signer/src/v0/mod.rs @@ -16,3 +16,10 @@ /// The signer module for processing events pub mod signer; + +use libsigner::v0::messages::SignerMessage; + +use crate::v0::signer::Signer; + +/// A v0 spawned signer +pub type SpawnedSigner = crate::SpawnedSigner; diff --git a/stacks-signer/src/v1/mod.rs b/stacks-signer/src/v1/mod.rs index eee9efeb14..ed1d980016 100644 --- a/stacks-signer/src/v1/mod.rs +++ b/stacks-signer/src/v1/mod.rs @@ -14,6 +14,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use libsigner::v1::messages::SignerMessage; + +use crate::v1::signer::Signer; + /// The coordinator selector for the signer pub mod coordinator; /// The signer module for processing events @@ -21,71 +25,5 @@ pub mod signer; /// The stackerdb module for sending messages between signers and miners pub mod stackerdb_manager; -use std::sync::mpsc::{channel, Receiver, Sender}; - -use libsigner::v1::messages::SignerMessage; -use libsigner::SignerEventReceiver; -use slog::slog_info; -use stacks_common::info; -use wsts::state_machine::OperationResult; - -use crate::config::GlobalConfig; -use crate::runloop::{RunLoop, RunLoopCommand}; -use crate::v1::signer::Signer; - -/// The signer type for the v1 signer -pub type RunningSigner = libsigner::RunningSigner< - SignerEventReceiver, - Vec, - SignerMessage, ->; - -/// The spawned signer type for the v1 signer -pub struct SpawnedSigner { - /// The underlying running signer thread handle - running_signer: RunningSigner, - /// The command sender for interacting with the running signer - pub cmd_send: Sender, - /// The result receiver for interacting with the running signer - pub res_recv: Receiver>, -} - -impl From for SpawnedSigner { - fn from(config: GlobalConfig) -> Self { - let endpoint = config.endpoint; - info!("Starting signer with config: {}", config); - let (cmd_send, cmd_recv) = channel(); - let (res_send, res_recv) = channel(); - let ev = SignerEventReceiver::new(config.network.is_mainnet()); - #[cfg(feature = "monitoring_prom")] - { - crate::monitoring::start_serving_monitoring_metrics(config.clone()).ok(); - } - let runloop = RunLoop::new(config); - let mut signer: libsigner::Signer< - RunLoopCommand, - Vec, - RunLoop, - SignerEventReceiver, - SignerMessage, - > = libsigner::Signer::new(runloop, ev, cmd_recv, res_send); - let running_signer = signer.spawn(endpoint).unwrap(); - SpawnedSigner { - running_signer, - cmd_send, - res_recv, - } - } -} - -impl SpawnedSigner { - /// Stop the signer thread and return the final state - pub fn stop(self) -> Option> { - self.running_signer.stop() - } - - /// Wait for the signer to terminate, and get the final state. WARNING: This will hang forever if the event receiver stop signal was never sent/no error occurred. - pub fn join(self) -> Option> { - self.running_signer.join() - } -} +/// A v1 spawned signer +pub type SpawnedSigner = crate::SpawnedSigner; diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs new file mode 100644 index 0000000000..61bc54f1bc --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -0,0 +1,519 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +mod v0; +mod v1; + +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; +use std::thread; +use std::time::{Duration, Instant}; + +use clarity::boot_util::boot_code_id; +use libsigner::{SignerEntries, SignerEventTrait}; +use stacks::chainstate::coordinator::comm::CoordinatorChannels; +use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; +use stacks::chainstate::stacks::boot::SIGNERS_NAME; +use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; +use stacks::core::StacksEpoch; +use stacks::net::api::postblock_proposal::BlockValidateResponse; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::consts::SIGNER_SLOTS_PER_USER; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum}; +use stacks_signer::client::{SignerSlotID, StacksClient}; +use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; +use stacks_signer::{Signer, SpawnedSigner}; +use wsts::curve::point::Point; +use wsts::state_machine::PublicKeys; + +use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; +use crate::event_dispatcher::MinedNakamotoBlockEvent; +use crate::neon::Counters; +use crate::run_loop::boot_nakamoto; +use crate::tests::bitcoin_regtest::BitcoinCoreController; +use crate::tests::nakamoto_integrations::{ + naka_neon_integration_conf, next_block_and_mine_commit, POX_4_DEFAULT_STACKER_BALANCE, +}; +use crate::tests::neon_integrations::{ + next_block_and_wait, run_until_burnchain_height, test_observer, wait_for_runloop, +}; +use crate::tests::to_addr; +use crate::{BitcoinRegtestController, BurnchainController}; + +// Helper struct for holding the btc and stx neon nodes +#[allow(dead_code)] +struct RunningNodes { + pub btc_regtest_controller: BitcoinRegtestController, + pub btcd_controller: BitcoinCoreController, + pub run_loop_thread: thread::JoinHandle<()>, + pub run_loop_stopper: Arc, + pub vrfs_submitted: Arc, + pub commits_submitted: Arc, + pub blocks_processed: Arc, + pub coord_channel: Arc>, + pub conf: NeonConfig, +} + +/// A test harness for running a v0 or v1 signer integration test +pub struct SignerTest { + // The stx and bitcoin nodes and their run loops + running_nodes: RunningNodes, + // The spawned signers and their threads + pub spawned_signers: Vec, + // the private keys of the signers + pub signer_stacks_private_keys: Vec, + // link to the stacks node + pub stacks_client: StacksClient, + // Unique number used to isolate files created during the test + pub run_stamp: u16, +} + +impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest> { + fn new(num_signers: usize) -> Self { + // Generate Signer Data + let signer_stacks_private_keys = (0..num_signers) + .map(|_| StacksPrivateKey::new()) + .collect::>(); + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + // So the combination is... one, two, three, four, five? That's the stupidest combination I've ever heard in my life! + // That's the kind of thing an idiot would have on his luggage! + let password = "12345"; + naka_conf.connection_options.block_proposal_token = Some(password.to_string()); + + let run_stamp = rand::random(); + + // Setup the signer and coordinator configurations + let signer_configs = build_signer_config_tomls( + &signer_stacks_private_keys, + &naka_conf.node.rpc_bind, + Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. + &Network::Testnet, + password, + run_stamp, + 3000, + Some(100_000), + None, + Some(9000), + ); + + let spawned_signers: Vec<_> = (0..num_signers) + .into_iter() + .map(|i| { + info!("spawning signer"); + let signer_config = + SignerConfig::load_from_str(&signer_configs[i as usize]).unwrap(); + SpawnedSigner::new(signer_config) + }) + .collect(); + + // Setup the nodes and deploy the contract to it + let node = setup_stx_btc_node(naka_conf, &signer_stacks_private_keys, &signer_configs); + let config = SignerConfig::load_from_str(&signer_configs[0]).unwrap(); + let stacks_client = StacksClient::from(&config); + + Self { + running_nodes: node, + spawned_signers, + signer_stacks_private_keys, + stacks_client, + run_stamp, + } + } + + fn nmb_blocks_to_reward_set_calculation(&mut self) -> u64 { + let prepare_phase_len = self + .running_nodes + .conf + .get_burnchain() + .pox_constants + .prepare_length as u64; + let current_block_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height() + .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1 + let curr_reward_cycle = self.get_current_reward_cycle(); + let next_reward_cycle = curr_reward_cycle.saturating_add(1); + let next_reward_cycle_height = self + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(next_reward_cycle); + let next_reward_cycle_reward_set_calculation = next_reward_cycle_height + .saturating_sub(prepare_phase_len) + .saturating_add(1); // +1 as the reward calculation occurs in the SECOND block of the prepare phase/ + + next_reward_cycle_reward_set_calculation.saturating_sub(current_block_height) + } + + fn nmb_blocks_to_reward_cycle_boundary(&mut self, reward_cycle: u64) -> u64 { + let current_block_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height() + .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1 + let reward_cycle_height = self + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(reward_cycle); + reward_cycle_height.saturating_sub(current_block_height) + } + + fn mine_and_verify_confirmed_naka_block( + &mut self, + agg_key: &Point, + timeout: Duration, + ) -> MinedNakamotoBlockEvent { + let new_block = self.mine_nakamoto_block(timeout); + let signer_sighash = new_block.signer_signature_hash.clone(); + let signature = self.wait_for_confirmed_block(&signer_sighash, timeout); + assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes())); + new_block + } + + fn mine_nakamoto_block(&mut self, timeout: Duration) -> MinedNakamotoBlockEvent { + let commits_submitted = self.running_nodes.commits_submitted.clone(); + let mined_block_time = Instant::now(); + next_block_and_mine_commit( + &mut self.running_nodes.btc_regtest_controller, + timeout.as_secs(), + &self.running_nodes.coord_channel, + &commits_submitted, + ) + .unwrap(); + + let t_start = Instant::now(); + while test_observer::get_mined_nakamoto_blocks().is_empty() { + assert!( + t_start.elapsed() < timeout, + "Timed out while waiting for mined nakamoto block event" + ); + thread::sleep(Duration::from_secs(1)); + } + let mined_block_elapsed_time = mined_block_time.elapsed(); + info!( + "Nakamoto block mine time elapsed: {:?}", + mined_block_elapsed_time + ); + test_observer::get_mined_nakamoto_blocks().pop().unwrap() + } + + fn wait_for_confirmed_block( + &mut self, + block_signer_sighash: &Sha512Trunc256Sum, + timeout: Duration, + ) -> ThresholdSignature { + let t_start = Instant::now(); + while t_start.elapsed() <= timeout { + let blocks = test_observer::get_blocks(); + if let Some(signature) = blocks.iter().find_map(|block_json| { + let block_obj = block_json.as_object().unwrap(); + let sighash = block_obj + // use the try operator because non-nakamoto blocks + // do not supply this field + .get("signer_signature_hash")? + .as_str() + .unwrap(); + if sighash != &format!("0x{block_signer_sighash}") { + return None; + } + let signer_signature_hex = + block_obj.get("signer_signature").unwrap().as_str().unwrap(); + let signer_signature_bytes = hex_bytes(&signer_signature_hex[2..]).unwrap(); + let signer_signature = ThresholdSignature::consensus_deserialize( + &mut signer_signature_bytes.as_slice(), + ) + .unwrap(); + Some(signer_signature) + }) { + return signature; + } + thread::sleep(Duration::from_millis(500)); + } + panic!("Timed out while waiting for confirmation of block with signer sighash = {block_signer_sighash}") + } + + fn wait_for_validate_ok_response(&mut self, timeout: Duration) -> Sha512Trunc256Sum { + // Wait for the block to show up in the test observer (Don't have to wait long as if we have received a mined block already, + // we know that the signers have already received their block proposal events via their event observers) + let t_start = Instant::now(); + while test_observer::get_proposal_responses().is_empty() { + assert!( + t_start.elapsed() < timeout, + "Timed out while waiting for block proposal event" + ); + thread::sleep(Duration::from_secs(1)); + } + let validate_response = test_observer::get_proposal_responses() + .pop() + .expect("No block proposal"); + match validate_response { + BlockValidateResponse::Ok(block_validated) => block_validated.signer_signature_hash, + _ => panic!("Unexpected response"), + } + } + + fn run_until_epoch_3_boundary(&mut self) { + let epochs = self.running_nodes.conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = + &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + + let epoch_30_boundary = epoch_3.start_height - 1; + // advance to epoch 3.0 and trigger a sign round (cannot vote on blocks in pre epoch 3.0) + run_until_burnchain_height( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + epoch_30_boundary, + &self.running_nodes.conf, + ); + info!("Advanced to Nakamoto epoch 3.0 boundary {epoch_30_boundary}! Ready to Sign Blocks!"); + } + + fn get_current_reward_cycle(&self) -> u64 { + let block_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height(); + self.running_nodes + .btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap() + } + + fn get_signer_index(&self, reward_cycle: u64) -> SignerSlotID { + let valid_signer_set = + u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); + let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false); + + self.stacks_client + .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, valid_signer_set) + .expect("FATAL: failed to get signer slots from stackerdb") + .iter() + .position(|(address, _)| address == self.stacks_client.get_signer_address()) + .map(|pos| { + SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) + }) + .expect("FATAL: signer not registered") + } + + fn get_signer_public_keys(&self, reward_cycle: u64) -> PublicKeys { + let entries = self + .stacks_client + .get_reward_set_signers(reward_cycle) + .unwrap() + .unwrap(); + let entries = SignerEntries::parse(false, &entries).unwrap(); + entries.public_keys + } + + #[allow(dead_code)] + fn get_signer_metrics(&self) -> String { + #[cfg(feature = "monitoring_prom")] + { + let client = reqwest::blocking::Client::new(); + let res = client + .get("http://localhost:9000/metrics") + .send() + .unwrap() + .text() + .unwrap(); + + return res; + } + #[cfg(not(feature = "monitoring_prom"))] + return String::new(); + } + + /// Kills the signer runloop at index `signer_idx` + /// and returns the private key of the killed signer. + /// + /// # Panics + /// Panics if `signer_idx` is out of bounds + pub fn stop_signer(&mut self, signer_idx: usize) -> StacksPrivateKey { + let spawned_signer = self.spawned_signers.remove(signer_idx); + let signer_key = self.signer_stacks_private_keys.remove(signer_idx); + + spawned_signer.stop(); + signer_key + } + + /// (Re)starts a new signer runloop with the given private key + pub fn restart_signer(&mut self, signer_idx: usize, signer_private_key: StacksPrivateKey) { + let signer_config = build_signer_config_tomls( + &[signer_private_key], + &self.running_nodes.conf.node.rpc_bind, + Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. + &Network::Testnet, + "12345", // It worked sir, we have the combination! -Great, what's the combination? + self.run_stamp, + 3000 + signer_idx, + Some(100_000), + None, + Some(9000 + signer_idx), + ) + .pop() + .unwrap(); + + info!("Restarting signer"); + let config = SignerConfig::load_from_str(&signer_config).unwrap(); + let signer = SpawnedSigner::new(config); + self.spawned_signers.insert(signer_idx, signer); + } + + pub fn shutdown(self) { + self.running_nodes + .coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + + self.running_nodes + .run_loop_stopper + .store(false, Ordering::SeqCst); + // Stop the signers before the node to prevent hanging + for signer in self.spawned_signers { + assert!(signer.stop().is_none()); + } + self.running_nodes.run_loop_thread.join().unwrap(); + } +} + +fn setup_stx_btc_node( + mut naka_conf: NeonConfig, + signer_stacks_private_keys: &[StacksPrivateKey], + signer_config_tomls: &[String], +) -> RunningNodes { + // Spawn the endpoints for observing signers + for toml in signer_config_tomls { + let signer_config = SignerConfig::load_from_str(toml).unwrap(); + + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("{}", signer_config.endpoint), + events_keys: vec![ + EventKeyType::StackerDBChunks, + EventKeyType::BlockProposal, + EventKeyType::BurnchainBlocks, + ], + }); + } + + // Spawn a test observer for verification purposes + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![ + EventKeyType::StackerDBChunks, + EventKeyType::BlockProposal, + EventKeyType::MinedBlocks, + ], + }); + + // The signers need some initial balances in order to pay for epoch 2.5 transaction votes + let mut initial_balances = Vec::new(); + + // TODO: separate keys for stacking and signing (because they'll be different in prod) + for key in signer_stacks_private_keys { + initial_balances.push(InitialBalance { + address: to_addr(key).into(), + amount: POX_4_DEFAULT_STACKER_BALANCE, + }); + } + naka_conf.initial_balances.append(&mut initial_balances); + naka_conf.node.stacker = true; + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + + for signer_set in 0..2 { + for message_id in 0..SIGNER_SLOTS_PER_USER { + let contract_id = + NakamotoSigners::make_signers_db_contract_id(signer_set, message_id, false); + if !naka_conf.node.stacker_dbs.contains(&contract_id) { + debug!("A miner/stacker must subscribe to the {contract_id} stacker db contract. Forcibly subscribing..."); + naka_conf.node.stacker_dbs.push(contract_id); + } + } + } + info!("Make new BitcoinCoreController"); + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + info!("Make new BitcoinRegtestController"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + + info!("Bootstraping..."); + btc_regtest_controller.bootstrap_chain(201); + + info!("Chain bootstrapped..."); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + + // Give the run loop some time to start up! + info!("Wait for runloop..."); + wait_for_runloop(&blocks_processed); + + // First block wakes up the run loop. + info!("Mine first block..."); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // Second block will hold our VRF registration. + info!("Mine second block..."); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // Third block will be the first mined Stacks block. + info!("Mine third block..."); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + RunningNodes { + btcd_controller, + btc_regtest_controller, + run_loop_thread, + run_loop_stopper, + vrfs_submitted: vrfs_submitted.0, + commits_submitted: commits_submitted.0, + blocks_processed: blocks_processed.0, + coord_channel, + conf: naka_conf, + } +} diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs new file mode 100644 index 0000000000..36abf9bab1 --- /dev/null +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -0,0 +1,14 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . diff --git a/testnet/stacks-node/src/tests/signer.rs b/testnet/stacks-node/src/tests/signer/v1.rs similarity index 69% rename from testnet/stacks-node/src/tests/signer.rs rename to testnet/stacks-node/src/tests/signer/v1.rs index 7fef735de0..5966a188fc 100644 --- a/testnet/stacks-node/src/tests/signer.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -1,23 +1,32 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . use std::collections::HashSet; use std::net::ToSocketAddrs; -use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; -use std::sync::{Arc, Mutex}; +use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use std::{env, thread}; use clarity::boot_util::boot_code_id; use clarity::vm::Value; use libsigner::v1::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMessage}; -use libsigner::{BlockProposal, SignerEntries}; +use libsigner::BlockProposal; use rand::thread_rng; use rand_core::RngCore; use stacks::burnchains::Txid; -use stacks::chainstate::coordinator::comm::CoordinatorChannels; -use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; -use stacks::chainstate::stacks::boot::{ - SIGNERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, -}; +use stacks::chainstate::stacks::boot::{SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME}; use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::miner::TransactionEvent; use stacks::chainstate::stacks::{ @@ -25,20 +34,16 @@ use stacks::chainstate::stacks::{ TransactionAuth, TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, }; -use stacks::core::StacksEpoch; -use stacks::net::api::postblock_proposal::BlockValidateResponse; use stacks::util_lib::strings::StacksString; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; -use stacks_common::consts::{CHAIN_ID_TESTNET, SIGNER_SLOTS_PER_USER}; +use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksBlockId, StacksPublicKey, TrieHash, }; -use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use stacks_signer::client::{SignerSlotID, StacksClient}; -use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{RunLoopCommand, SignerCommand}; use stacks_signer::v1::coordinator::CoordinatorSelector; use stacks_signer::v1::stackerdb_manager::StackerDBManager; @@ -48,104 +53,17 @@ use tracing_subscriber::{fmt, EnvFilter}; use wsts::curve::point::Point; use wsts::curve::scalar::Scalar; use wsts::net::Message; -use wsts::state_machine::{OperationResult, PublicKeys}; +use wsts::state_machine::OperationResult; -use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; -use crate::event_dispatcher::MinedNakamotoBlockEvent; -use crate::neon::Counters; -use crate::run_loop::boot_nakamoto; -use crate::tests::bitcoin_regtest::BitcoinCoreController; +use super::SignerTest; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, - naka_neon_integration_conf, next_block_and, next_block_and_mine_commit, - POX_4_DEFAULT_STACKER_BALANCE, -}; -use crate::tests::neon_integrations::{ - next_block_and_wait, run_until_burnchain_height, test_observer, wait_for_runloop, + boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, next_block_and, }; +use crate::tests::neon_integrations::{next_block_and_wait, test_observer}; use crate::tests::to_addr; -use crate::{BitcoinRegtestController, BurnchainController}; - -// Helper struct for holding the btc and stx neon nodes -#[allow(dead_code)] -struct RunningNodes { - pub btc_regtest_controller: BitcoinRegtestController, - pub btcd_controller: BitcoinCoreController, - pub run_loop_thread: thread::JoinHandle<()>, - pub run_loop_stopper: Arc, - pub vrfs_submitted: Arc, - pub commits_submitted: Arc, - pub blocks_processed: Arc, - pub coord_channel: Arc>, - pub conf: NeonConfig, -} - -struct SignerTest { - // The stx and bitcoin nodes and their run loops - pub running_nodes: RunningNodes, - // The spawned signers and their threads - pub spawned_signers: Vec, - // the private keys of the signers - pub signer_stacks_private_keys: Vec, - // link to the stacks node - pub stacks_client: StacksClient, - // Unique number used to isolate files created during the test - pub run_stamp: u16, -} - -impl SignerTest { - fn new(num_signers: usize) -> Self { - // Generate Signer Data - let signer_stacks_private_keys = (0..num_signers) - .map(|_| StacksPrivateKey::new()) - .collect::>(); - - let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - // So the combination is... one, two, three, four, five? That's the stupidest combination I've ever heard in my life! - // That's the kind of thing an idiot would have on his luggage! - let password = "12345"; - naka_conf.connection_options.block_proposal_token = Some(password.to_string()); - - let run_stamp = rand::random(); - - // Setup the signer and coordinator configurations - let signer_configs = build_signer_config_tomls( - &signer_stacks_private_keys, - &naka_conf.node.rpc_bind, - Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. - &Network::Testnet, - password, - run_stamp, - 3000, - Some(100_000), - None, - Some(9000), - ); - - let spawned_signers: Vec<_> = (0..num_signers) - .into_iter() - .map(|i| { - info!("spawning signer"); - let signer_config = - SignerConfig::load_from_str(&signer_configs[i as usize]).unwrap(); - SpawnedSigner::from(signer_config) - }) - .collect(); - - // Setup the nodes and deploy the contract to it - let node = setup_stx_btc_node(naka_conf, &signer_stacks_private_keys, &signer_configs); - let config = SignerConfig::load_from_str(&signer_configs[0]).unwrap(); - let stacks_client = StacksClient::from(&config); - - Self { - running_nodes: node, - spawned_signers, - signer_stacks_private_keys, - stacks_client, - run_stamp, - } - } +use crate::BurnchainController; +impl SignerTest { fn boot_to_epoch_3(&mut self, timeout: Duration) -> Point { boot_to_epoch_3_reward_set( &self.running_nodes.conf, @@ -189,46 +107,6 @@ impl SignerTest { set_dkg } - fn nmb_blocks_to_reward_set_calculation(&mut self) -> u64 { - let prepare_phase_len = self - .running_nodes - .conf - .get_burnchain() - .pox_constants - .prepare_length as u64; - let current_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1 - let curr_reward_cycle = self.get_current_reward_cycle(); - let next_reward_cycle = curr_reward_cycle.saturating_add(1); - let next_reward_cycle_height = self - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(next_reward_cycle); - let next_reward_cycle_reward_set_calculation = next_reward_cycle_height - .saturating_sub(prepare_phase_len) - .saturating_add(1); // +1 as the reward calculation occurs in the SECOND block of the prepare phase/ - - next_reward_cycle_reward_set_calculation.saturating_sub(current_block_height) - } - - fn nmb_blocks_to_reward_cycle_boundary(&mut self, reward_cycle: u64) -> u64 { - let current_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1 - let reward_cycle_height = self - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(reward_cycle); - reward_cycle_height.saturating_sub(current_block_height) - } - // Only call after already past the epoch 3.0 boundary fn run_to_dkg(&mut self, timeout: Duration) -> Option { let curr_reward_cycle = self.get_current_reward_cycle(); @@ -319,100 +197,6 @@ impl SignerTest { points } - fn mine_and_verify_confirmed_naka_block( - &mut self, - agg_key: &Point, - timeout: Duration, - ) -> MinedNakamotoBlockEvent { - let new_block = self.mine_nakamoto_block(timeout); - let signer_sighash = new_block.signer_signature_hash.clone(); - let signature = self.wait_for_confirmed_block(&signer_sighash, timeout); - assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes())); - new_block - } - - fn mine_nakamoto_block(&mut self, timeout: Duration) -> MinedNakamotoBlockEvent { - let commits_submitted = self.running_nodes.commits_submitted.clone(); - let mined_block_time = Instant::now(); - next_block_and_mine_commit( - &mut self.running_nodes.btc_regtest_controller, - timeout.as_secs(), - &self.running_nodes.coord_channel, - &commits_submitted, - ) - .unwrap(); - - let t_start = Instant::now(); - while test_observer::get_mined_nakamoto_blocks().is_empty() { - assert!( - t_start.elapsed() < timeout, - "Timed out while waiting for mined nakamoto block event" - ); - thread::sleep(Duration::from_secs(1)); - } - let mined_block_elapsed_time = mined_block_time.elapsed(); - info!( - "Nakamoto block mine time elapsed: {:?}", - mined_block_elapsed_time - ); - test_observer::get_mined_nakamoto_blocks().pop().unwrap() - } - - fn wait_for_confirmed_block( - &mut self, - block_signer_sighash: &Sha512Trunc256Sum, - timeout: Duration, - ) -> ThresholdSignature { - let t_start = Instant::now(); - while t_start.elapsed() <= timeout { - let blocks = test_observer::get_blocks(); - if let Some(signature) = blocks.iter().find_map(|block_json| { - let block_obj = block_json.as_object().unwrap(); - let sighash = block_obj - // use the try operator because non-nakamoto blocks - // do not supply this field - .get("signer_signature_hash")? - .as_str() - .unwrap(); - if sighash != &format!("0x{block_signer_sighash}") { - return None; - } - let signer_signature_hex = - block_obj.get("signer_signature").unwrap().as_str().unwrap(); - let signer_signature_bytes = hex_bytes(&signer_signature_hex[2..]).unwrap(); - let signer_signature = ThresholdSignature::consensus_deserialize( - &mut signer_signature_bytes.as_slice(), - ) - .unwrap(); - Some(signer_signature) - }) { - return signature; - } - thread::sleep(Duration::from_millis(500)); - } - panic!("Timed out while waiting for confirmation of block with signer sighash = {block_signer_sighash}") - } - - fn wait_for_validate_ok_response(&mut self, timeout: Duration) -> Sha512Trunc256Sum { - // Wait for the block to show up in the test observer (Don't have to wait long as if we have received a mined block already, - // we know that the signers have already received their block proposal events via their event observers) - let t_start = Instant::now(); - while test_observer::get_proposal_responses().is_empty() { - assert!( - t_start.elapsed() < timeout, - "Timed out while waiting for block proposal event" - ); - thread::sleep(Duration::from_secs(1)); - } - let validate_response = test_observer::get_proposal_responses() - .pop() - .expect("No block proposal"); - match validate_response { - BlockValidateResponse::Ok(block_validated) => block_validated.signer_signature_hash, - _ => panic!("Unexpected response"), - } - } - fn wait_for_dkg(&mut self, timeout: Duration) -> Point { debug!("Waiting for DKG..."); let mut key = Point::default(); @@ -445,78 +229,6 @@ impl SignerTest { key } - fn run_until_epoch_3_boundary(&mut self) { - let epochs = self.running_nodes.conf.burnchain.epochs.clone().unwrap(); - let epoch_3 = - &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - - let epoch_30_boundary = epoch_3.start_height - 1; - // advance to epoch 3.0 and trigger a sign round (cannot vote on blocks in pre epoch 3.0) - run_until_burnchain_height( - &mut self.running_nodes.btc_regtest_controller, - &self.running_nodes.blocks_processed, - epoch_30_boundary, - &self.running_nodes.conf, - ); - info!("Advanced to Nakamoto epoch 3.0 boundary {epoch_30_boundary}! Ready to Sign Blocks!"); - } - - fn get_current_reward_cycle(&self) -> u64 { - let block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height(); - self.running_nodes - .btc_regtest_controller - .get_burnchain() - .block_height_to_reward_cycle(block_height) - .unwrap() - } - - fn get_signer_index(&self, reward_cycle: u64) -> SignerSlotID { - let valid_signer_set = - u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); - let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false); - - self.stacks_client - .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, valid_signer_set) - .expect("FATAL: failed to get signer slots from stackerdb") - .iter() - .position(|(address, _)| address == self.stacks_client.get_signer_address()) - .map(|pos| { - SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) - }) - .expect("FATAL: signer not registered") - } - - fn get_signer_public_keys(&self, reward_cycle: u64) -> PublicKeys { - let entries = self - .stacks_client - .get_reward_set_signers(reward_cycle) - .unwrap() - .unwrap(); - let entries = SignerEntries::parse(false, &entries).unwrap(); - entries.public_keys - } - - #[allow(dead_code)] - fn get_signer_metrics(&self) -> String { - #[cfg(feature = "monitoring_prom")] - { - let client = reqwest::blocking::Client::new(); - let res = client - .get("http://localhost:9000/metrics") - .send() - .unwrap() - .text() - .unwrap(); - - return res; - } - #[cfg(not(feature = "monitoring_prom"))] - return String::new(); - } - fn generate_invalid_transactions(&self) -> Vec { let host = self .running_nodes @@ -719,170 +431,6 @@ impl SignerTest { }) .collect() } - - /// Kills the signer runloop at index `signer_idx` - /// and returns the private key of the killed signer. - /// - /// # Panics - /// Panics if `signer_idx` is out of bounds - fn stop_signer(&mut self, signer_idx: usize) -> StacksPrivateKey { - let spawned_signer = self.spawned_signers.remove(signer_idx); - let signer_key = self.signer_stacks_private_keys.remove(signer_idx); - - spawned_signer.stop(); - signer_key - } - - /// (Re)starts a new signer runloop with the given private key - fn restart_signer(&mut self, signer_idx: usize, signer_private_key: StacksPrivateKey) { - let signer_config = build_signer_config_tomls( - &[signer_private_key], - &self.running_nodes.conf.node.rpc_bind, - Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. - &Network::Testnet, - "12345", // It worked sir, we have the combination! -Great, what's the combination? - self.run_stamp, - 3000 + signer_idx, - Some(100_000), - None, - Some(9000 + signer_idx), - ) - .pop() - .unwrap(); - - info!("Restarting signer"); - let config = SignerConfig::load_from_str(&signer_config).unwrap(); - let signer = SpawnedSigner::from(config); - self.spawned_signers.insert(signer_idx, signer); - } - - fn shutdown(self) { - self.running_nodes - .coord_channel - .lock() - .expect("Mutex poisoned") - .stop_chains_coordinator(); - - self.running_nodes - .run_loop_stopper - .store(false, Ordering::SeqCst); - // Stop the signers before the node to prevent hanging - for signer in self.spawned_signers { - assert!(signer.stop().is_none()); - } - self.running_nodes.run_loop_thread.join().unwrap(); - } -} - -fn setup_stx_btc_node( - mut naka_conf: NeonConfig, - signer_stacks_private_keys: &[StacksPrivateKey], - signer_config_tomls: &[String], -) -> RunningNodes { - // Spawn the endpoints for observing signers - for toml in signer_config_tomls { - let signer_config = SignerConfig::load_from_str(toml).unwrap(); - - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("{}", signer_config.endpoint), - events_keys: vec![ - EventKeyType::StackerDBChunks, - EventKeyType::BlockProposal, - EventKeyType::BurnchainBlocks, - ], - }); - } - - // Spawn a test observer for verification purposes - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![ - EventKeyType::StackerDBChunks, - EventKeyType::BlockProposal, - EventKeyType::MinedBlocks, - ], - }); - - // The signers need some initial balances in order to pay for epoch 2.5 transaction votes - let mut initial_balances = Vec::new(); - - // TODO: separate keys for stacking and signing (because they'll be different in prod) - for key in signer_stacks_private_keys { - initial_balances.push(InitialBalance { - address: to_addr(key).into(), - amount: POX_4_DEFAULT_STACKER_BALANCE, - }); - } - naka_conf.initial_balances.append(&mut initial_balances); - naka_conf.node.stacker = true; - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); - - for signer_set in 0..2 { - for message_id in 0..SIGNER_SLOTS_PER_USER { - let contract_id = - NakamotoSigners::make_signers_db_contract_id(signer_set, message_id, false); - if !naka_conf.node.stacker_dbs.contains(&contract_id) { - debug!("A miner/stacker must subscribe to the {contract_id} stacker db contract. Forcibly subscribing..."); - naka_conf.node.stacker_dbs.push(contract_id); - } - } - } - info!("Make new BitcoinCoreController"); - let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); - btcd_controller - .start_bitcoind() - .map_err(|_e| ()) - .expect("Failed starting bitcoind"); - - info!("Make new BitcoinRegtestController"); - let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - - info!("Bootstraping..."); - btc_regtest_controller.bootstrap_chain(201); - - info!("Chain bootstrapped..."); - - let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); - let run_loop_stopper = run_loop.get_termination_switch(); - let Counters { - blocks_processed, - naka_submitted_vrfs: vrfs_submitted, - naka_submitted_commits: commits_submitted, - .. - } = run_loop.counters(); - - let coord_channel = run_loop.coordinator_channels(); - let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); - - // Give the run loop some time to start up! - info!("Wait for runloop..."); - wait_for_runloop(&blocks_processed); - - // First block wakes up the run loop. - info!("Mine first block..."); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Second block will hold our VRF registration. - info!("Mine second block..."); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Third block will be the first mined Stacks block. - info!("Mine third block..."); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - RunningNodes { - btcd_controller, - btc_regtest_controller, - run_loop_thread, - run_loop_stopper, - vrfs_submitted: vrfs_submitted.0, - commits_submitted: commits_submitted.0, - blocks_processed: blocks_processed.0, - coord_channel, - conf: naka_conf, - } } fn operation_panic_message(result: &OperationResult) -> String { @@ -908,7 +456,7 @@ fn operation_panic_message(result: &OperationResult) -> String { #[test] #[ignore] /// Test the signer can respond to external commands to perform DKG -fn stackerdb_dkg() { +fn dkg() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -959,7 +507,7 @@ fn stackerdb_dkg() { #[test] #[ignore] /// Test the signer rejects requests to sign that do not come from a miner -fn stackerdb_sign_request_rejected() { +fn sign_request_rejected() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -1109,7 +657,7 @@ fn stackerdb_sign_request_rejected() { #[ignore] /// Test that a signer can be offline when a DKG round has commenced and /// can rejoin the DKG round after it has restarted -fn stackerdb_delayed_dkg() { +fn delayed_dkg() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -1302,7 +850,7 @@ pub fn find_block_response(chunk_events: Vec) -> Option Date: Mon, 13 May 2024 20:46:53 -0400 Subject: [PATCH 0013/1400] feat: access current burn chain state in epoch 3 In epoch 2, a Stacks block can only access the burn block associated with its parent, since the block is buily before its burn block is known. In epoch 3, all Nakamoto blocks can access the current burn block. --- clarity/src/vm/database/clarity_db.rs | 131 +++++--- clarity/src/vm/test_util/mod.rs | 8 + stackslib/src/clarity_vm/database/mod.rs | 19 ++ .../src/tests/nakamoto_integrations.rs | 280 +++++++++++++++++- 4 files changed, 376 insertions(+), 62 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 7a1aa3e3bc..5394842a1c 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -102,6 +102,11 @@ pub trait HeadersDB { } pub trait BurnStateDB { + /// Get the burn chain height at the current tip. + fn get_tip_burn_block_height(&self) -> Option; + /// Get the sortition id for the current tip. + fn get_tip_sortition_id(&self) -> Option; + fn get_v1_unlock_height(&self) -> u32; fn get_v2_unlock_height(&self) -> u32; fn get_v3_unlock_height(&self) -> u32; @@ -187,6 +192,14 @@ impl HeadersDB for &dyn HeadersDB { } impl BurnStateDB for &dyn BurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + (*self).get_tip_burn_block_height() + } + + fn get_tip_sortition_id(&self) -> Option { + (*self).get_tip_sortition_id() + } + fn get_v1_unlock_height(&self) -> u32 { (*self).get_v1_unlock_height() } @@ -339,6 +352,14 @@ impl HeadersDB for NullHeadersDB { #[allow(clippy::panic)] impl BurnStateDB for NullBurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + None + } + + fn get_tip_sortition_id(&self) -> Option { + None + } + fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { None } @@ -964,26 +985,33 @@ impl<'a> ClarityDatabase<'a> { /// `get_current_block_height`). pub fn get_current_burnchain_block_height(&mut self) -> Result { let cur_stacks_height = self.store.get_current_block_height(); - let last_mined_bhh = if cur_stacks_height == 0 { - return Ok(self.burn_state_db.get_burn_start_height()); - } else { - self.get_index_block_header_hash(cur_stacks_height.checked_sub(1).ok_or_else( - || { - InterpreterError::Expect( - "BUG: cannot eval burn-block-height in boot code".into(), - ) - }, - )?)? - }; - self.get_burnchain_block_height(&last_mined_bhh) - .ok_or_else(|| { - InterpreterError::Expect(format!( - "Block header hash '{}' must return for provided stacks block height {}", - &last_mined_bhh, cur_stacks_height - )) - .into() - }) + // In epoch 2, we can only access the burn block associated with the last block + if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { + let last_mined_bhh = if cur_stacks_height == 0 { + return Ok(self.burn_state_db.get_burn_start_height()); + } else { + // Safety note: normal subtraction is safe here, because we've already checked + // that cur_stacks_height > 0. + self.get_index_block_header_hash(cur_stacks_height - 1)? + }; + + self.get_burnchain_block_height(&last_mined_bhh) + .ok_or_else(|| { + InterpreterError::Expect(format!( + "Block header hash '{}' must return for provided stacks block height {}", + &last_mined_bhh, cur_stacks_height + )) + .into() + }) + } else { + // In epoch 3+, we can access the current burnchain block + self.burn_state_db + .get_tip_burn_block_height() + .ok_or_else(|| { + InterpreterError::Expect("Failed to get burnchain tip height.".into()).into() + }) + } } pub fn get_block_header_hash(&mut self, block_height: u32) -> Result { @@ -1010,46 +1038,53 @@ impl<'a> ClarityDatabase<'a> { .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } + /// In Epoch 2.x: /// 1. Get the current Stacks tip height (which is in the process of being evaluated) /// 2. Get the parent block's StacksBlockId, which is SHA512-256(consensus_hash, block_hash). /// This is the highest Stacks block in this fork whose consensus hash is known. /// 3. Resolve the parent StacksBlockId to its consensus hash /// 4. Resolve the consensus hash to the associated SortitionId + /// In Epoch 3+: + /// 1. Get the SortitionId of the current Stacks tip fn get_sortition_id_for_stacks_tip(&mut self) -> Result> { - let current_stacks_height = self.get_current_block_height(); + if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { + let current_stacks_height = self.get_current_block_height(); - if current_stacks_height < 1 { - // we are in the Stacks genesis block - return Ok(None); - } + if current_stacks_height < 1 { + // we are in the Stacks genesis block + return Ok(None); + } - // this is the StacksBlockId of the last block evaluated in this fork - let parent_id_bhh = self.get_index_block_header_hash(current_stacks_height - 1)?; + // this is the StacksBlockId of the last block evaluated in this fork + let parent_id_bhh = self.get_index_block_header_hash(current_stacks_height - 1)?; - // infallible, since we always store the consensus hash with the StacksBlockId in the - // headers DB - let consensus_hash = self - .headers_db - .get_consensus_hash_for_block(&parent_id_bhh) - .ok_or_else(|| { - InterpreterError::Expect(format!( - "FATAL: no consensus hash found for StacksBlockId {}", - &parent_id_bhh - )) - })?; + // infallible, since we always store the consensus hash with the StacksBlockId in the + // headers DB + let consensus_hash = self + .headers_db + .get_consensus_hash_for_block(&parent_id_bhh) + .ok_or_else(|| { + InterpreterError::Expect(format!( + "FATAL: no consensus hash found for StacksBlockId {}", + &parent_id_bhh + )) + })?; - // infallible, since every sortition has a consensus hash - let sortition_id = self - .burn_state_db - .get_sortition_id_from_consensus_hash(&consensus_hash) - .ok_or_else(|| { - InterpreterError::Expect(format!( - "FATAL: no SortitionID found for consensus hash {}", - &consensus_hash - )) - })?; + // infallible, since every sortition has a consensus hash + let sortition_id = self + .burn_state_db + .get_sortition_id_from_consensus_hash(&consensus_hash) + .ok_or_else(|| { + InterpreterError::Expect(format!( + "FATAL: no SortitionID found for consensus hash {}", + &consensus_hash + )) + })?; - Ok(Some(sortition_id)) + Ok(Some(sortition_id)) + } else { + Ok(self.burn_state_db.get_tip_sortition_id()) + } } /// Fetch the burnchain block header hash for a given burnchain height. diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index b7e58919aa..e566f5013d 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -199,6 +199,14 @@ impl HeadersDB for UnitTestHeaderDB { } impl BurnStateDB for UnitTestBurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + None + } + + fn get_tip_sortition_id(&self) -> Option { + None + } + fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { None } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index c9c21957f3..410c59ba81 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -19,6 +19,7 @@ use crate::chainstate::burn::db::sortdb::{ get_ancestor_sort_id, get_ancestor_sort_id_tx, SortitionDB, SortitionDBConn, SortitionHandle, SortitionHandleConn, SortitionHandleTx, }; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::boot::PoxStartCycleInfo; use crate::chainstate::stacks::db::accounts::MinerReward; use crate::chainstate::stacks::db::{ @@ -448,6 +449,14 @@ impl SortitionDBRef for SortitionDBConn<'_> { } impl BurnStateDB for SortitionHandleTx<'_> { + fn get_tip_burn_block_height(&self) -> Option { + self.get_burn_block_height(&self.context.chain_tip) + } + + fn get_tip_sortition_id(&self) -> Option { + Some(self.context.chain_tip.clone()) + } + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { match SortitionDB::get_block_snapshot(self.tx(), sortition_id) { Ok(Some(x)) => Some(x.block_height as u32), @@ -570,6 +579,16 @@ impl BurnStateDB for SortitionHandleTx<'_> { } impl BurnStateDB for SortitionDBConn<'_> { + fn get_tip_burn_block_height(&self) -> Option { + let tip = SortitionDB::get_canonical_burn_chain_tip(self.conn()).ok()?; + tip.block_height.try_into().ok() + } + + fn get_tip_sortition_id(&self) -> Option { + let tip = SortitionDB::get_canonical_burn_chain_tip(self.conn()).ok()?; + Some(tip.sortition_id) + } + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { match SortitionDB::get_block_snapshot(self.conn(), sortition_id) { Ok(Some(x)) => Some(x.block_height as u32), diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 55eb6753bf..d417d00476 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -24,7 +24,7 @@ use std::{env, thread}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; -use clarity::vm::ClarityVersion; +use clarity::vm::{ClarityVersion, Value}; use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v1::messages::SignerMessage; @@ -44,7 +44,9 @@ use stacks::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; use stacks::chainstate::stacks::db::StacksChainState; -use stacks::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; +use stacks::chainstate::stacks::miner::{ + BlockBuilder, BlockLimitFunction, TransactionEvent, TransactionResult, TransactionSuccessEvent, +}; use stacks::chainstate::stacks::{StacksTransaction, ThresholdSignature, TransactionPayload}; use stacks::core::{ StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, @@ -3501,13 +3503,6 @@ fn check_block_heights() { let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - }); - let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller .start_bitcoind() @@ -3604,11 +3599,9 @@ fn check_block_heights() { }) .unwrap(); - let info = get_chain_info_result(&naka_conf).unwrap(); - println!("Chain info: {:?}", info); - let mut last_burn_block_height = info.burn_block_height as u128; - let mut last_stacks_block_height = info.stacks_tip_height as u128; - let mut last_tenure_height = last_stacks_block_height as u128; + let mut last_burn_block_height = 0; + let mut last_stacks_block_height = 0; + let mut last_tenure_height = 0; let heights0_value = call_read_only( &naka_conf, @@ -3895,3 +3888,262 @@ fn check_block_heights() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// This test is testing the burn state of the Stacks blocks. In Stacks 2.x, +/// the burn block state accessed in a Clarity contract is the burn block of +/// the block's parent, since the block is built before its burn block is +/// mined. In Nakamoto, there is no longer this race condition, so Clarity +/// contracts access the state of the current burn block. +/// We should verify: +/// - `burn-block-height` in epoch 3.x is the burn block of the Stacks block +/// - `get-burn-block-info` is able to access info of the current burn block +/// in epoch 3.x +fn clarity_burn_state() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let tx_fee = 1000; + let deploy_fee = 3000; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + deploy_fee + tx_fee * tenure_count + tx_fee * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::MinedBlocks], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + Some(&signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + let mut sender_nonce = 0; + + // This version uses the Clarity 1 / 2 keywords + let contract_name = "test-contract"; + let contract = r#" + (define-read-only (foo (expected-height uint)) + (begin + (asserts! (is-eq expected-height burn-block-height) (err burn-block-height)) + (asserts! (is-some (get-burn-block-info? header-hash burn-block-height)) (err u0)) + (ok true) + ) + ) + (define-public (bar (expected-height uint)) + (foo expected-height) + ) + "#; + + let contract_tx = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract_name, + contract, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx); + + let mut burn_block_height = 0; + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + info!("Mining tenure {}", tenure_ix); + + // Don't submit this tx on the first iteration, because the contract is not published yet. + if tenure_ix > 0 { + // Call the read-only function and see if we see the correct burn block height + let expected_height = Value::UInt(burn_block_height); + let arg = expected_height.serialize_to_hex().unwrap(); + let result = call_read_only(&naka_conf, &sender_addr, contract_name, "foo", vec![&arg]); + result.expect_result_ok().expect("Read-only call failed"); + + // Submit a tx for the next block (the next block will be a new tenure, so the burn block height will increment) + let call_tx = tests::make_contract_call( + &sender_sk, + sender_nonce, + tx_fee, + &sender_addr, + contract_name, + "bar", + &[Value::UInt(burn_block_height + 1)], + ); + sender_nonce += 1; + submit_tx(&http_origin, &call_tx); + } + + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let info = get_chain_info(&naka_conf); + burn_block_height = info.burn_block_height as u128; + info!("Expecting burn block height to be {}", burn_block_height); + + // Assert that the contract call was successful + test_observer::get_mined_nakamoto_blocks() + .last() + .unwrap() + .tx_events + .iter() + .for_each(|event| match event { + TransactionEvent::Success(TransactionSuccessEvent { result, fee, .. }) => { + // Ignore coinbase and tenure transactions + if *fee == 0 { + return; + } + + info!("Contract call result: {}", result); + result.clone().expect_result_ok().expect("Ok result"); + } + _ => { + info!("Unsuccessful event: {:?}", event); + panic!("Expected a successful transaction"); + } + }); + + // mine the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { + info!("Mining interim block {interim_block_ix}"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // Call the read-only function and see if we see the correct burn block height + let expected_height = Value::UInt(burn_block_height); + let arg = expected_height.serialize_to_hex().unwrap(); + let result = call_read_only(&naka_conf, &sender_addr, contract_name, "foo", vec![&arg]); + info!("Read-only result: {:?}", result); + result.expect_result_ok().expect("Read-only call failed"); + + // Submit a tx to trigger the next block + let call_tx = tests::make_contract_call( + &sender_sk, + sender_nonce, + tx_fee, + &sender_addr, + contract_name, + "bar", + &[expected_height], + ); + sender_nonce += 1; + submit_tx(&http_origin, &call_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + // Assert that the contract call was successful + test_observer::get_mined_nakamoto_blocks() + .last() + .unwrap() + .tx_events + .iter() + .for_each(|event| match event { + TransactionEvent::Success(TransactionSuccessEvent { result, .. }) => { + info!("Contract call result: {}", result); + result.clone().expect_result_ok().expect("Ok result"); + } + _ => { + info!("Unsuccessful event: {:?}", event); + panic!("Expected a successful transaction"); + } + }); + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} From cbff7f0904099ca7ed2d0a0eed53b63aa46dad68 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 13 May 2024 21:02:44 -0400 Subject: [PATCH 0014/1400] test: update `check_block_heights` for new behavior --- .../src/tests/nakamoto_integrations.rs | 35 ++++++------------- 1 file changed, 10 insertions(+), 25 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index d417d00476..7804781c11 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3599,9 +3599,11 @@ fn check_block_heights() { }) .unwrap(); - let mut last_burn_block_height = 0; - let mut last_stacks_block_height = 0; - let mut last_tenure_height = 0; + let info = get_chain_info_result(&naka_conf).unwrap(); + println!("Chain info: {:?}", info); + let mut last_burn_block_height = info.burn_block_height as u128; + let mut last_stacks_block_height = info.stacks_tip_height as u128; + let mut last_tenure_height = last_stacks_block_height as u128; let heights0_value = call_read_only( &naka_conf, @@ -3702,15 +3704,7 @@ fn check_block_heights() { .expect_u128() .unwrap(); assert_eq!(bbh1, bbh3, "Burn block heights should match"); - if tenure_ix == 0 { - // Add two for the 2 blocks with no tenure during Nakamoto bootup - last_burn_block_height = bbh1 + 2; - } else { - assert_eq!( - bbh1, last_burn_block_height, - "Burn block height should not have changed yet" - ); - } + last_burn_block_height = bbh1; let bh1 = heights1 .get("block-height") @@ -3805,19 +3799,10 @@ fn check_block_heights() { .expect_u128() .unwrap(); assert_eq!(bbh1, bbh3, "Burn block heights should match"); - if interim_block_ix == 0 { - assert_eq!( - bbh1, - last_burn_block_height + 1, - "Burn block heights should have incremented" - ); - last_burn_block_height = bbh1; - } else { - assert_eq!( - bbh1, last_burn_block_height, - "Burn block heights should not have incremented" - ); - } + assert_eq!( + bbh1, last_burn_block_height, + "Burn block heights should not have incremented" + ); let bh1 = heights1 .get("block-height") From ecf176309ccd3743217fb39199fa0054e924b603 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 13 May 2024 21:06:03 -0400 Subject: [PATCH 0015/1400] docs: update readme --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d03444994..0068086840 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,10 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Downgraded log messages about transactions from warning to info (#4697) +### Fixed + +- Allow Nakamoto blocks to access the burn block associated with the current tenure (#4333) + ## [2.5.0.0.3] This release fixes a regression in `2.5.0.0.0` from `2.4.0.1.0` caused by git merge From b5e90694c98548bda5f7163848b501250686940d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 13 May 2024 21:06:56 -0400 Subject: [PATCH 0016/1400] tests: add new integration test to bitcoin-tests.yml --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 2680a3194f..81c5afb752 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -92,6 +92,7 @@ jobs: - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::signer::stackerdb_delayed_dkg - tests::nakamoto_integrations::check_block_heights + - tests::nakamoto_integrations::clarity_burn_state # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower From f3bed349f5c81339f24201f14bc8ff43d4f24f7d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 14 May 2024 07:27:42 -0400 Subject: [PATCH 0017/1400] fix: impl missing methods in docs --- clarity/src/vm/docs/mod.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 940b2f2f6a..e009b5c117 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2766,6 +2766,14 @@ mod test { const DOC_POX_STATE_DB: DocBurnStateDB = DocBurnStateDB {}; impl BurnStateDB for DocBurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + Some(0x9abc) + } + + fn get_tip_sortition_id(&self) -> Option { + Some(SortitionId([0u8; 32])) + } + fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { Some(5678) } From 18bac17865fb47b68dcafdbaeecb174feddad566 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 14 May 2024 07:49:00 -0400 Subject: [PATCH 0018/1400] test: fill in methods in test structs --- .../src/chainstate/stacks/boot/contract_tests.rs | 8 ++++++++ .../src/chainstate/stacks/db/transactions.rs | 16 ++++++++++++++++ stackslib/src/clarity_vm/clarity.rs | 8 ++++++++ 3 files changed, 32 insertions(+) diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 5d8588836e..b00eec7244 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -347,6 +347,14 @@ fn cost_2_contract_is_arithmetic_only() { } impl BurnStateDB for TestSimBurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + panic!("Not implemented in TestSim"); + } + + fn get_tip_sortition_id(&self) -> Option { + panic!("Not implemented in TestSim"); + } + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { panic!("Not implemented in TestSim"); } diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index e0cd93d9dc..0ba335afca 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -8620,6 +8620,14 @@ pub mod test { struct MockedBurnDB {} impl BurnStateDB for MockedBurnDB { + fn get_tip_burn_block_height(&self) -> Option { + Some(0) + } + + fn get_tip_sortition_id(&self) -> Option { + Some(SortitionId([0u8; 32])) + } + fn get_v1_unlock_height(&self) -> u32 { 2 } @@ -8842,6 +8850,14 @@ pub mod test { struct MockedBurnDB {} impl BurnStateDB for MockedBurnDB { + fn get_tip_burn_block_height(&self) -> Option { + Some(0) + } + + fn get_tip_sortition_id(&self) -> Option { + Some(SortitionId([0u8; 32])) + } + fn get_v1_unlock_height(&self) -> u32 { 2 } diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index ac764e0e91..be8a1c12c0 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -2691,6 +2691,14 @@ mod tests { pub struct BlockLimitBurnStateDB {} impl BurnStateDB for BlockLimitBurnStateDB { + fn get_tip_burn_block_height(&self) -> Option { + None + } + + fn get_tip_sortition_id(&self) -> Option { + None + } + fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { None } From 228179b60e30f5b8049f5dfe41c21cd3f75e1f8d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 14 May 2024 11:42:48 -0400 Subject: [PATCH 0019/1400] test: update `test_block_heights` --- .../chainstate/stacks/boot/contract_tests.rs | 32 ++++--- stackslib/src/clarity_vm/tests/contracts.rs | 83 +++++++++++++++---- 2 files changed, 86 insertions(+), 29 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index b00eec7244..a308e5b339 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -142,6 +142,10 @@ impl ClarityTestSim { } } + pub fn burn_block_height(&self) -> u64 { + self.tenure_height + 100 + } + pub fn execute_next_block_as_conn_with_tenure(&mut self, new_tenure: bool, f: F) -> R where F: FnOnce(&mut ClarityBlockConnection) -> R, @@ -152,8 +156,13 @@ impl ClarityTestSim { &StacksBlockId(test_sim_height_to_hash(self.block_height + 1, self.fork)), ); + self.block_height += 1; + if new_tenure { + self.tenure_height += 1; + } + let headers_db = TestSimHeadersDB { - height: self.block_height + 1, + height: self.block_height, }; let burn_db = TestSimBurnStateDB { epoch_bounds: self.epoch_bounds.clone(), @@ -166,7 +175,7 @@ impl ClarityTestSim { let mut db = store.as_clarity_db(&headers_db, &burn_db); if cur_epoch >= StacksEpochId::Epoch30 { db.begin(); - db.set_tenure_height(self.tenure_height as u32 + if new_tenure { 1 } else { 0 }) + db.set_tenure_height(self.tenure_height as u32) .expect("FAIL: unable to set tenure height in Clarity database"); db.commit() .expect("FAIL: unable to commit tenure height in Clarity database"); @@ -180,10 +189,6 @@ impl ClarityTestSim { r }; - self.block_height += 1; - if new_tenure { - self.tenure_height += 1; - } r } @@ -203,9 +208,14 @@ impl ClarityTestSim { &StacksBlockId(test_sim_height_to_hash(self.block_height + 1, self.fork)), ); + self.block_height += 1; + if new_tenure { + self.tenure_height += 1; + } + let r = { let headers_db = TestSimHeadersDB { - height: self.block_height + 1, + height: self.block_height, }; let burn_db = TestSimBurnStateDB { epoch_bounds: self.epoch_bounds.clone(), @@ -219,7 +229,7 @@ impl ClarityTestSim { let mut db = store.as_clarity_db(&headers_db, &burn_db); if cur_epoch >= StacksEpochId::Epoch30 { db.begin(); - db.set_tenure_height(self.tenure_height as u32 + if new_tenure { 1 } else { 0 }) + db.set_tenure_height(self.tenure_height as u32) .expect("FAIL: unable to set tenure height in Clarity database"); db.commit() .expect("FAIL: unable to commit tenure height in Clarity database"); @@ -229,10 +239,6 @@ impl ClarityTestSim { }; store.test_commit(); - self.block_height += 1; - if new_tenure { - self.tenure_height += 1; - } r } @@ -348,7 +354,7 @@ fn cost_2_contract_is_arithmetic_only() { impl BurnStateDB for TestSimBurnStateDB { fn get_tip_burn_block_height(&self) -> Option { - panic!("Not implemented in TestSim"); + Some(self.height as u32) } fn get_tip_sortition_id(&self) -> Option { diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index 0cdc1ad8bf..017662d93c 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -898,6 +898,8 @@ fn test_block_heights() { } let block_height = sim.block_height as u128; + let burn_block_height = sim.burn_block_height() as u128; + let tenure_height = sim.tenure_height as u128; sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); assert_eq!(epoch, StacksEpochId::Epoch30); @@ -1010,17 +1012,17 @@ fn test_block_heights() { let mut tx = conn.start_transaction_processing(); assert_eq!( Value::Tuple(TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), - ("block-height".into(), Value::UInt(block_height + 1)) + ("burn-block-height".into(), Value::UInt(burn_block_height + 1)), + ("block-height".into(), Value::UInt(tenure_height + 1)) ]).unwrap()), tx.eval_read_only(&contract_identifier1, "(test-func)") .unwrap() ); assert_eq!( Value::Tuple(TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), + ("burn-block-height".into(), Value::UInt(burn_block_height + 1)), ("stacks-block-height".into(), Value::UInt(block_height + 1)), - ("tenure-height".into(), Value::UInt(block_height + 1)) + ("tenure-height".into(), Value::UInt(tenure_height + 1)) ]).unwrap()), tx.eval_read_only(&contract_identifier2, "(test-func)") .unwrap() @@ -1029,13 +1031,18 @@ fn test_block_heights() { // Call the contracts in the next block and validate the results let block_height = sim.block_height as u128; + let burn_block_height = sim.burn_block_height() as u128; + let tenure_height = sim.tenure_height as u128; sim.execute_next_block_as_conn(|conn| { let mut tx = conn.start_transaction_processing(); assert_eq!( Value::Tuple( TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), - ("block-height".into(), Value::UInt(block_height + 1)), + ( + "burn-block-height".into(), + Value::UInt(burn_block_height + 1) + ), + ("block-height".into(), Value::UInt(tenure_height + 1)), ]) .unwrap() ), @@ -1045,9 +1052,12 @@ fn test_block_heights() { assert_eq!( Value::Tuple( TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), + ( + "burn-block-height".into(), + Value::UInt(burn_block_height + 1) + ), ("stacks-block-height".into(), Value::UInt(block_height + 1)), - ("tenure-height".into(), Value::UInt(block_height + 1)) + ("tenure-height".into(), Value::UInt(tenure_height + 1)) ]) .unwrap() ), @@ -1058,13 +1068,15 @@ fn test_block_heights() { // Call the contracts in the next block with no new tenure and validate the results let block_height = sim.block_height as u128; + let burn_block_height = sim.burn_block_height() as u128; + let tenure_height = sim.tenure_height as u128; sim.execute_next_block_as_conn_with_tenure(false, |conn| { let mut tx = conn.start_transaction_processing(); assert_eq!( Value::Tuple( TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), - ("block-height".into(), Value::UInt(block_height)) + ("burn-block-height".into(), Value::UInt(burn_block_height)), + ("block-height".into(), Value::UInt(tenure_height)) ]) .unwrap() ), @@ -1074,9 +1086,9 @@ fn test_block_heights() { assert_eq!( Value::Tuple( TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), + ("burn-block-height".into(), Value::UInt(burn_block_height)), ("stacks-block-height".into(), Value::UInt(block_height + 1)), - ("tenure-height".into(), Value::UInt(block_height)) + ("tenure-height".into(), Value::UInt(tenure_height)) ]) .unwrap() ), @@ -1087,13 +1099,49 @@ fn test_block_heights() { // Call the contracts in the next block with no new tenure and validate the results let block_height = sim.block_height as u128; + let burn_block_height = sim.burn_block_height() as u128; + let tenure_height = sim.tenure_height as u128; + sim.execute_next_block_as_conn_with_tenure(false, |conn| { + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::Tuple( + TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(burn_block_height)), + ("block-height".into(), Value::UInt(tenure_height)) + ]) + .unwrap() + ), + tx.eval_read_only(&contract_identifier1, "(test-func)") + .unwrap() + ); + assert_eq!( + Value::Tuple( + TupleData::from_data(vec![ + ("burn-block-height".into(), Value::UInt(burn_block_height)), + ("stacks-block-height".into(), Value::UInt(block_height + 1)), + ("tenure-height".into(), Value::UInt(tenure_height)) + ]) + .unwrap() + ), + tx.eval_read_only(&contract_identifier2, "(test-func)") + .unwrap() + ); + }); + + // Call the contracts in the next block with a new tenure and validate the results + let block_height = sim.block_height as u128; + let burn_block_height = sim.burn_block_height() as u128; + let tenure_height = sim.tenure_height as u128; sim.execute_next_block_as_conn(|conn| { let mut tx = conn.start_transaction_processing(); assert_eq!( Value::Tuple( TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), - ("block-height".into(), Value::UInt(block_height)) + ( + "burn-block-height".into(), + Value::UInt(burn_block_height + 1) + ), + ("block-height".into(), Value::UInt(tenure_height + 1)) ]) .unwrap() ), @@ -1103,9 +1151,12 @@ fn test_block_heights() { assert_eq!( Value::Tuple( TupleData::from_data(vec![ - ("burn-block-height".into(), Value::UInt(block_height)), + ( + "burn-block-height".into(), + Value::UInt(burn_block_height + 1) + ), ("stacks-block-height".into(), Value::UInt(block_height + 1)), - ("tenure-height".into(), Value::UInt(block_height)) + ("tenure-height".into(), Value::UInt(tenure_height + 1)) ]) .unwrap() ), From a316dac4d1d46a5703287e554972e5ff10a6b8a4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 14 May 2024 13:45:23 -0400 Subject: [PATCH 0020/1400] fix: update `test_get_burn_block_info_eval` --- stackslib/src/clarity_vm/tests/contracts.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index 017662d93c..a14d47b906 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -169,7 +169,7 @@ fn test_get_burn_block_info_eval() { // burnchain is 100 blocks ahead of stacks chain in this sim assert_eq!( Value::Optional(OptionalData { data: None }), - tx.eval_read_only(&contract_identifier, "(test-func u103)") + tx.eval_read_only(&contract_identifier, "(test-func u203)") .unwrap() ); }); From 029d5ef454d7ef65e50c3e3b9bca72fcf8aca031 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 15 May 2024 15:54:51 -0400 Subject: [PATCH 0021/1400] refactor: simplify test setup --- clarity/src/vm/tests/mod.rs | 17 ++++++++++++----- clarity/src/vm/tests/variables.rs | 18 ------------------ 2 files changed, 12 insertions(+), 23 deletions(-) diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index c60377ba3d..9a21596ca9 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -162,7 +162,15 @@ pub fn tl_env_factory() -> TopLevelMemoryEnvironmentGenerator { pub struct MemoryEnvironmentGenerator(MemoryBackingStore); impl MemoryEnvironmentGenerator { fn get_env(&mut self, epoch: StacksEpochId) -> OwnedEnvironment { - let mut owned_env = OwnedEnvironment::new(self.0.as_clarity_db(), epoch); + let mut db = self.0.as_clarity_db(); + db.begin(); + db.set_clarity_epoch_version(epoch).unwrap(); + db.set_tenure_height(0).unwrap(); + if epoch >= StacksEpochId::Epoch30 { + db.set_tenure_height(1).unwrap(); + } + db.commit().unwrap(); + let mut owned_env = OwnedEnvironment::new(db, epoch); // start an initial transaction. owned_env.begin(); owned_env @@ -175,12 +183,11 @@ impl TopLevelMemoryEnvironmentGenerator { let mut db = self.0.as_clarity_db(); db.begin(); db.set_clarity_epoch_version(epoch).unwrap(); - db.commit().unwrap(); - let mut owned_env = OwnedEnvironment::new(db, epoch); if epoch >= StacksEpochId::Epoch30 { - owned_env.set_tenure_height(1); + db.set_tenure_height(1).unwrap(); } - owned_env + db.commit().unwrap(); + OwnedEnvironment::new(db, epoch) } } diff --git a/clarity/src/vm/tests/variables.rs b/clarity/src/vm/tests/variables.rs index 41b880afe9..5b392bb678 100644 --- a/clarity/src/vm/tests/variables.rs +++ b/clarity/src/vm/tests/variables.rs @@ -58,12 +58,6 @@ fn test_block_height( assert!(analysis.is_ok()); } - // If we're testing epoch 3, we need to simulate the tenure height being - // set at the transition. - if epoch >= StacksEpochId::Epoch30 { - owned_env.set_tenure_height(1); - } - // Initialize the contract // Note that we're ignoring the analysis failure here so that we can test // the runtime behavior. In Clarity 3, if this case somehow gets past the @@ -122,12 +116,6 @@ fn test_stacks_block_height( assert!(analysis.is_ok()); } - // If we're testing epoch 3, we need to simulate the tenure height being - // set at the transition. - if epoch >= StacksEpochId::Epoch30 { - owned_env.set_tenure_height(1); - } - // Initialize the contract // Note that we're ignoring the analysis failure here so that we can test // the runtime behavior. In Clarity 3, if this case somehow gets past the @@ -188,12 +176,6 @@ fn test_tenure_height( assert!(analysis.is_ok()); } - // If we're testing epoch 3, we need to simulate the tenure height being - // set at the transition. - if epoch >= StacksEpochId::Epoch30 { - owned_env.set_tenure_height(1); - } - // Initialize the contract // Note that we're ignoring the analysis failure here so that we can test // the runtime behavior. In Clarity 3, if this case somehow gets past the From 9439cdc525f5a31dede5b6a16ae35acf36a71ea2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 15 May 2024 16:13:03 -0400 Subject: [PATCH 0022/1400] fix: set default block height in test implementation --- clarity/src/vm/database/clarity_db.rs | 2 +- clarity/src/vm/tests/mod.rs | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 5394842a1c..4ba8fc097f 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -353,7 +353,7 @@ impl HeadersDB for NullHeadersDB { #[allow(clippy::panic)] impl BurnStateDB for NullBurnStateDB { fn get_tip_burn_block_height(&self) -> Option { - None + Some(0) } fn get_tip_sortition_id(&self) -> Option { diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 9a21596ca9..715c205475 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -165,11 +165,12 @@ impl MemoryEnvironmentGenerator { let mut db = self.0.as_clarity_db(); db.begin(); db.set_clarity_epoch_version(epoch).unwrap(); - db.set_tenure_height(0).unwrap(); + db.commit().unwrap(); if epoch >= StacksEpochId::Epoch30 { + db.begin(); db.set_tenure_height(1).unwrap(); + db.commit().unwrap(); } - db.commit().unwrap(); let mut owned_env = OwnedEnvironment::new(db, epoch); // start an initial transaction. owned_env.begin(); @@ -183,10 +184,12 @@ impl TopLevelMemoryEnvironmentGenerator { let mut db = self.0.as_clarity_db(); db.begin(); db.set_clarity_epoch_version(epoch).unwrap(); + db.commit().unwrap(); if epoch >= StacksEpochId::Epoch30 { + db.begin(); db.set_tenure_height(1).unwrap(); + db.commit().unwrap(); } - db.commit().unwrap(); OwnedEnvironment::new(db, epoch) } } From 9e270b7934eaa35522e88349e9d7608e39346fd7 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 15 May 2024 22:34:49 -0400 Subject: [PATCH 0023/1400] refactor: remove unnecessary `impl`s --- clarity/src/vm/database/clarity_db.rs | 120 ------------------ .../src/chainstate/stacks/db/transactions.rs | 42 +++--- stackslib/src/clarity_vm/clarity.rs | 21 ++- 3 files changed, 31 insertions(+), 152 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 4ba8fc097f..67f4209e00 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -155,126 +155,6 @@ pub trait BurnStateDB { ) -> Option<(Vec, u128)>; } -impl HeadersDB for &dyn HeadersDB { - fn get_stacks_block_header_hash_for_block( - &self, - id_bhh: &StacksBlockId, - ) -> Option { - (*self).get_stacks_block_header_hash_for_block(id_bhh) - } - fn get_burn_header_hash_for_block(&self, bhh: &StacksBlockId) -> Option { - (*self).get_burn_header_hash_for_block(bhh) - } - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option { - (*self).get_consensus_hash_for_block(id_bhh) - } - fn get_vrf_seed_for_block(&self, bhh: &StacksBlockId) -> Option { - (*self).get_vrf_seed_for_block(bhh) - } - fn get_burn_block_time_for_block(&self, bhh: &StacksBlockId) -> Option { - (*self).get_burn_block_time_for_block(bhh) - } - fn get_burn_block_height_for_block(&self, bhh: &StacksBlockId) -> Option { - (*self).get_burn_block_height_for_block(bhh) - } - fn get_miner_address(&self, bhh: &StacksBlockId) -> Option { - (*self).get_miner_address(bhh) - } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { - (*self).get_burnchain_tokens_spent_for_block(id_bhh) - } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { - (*self).get_burnchain_tokens_spent_for_winning_block(id_bhh) - } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { - (*self).get_tokens_earned_for_block(id_bhh) - } -} - -impl BurnStateDB for &dyn BurnStateDB { - fn get_tip_burn_block_height(&self) -> Option { - (*self).get_tip_burn_block_height() - } - - fn get_tip_sortition_id(&self) -> Option { - (*self).get_tip_sortition_id() - } - - fn get_v1_unlock_height(&self) -> u32 { - (*self).get_v1_unlock_height() - } - - fn get_v2_unlock_height(&self) -> u32 { - (*self).get_v2_unlock_height() - } - - fn get_v3_unlock_height(&self) -> u32 { - (*self).get_v3_unlock_height() - } - - fn get_pox_3_activation_height(&self) -> u32 { - (*self).get_pox_3_activation_height() - } - - fn get_pox_4_activation_height(&self) -> u32 { - (*self).get_pox_4_activation_height() - } - - fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { - (*self).get_burn_block_height(sortition_id) - } - - fn get_sortition_id_from_consensus_hash( - &self, - consensus_hash: &ConsensusHash, - ) -> Option { - (*self).get_sortition_id_from_consensus_hash(consensus_hash) - } - - fn get_burn_start_height(&self) -> u32 { - (*self).get_burn_start_height() - } - - fn get_burn_header_hash( - &self, - height: u32, - sortition_id: &SortitionId, - ) -> Option { - (*self).get_burn_header_hash(height, sortition_id) - } - - fn get_stacks_epoch(&self, height: u32) -> Option { - (*self).get_stacks_epoch(height) - } - - fn get_pox_prepare_length(&self) -> u32 { - (*self).get_pox_prepare_length() - } - - fn get_pox_reward_cycle_length(&self) -> u32 { - (*self).get_pox_reward_cycle_length() - } - - fn get_pox_rejection_fraction(&self) -> u64 { - (*self).get_pox_rejection_fraction() - } - fn get_stacks_epoch_by_epoch_id(&self, epoch_id: &StacksEpochId) -> Option { - (*self).get_stacks_epoch_by_epoch_id(epoch_id) - } - - fn get_ast_rules(&self, height: u32) -> ASTRules { - (*self).get_ast_rules(height) - } - - fn get_pox_payout_addrs( - &self, - height: u32, - sortition_id: &SortitionId, - ) -> Option<(Vec, u128)> { - (*self).get_pox_payout_addrs(height, sortition_id) - } -} - pub struct NullHeadersDB {} pub struct NullBurnStateDB { epoch: StacksEpochId, diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 0ba335afca..d80792a822 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -1716,7 +1716,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -1946,7 +1946,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2060,7 +2060,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2151,7 +2151,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2214,7 +2214,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2322,7 +2322,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2413,7 +2413,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2532,7 +2532,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2646,7 +2646,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { // process both let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2785,7 +2785,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { // process both let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -2895,7 +2895,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -3020,7 +3020,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -3130,7 +3130,7 @@ pub mod test { for (dbi, burn_db) in PRE_21_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -3343,7 +3343,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -3886,7 +3886,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { // make sure costs-3 is instantiated, so as-contract works in 2.1 let mut conn = chainstate.test_genesis_block_begin_2_1( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -4609,7 +4609,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { // make sure costs-3 is installed so as-contract will work in epoch 2.1 let mut conn = chainstate.test_genesis_block_begin_2_1( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -4984,7 +4984,7 @@ pub mod test { let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8116,7 +8116,7 @@ pub mod test { // which leads to an InvalidFee error for (dbi, burn_db) in PRE_21_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8273,7 +8273,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8394,7 +8394,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), @@ -8488,7 +8488,7 @@ pub mod test { for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { let mut conn = chainstate.block_begin( - burn_db, + *burn_db, &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, &ConsensusHash([(dbi + 1) as u8; 20]), diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index be8a1c12c0..812d39bb97 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -638,8 +638,7 @@ impl<'a, 'b> ClarityConnection for ClarityBlockConnection<'a, 'b> { where F: FnOnce(ClarityDatabase) -> (R, ClarityDatabase), { - let mut db = - ClarityDatabase::new(&mut self.datastore, &self.header_db, &self.burn_state_db); + let mut db = ClarityDatabase::new(&mut self.datastore, self.header_db, self.burn_state_db); db.begin(); let (result, mut db) = to_do(db); db.roll_back() @@ -672,7 +671,7 @@ impl ClarityConnection for ClarityReadOnlyConnection<'_> { { let mut db = self .datastore - .as_clarity_db(&self.header_db, &self.burn_state_db); + .as_clarity_db(self.header_db, self.burn_state_db); db.begin(); let (result, mut db) = to_do(db); db.roll_back() @@ -1528,8 +1527,8 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { pub fn start_transaction_processing<'c>(&'c mut self) -> ClarityTransactionConnection<'c, 'a> { let store = &mut self.datastore; let cost_track = &mut self.cost_track; - let header_db = &self.header_db; - let burn_state_db = &self.burn_state_db; + let header_db = self.header_db; + let burn_state_db = self.burn_state_db; let mainnet = self.mainnet; let chain_id = self.chain_id; let mut log = RollbackWrapperPersistedLog::new(); @@ -1608,8 +1607,8 @@ impl<'a, 'b> ClarityConnection for ClarityTransactionConnection<'a, 'b> { let rollback_wrapper = RollbackWrapper::from_persisted_log(self.store, log); let mut db = ClarityDatabase::new_with_rollback_wrapper( rollback_wrapper, - &self.header_db, - &self.burn_state_db, + self.header_db, + self.burn_state_db, ); db.begin(); let (r, mut db) = to_do(db); @@ -1673,8 +1672,8 @@ impl<'a, 'b> TransactionConnection for ClarityTransactionConnection<'a, 'b> { let rollback_wrapper = RollbackWrapper::from_persisted_log(self.store, log); let mut db = ClarityDatabase::new_with_rollback_wrapper( rollback_wrapper, - &self.header_db, - &self.burn_state_db, + self.header_db, + self.burn_state_db, ); // wrap the whole contract-call in a claritydb transaction, @@ -1741,8 +1740,8 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { let rollback_wrapper = RollbackWrapper::from_persisted_log(self.store, log); let mut db = ClarityDatabase::new_with_rollback_wrapper( rollback_wrapper, - &self.header_db, - &self.burn_state_db, + self.header_db, + self.burn_state_db, ); db.begin(); From 44dd3dc754dfb165f2126956f16aeea9abd12e8c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 15 May 2024 22:36:36 -0400 Subject: [PATCH 0024/1400] refactor: simplify expression Co-authored-by: Jeff Bencin --- clarity/src/vm/database/clarity_db.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 67f4209e00..4c419dcb2a 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -868,13 +868,12 @@ impl<'a> ClarityDatabase<'a> { // In epoch 2, we can only access the burn block associated with the last block if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { - let last_mined_bhh = if cur_stacks_height == 0 { + if cur_stacks_height == 0 { return Ok(self.burn_state_db.get_burn_start_height()); - } else { - // Safety note: normal subtraction is safe here, because we've already checked - // that cur_stacks_height > 0. - self.get_index_block_header_hash(cur_stacks_height - 1)? }; + // Safety note: normal subtraction is safe here, because we've already checked + // that cur_stacks_height > 0. + let last_mined_bhh = self.get_index_block_header_hash(cur_stacks_height - 1)?; self.get_burnchain_block_height(&last_mined_bhh) .ok_or_else(|| { From 32c548030360fe550490a868a2673c6e77bb7a2f Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 16 May 2024 09:27:08 -0400 Subject: [PATCH 0025/1400] fix: Rename `.cargo/config` -> `.cargo/config.toml` to fix warning in Rust 1.78 --- .cargo/{config => config.toml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .cargo/{config => config.toml} (100%) diff --git a/.cargo/config b/.cargo/config.toml similarity index 100% rename from .cargo/config rename to .cargo/config.toml From 0904dec57bed8e9769841a080e7738123054887e Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 8 May 2024 11:11:13 -0700 Subject: [PATCH 0026/1400] fix: add missing methods to `generate-stacking-signature` --- stacks-signer/src/cli.rs | 53 +++++++++++++++++++++++++++++++++++----- 1 file changed, 47 insertions(+), 6 deletions(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 1cc51bfe68..29e1e5f232 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -123,7 +123,7 @@ pub struct RunSignerArgs { pub config: PathBuf, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] /// Wrapper around `Pox4SignatureTopic` to implement `ValueEnum` pub struct StackingSignatureMethod(Pox4SignatureTopic); @@ -150,22 +150,27 @@ impl ValueEnum for StackingSignatureMethod { Self(Pox4SignatureTopic::StackStx), Self(Pox4SignatureTopic::StackExtend), Self(Pox4SignatureTopic::AggregationCommit), + Self(Pox4SignatureTopic::AggregationIncrease), + Self(Pox4SignatureTopic::StackIncrease), ] } fn from_str(input: &str, _ignore_case: bool) -> Result { let topic = match input { - "stack-stx" => Pox4SignatureTopic::StackStx, - "stack-extend" => Pox4SignatureTopic::StackExtend, "aggregation-commit" => Pox4SignatureTopic::AggregationCommit, - "agg-commit" => Pox4SignatureTopic::AggregationCommit, - _ => return Err(format!("Invalid topic: {}", input)), + "aggregation-increase" => Pox4SignatureTopic::AggregationIncrease, + method => match Pox4SignatureTopic::lookup_by_name(method) { + Some(topic) => topic, + None => { + return Err(format!("Invalid topic: {}", input)); + } + }, }; Ok(topic.into()) } } -#[derive(Parser, Debug, Clone)] +#[derive(Parser, Debug, Clone, PartialEq)] /// Arguments for the generate-stacking-signature command pub struct GenerateStackingSignatureArgs { /// BTC address used to receive rewards @@ -404,4 +409,40 @@ mod tests { _ => panic!("Invalid parsed address"), } } + + #[test] + fn test_parse_stacking_method() { + assert_eq!( + StackingSignatureMethod::from_str("agg-increase", true).unwrap(), + Pox4SignatureTopic::AggregationIncrease.into() + ); + assert_eq!( + StackingSignatureMethod::from_str("agg-commit", true).unwrap(), + Pox4SignatureTopic::AggregationCommit.into() + ); + assert_eq!( + StackingSignatureMethod::from_str("stack-increase", true).unwrap(), + Pox4SignatureTopic::StackIncrease.into() + ); + assert_eq!( + StackingSignatureMethod::from_str("stack-extend", true).unwrap(), + Pox4SignatureTopic::StackExtend.into() + ); + assert_eq!( + StackingSignatureMethod::from_str("stack-stx", true).unwrap(), + Pox4SignatureTopic::StackStx.into() + ); + + // These don't exactly match the enum, but are accepted if passed as + // CLI args + + assert_eq!( + StackingSignatureMethod::from_str("aggregation-increase", true).unwrap(), + Pox4SignatureTopic::AggregationIncrease.into() + ); + assert_eq!( + StackingSignatureMethod::from_str("aggregation-commit", true).unwrap(), + Pox4SignatureTopic::AggregationCommit.into() + ); + } } From c8bdbcf17d16b7888de76f6e3e18350f5804ea08 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 8 May 2024 07:40:58 -0700 Subject: [PATCH 0027/1400] fix: consistently use compressed public keys in signer config --- stacks-signer/src/config.rs | 57 +++++++++++++++++++++++++++++++------ stacks-signer/src/main.rs | 10 ++++--- 2 files changed, 54 insertions(+), 13 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index f36ae91c26..00221c4811 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -21,14 +21,16 @@ use std::path::PathBuf; use std::time::Duration; use blockstack_lib::chainstate::stacks::TransactionVersion; +use clarity::util::hash::to_hex; use libsigner::SignerEntries; use serde::Deserialize; use stacks_common::address::{ - AddressHashMode, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; use stacks_common::types::PrivateKey; +use stacks_common::util::hash::Hash160; use wsts::curve::scalar::Scalar; use crate::client::SignerSlotID; @@ -286,13 +288,9 @@ impl TryFrom for GlobalConfig { ) })?; let stacks_public_key = StacksPublicKey::from_private(&stacks_private_key); - let stacks_address = StacksAddress::from_public_keys( - raw_data.network.to_address_version(), - &AddressHashMode::SerializeP2PKH, - 1, - &vec![stacks_public_key], - ) - .ok_or(ConfigError::UnsupportedAddressVersion)?; + let signer_hash = Hash160::from_data(stacks_public_key.to_bytes_compressed().as_slice()); + let stacks_address = + StacksAddress::p2pkh_from_hash(raw_data.network.is_mainnet(), signer_hash); let event_timeout = Duration::from_millis(raw_data.event_timeout_ms.unwrap_or(EVENT_TIMEOUT_MS)); let dkg_end_timeout = raw_data.dkg_end_timeout_ms.map(Duration::from_millis); @@ -381,7 +379,9 @@ Metrics endpoint: {metrics_endpoint} node_host = self.node_host, endpoint = self.endpoint, stacks_address = self.stacks_address, - public_key = StacksPublicKey::from_private(&self.stacks_private_key).to_hex(), + public_key = to_hex( + &StacksPublicKey::from_private(&self.stacks_private_key).to_bytes_compressed() + ), network = self.network, db_path = self.db_path.to_str().unwrap_or_default(), tx_fee = tx_fee, @@ -645,4 +645,43 @@ Metrics endpoint: 0.0.0.0:9090 ) ); } + + #[test] + // Test the same private key twice, with and without a compression flag. + // Ensure that the address is the same in both cases. + fn test_stacks_addr_from_priv_key() { + // 64 bytes, no compression flag + let sk_hex = "2de4e77aab89c0c2570bb8bb90824f5cf2a5204a975905fee450ff9dad0fcf28"; + + let expected_addr = "SP1286C62P3TAWVQV2VM2CEGTRBQZSZ6MHMS9RW05"; + + let config_toml = format!( + r#" +stacks_private_key = "{sk_hex}" +node_host = "localhost" +endpoint = "localhost:30000" +network = "mainnet" +auth_password = "abcd" +db_path = ":memory:" + "# + ); + let config = GlobalConfig::load_from_str(&config_toml).unwrap(); + assert_eq!(config.stacks_address.to_string(), expected_addr); + + // 65 bytes (with compression flag) + let sk_hex = "2de4e77aab89c0c2570bb8bb90824f5cf2a5204a975905fee450ff9dad0fcf2801"; + + let config_toml = format!( + r#" +stacks_private_key = "{sk_hex}" +node_host = "localhost" +endpoint = "localhost:30000" +network = "mainnet" +auth_password = "abcd" +db_path = ":memory:" + "# + ); + let config = GlobalConfig::load_from_str(&config_toml).unwrap(); + assert_eq!(config.stacks_address.to_string(), expected_addr); + } } diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 75514fd2eb..1a7c4d048a 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -30,13 +30,14 @@ use std::io::{self, Write}; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; +use clarity::types::chainstate::StacksPublicKey; use clarity::vm::types::QualifiedContractIdentifier; use libsigner::{SignerSession, StackerDBSession}; use libstackerdb::StackerDBChunkData; use slog::slog_debug; use stacks_common::debug; use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; +use stacks_common::util::secp256k1::MessageSignature; use stacks_signer::cli::{ Cli, Command, GenerateStackingSignatureArgs, GetChunkArgs, GetLatestChunkArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, @@ -116,7 +117,8 @@ fn handle_generate_stacking_signature( let config = GlobalConfig::try_from(&args.config).unwrap(); let private_key = config.stacks_private_key; - let public_key = Secp256k1PublicKey::from_private(&private_key); + let public_key = StacksPublicKey::from_private(&private_key); + let pk_hex = to_hex(&public_key.to_bytes_compressed()); let signature = make_pox_4_signer_key_signature( &args.pox_address, @@ -132,7 +134,7 @@ fn handle_generate_stacking_signature( let output_str = if args.json { serde_json::to_string(&serde_json::json!({ - "signerKey": to_hex(&public_key.to_bytes_compressed()), + "signerKey": pk_hex, "signerSignature": to_hex(signature.to_rsv().as_slice()), "authId": format!("{}", args.auth_id), "rewardCycle": args.reward_cycle, @@ -145,7 +147,7 @@ fn handle_generate_stacking_signature( } else { format!( "Signer Public Key: 0x{}\nSigner Key Signature: 0x{}\n\n", - to_hex(&public_key.to_bytes_compressed()), + pk_hex, to_hex(signature.to_rsv().as_slice()) // RSV is needed for Clarity ) }; From c71d90f5e11ce4a4ba318e7eb02eec592dd06d0a Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 16 May 2024 17:24:35 +0300 Subject: [PATCH 0028/1400] Use debug log for private IP range --- stackslib/src/net/neighbors/walk.rs | 21 +++++++++++++++------ stackslib/src/net/stackerdb/sync.rs | 6 +++++- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/neighbors/walk.rs b/stackslib/src/net/neighbors/walk.rs index 8a0e370ba8..71fdd8c1de 100644 --- a/stackslib/src/net/neighbors/walk.rs +++ b/stackslib/src/net/neighbors/walk.rs @@ -1012,12 +1012,21 @@ impl NeighborWalk { continue; } Err(e) => { - info!( - "{:?}: Failed to connect to {:?}: {:?}", - network.get_local_peer(), - &nk, - &e - ); + if na.addrbytes.is_in_private_range() { + debug!( + "{:?}: Failed to connect to {:?}: {:?}", + network.get_local_peer(), + &nk, + &e + ); + } else { + info!( + "{:?}: Failed to connect to {:?}: {:?}", + network.get_local_peer(), + &nk, + &e + ); + } continue; } } diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 66ad54601a..ffe0c7636b 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -657,7 +657,11 @@ impl StackerDBSync { self.replicas.insert(naddr); } Err(_e) => { - info!("Failed to begin session with {:?}: {:?}", &naddr, &_e); + if naddr.addrbytes.is_in_private_range() { + debug!("Failed to begin session with {:?}: {:?}", &naddr, &_e); + } else { + info!("Failed to begin session with {:?}: {:?}", &naddr, &_e); + } } } } From bad27ac66a7d7553e42aa8397f259c0132e799bb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 16 May 2024 09:08:59 -0700 Subject: [PATCH 0029/1400] WIP: add a block_proposal_rejection test to v0 signer Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + stacks-signer/src/client/stacks_client.rs | 2 +- testnet/stacks-node/src/tests/signer/mod.rs | 81 ++++++++- testnet/stacks-node/src/tests/signer/v0.rs | 175 ++++++++++++++++++++ testnet/stacks-node/src/tests/signer/v1.rs | 10 +- 5 files changed, 254 insertions(+), 15 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index e117d61bdf..1f673563ba 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -82,6 +82,7 @@ jobs: - tests::nakamoto_integrations::vote_for_aggregate_key_burn_op - tests::nakamoto_integrations::follower_bootup - tests::nakamoto_integrations::forked_tenure_is_ignored + - tests::signer::v0::block_proposal_rejection - tests::signer::v1::dkg - tests::signer::v1::sign_request_rejected - tests::signer::v1::block_proposal diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 6f07bb362b..b4b5d8a3a1 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -142,7 +142,7 @@ impl StacksClient { } /// Helper function that attempts to deserialize a clarity hext string as a list of signer slots and their associated number of signer slots - fn parse_signer_slots( + pub fn parse_signer_slots( &self, value: ClarityValue, ) -> Result, ClientError> { diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 61bc54f1bc..3097922cb6 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -38,7 +38,7 @@ use clarity::boot_util::boot_code_id; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; -use stacks::chainstate::stacks::boot::SIGNERS_NAME; +use stacks::chainstate::stacks::boot::{MINERS_NAME, SIGNERS_NAME}; use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::BlockValidateResponse; @@ -68,7 +68,7 @@ use crate::{BitcoinRegtestController, BurnchainController}; // Helper struct for holding the btc and stx neon nodes #[allow(dead_code)] -struct RunningNodes { +pub struct RunningNodes { pub btc_regtest_controller: BitcoinRegtestController, pub btcd_controller: BitcoinCoreController, pub run_loop_thread: thread::JoinHandle<()>, @@ -83,7 +83,7 @@ struct RunningNodes { /// A test harness for running a v0 or v1 signer integration test pub struct SignerTest { // The stx and bitcoin nodes and their run loops - running_nodes: RunningNodes, + pub running_nodes: RunningNodes, // The spawned signers and their threads pub spawned_signers: Vec, // the private keys of the signers @@ -261,26 +261,39 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Sha512Trunc256Sum { - // Wait for the block to show up in the test observer (Don't have to wait long as if we have received a mined block already, - // we know that the signers have already received their block proposal events via their event observers) + fn wait_for_block_validate_response(&mut self, timeout: Duration) -> BlockValidateResponse { + // Wait for the block to show up in the test observer let t_start = Instant::now(); while test_observer::get_proposal_responses().is_empty() { assert!( t_start.elapsed() < timeout, - "Timed out while waiting for block proposal event" + "Timed out while waiting for block proposal response event" ); thread::sleep(Duration::from_secs(1)); } - let validate_response = test_observer::get_proposal_responses() + test_observer::get_proposal_responses() .pop() - .expect("No block proposal"); + .expect("No block proposal") + } + + fn wait_for_validate_ok_response(&mut self, timeout: Duration) -> Sha512Trunc256Sum { + let validate_response = self.wait_for_block_validate_response(timeout); match validate_response { BlockValidateResponse::Ok(block_validated) => block_validated.signer_signature_hash, _ => panic!("Unexpected response"), } } + fn wait_for_validate_reject_response(&mut self, timeout: Duration) -> Sha512Trunc256Sum { + // Wait for the block to show up in the test observer + let validate_response = self.wait_for_block_validate_response(timeout); + match validate_response { + BlockValidateResponse::Reject(block_rejection) => block_rejection.signer_signature_hash, + _ => panic!("Unexpected response"), + } + } + + // Must be called AFTER booting the chainstate fn run_until_epoch_3_boundary(&mut self) { let epochs = self.running_nodes.conf.burnchain.epochs.clone().unwrap(); let epoch_3 = @@ -325,6 +338,56 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Vec { + let valid_signer_set = + u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); + let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false); + + self.stacks_client + .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, valid_signer_set) + .expect("FATAL: failed to get signer slots from stackerdb") + .iter() + .enumerate() + .map(|(pos, _)| { + SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) + }) + .collect() + } + + fn get_miner_index(&self) -> SignerSlotID { + let miners_contract_id = boot_code_id(MINERS_NAME, false); + let value = self + .stacks_client + .read_only_contract_call( + &miners_contract_id.issuer.into(), + &miners_contract_id.name, + &"stackerdb-get-signer-slots".into(), + &[], + ) + .expect("Failed to get miner indices"); + let miner_indices = self + .stacks_client + .parse_signer_slots(value) + .expect("Failed to parse miner indices"); + miner_indices + .iter() + .position(|(address, _)| { + address + == &to_addr( + &self + .running_nodes + .conf + .miner + .mining_key + .expect("no mining key specificed"), + ) + }) + .map(|pos| { + SignerSlotID(u32::try_from(pos).expect("FATAL: number of miners exceeds u32::MAX")) + }) + .expect("FATAL: miner not registered") + } + fn get_signer_public_keys(&self, reward_cycle: u64) -> PublicKeys { let entries = self .stacks_client diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 36abf9bab1..92cbc187c3 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -12,3 +12,178 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . + +use std::env; +use std::time::Duration; + +use libsigner::v0::messages::{ + BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, +}; +use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use stacks::chainstate::stacks::boot::MINERS_NAME; +use stacks::codec::StacksMessageCodec; +use stacks::libstackerdb::StackerDBChunkData; +use stacks::types::chainstate::StacksPrivateKey; +use stacks::util_lib::boot::boot_code_id; +use stacks_signer::client::{SignerSlotID, StackerDB}; +use stacks_signer::v0::SpawnedSigner; +use tracing_subscriber::prelude::*; +use tracing_subscriber::{fmt, EnvFilter}; + +use super::SignerTest; +use crate::tests::nakamoto_integrations::boot_to_epoch_3_reward_set; +use crate::BurnchainController; + +impl SignerTest { + /// Run the test until the epoch 3 boundary + fn boot_to_epoch_3(&mut self) { + boot_to_epoch_3_reward_set( + &self.running_nodes.conf, + &self.running_nodes.blocks_processed, + &self.signer_stacks_private_keys, + &self.signer_stacks_private_keys, + &mut self.running_nodes.btc_regtest_controller, + ); + + self.run_until_epoch_3_boundary(); + } +} +#[test] +#[ignore] +/// Test that a signer can respond to an invalid block proposal +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// +/// Test Execution: +/// The stacks node is advanced to epoch 3.0 reward set calculation to ensure the signer set is determined. +/// An invalid block proposal is forcibly written to the miner's slot to simulate the miner proposing a block. +/// The signers process the invalid block by first verifying it against the stacks node block proposal endpoint. +/// The signers then broadcast a rejection of the miner's proposed block back to the respective .signers-XXX-YYY contract. +/// +/// Test Assertion: +/// Each signer successfully rejects the invalid block proposal. +fn block_proposal_rejection() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let mut signer_test: SignerTest = SignerTest::new(num_signers); + signer_test.boot_to_epoch_3(); + let short_timeout = Duration::from_secs(30); + + info!("------------------------- Send Block Proposal To Signers -------------------------"); + let miners_contract_id = boot_code_id(MINERS_NAME, false); + let mut session = StackerDBSession::new( + &signer_test.running_nodes.conf.node.rpc_bind, + miners_contract_id.clone(), + ); + let block = NakamotoBlock { + header: NakamotoBlockHeader::empty(), + txs: vec![], + }; + let block_signer_signature_hash = block.header.signer_signature_hash(); + let burn_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let reward_cycle = signer_test.get_current_reward_cycle(); + let next_reward_cycle = reward_cycle + 1; + let message = SignerMessage::BlockProposal(BlockProposal { + block, + burn_height, + reward_cycle: next_reward_cycle, + }); + // Just attempt to submit a chunk to all possible slots + // We just need one to be successful + // let mut results = vec![]; + // for i in 0..2 { + // let mut chunk = StackerDBChunkData::new(i, 0, message.serialize_to_vec()); + // chunk + // .sign( + // &signer_test + // .running_nodes + // .conf + // .miner + // .mining_key + // .expect("No mining key"), + // ) + // .expect("Failed to sign message chunk"); + // let result = session.put_chunk(&chunk).expect("Failed to put chunk"); + // debug!("Test Put Chunk ACK: {result:?}"); + // results.push(result); + // } + // assert!( + // results.iter().any(|result| result.accepted), + // "Failed to submit block proposal to signers" + // ); + + let miner_index = signer_test.get_miner_index(); + let mut chunk = StackerDBChunkData::new(miner_index.0, 0, message.serialize_to_vec()); + chunk + .sign( + &signer_test + .running_nodes + .conf + .miner + .mining_key + .expect("No mining key"), + ) + .expect("Failed to sign message chunk"); + let result = session.put_chunk(&chunk).expect("Failed to put chunk"); + debug!("Test Put Chunk ACK: {result:?}"); + assert!( + result.accepted, + "Failed to submit block proposal to signers" + ); + + info!("------------------------- Test Block Proposal Rejected -------------------------"); + // Verify that the node correctly rejected the node + let proposed_signer_signature_hash = + signer_test.wait_for_validate_reject_response(short_timeout); + assert_eq!(proposed_signer_signature_hash, block_signer_signature_hash); + + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(next_reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + + let mut stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + next_reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index. + ); + + let messages: Vec = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &signer_slot_ids, + ) + .expect("Failed to get message from stackerdb"); + for message in messages { + if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason: _reason, + reason_code, + signer_signature_hash, + })) = message + { + assert_eq!(signer_signature_hash, block_signer_signature_hash); + assert!(matches!(reason_code, RejectCode::ValidationFailed(_))); + } else { + panic!("Unexpected message type"); + } + } + signer_test.shutdown(); +} diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 5966a188fc..83a03fee00 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -575,7 +575,7 @@ fn sign_request_rejected() { block2.header.tx_merkle_root = tx_merkle_root2; let timeout = Duration::from_secs(200); - let mut signer_test = SignerTest::new(10); + let mut signer_test: SignerTest = SignerTest::new(10); let _key = signer_test.boot_to_epoch_3(timeout); info!("------------------------- Test Sign -------------------------"); @@ -862,7 +862,7 @@ fn block_proposal() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let mut signer_test = SignerTest::new(num_signers); + let mut signer_test: SignerTest = SignerTest::new(num_signers); let timeout = Duration::from_secs(30); let short_timeout = Duration::from_secs(30); @@ -920,7 +920,7 @@ fn mine_2_nakamoto_reward_cycles() { info!("------------------------- Test Setup -------------------------"); let nmb_reward_cycles = 2; - let mut signer_test = SignerTest::new(5); + let mut signer_test: SignerTest = SignerTest::new(5); let timeout = Duration::from_secs(200); let first_dkg = signer_test.boot_to_epoch_3(timeout); let curr_reward_cycle = signer_test.get_current_reward_cycle(); @@ -995,7 +995,7 @@ fn filter_bad_transactions() { info!("------------------------- Test Setup -------------------------"); // Advance to the prepare phase of a post epoch 3.0 reward cycle to force signers to look at the next signer transactions to compare against a proposed block - let mut signer_test = SignerTest::new(5); + let mut signer_test: SignerTest = SignerTest::new(5); let timeout = Duration::from_secs(200); let current_signers_dkg = signer_test.boot_to_epoch_3(timeout); let next_signers_dkg = signer_test @@ -1083,7 +1083,7 @@ fn sign_after_signer_reboot() { info!("------------------------- Test Setup -------------------------"); let num_signers = 3; - let mut signer_test = SignerTest::new(num_signers); + let mut signer_test: SignerTest = SignerTest::new(num_signers); let timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(30); From ab872bf110fc9a40299df91b9a022e4382482194 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 16 May 2024 10:27:11 -0700 Subject: [PATCH 0030/1400] WIP: remove stale code Signed-off-by: Jacinta Ferrant --- stackslib/src/net/stackerdb/db.rs | 9 +++ testnet/stacks-node/src/tests/signer/mod.rs | 36 +----------- testnet/stacks-node/src/tests/signer/v0.rs | 65 +++++++++------------ 3 files changed, 38 insertions(+), 72 deletions(-) diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 6cdebb69d9..3ec091d0bf 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -303,7 +303,16 @@ impl<'a> StackerDBTx<'a> { ) -> Result<(), net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let mut total_slots_read = 0u32; + debug!( + "Reconfiguring {} slots of StackerDB: {:?}", + slots.len(), + smart_contract + ); for (principal, slot_count) in slots.iter() { + debug!( + "Reconfigure StackerDB slot: ({}, {})", + &principal, slot_count + ); total_slots_read = total_slots_read .checked_add(*slot_count) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 3097922cb6..e5e8f54f94 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -38,7 +38,7 @@ use clarity::boot_util::boot_code_id; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; -use stacks::chainstate::stacks::boot::{MINERS_NAME, SIGNERS_NAME}; +use stacks::chainstate::stacks::boot::SIGNERS_NAME; use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::BlockValidateResponse; @@ -354,40 +354,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest SignerSlotID { - let miners_contract_id = boot_code_id(MINERS_NAME, false); - let value = self - .stacks_client - .read_only_contract_call( - &miners_contract_id.issuer.into(), - &miners_contract_id.name, - &"stackerdb-get-signer-slots".into(), - &[], - ) - .expect("Failed to get miner indices"); - let miner_indices = self - .stacks_client - .parse_signer_slots(value) - .expect("Failed to parse miner indices"); - miner_indices - .iter() - .position(|(address, _)| { - address - == &to_addr( - &self - .running_nodes - .conf - .miner - .mining_key - .expect("no mining key specificed"), - ) - }) - .map(|pos| { - SignerSlotID(u32::try_from(pos).expect("FATAL: number of miners exceeds u32::MAX")) - }) - .expect("FATAL: miner not registered") - } - fn get_signer_public_keys(&self, reward_cycle: u64) -> PublicKeys { let entries = self .stacks_client diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 92cbc187c3..8fc95d800c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -33,6 +33,7 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::tests::nakamoto_integrations::boot_to_epoch_3_reward_set; +use crate::tests::neon_integrations::test_observer; use crate::BurnchainController; impl SignerTest { @@ -80,6 +81,15 @@ fn block_proposal_rejection() { signer_test.boot_to_epoch_3(); let short_timeout = Duration::from_secs(30); + info!("------------------------- Wait for StackerDB Initialization -------------------------"); + let time = std::time::Instant::now(); + while test_observer::get_stackerdb_chunks().is_empty() { + std::thread::sleep(Duration::from_secs(1)); + assert!( + time.elapsed() < short_timeout, + "Failed to get StackerDB chunks" + ); + } info!("------------------------- Send Block Proposal To Signers -------------------------"); let miners_contract_id = boot_code_id(MINERS_NAME, false); let mut session = StackerDBSession::new( @@ -102,46 +112,27 @@ fn block_proposal_rejection() { burn_height, reward_cycle: next_reward_cycle, }); + let mut miner_sk = signer_test + .running_nodes + .conf + .miner + .mining_key + .expect("No mining key"); + miner_sk.set_compress_public(true); + // Just attempt to submit a chunk to all possible slots + let mut results = vec![]; // We just need one to be successful - // let mut results = vec![]; - // for i in 0..2 { - // let mut chunk = StackerDBChunkData::new(i, 0, message.serialize_to_vec()); - // chunk - // .sign( - // &signer_test - // .running_nodes - // .conf - // .miner - // .mining_key - // .expect("No mining key"), - // ) - // .expect("Failed to sign message chunk"); - // let result = session.put_chunk(&chunk).expect("Failed to put chunk"); - // debug!("Test Put Chunk ACK: {result:?}"); - // results.push(result); - // } - // assert!( - // results.iter().any(|result| result.accepted), - // "Failed to submit block proposal to signers" - // ); - - let miner_index = signer_test.get_miner_index(); - let mut chunk = StackerDBChunkData::new(miner_index.0, 0, message.serialize_to_vec()); - chunk - .sign( - &signer_test - .running_nodes - .conf - .miner - .mining_key - .expect("No mining key"), - ) - .expect("Failed to sign message chunk"); - let result = session.put_chunk(&chunk).expect("Failed to put chunk"); - debug!("Test Put Chunk ACK: {result:?}"); + for i in 0..2 { + let mut chunk = StackerDBChunkData::new(i, 0, message.serialize_to_vec()); + chunk.sign(&miner_sk).expect("Failed to sign message chunk"); + debug!("Produced a signature: {:?}", chunk.sig); + let result = session.put_chunk(&chunk).expect("Failed to put chunk"); + debug!("Test Put Chunk ACK: {result:?}"); + results.push(result); + } assert!( - result.accepted, + results.iter().any(|result| result.accepted), "Failed to submit block proposal to signers" ); From 3477c910111b8a77bfa0db112b32cf4ff7981414 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 16 May 2024 10:36:58 -0700 Subject: [PATCH 0031/1400] Add VRF key registration for 2.5 to 3.0 miner transition Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 23 +++++++++++++++-- testnet/stacks-node/src/tests/signer/v0.rs | 29 ++++++---------------- 2 files changed, 28 insertions(+), 24 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 3a767b9804..b213ad1594 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -3317,10 +3317,16 @@ impl RelayerThread { fn inner_generate_leader_key_register_op( vrf_public_key: VRFPublicKey, consensus_hash: &ConsensusHash, + miner_pk: Option<&StacksPublicKey>, ) -> BlockstackOperationType { + let memo = if let Some(pk) = miner_pk { + Hash160::from_node_public_key(pk).as_bytes().to_vec() + } else { + vec![] + }; BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, - memo: vec![], + memo, consensus_hash: consensus_hash.clone(), vtxindex: 0, txid: Txid([0u8; 32]), @@ -3350,7 +3356,20 @@ impl RelayerThread { ); let burnchain_tip_consensus_hash = &burn_block.consensus_hash; - let op = Self::inner_generate_leader_key_register_op(vrf_pk, burnchain_tip_consensus_hash); + // if the miner has set a mining key in preparation for epoch-3.0, register it as part of their VRF key registration + // once implemented in the nakamoto_node, this will allow miners to transition from 2.5 to 3.0 without submitting a new + // VRF key registration. + let miner_pk = self + .config + .miner + .mining_key + .as_ref() + .map(StacksPublicKey::from_private); + let op = Self::inner_generate_leader_key_register_op( + vrf_pk, + burnchain_tip_consensus_hash, + miner_pk.as_ref(), + ); let mut one_off_signer = self.keychain.generate_op_signer(); if let Some(txid) = diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 8fc95d800c..44b550bde1 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -33,7 +33,6 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::tests::nakamoto_integrations::boot_to_epoch_3_reward_set; -use crate::tests::neon_integrations::test_observer; use crate::BurnchainController; impl SignerTest { @@ -81,15 +80,6 @@ fn block_proposal_rejection() { signer_test.boot_to_epoch_3(); let short_timeout = Duration::from_secs(30); - info!("------------------------- Wait for StackerDB Initialization -------------------------"); - let time = std::time::Instant::now(); - while test_observer::get_stackerdb_chunks().is_empty() { - std::thread::sleep(Duration::from_secs(1)); - assert!( - time.elapsed() < short_timeout, - "Failed to get StackerDB chunks" - ); - } info!("------------------------- Send Block Proposal To Signers -------------------------"); let miners_contract_id = boot_code_id(MINERS_NAME, false); let mut session = StackerDBSession::new( @@ -120,19 +110,14 @@ fn block_proposal_rejection() { .expect("No mining key"); miner_sk.set_compress_public(true); - // Just attempt to submit a chunk to all possible slots - let mut results = vec![]; - // We just need one to be successful - for i in 0..2 { - let mut chunk = StackerDBChunkData::new(i, 0, message.serialize_to_vec()); - chunk.sign(&miner_sk).expect("Failed to sign message chunk"); - debug!("Produced a signature: {:?}", chunk.sig); - let result = session.put_chunk(&chunk).expect("Failed to put chunk"); - debug!("Test Put Chunk ACK: {result:?}"); - results.push(result); - } + // Submit the block proposal to the miner's slot + let mut chunk = StackerDBChunkData::new(0, 1, message.serialize_to_vec()); + chunk.sign(&miner_sk).expect("Failed to sign message chunk"); + debug!("Produced a signature: {:?}", chunk.sig); + let result = session.put_chunk(&chunk).expect("Failed to put chunk"); + debug!("Test Put Chunk ACK: {result:?}"); assert!( - results.iter().any(|result| result.accepted), + result.accepted, "Failed to submit block proposal to signers" ); From 4850ee26d959e9d5849fb8b206e989d28690e195 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 16 May 2024 11:42:38 -0700 Subject: [PATCH 0032/1400] Add a block rejection test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 28 ++++++++++++---------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 44b550bde1..799bb08f6e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -49,6 +49,7 @@ impl SignerTest { self.run_until_epoch_3_boundary(); } } + #[test] #[ignore] /// Test that a signer can respond to an invalid block proposal @@ -80,6 +81,18 @@ fn block_proposal_rejection() { signer_test.boot_to_epoch_3(); let short_timeout = Duration::from_secs(30); + let reward_cycle = signer_test.get_current_reward_cycle(); + + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + + // Wait for the signers to be ready for the proposal + std::thread::sleep(Duration::from_secs(5)); + info!("------------------------- Send Block Proposal To Signers -------------------------"); let miners_contract_id = boot_code_id(MINERS_NAME, false); let mut session = StackerDBSession::new( @@ -95,20 +108,17 @@ fn block_proposal_rejection() { .running_nodes .btc_regtest_controller .get_headers_height(); - let reward_cycle = signer_test.get_current_reward_cycle(); - let next_reward_cycle = reward_cycle + 1; let message = SignerMessage::BlockProposal(BlockProposal { block, burn_height, - reward_cycle: next_reward_cycle, + reward_cycle, }); - let mut miner_sk = signer_test + let miner_sk = signer_test .running_nodes .conf .miner .mining_key .expect("No mining key"); - miner_sk.set_compress_public(true); // Submit the block proposal to the miner's slot let mut chunk = StackerDBChunkData::new(0, 1, message.serialize_to_vec()); @@ -127,17 +137,11 @@ fn block_proposal_rejection() { signer_test.wait_for_validate_reject_response(short_timeout); assert_eq!(proposed_signer_signature_hash, block_signer_signature_hash); - let signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(next_reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - let mut stackerdb = StackerDB::new( &signer_test.running_nodes.conf.node.rpc_bind, StacksPrivateKey::new(), // We are just reading so don't care what the key is false, - next_reward_cycle, + reward_cycle, SignerSlotID(0), // We are just reading so again, don't care about index. ); From 8ac11939512a3da8d5ca8a332409d3375ed7f909 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sun, 12 May 2024 20:02:32 -0700 Subject: [PATCH 0033/1400] wip: modify nakamoto block header to use `Vec` --- stackslib/src/chainstate/nakamoto/mod.rs | 100 ++++++++++++++---- .../src/chainstate/nakamoto/test_signers.rs | 29 ++--- .../src/chainstate/nakamoto/tests/mod.rs | 20 ++-- .../download/nakamoto/tenure_downloader.rs | 70 ++++++------ .../nakamoto/tenure_downloader_unconfirmed.rs | 63 ++++++----- stackslib/src/net/relay.rs | 49 ++++++--- stackslib/src/net/tests/download/nakamoto.rs | 6 +- testnet/stacks-node/src/event_dispatcher.rs | 2 +- .../stacks-node/src/nakamoto_node/miner.rs | 62 ++++++++++- .../src/nakamoto_node/sign_coordinator.rs | 4 +- .../src/tests/nakamoto_integrations.rs | 23 ++-- testnet/stacks-node/src/tests/signer/v1.rs | 4 +- 12 files changed, 294 insertions(+), 138 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 17b0bed358..ed64a3f217 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -29,6 +29,7 @@ use lazy_static::{__Deref, lazy_static}; use rusqlite::blob::Blob; use rusqlite::types::{FromSql, FromSqlError}; use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql, NO_PARAMS}; +use serde_json::Value as SerdeValue; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::bitvec::BitVec; use stacks_common::codec::{ @@ -178,8 +179,8 @@ lazy_static! { state_index_root TEXT NOT NULL, -- miner's signature over the block miner_signature TEXT NOT NULL, - -- signers' signature over the block - signer_signature TEXT NOT NULL, + -- signers' signatures over the block + signer_signature BLOB NOT NULL, -- bitvec capturing stacker participation in signature signer_bitvec TEXT NOT NULL, -- The following fields are not part of either the StacksHeaderInfo struct @@ -305,8 +306,10 @@ pub struct NakamotoBlockHeader { pub state_index_root: TrieHash, /// Recoverable ECDSA signature from the tenure's miner. pub miner_signature: MessageSignature, - /// Schnorr signature over the block header from the signer set active during the tenure. - pub signer_signature: ThresholdSignature, + /// The set of recoverable ECDSA signatures over + /// the block header from the signer set active during the tenure. + /// (ordered by reward set order) + pub signer_signature: Vec, /// A bitvec which represents the signers that participated in this block signature. /// The maximum number of entries in the bitvec is 4000. pub signer_bitvec: BitVec<4000>, @@ -325,9 +328,19 @@ impl FromRow for NakamotoBlockHeader { let parent_block_id = row.get("parent_block_id")?; let tx_merkle_root = row.get("tx_merkle_root")?; let state_index_root = row.get("state_index_root")?; - let signer_signature = row.get("signer_signature")?; let miner_signature = row.get("miner_signature")?; let signer_bitvec = row.get("signer_bitvec")?; + let signer_signature: SerdeValue = row.get_unwrap("signer_signature"); + let signer_signature = signer_signature + .as_array() + .map(|values| { + values + .iter() + .cloned() + .map(serde_json::from_value::) + .collect::, serde_json::Error>>() + }) + .ok_or_else(|| DBError::Corruption)??; Ok(NakamotoBlockHeader { version, @@ -490,10 +503,34 @@ impl NakamotoBlockHeader { } /// Verify the block header against an aggregate public key - pub fn verify_signer(&self, signer_aggregate: &Point) -> bool { - let schnorr_signature = &self.signer_signature.0; + pub fn verify_threshold_signer( + &self, + signer_aggregate: &Point, + signature: &ThresholdSignature, + ) -> bool { let message = self.signer_signature_hash().0; - schnorr_signature.verify(signer_aggregate, &message) + signature.verify(signer_aggregate, &message) + } + + /// Verify the block header against the list of signer signatures + /// + /// TODO: ingest the list of signer pubkeys + /// + /// TODO: validate against: + /// - Any invalid signatures + /// - Any duplicate signatures + /// - At least the minimum number of signatures + pub fn verify_signer_signatures(&self, _reward_set: &RewardSet) -> Result<(), ChainstateError> { + // TODO: verify each signature in the block + let _sig_hash = self.signer_signature_hash(); + + let _signatures = self + .signer_signature + .iter() + .map(|sig| sig.clone()) + .collect::>(); + + return Ok(()); } /// Make an "empty" header whose block data needs to be filled in. @@ -514,7 +551,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::with_capacity(SIGNERS_MAX_LIST_SIZE), signer_bitvec: BitVec::ones(bitvec_len) .expect("BUG: bitvec of length-1 failed to construct"), } @@ -531,7 +568,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::with_capacity(SIGNERS_MAX_LIST_SIZE), signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), } } @@ -547,7 +584,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::with_capacity(SIGNERS_MAX_LIST_SIZE), signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), } } @@ -1690,13 +1727,16 @@ impl NakamotoChainState { /// Does nothing if: /// * we already have the block /// Returns true if we stored the block; false if not. + /// + /// TODO: ingest the list of signer keys (instead of aggregate key) pub fn accept_block( config: &ChainstateConfig, block: NakamotoBlock, db_handle: &mut SortitionHandleConn, staging_db_tx: &NakamotoStagingBlocksTx, headers_conn: &Connection, - aggregate_public_key: &Point, + _aggregate_public_key: &Point, + reward_set: RewardSet, ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); // do nothing if we already have this block @@ -1743,17 +1783,28 @@ impl NakamotoChainState { return Ok(false); }; - let schnorr_signature = &block.header.signer_signature.0; - if !db_handle.expects_signer_signature( - &block.header.consensus_hash, - schnorr_signature, - &block.header.signer_signature_hash().0, - aggregate_public_key, - )? { - let msg = format!( - "Received block, but the signer signature does not match the active stacking cycle" + // TODO: epoch gate to verify aggregate signature + // let schnorr_signature = &block.header.signer_signature.0; + // if !db_handle.expects_signer_signature( + // &block.header.consensus_hash, + // schnorr_signature, + // &block.header.signer_signature_hash().0, + // aggregate_public_key, + // )? { + // let msg = format!( + // "Received block, but the signer signature does not match the active stacking cycle" + // ); + // warn!("{}", msg; "aggregate_key" => %aggregate_public_key); + // return Err(ChainstateError::InvalidStacksBlock(msg)); + // } + + // TODO: epoch gate to verify signatures vec + if let Err(e) = block.header.verify_signer_signatures(&reward_set) { + warn!("Received block, but the signer signatures are invalid"; + "block_id" => %block.block_id(), + "error" => ?e ); - warn!("{}", msg; "aggregate_key" => %aggregate_public_key); + let msg = format!("Received block, but the signer signatures are invalid"); return Err(ChainstateError::InvalidStacksBlock(msg)); } @@ -2236,6 +2287,9 @@ impl NakamotoChainState { let vrf_proof_bytes = vrf_proof.map(|proof| proof.to_hex()); + let signer_signature = serde_json::to_string(&header.signer_signature) + .expect("Unable to serialize signer signatures"); + let args: &[&dyn ToSql] = &[ &u64_to_sql(*stacks_block_height)?, &index_root, @@ -2249,7 +2303,7 @@ impl NakamotoChainState { &u64_to_sql(header.chain_length)?, &u64_to_sql(header.burn_spent)?, &header.miner_signature, - &header.signer_signature, + &signer_signature, &header.tx_merkle_root, &header.state_index_root, &block_hash, diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 30a1ba8120..7179664fac 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -19,6 +19,7 @@ use std::collections::{HashSet, VecDeque}; use std::path::{Path, PathBuf}; use std::{fs, io}; +use clarity::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::types::*; @@ -77,6 +78,8 @@ pub struct TestSigners { pub party_key_ids: Vec>, /// The cycle for which the signers are valid pub cycle: u64, + /// The signer's private keys + pub signer_keys: Vec, } impl Default for TestSigners { @@ -104,6 +107,11 @@ impl Default for TestSigners { }) .collect(); + let mut signer_keys = Vec::::new(); + for _ in 0..num_keys { + signer_keys.push(Secp256k1PrivateKey::default()); + } + // Generate an aggregate public key let poly_commitments = match wsts::v2::test_helpers::dkg(&mut signer_parties, &mut rng) { Ok(poly_commitments) => poly_commitments, @@ -124,29 +132,24 @@ impl Default for TestSigners { threshold, party_key_ids, cycle: 0, + signer_keys, } } } impl TestSigners { + // TODO: sign using vec of signatures pub fn sign_nakamoto_block(&mut self, block: &mut NakamotoBlock, cycle: u64) { // Update the aggregate public key if the cycle has changed if self.cycle != cycle { self.generate_aggregate_key(cycle); } - - let mut rng = rand_core::OsRng; let msg = block.header.signer_signature_hash().0; - let (nonces, sig_shares, key_ids) = - wsts::v2::test_helpers::sign(msg.as_slice(), &mut self.signer_parties, &mut rng); - - let mut sig_aggregator = wsts::v2::Aggregator::new(self.num_keys, self.threshold); - sig_aggregator - .init(&self.poly_commitments) - .expect("aggregator init failed"); - let signature = sig_aggregator - .sign(msg.as_slice(), &nonces, &sig_shares, &key_ids) - .expect("aggregator sig failed"); + let signer_signature = self + .signer_keys + .iter() + .map(|key| key.sign(&msg).unwrap()) + .collect::>(); test_debug!( "Signed Nakamoto block {} with {} (rc {})", @@ -154,7 +157,7 @@ impl TestSigners { &self.aggregate_public_key, cycle ); - block.header.signer_signature = ThresholdSignature(signature); + block.header.signer_signature = signer_signature; } // Generate and assign a new aggregate public key diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index f8d048aaf1..533c339115 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -154,7 +154,7 @@ fn codec_nakamoto_header() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(8).unwrap(), }; @@ -204,7 +204,7 @@ pub fn test_nakamoto_first_tenure_block_syntactic_validation() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -761,7 +761,7 @@ pub fn test_load_store_update_nakamoto_blocks() { tx_merkle_root: nakamoto_tx_merkle_root, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -805,7 +805,7 @@ pub fn test_load_store_update_nakamoto_blocks() { tx_merkle_root: nakamoto_tx_merkle_root_2, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -844,7 +844,7 @@ pub fn test_load_store_update_nakamoto_blocks() { tx_merkle_root: nakamoto_tx_merkle_root_3, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -1519,7 +1519,7 @@ fn test_nakamoto_block_static_verification() { tx_merkle_root: nakamoto_tx_merkle_root, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; nakamoto_header.sign_miner(&private_key).unwrap(); @@ -1538,7 +1538,7 @@ fn test_nakamoto_block_static_verification() { tx_merkle_root: nakamoto_tx_merkle_root_bad_ch, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; nakamoto_header_bad_ch.sign_miner(&private_key).unwrap(); @@ -1557,7 +1557,7 @@ fn test_nakamoto_block_static_verification() { tx_merkle_root: nakamoto_tx_merkle_root_bad_miner_sig, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; nakamoto_header_bad_miner_sig @@ -1711,7 +1711,7 @@ pub fn test_get_highest_nakamoto_tenure() { tx_merkle_root: Sha512Trunc256Sum([0x00; 32]), state_index_root: TrieHash([0x00; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; let tenure_change = TenureChangePayload { @@ -2012,7 +2012,7 @@ fn test_make_miners_stackerdb_config() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; let block = NakamotoBlock { diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index c5ea7ba345..340fa717fd 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -243,18 +243,19 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - if !tenure_start_block - .header - .verify_signer(&self.start_aggregate_public_key) - { - // signature verification failed - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_start_block.header.block_id(), - "start_aggregate_public_key" => %self.start_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } + // TODO: epoch-gated verify threshold or vec of signatures + // if !tenure_start_block + // .header + // .verify_threshold_signer(&self.start_aggregate_public_key) + // { + // // signature verification failed + // warn!("Invalid tenure-start block: bad signer signature"; + // "tenure_id" => %self.tenure_id_consensus_hash, + // "block.header.block_id" => %tenure_start_block.header.block_id(), + // "start_aggregate_public_key" => %self.start_aggregate_public_key, + // "state" => %self.state); + // return Err(NetError::InvalidMessage); + // } debug!( "Accepted tenure-start block for tenure {} block={}", @@ -369,18 +370,19 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - if !tenure_end_block - .header - .verify_signer(&self.end_aggregate_public_key) - { - // bad signature - warn!("Invalid tenure-end block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "end_aggregate_public_key" => %self.end_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } + // TODO: epoch-gated verify threshold or vec of signatures + // if !tenure_end_block + // .header + // .verify_threshold_signer(&self.end_aggregate_public_key) + // { + // // bad signature + // warn!("Invalid tenure-end block: bad signer signature"; + // "tenure_id" => %self.tenure_id_consensus_hash, + // "block.header.block_id" => %tenure_end_block.header.block_id(), + // "end_aggregate_public_key" => %self.end_aggregate_public_key, + // "state" => %self.state); + // return Err(NetError::InvalidMessage); + // } // extract the needful -- need the tenure-change payload (which proves that the tenure-end // block is the tenure-start block for the next tenure) and the parent block ID (which is @@ -470,14 +472,18 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - if !block.header.verify_signer(&self.start_aggregate_public_key) { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "start_aggregate_public_key" => %self.start_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } + // TODO: epoch-gated verify threshold or vec of signatures + // if !block + // .header + // .verify_threshold_signer(&self.start_aggregate_public_key) + // { + // warn!("Invalid block: bad signer signature"; + // "tenure_id" => %self.tenure_id_consensus_hash, + // "block.header.block_id" => %block.header.block_id(), + // "start_aggregate_public_key" => %self.start_aggregate_public_key, + // "state" => %self.state); + // return Err(NetError::InvalidMessage); + // } expected_block_id = &block.header.parent_block_id; count += 1; diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index 4c48a5762f..7a22b4ef2b 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -369,23 +369,26 @@ impl NakamotoUnconfirmedTenureDownloader { let Some(tenure_tip) = self.tenure_tip.as_ref() else { return Err(NetError::InvalidState); }; - let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - else { - return Err(NetError::InvalidState); - }; + + // TODO: epoch-gated loading of aggregate key + // let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() + // else { + // return Err(NetError::InvalidState); + // }; // stacker signature has to match the current aggregate public key - if !unconfirmed_tenure_start_block - .header - .verify_signer(unconfirmed_aggregate_public_key) - { - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } + // TODO: epoch-gated verify threshold or vec of signatures + // if !unconfirmed_tenure_start_block + // .header + // .verify_threshold_signer(unconfirmed_aggregate_public_key) + // { + // warn!("Invalid tenure-start block: bad signer signature"; + // "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + // "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), + // "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, + // "state" => %self.state); + // return Err(NetError::InvalidMessage); + // } // block has to match the expected hash if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { @@ -433,10 +436,12 @@ impl NakamotoUnconfirmedTenureDownloader { let Some(tenure_tip) = self.tenure_tip.as_ref() else { return Err(NetError::InvalidState); }; - let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - else { - return Err(NetError::InvalidState); - }; + + // TODO: epoch-gated load aggregate key + // let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() + // else { + // return Err(NetError::InvalidState); + // }; if tenure_blocks.is_empty() { // nothing to do @@ -454,14 +459,18 @@ impl NakamotoUnconfirmedTenureDownloader { "block_id" => %block.header.block_id()); return Err(NetError::InvalidMessage); } - if !block.header.verify_signer(unconfirmed_aggregate_public_key) { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } + // TODO: epoch-gated verify threshold or vec of signatures + // if !block + // .header + // .verify_threshold_signer(unconfirmed_aggregate_public_key) + // { + // warn!("Invalid block: bad signer signature"; + // "tenure_id" => %tenure_tip.consensus_hash, + // "block.header.block_id" => %block.header.block_id(), + // "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, + // "state" => %self.state); + // return Err(NetError::InvalidMessage); + // } // we may or may not need the tenure-start block for the unconfirmed tenure. But if we // do, make sure it's valid, and it's the last block we receive. diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 7236ef76e4..a4506e67e1 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -33,7 +33,9 @@ use stacks_common::util::hash::Sha512Trunc256Sum; use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView}; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleConn}; +use crate::chainstate::burn::db::sortdb::{ + SortitionDB, SortitionDBConn, SortitionHandle, SortitionHandleConn, +}; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::comm::CoordinatorChannels; use crate::chainstate::coordinator::BlockEventDispatcher; @@ -721,17 +723,38 @@ impl Relayer { ); let config = chainstate.config(); - let Ok(aggregate_public_key) = - NakamotoChainState::get_aggregate_public_key(chainstate, &sortdb, sort_handle, &block) - else { - warn!("Failed to get aggregate public key. Will not store or relay"; - "stacks_block_hash" => %block.header.block_hash(), - "consensus_hash" => %block.header.consensus_hash, - "burn_height" => block.header.chain_length, - "sortition_height" => block_sn.block_height, - ); - return Ok(false); + + // TODO: epoch gate to verify with aggregate key + // let Ok(aggregate_public_key) = + // NakamotoChainState::get_aggregate_public_key(chainstate, &sortdb, sort_handle, &block) + // else { + // warn!("Failed to get aggregate public key. Will not store or relay"; + // "stacks_block_hash" => %block.header.block_hash(), + // "consensus_hash" => %block.header.consensus_hash, + // "burn_height" => block.header.chain_length, + // "sortition_height" => block_sn.block_height, + // ); + // return Ok(false); + // }; + + // TODO: epoch gate to use signatures vec + let tip = sort_handle.tip(); + + let reward_info = match sortdb.get_preprocessed_reward_set_of(&tip) { + Ok(Some(x)) => x, + Ok(None) => { + return Err(chainstate_error::PoxNoRewardCycle); + } + Err(e) => { + return Err(chainstate_error::DBError(e)); + } }; + let reward_cycle = reward_info.reward_cycle; + + let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { + return Err(chainstate_error::NoRegisteredSigners(reward_cycle)); + }; + let (headers_conn, staging_db_tx) = chainstate.headers_conn_and_staging_tx_begin()?; let accepted = NakamotoChainState::accept_block( &config, @@ -739,7 +762,9 @@ impl Relayer { sort_handle, &staging_db_tx, headers_conn, - &aggregate_public_key, + // &aggregate_public_key, + &Point::new(), + reward_set, )?; staging_db_tx.commit()?; diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 31c42c8afb..8528ac8f4c 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -103,7 +103,7 @@ fn test_nakamoto_tenure_downloader() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -171,7 +171,7 @@ fn test_nakamoto_tenure_downloader() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -192,7 +192,7 @@ fn test_nakamoto_tenure_downloader() { tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), state_index_root: TrieHash([0x08; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index f9d4a4b4fb..0e799ceec4 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -658,7 +658,7 @@ impl EventObserver { ); as_object_mut.insert( "signer_signature".into(), - format!("0x{}", &header.signer_signature).into(), + serde_json::to_value(&header.signer_signature).unwrap_or_default(), ); } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index d6edd79963..6ee63ef0fe 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -28,6 +28,7 @@ use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::stacks::boot::RewardSet; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, @@ -35,6 +36,7 @@ use stacks::chainstate::stacks::{ TransactionPayload, TransactionVersion, }; use stacks::net::stackerdb::StackerDBs; +use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; @@ -180,7 +182,7 @@ impl BlockMinerThread { }; if let Some(mut new_block) = new_block { - let (aggregate_public_key, signers_signature) = match self.coordinate_signature( + let (reward_set, signer_signature) = match self.gather_signatures( &mut new_block, self.burn_block.block_height, &mut stackerdbs, @@ -188,13 +190,15 @@ impl BlockMinerThread { ) { Ok(x) => x, Err(e) => { - error!("Unrecoverable error while proposing block to signer set: {e:?}. Ending tenure."); + error!( + "Unrecoverable error while gathering signatures: {e:?}. Ending tenure." + ); return; } }; - new_block.header.signer_signature = signers_signature; - if let Err(e) = self.broadcast(new_block.clone(), &aggregate_public_key) { + new_block.header.signer_signature = signer_signature; + if let Err(e) = self.broadcast(new_block.clone(), &Point::new(), reward_set) { warn!("Error accepting own block: {e:?}. Will try mining again."); continue; } else { @@ -233,6 +237,7 @@ impl BlockMinerThread { } } + #[allow(dead_code)] fn coordinate_signature( &mut self, new_block: &mut NakamotoBlock, @@ -330,6 +335,51 @@ impl BlockMinerThread { Ok((aggregate_public_key, signature)) } + /// Gather signatures from the signers for the block + fn gather_signatures( + &mut self, + new_block: &mut NakamotoBlock, + _burn_block_height: u64, + _stackerdbs: &mut StackerDBs, + _attempts: &mut u64, + ) -> Result<(RewardSet, Vec), NakamotoNodeError> { + let sort_db = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + let tip = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + &new_block.header.consensus_hash, + ) + .expect("FATAL: could not retrieve chain tip") + .expect("FATAL: could not retrieve chain tip"); + + let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { + Ok(Some(x)) => x, + Ok(None) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "No reward set found. Cannot initialize miner coordinator.".into(), + )); + } + Err(e) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" + ))); + } + }; + + let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Current reward cycle did not select a reward set. Cannot mine!".into(), + )); + }; + + // TODO: collect signatures from signers + return Ok((reward_set, vec![])); + } + fn get_stackerdb_contract_and_slots( &self, stackerdbs: &StackerDBs, @@ -443,10 +493,13 @@ impl BlockMinerThread { Ok(filtered_transactions.into_values().collect()) } + /// TODO: update to utilize `signer_signature` vec instead of the aggregate + /// public key. fn broadcast( &self, block: NakamotoBlock, aggregate_public_key: &Point, + reward_set: RewardSet, ) -> Result<(), ChainstateError> { #[cfg(test)] { @@ -484,6 +537,7 @@ impl BlockMinerThread { &staging_tx, headers_conn, &aggregate_public_key, + reward_set, )?; staging_tx.commit()?; Ok(()) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 4667958911..764ae60c3c 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -413,11 +413,11 @@ impl SignCoordinator { // In test mode, short-circuit waiting for the signers if the TEST_SIGNING // channel has been created. This allows integration tests for the stacks-node // independent of the stacks-signer. - if let Some(signature) = + if let Some(_signatures) = crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() { debug!("Short-circuiting waiting for signers, using test signature"); - return Ok(signature); + return Ok(ThresholdSignature::empty()); } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 55eb6753bf..6169ee524d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -45,7 +45,7 @@ use stacks::chainstate::stacks::boot::{ }; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; -use stacks::chainstate::stacks::{StacksTransaction, ThresholdSignature, TransactionPayload}; +use stacks::chainstate::stacks::{StacksTransaction, TransactionPayload}; use stacks::core::{ StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -59,6 +59,7 @@ use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, }; use stacks::util::hash::hex_bytes; +use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, @@ -167,8 +168,10 @@ lazy_static! { pub static TEST_SIGNING: Mutex> = Mutex::new(None); pub struct TestSigningChannel { - pub recv: Option>, - pub send: Sender, + // pub recv: Option>, + pub recv: Option>>, + // pub send: Sender, + pub send: Sender>, } impl TestSigningChannel { @@ -177,14 +180,16 @@ impl TestSigningChannel { /// Returns None if the singleton isn't instantiated and the miner should coordinate /// a real signer set signature. /// Panics if the blind-signer times out. - pub fn get_signature() -> Option { + /// + /// TODO: update to use signatures vec + pub fn get_signature() -> Option> { let mut signer = TEST_SIGNING.lock().unwrap(); let Some(sign_channels) = signer.as_mut() else { return None; }; let recv = sign_channels.recv.take().unwrap(); drop(signer); // drop signer so we don't hold the lock while receiving. - let signature = recv.recv_timeout(Duration::from_secs(30)).unwrap(); + let signatures = recv.recv_timeout(Duration::from_secs(30)).unwrap(); let overwritten = TEST_SIGNING .lock() .unwrap() @@ -193,12 +198,12 @@ impl TestSigningChannel { .recv .replace(recv); assert!(overwritten.is_none()); - Some(signature) + Some(signatures) } /// Setup the TestSigningChannel as a singleton using TEST_SIGNING, /// returning an owned Sender to the channel. - pub fn instantiate() -> Sender { + pub fn instantiate() -> Sender> { let (send, recv) = channel(); let existed = TEST_SIGNING.lock().unwrap().replace(Self { recv: Some(recv), @@ -335,7 +340,7 @@ pub fn read_and_sign_block_proposal( conf: &Config, signers: &TestSigners, signed_blocks: &HashSet, - channel: &Sender, + channel: &Sender>, ) -> Result { let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); @@ -2216,7 +2221,7 @@ fn miner_writes_proposed_block_to_stackerdb() { let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let mut proposed_zero_block = proposed_block.clone(); - proposed_zero_block.header.signer_signature = ThresholdSignature::empty(); + proposed_zero_block.header.signer_signature = Vec::::new(); let proposed_zero_block_hash = format!("0x{}", proposed_zero_block.header.block_hash()); coord_channel diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 83a03fee00..b8d20ea76d 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -529,7 +529,7 @@ fn sign_request_rejected() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; let mut block1 = NakamotoBlock { @@ -556,7 +556,7 @@ fn sign_request_rejected() { tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), state_index_root: TrieHash([0x08; 32]), miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), + signer_signature: Vec::::new(), signer_bitvec: BitVec::zeros(1).unwrap(), }; let mut block2 = NakamotoBlock { From fd0407bf94ace42529caa29f69b015ac738250ef Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 13 May 2024 15:31:56 -0700 Subject: [PATCH 0034/1400] feat: verify signatures in nakamoto block header --- stackslib/src/chainstate/nakamoto/mod.rs | 84 ++++- .../src/chainstate/nakamoto/tests/mod.rs | 304 +++++++++++++++++- 2 files changed, 371 insertions(+), 17 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index ed64a3f217..8752118118 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -19,6 +19,8 @@ use std::fs; use std::ops::{Deref, DerefMut, Range}; use std::path::PathBuf; +use clarity::types::PublicKey; +use clarity::util::secp256k1::{secp256k1_recover, Secp256k1PublicKey}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::database::{BurnStateDB, ClarityDatabase}; @@ -58,8 +60,9 @@ use super::burn::db::sortdb::{ }; use super::burn::operations::{DelegateStxOp, StackStxOp, TransferStxOp, VoteForAggregateKeyOp}; use super::stacks::boot::{ - PoxVersions, RawRewardSetEntry, RewardSet, RewardSetData, BOOT_TEST_POX_4_AGG_KEY_CONTRACT, - BOOT_TEST_POX_4_AGG_KEY_FNAME, SIGNERS_MAX_LIST_SIZE, SIGNERS_NAME, SIGNERS_PK_LEN, + NakamotoSignerEntry, PoxVersions, RawRewardSetEntry, RewardSet, RewardSetData, + BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, SIGNERS_MAX_LIST_SIZE, + SIGNERS_NAME, SIGNERS_PK_LEN, }; use super::stacks::db::accounts::MinerReward; use super::stacks::db::{ @@ -514,21 +517,72 @@ impl NakamotoBlockHeader { /// Verify the block header against the list of signer signatures /// - /// TODO: ingest the list of signer pubkeys - /// - /// TODO: validate against: - /// - Any invalid signatures + /// Validate against: + /// - Any invalid signatures (eg not recoverable or not from a signer) /// - Any duplicate signatures - /// - At least the minimum number of signatures - pub fn verify_signer_signatures(&self, _reward_set: &RewardSet) -> Result<(), ChainstateError> { - // TODO: verify each signature in the block - let _sig_hash = self.signer_signature_hash(); + /// - At least the minimum number of signatures (based on total signer weight + /// and a 70% threshold) + /// - Order of signatures is maintained vs signer set + pub fn verify_signer_signatures(&self, reward_set: &RewardSet) -> Result<(), ChainstateError> { + let message = self.signer_signature_hash(); + let Some(signers) = &reward_set.signers else { + return Err(ChainstateError::InvalidStacksBlock( + "No signers in the reward set".to_string(), + )); + }; - let _signatures = self - .signer_signature + let mut total_weight_signed: u32 = 0; + // `last_index` is used to prevent out-of-order signatures + let mut last_index = None; + + let total_weight = signers.iter().map(|s| s.weight).sum::(); + + // HashMap of + let signers_by_pk = signers .iter() - .map(|sig| sig.clone()) - .collect::>(); + .enumerate() + .map(|(i, signer)| (signer.signing_key, (signer.clone(), i))) + .collect::>(); + + for signature in &self.signer_signature { + let public_key = Secp256k1PublicKey::recover_to_pubkey(message.bits(), signature) + .map_err(|_| { + ChainstateError::InvalidStacksBlock(format!( + "Unable to recover public key from signature {}", + signature.to_hex() + )) + })?; + + let (signer, signer_index) = signers_by_pk + .get(public_key.to_bytes().as_slice()) + .ok_or_else(|| { + ChainstateError::InvalidStacksBlock(format!( + "Public key {} not found in the reward set", + public_key.to_hex() + )) + })?; + + // Enforce order of signatures + match last_index { + Some(index) if index >= *signer_index => { + return Err(ChainstateError::InvalidStacksBlock( + "Signatures are out of order".to_string(), + )); + } + _ => last_index = Some(*signer_index), + } + + total_weight_signed += signer.weight; + } + + // Calculate 70% of total weight as the threshold + let threshold = (total_weight as f64 * 7_f64 / 10_f64).ceil() as u32; + + if total_weight_signed < threshold { + return Err(ChainstateError::InvalidStacksBlock( + "Not enough signatures".to_string(), + )); + } return Ok(()); } @@ -1727,8 +1781,6 @@ impl NakamotoChainState { /// Does nothing if: /// * we already have the block /// Returns true if we stored the block; false if not. - /// - /// TODO: ingest the list of signer keys (instead of aggregate key) pub fn accept_block( config: &ChainstateConfig, block: NakamotoBlock, diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 533c339115..2aa9a6a19a 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -19,6 +19,7 @@ use std::collections::HashMap; use std::fs; use clarity::types::chainstate::{PoxId, SortitionId, StacksBlockId}; +use clarity::util::secp256k1::Secp256k1PrivateKey; use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::StacksAddressExtensions; @@ -70,7 +71,7 @@ use crate::chainstate::nakamoto::{ FIRST_STACKS_BLOCK_ID, }; use crate::chainstate::stacks::boot::{ - MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, + NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; use crate::chainstate::stacks::db::{ ChainStateBootData, ChainstateAccountBalance, ChainstateAccountLockup, ChainstateBNSName, @@ -2839,3 +2840,304 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { assert_eq!(filtered_txs.len(), 1); assert!(filtered_txs.contains(&txs.first().expect("failed to get first tx"))); } + +#[cfg(test)] +pub mod nakamoto_block_signatures { + use super::*; + + /// Helper function make a reward set with (PrivateKey, weight) tuples + fn make_reward_set(signers: Vec<(Secp256k1PrivateKey, u32)>) -> RewardSet { + let mut reward_set = RewardSet::empty(); + reward_set.signers = Some( + signers + .iter() + .map(|(s, w)| { + let mut signing_key = [0u8; 33]; + signing_key.copy_from_slice( + &Secp256k1PublicKey::from_private(s) + .to_bytes_compressed() + .as_slice(), + ); + NakamotoSignerEntry { + signing_key, + stacked_amt: 100_u128, + weight: *w, + } + }) + .collect::>(), + ); + reward_set + } + + #[test] + /// Base success case - 3 signers of equal weight, all signing the block + pub fn test_nakamoto_block_verify_signatures() { + let signers = vec![ + Secp256k1PrivateKey::default(), + Secp256k1PrivateKey::default(), + Secp256k1PrivateKey::default(), + ]; + + let reward_set = make_reward_set(signers.iter().map(|s| (s.clone(), 100)).collect()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block sighash for each signer + + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .map(|s| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + header + .verify_signer_signatures(&reward_set) + .expect("Failed to verify signatures"); + // assert!(&header.verify_signer_signatures(&reward_set).is_ok()); + } + + #[test] + /// Fully signed block, but not in order + fn test_out_of_order_signer_signatures() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block for each signer, but in reverse + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .rev() + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected out of order signatures to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("out of order")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + // Test with 3 equal signers, and only two sign + fn test_insufficient_signatures() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block with just the first two signers + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .take(2) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected insufficient signatures to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("Not enough signatures")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + // Test with 4 signers, but one has 75% weight. Only the whale signs + // and the block is valid + fn test_single_signature_threshold() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 75), + (Secp256k1PrivateKey::default(), 10), + (Secp256k1PrivateKey::default(), 5), + (Secp256k1PrivateKey::default(), 10), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block with just the whale + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .take(1) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + header + .verify_signer_signatures(&reward_set) + .expect("Failed to verify signatures"); + } + + #[test] + // Test with a signature that didn't come from the signer set + fn test_invalid_signer() { + let signers = vec![(Secp256k1PrivateKey::default(), 100)]; + + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + let message = header.signer_signature_hash().0; + + // Sign with all signers + let mut signer_signature = signers + .iter() + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + let invalid_signature = Secp256k1PrivateKey::default() + .sign(&message) + .expect("Failed to sign block sighash"); + + signer_signature.push(invalid_signature); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected invalid signature to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("not found in the reward set")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + fn test_duplicate_signatures() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + let message = header.signer_signature_hash().0; + + // First, sign with the first 2 signers + let mut signer_signature = signers + .iter() + .take(2) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + // Sign again with the first signer + let duplicate_signature = signers[0] + .0 + .sign(&message) + .expect("Failed to sign block sighash"); + + signer_signature.push(duplicate_signature); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected duplicate signature to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("Signatures are out of order")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + // Test where a signature used a different message + fn test_signature_invalid_message() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + let message = header.signer_signature_hash().0; + + let mut signer_signature = signers + .iter() + .take(3) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + // With the 4th signer, use a junk message + let message = [0u8; 32]; + + let bad_signature = signers[3] + .0 + .sign(&message) + .expect("Failed to sign block sighash"); + + signer_signature.push(bad_signature); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected invalid message to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => {} + _ => panic!("Expected InvalidStacksBlock error"), + } + } + + #[test] + // Test where a signature is not recoverable + fn test_unrecoverable_signature() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + (Secp256k1PrivateKey::default(), 100), + ]; + + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + let message = header.signer_signature_hash().0; + + let mut signer_signature = signers + .iter() + .take(3) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + // Now append an unrecoverable signature + signer_signature.push(MessageSignature::empty()); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected invalid message to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + if !msg.contains("Unable to recover public key") { + panic!("Unexpected error msg: {}", msg); + } + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } +} From 80e6853a3164b9e9ca26c2b303b508e61016d424 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 15 May 2024 12:12:54 -0700 Subject: [PATCH 0035/1400] feat: refactor test_signers for new signer_signature type --- .../chainstate/nakamoto/coordinator/tests.rs | 18 +- stackslib/src/chainstate/nakamoto/mod.rs | 23 ++- .../src/chainstate/nakamoto/test_signers.rs | 174 +++++++++++++++++- .../src/chainstate/nakamoto/tests/mod.rs | 43 +++-- .../src/chainstate/nakamoto/tests/node.rs | 24 ++- .../src/chainstate/stacks/boot/pox_4_tests.rs | 4 +- .../chainstate/stacks/boot/signers_tests.rs | 6 +- .../stacks/boot/signers_voting_tests.rs | 39 ++-- stackslib/src/net/relay.rs | 3 +- stackslib/src/net/tests/download/nakamoto.rs | 6 +- stackslib/src/net/tests/mod.rs | 4 +- testnet/stacks-node/src/event_dispatcher.rs | 72 +++++++- .../src/tests/nakamoto_integrations.rs | 59 +++--- testnet/stacks-node/src/tests/signer/v1.rs | 5 +- 14 files changed, 361 insertions(+), 119 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 0f3abe5c29..14ba87292f 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -426,8 +426,7 @@ fn replay_reward_cycle( /// Mine a single Nakamoto tenure with a single Nakamoto block #[test] fn test_simple_nakamoto_coordinator_bootup() { - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![], @@ -491,8 +490,7 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], @@ -617,8 +615,7 @@ fn test_nakamoto_chainstate_getters() { &vec![StacksPublicKey::from_private(&private_key)], ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], @@ -1126,8 +1123,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], @@ -1527,8 +1523,7 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> &vec![StacksPublicKey::from_private(&private_key)], ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], @@ -1865,8 +1860,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe &vec![StacksPublicKey::from_private(&private_key)], ) .unwrap(); - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 8752118118..79e8cc81ee 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -336,14 +336,11 @@ impl FromRow for NakamotoBlockHeader { let signer_signature: SerdeValue = row.get_unwrap("signer_signature"); let signer_signature = signer_signature .as_array() - .map(|values| { - values - .iter() - .cloned() - .map(serde_json::from_value::) - .collect::, serde_json::Error>>() - }) - .ok_or_else(|| DBError::Corruption)??; + .ok_or(DBError::Corruption)? + .iter() + .cloned() + .map(serde_json::from_value::) + .collect::, _>>()?; Ok(NakamotoBlockHeader { version, @@ -1852,9 +1849,17 @@ impl NakamotoChainState { // TODO: epoch gate to verify signatures vec if let Err(e) = block.header.verify_signer_signatures(&reward_set) { + let reward_set_keys = reward_set + .clone() + .signers + .unwrap() + .iter() + .map(|s| to_hex(&s.signing_key)) + .collect::>(); warn!("Received block, but the signer signatures are invalid"; "block_id" => %block.block_id(), - "error" => ?e + "error" => ?e, + "signer_keys" => ?reward_set_keys ); let msg = format!("Received block, but the signer signatures are invalid"); return Err(ChainstateError::InvalidStacksBlock(msg)); diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 7179664fac..f5c61b2f7d 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -19,7 +19,8 @@ use std::collections::{HashSet, VecDeque}; use std::path::{Path, PathBuf}; use std::{fs, io}; -use clarity::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; +use clarity::util::hash::MerkleHashFunc; +use clarity::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::types::*; @@ -36,6 +37,7 @@ use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; use wsts::curve::point::Point; use wsts::traits::Aggregator; +use self::boot::RewardSet; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::burnchains::*; use crate::chainstate::burn::db::sortdb::*; @@ -138,28 +140,180 @@ impl Default for TestSigners { } impl TestSigners { - // TODO: sign using vec of signatures + /// Generate TestSigners using a list of signer keys + pub fn new(signer_keys: Vec) -> Self { + TestSigners::default_with_signers(signer_keys) + } + + /// Internal function to generate aggregate key information + fn default_with_signers(signer_keys: Vec) -> Self { + let mut rng = rand_core::OsRng::default(); + let num_keys = 10; + let threshold = 7; + let party_key_ids: Vec> = + vec![vec![1, 2, 3], vec![4, 5], vec![6, 7, 8], vec![9, 10]]; + let num_parties = party_key_ids.len().try_into().unwrap(); + + // Create the parties + let mut signer_parties: Vec = party_key_ids + .iter() + .enumerate() + .map(|(pid, pkids)| { + wsts::v2::Party::new( + pid.try_into().unwrap(), + pkids, + num_parties, + num_keys, + threshold, + &mut rng, + ) + }) + .collect(); + + // Generate an aggregate public key + let poly_commitments = match wsts::v2::test_helpers::dkg(&mut signer_parties, &mut rng) { + Ok(poly_commitments) => poly_commitments, + Err(secret_errors) => { + panic!("Got secret errors from DKG: {:?}", secret_errors); + } + }; + let mut sig_aggregator = wsts::v2::Aggregator::new(num_keys, threshold); + sig_aggregator + .init(&poly_commitments) + .expect("aggregator init failed"); + let aggregate_public_key = sig_aggregator.poly[0]; + Self { + signer_parties, + aggregate_public_key, + poly_commitments, + num_keys, + threshold, + party_key_ids, + cycle: 0, + signer_keys, + } + } + + /// Sign a Nakamoto block using [`Self::signer_keys`]. + /// + /// N.B. If any of [`Self::signer_keys`] are not in the reward set, the resulting + /// signatures will be invalid. Use [`Self::sign_block_with_reward_set()`] to ensure + /// that any signer keys not in the reward set are not included. pub fn sign_nakamoto_block(&mut self, block: &mut NakamotoBlock, cycle: u64) { // Update the aggregate public key if the cycle has changed if self.cycle != cycle { self.generate_aggregate_key(cycle); } - let msg = block.header.signer_signature_hash().0; - let signer_signature = self - .signer_keys - .iter() - .map(|key| key.sign(&msg).unwrap()) - .collect::>(); + + // TODO: epoch gate for aggregated signatures + // let signer_signature = self.sign_block_with_aggregate_key(&block); + + let signer_signature = self.generate_block_signatures(&block); test_debug!( - "Signed Nakamoto block {} with {} (rc {})", + "Signed Nakamoto block {} with {} signatures (rc {})", block.block_id(), - &self.aggregate_public_key, + signer_signature.len(), cycle ); block.header.signer_signature = signer_signature; } + /// Sign a NakamotoBlock and maintain the order + /// of the reward set signers in the resulting signatures. + /// + /// If any of [`Self::signer_keys`] are not in the reward set, their signatures + /// will be ignored. + pub fn sign_block_with_reward_set(&self, block: &mut NakamotoBlock, reward_set: &RewardSet) { + let signatures = self.generate_block_signatures(block); + let reordered_signatures = self.reorder_signatures(signatures, reward_set); + block.header.signer_signature = reordered_signatures; + } + + /// Sign a Nakamoto block and generate a vec of signatures + fn generate_block_signatures(&self, block: &NakamotoBlock) -> Vec { + let msg = block.header.signer_signature_hash().0; + self.signer_keys + .iter() + .map(|key| key.sign(&msg).unwrap()) + .collect::>() + } + + fn sign_block_with_aggregate_key(&mut self, block: &NakamotoBlock) -> ThresholdSignature { + let mut rng = rand_core::OsRng::default(); + let msg = block.header.signer_signature_hash().0; + let (nonces, sig_shares, key_ids) = + wsts::v2::test_helpers::sign(msg.as_slice(), &mut self.signer_parties, &mut rng); + + let mut sig_aggregator = wsts::v2::Aggregator::new(self.num_keys, self.threshold); + sig_aggregator + .init(&self.poly_commitments) + .expect("aggregator init failed"); + let signature = sig_aggregator + .sign(msg.as_slice(), &nonces, &sig_shares, &key_ids) + .expect("aggregator sig failed"); + ThresholdSignature(signature) + } + + /// Reorder a list of signatures to match the order of the reward set. + pub fn reorder_signatures( + &self, + signatures: Vec, + reward_set: &RewardSet, + ) -> Vec { + let test_signer_keys = &self + .signer_keys + .iter() + .cloned() + .map(|key| Secp256k1PublicKey::from_private(&key).to_bytes_compressed()) + .collect::>(); + + let reward_set_keys = &reward_set + .clone() + .signers + .unwrap() + .iter() + .map(|s| s.signing_key.to_vec()) + .collect::>(); + + let signature_keys_map = test_signer_keys + .iter() + .cloned() + .zip(signatures.iter().cloned()) + .collect::>(); + + let mut reordered_signatures = Vec::with_capacity(reward_set_keys.len()); + + let mut missing_keys = 0; + + for key in reward_set_keys { + if let Some(signature) = signature_keys_map.get(key) { + reordered_signatures.push(signature.clone()); + } else { + missing_keys += 1; + } + } + if missing_keys > 0 { + warn!( + "TestSigners: {} keys are in the reward set but not in signer_keys", + missing_keys + ); + } + + reordered_signatures + } + + // Sort [`Self::signer_keys`] by their compressed public key + pub fn sorted_signer_keys(&self) -> Vec { + let mut keys = self.signer_keys.clone(); + keys.sort_by(|a, b| { + let a = Secp256k1PublicKey::from_private(a).to_bytes_compressed(); + let b = Secp256k1PublicKey::from_private(b).to_bytes_compressed(); + a.cmp(&b) + }); + keys + } + // Generate and assign a new aggregate public key pub fn generate_aggregate_key(&mut self, cycle: u64) -> Point { // If the key is already generated for this cycle, return it diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 2aa9a6a19a..99f68fadf6 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -155,7 +155,7 @@ fn codec_nakamoto_header() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![MessageSignature::from_bytes(&[0x01; 65]).unwrap()], signer_bitvec: BitVec::zeros(8).unwrap(), }; @@ -179,12 +179,13 @@ fn codec_nakamoto_header() { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, // stacker signature (mocked) - 0x02, 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac, 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, - 0x0b, 0x07, 0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9, 0x59, 0xf2, 0x81, 0x5b, 0x16, - 0xf8, 0x17, 0x98, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, // signatures length + 0x00, 0x00, 0x00, 0x01, // stacker signature (mocked) + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, ]; let signer_bitvec_serialization = "00080000000100"; @@ -753,6 +754,11 @@ pub fn test_load_store_update_nakamoto_blocks() { MerkleTree::::new(&txid_vecs).root() }; + let header_signatures = vec![ + MessageSignature::from_bytes(&[0x01; 65]).unwrap(), + MessageSignature::from_bytes(&[0x02; 65]).unwrap(), + ]; + let nakamoto_header = NakamotoBlockHeader { version: 1, chain_length: 457, @@ -762,7 +768,7 @@ pub fn test_load_store_update_nakamoto_blocks() { tx_merkle_root: nakamoto_tx_merkle_root, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: header_signatures.clone(), signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -1037,13 +1043,16 @@ pub fn test_load_store_update_nakamoto_blocks() { // can load Nakamoto block, but only the Nakamoto block let nakamoto_blocks_db = chainstate.nakamoto_blocks_db(); + let first_nakamoto_block = nakamoto_blocks_db + .get_nakamoto_block(&nakamoto_header.block_id()) + .unwrap() + .unwrap() + .0; + assert_eq!(first_nakamoto_block, nakamoto_block,); + // Double check that the signatures match assert_eq!( - nakamoto_blocks_db - .get_nakamoto_block(&nakamoto_header.block_id()) - .unwrap() - .unwrap() - .0, - nakamoto_block + first_nakamoto_block.header.signer_signature, + header_signatures ); assert_eq!( nakamoto_blocks_db @@ -1675,8 +1684,7 @@ fn make_fork_run_with_arrivals( /// Tests that getting the highest nakamoto tenure works in the presence of forks #[test] pub fn test_get_highest_nakamoto_tenure() { - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![], @@ -1824,8 +1832,7 @@ pub fn test_get_highest_nakamoto_tenure() { /// to have slot i in subsequent sortitions. #[test] fn test_make_miners_stackerdb_config() { - let mut test_signers = TestSigners::default(); - let test_stackers = TestStacker::common_signing_set(&test_signers); + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); let mut peer = boot_nakamoto( function_name!(), vec![], diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index b2b275a0e1..354bacb7af 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -95,17 +95,21 @@ impl TestStacker { /// make a set of stackers who will share a single signing key and stack with /// `Self::DEFAULT_STACKER_AMOUNT` - pub fn common_signing_set(test_signers: &TestSigners) -> Vec { - let mut signing_key_seed = test_signers.num_keys.to_be_bytes().to_vec(); + pub fn common_signing_set() -> (TestSigners, Vec) { + let num_keys: u32 = 10; + let mut signing_key_seed = num_keys.to_be_bytes().to_vec(); signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); - (0..test_signers.num_keys) + let stackers = (0..num_keys) .map(|index| TestStacker { signer_private_key: signing_key.clone(), stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), amount: Self::DEFAULT_STACKER_AMOUNT, }) - .collect() + .collect::>(); + + let test_signers = TestSigners::new(vec![signing_key]); + (test_signers, stackers) } } @@ -574,13 +578,23 @@ impl TestStacksNode { .block_height_to_reward_cycle(sortdb.first_block_height, tenure_sn.block_height) .unwrap(); + // Get the reward set + let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let reward_set = sortdb + .get_preprocessed_reward_set_of(&sort_tip) + .expect("Failed to get reward cycle info") + .expect("Failed to get reward cycle info") + .known_selected_anchor_block_owned() + .expect("Expected a reward set"); + test_debug!( "Signing Nakamoto block {} in tenure {} with key in cycle {}", nakamoto_block.block_id(), tenure_id_consensus_hash, cycle ); - signers.sign_nakamoto_block(&mut nakamoto_block, cycle); + + signers.sign_block_with_reward_set(&mut nakamoto_block, &reward_set); let block_id = nakamoto_block.block_id(); debug!( diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index be7675c700..16f7312ef4 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -4254,7 +4254,7 @@ fn stack_agg_increase() { let default_initial_balances = 1_000_000_000_000_000_000; let observer = TestEventObserver::new(); - let test_signers = TestSigners::default(); + let test_signers = TestSigners::new(vec![]); let mut initial_balances = vec![ (alice.principal.clone(), default_initial_balances), (bob.principal.clone(), default_initial_balances), @@ -6464,7 +6464,7 @@ pub fn pox_4_scenario_test_setup<'a>( TestPeerConfig, ) { // Setup code extracted from your original test - let test_signers = TestSigners::default(); + let test_signers = TestSigners::new(vec![]); let aggregate_public_key = test_signers.aggregate_public_key.clone(); let mut peer_config = TestPeerConfig::new(function_name!(), 0, 0); let private_key = peer_config.private_key.clone(); diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index a97a0c1e09..0cd1df3577 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -349,7 +349,11 @@ pub fn prepare_signers_test<'a>( stackers: &[TestStacker], observer: Option<&'a TestEventObserver>, ) -> (TestPeer<'a>, TestSigners, StacksBlockId, u128) { - let mut test_signers = TestSigners::default(); + let signer_keys = stackers + .iter() + .map(|s| s.signer_private_key.clone()) + .collect::>(); + let mut test_signers = TestSigners::new(signer_keys); let mut peer = boot_nakamoto( test_name, diff --git a/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs index 039e96f597..9a97e88b5e 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs @@ -178,8 +178,7 @@ fn vote_for_aggregate_public_key_success() { let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_key = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -334,8 +333,7 @@ fn vote_for_aggregate_public_key_with_errors() { let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_key = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -621,8 +619,7 @@ fn vote_for_aggregate_public_key_out_of_window() { let stacker3_index = get_signer_index(&mut peer, latest_block_id, stacker3_address, cycle_id); let stacker4_index = get_signer_index(&mut peer, latest_block_id, stacker4_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_key = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -825,8 +822,7 @@ fn vote_for_aggregate_public_key_in_first_block() { let signer_index = get_signer_index(&mut peer, latest_block_id, signer_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_public_key = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_public_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let txs = vec![ // cast a vote for the aggregate public key @@ -911,9 +907,8 @@ fn vote_for_aggregate_public_key_in_last_block() { ); let cycle_id: u128 = current_reward_cycle; - let mut signers = TestSigners::default(); - let aggregate_public_key_1 = signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key_2 = signers.generate_aggregate_key(cycle_id as u64 + 2); + let aggregate_public_key_1 = test_signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_public_key_2 = test_signers.generate_aggregate_key(cycle_id as u64 + 2); // create vote txs for alice let signer_1_nonce = 1; // Start at 1 because the signer has already voted once @@ -1056,8 +1051,7 @@ fn vote_for_duplicate_aggregate_public_key() { let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_public_key_point = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_public_key_point = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let aggregate_public_key = Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -1105,7 +1099,7 @@ fn vote_for_duplicate_aggregate_public_key() { let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let aggregate_public_key_point = signers.generate_aggregate_key(cycle_id as u64 + 2); + let aggregate_public_key_point = test_signers.generate_aggregate_key(cycle_id as u64 + 2); let aggregate_public_key_2 = Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -1206,12 +1200,11 @@ fn vote_for_aggregate_public_key_two_rounds() { let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_public_key_0_point = signers.generate_aggregate_key(0); + let aggregate_public_key_0_point = test_signers.generate_aggregate_key(0); let aggregate_public_key_0 = Value::buff_from(aggregate_public_key_0_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); - let aggregate_public_key_1_point = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_public_key_1_point = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let aggregate_public_key_1 = Value::buff_from(aggregate_public_key_1_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -1402,11 +1395,11 @@ fn vote_for_aggregate_public_key_two_rounds() { // In this cycle, the two rounds are in separate tenures. - let aggregate_public_key_0_point = signers.generate_aggregate_key(1); + let aggregate_public_key_0_point = test_signers.generate_aggregate_key(1); let aggregate_public_key_0 = Value::buff_from(aggregate_public_key_0_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); - let aggregate_public_key_1_point = signers.generate_aggregate_key(cycle_id as u64 + 2); + let aggregate_public_key_1_point = test_signers.generate_aggregate_key(cycle_id as u64 + 2); let aggregate_public_key_1 = Value::buff_from(aggregate_public_key_1_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -1643,8 +1636,7 @@ fn vote_for_aggregate_public_key_early() { let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_key = signers.generate_aggregate_key(cycle_id as u64 + 1); + let aggregate_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -1688,7 +1680,7 @@ fn vote_for_aggregate_public_key_early() { let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); // In this tenure, signers have not been set yet, so the vote should fail - let aggregate_public_key_point = signers.generate_aggregate_key(cycle_id as u64 + 2); + let aggregate_public_key_point = test_signers.generate_aggregate_key(cycle_id as u64 + 2); let aggregate_public_key = Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); @@ -1872,8 +1864,7 @@ fn vote_for_aggregate_public_key_mixed_rounds() { let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - let mut signers = TestSigners::default(); - let aggregate_public_key_point = signers.generate_aggregate_key(0); + let aggregate_public_key_point = test_signers.generate_aggregate_key(0); let aggregate_public_key = Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index a4506e67e1..33fa3ebc12 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -738,11 +738,12 @@ impl Relayer { // }; // TODO: epoch gate to use signatures vec - let tip = sort_handle.tip(); + let tip = block_sn.sortition_id; let reward_info = match sortdb.get_preprocessed_reward_set_of(&tip) { Ok(Some(x)) => x, Ok(None) => { + error!("No RewardCycleInfo found for tip {}", tip); return Err(chainstate_error::PoxNoRewardCycle); } Err(e) => { diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 8528ac8f4c..02aafdfa1b 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -90,7 +90,7 @@ impl NakamotoDownloadStateMachine { fn test_nakamoto_tenure_downloader() { let ch = ConsensusHash([0x11; 20]); let private_key = StacksPrivateKey::new(); - let mut test_signers = TestSigners::default(); + let mut test_signers = TestSigners::new(vec![]); let aggregate_public_key = test_signers.aggregate_public_key.clone(); @@ -352,7 +352,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!(tip.block_height, 51); - let test_signers = TestSigners::default(); + let test_signers = TestSigners::new(vec![]); let naddr = NeighborAddress { addrbytes: PeerAddress([0xff; 16]), @@ -1090,7 +1090,7 @@ fn test_make_tenure_downloaders() { assert_eq!(tip.block_height, 51); - let test_signers = TestSigners::default(); + let test_signers = TestSigners::new(vec![]); let agg_pubkeys = peer.network.aggregate_public_keys.clone(); // test load_wanted_tenures() diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 82e1b8b814..a9534b6d29 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -90,13 +90,13 @@ pub struct NakamotoBootPlan { impl NakamotoBootPlan { pub fn new(test_name: &str) -> Self { - let test_signers = TestSigners::default(); + let (test_signers, test_stackers) = TestStacker::common_signing_set(); Self { test_name: test_name.to_string(), pox_constants: TestPeerConfig::default().burnchain.pox_constants, private_key: StacksPrivateKey::from_seed(&[2]), initial_balances: vec![], - test_stackers: TestStacker::common_signing_set(&test_signers), + test_stackers, test_signers, observer: Some(TestEventObserver::new()), num_peers: 0, diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 0e799ceec4..43258fc048 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -648,10 +648,6 @@ impl EventObserver { "signer_signature_hash".into(), format!("0x{}", header.signer_signature_hash()).into(), ); - as_object_mut.insert( - "signer_signature".into(), - format!("0x{}", header.signer_signature_hash()).into(), - ); as_object_mut.insert( "miner_signature".into(), format!("0x{}", &header.miner_signature).into(), @@ -1435,8 +1431,12 @@ impl EventDispatcher { mod test { use clarity::vm::costs::ExecutionCost; use stacks::burnchains::{PoxConstants, Txid}; - use stacks::chainstate::stacks::db::StacksHeaderInfo; + use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; + use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksHeaderInfo}; + use stacks::chainstate::stacks::events::StacksBlockEventData; use stacks::chainstate::stacks::StacksBlock; + use stacks::types::chainstate::BlockHeaderHash; + use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksBlockId}; @@ -1499,4 +1499,66 @@ mod test { expected_bitvec_str ); } + + #[test] + fn test_block_processed_event_nakamoto() { + let observer = EventObserver { + endpoint: "nowhere".to_string(), + }; + + let filtered_events = vec![]; + let mut block_header = NakamotoBlockHeader::empty(); + let signer_signature = vec![ + MessageSignature::from_bytes(&[0; 65]).unwrap(), + MessageSignature::from_bytes(&[1; 65]).unwrap(), + ]; + block_header.signer_signature = signer_signature.clone(); + let block = NakamotoBlock { + header: block_header.clone(), + txs: vec![], + }; + let mut metadata = StacksHeaderInfo::regtest_genesis(); + metadata.anchored_header = StacksBlockHeaderTypes::Nakamoto(block_header.clone()); + let receipts = vec![]; + let parent_index_hash = StacksBlockId([0; 32]); + let winner_txid = Txid([0; 32]); + let mature_rewards = serde_json::Value::Array(vec![]); + let parent_burn_block_hash = BurnchainHeaderHash([0; 32]); + let parent_burn_block_height = 0; + let parent_burn_block_timestamp = 0; + let anchored_consumed = ExecutionCost::zero(); + let mblock_confirmed_consumed = ExecutionCost::zero(); + let pox_constants = PoxConstants::testnet_default(); + let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); + + let payload = observer.make_new_block_processed_payload( + filtered_events, + &StacksBlockEventData::from((block, BlockHeaderHash([0; 32]))), + &metadata, + &receipts, + &parent_index_hash, + &winner_txid, + &mature_rewards, + parent_burn_block_hash, + parent_burn_block_height, + parent_burn_block_timestamp, + &anchored_consumed, + &mblock_confirmed_consumed, + &pox_constants, + &None, + &Some(signer_bitvec.clone()), + ); + + let event_signer_signature = payload + .get("signer_signature") + .unwrap() + .as_array() + .expect("Expected signer_signature to be an array") + .iter() + .cloned() + .map(serde_json::from_value::) + .collect::, _>>() + .expect("Unable to deserialize array of MessageSignature"); + assert_eq!(event_signer_signature, signer_signature); + } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6169ee524d..54dfbd28ec 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -345,9 +345,13 @@ pub fn read_and_sign_block_proposal( let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let reward_cycle = burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); + + let reward_set = sortdb + .get_preprocessed_reward_set_of(&tip.sortition_id) + .expect("Failed to get reward cycle info") + .expect("Failed to get reward cycle info") + .known_selected_anchor_block_owned() + .expect("Expected a reward set"); let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?; let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); @@ -364,9 +368,7 @@ pub fn read_and_sign_block_proposal( "signer_sig_hash" => &signer_sig_hash.to_hex(), ); - signers - .clone() - .sign_nakamoto_block(&mut proposed_block, reward_cycle); + signers.sign_block_with_reward_set(&mut proposed_block, &reward_set); channel .send(proposed_block.header.signer_signature) @@ -562,7 +564,7 @@ pub fn boot_to_epoch_3( blocks_processed: &Arc, stacker_sks: &[StacksPrivateKey], signer_sks: &[StacksPrivateKey], - self_signing: Option<&TestSigners>, + self_signing: &mut Option<&mut TestSigners>, btc_regtest_controller: &mut BitcoinRegtestController, ) { assert_eq!(stacker_sks.len(), signer_sks.len()); @@ -632,6 +634,11 @@ pub fn boot_to_epoch_3( submit_tx(&http_origin, &stacking_tx); } + // Update TestSigner with `signer_sks` if self-signing + if let Some(ref mut signers) = self_signing { + signers.signer_keys = signer_sks.to_vec(); + } + let prepare_phase_start = btc_regtest_controller .get_burnchain() .pox_constants @@ -982,7 +989,6 @@ fn simple_neon_integration() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let prom_bind = format!("{}:{}", "127.0.0.1", 6000); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); @@ -998,6 +1004,7 @@ fn simple_neon_integration() { ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); naka_conf.add_initial_balance( PrincipalData::from(sender_signer_addr.clone()).to_string(), 100000, @@ -1038,7 +1045,7 @@ fn simple_neon_integration() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -1221,7 +1228,6 @@ fn mine_multiple_per_tenure_integration() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -1277,12 +1283,13 @@ fn mine_multiple_per_tenure_integration() { .spawn(move || run_loop.start(None, 0)) .unwrap(); wait_for_runloop(&blocks_processed); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); boot_to_epoch_3( &naka_conf, &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -1404,7 +1411,6 @@ fn correct_burn_outs() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.burnchain.pox_reward_length = Some(10); naka_conf.burnchain.pox_prepare_length = Some(3); @@ -1441,6 +1447,8 @@ fn correct_burn_outs() { 100000, ); + let signers = TestSigners::new(vec![sender_signer_sk]); + test_observer::spawn(); let observer_port = test_observer::EVENT_OBSERVER_PORT; naka_conf.events_observers.insert(EventObserverConfig { @@ -1769,7 +1777,6 @@ fn block_proposal_api_endpoint() { return; } - let signers = TestSigners::default(); let (mut conf, _miner_account) = naka_neon_integration_conf(None); let password = "12345".to_string(); conf.connection_options.block_proposal_token = Some(password.clone()); @@ -1810,13 +1817,14 @@ fn block_proposal_api_endpoint() { let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); wait_for_runloop(&blocks_processed); boot_to_epoch_3( &conf, &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -2131,7 +2139,6 @@ fn miner_writes_proposed_block_to_stackerdb() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); let sender_sk = Secp256k1PrivateKey::new(); @@ -2152,6 +2159,8 @@ fn miner_writes_proposed_block_to_stackerdb() { 100000, ); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + test_observer::spawn(); let observer_port = test_observer::EVENT_OBSERVER_PORT; naka_conf.events_observers.insert(EventObserverConfig { @@ -2185,7 +2194,7 @@ fn miner_writes_proposed_block_to_stackerdb() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -2267,13 +2276,14 @@ fn vote_for_aggregate_key_burn_op() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let _http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let signer_sk = Secp256k1PrivateKey::new(); let signer_addr = tests::to_addr(&signer_sk); + let mut signers = TestSigners::new(vec![signer_sk.clone()]); + naka_conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); @@ -2313,7 +2323,7 @@ fn vote_for_aggregate_key_burn_op() { &blocks_processed, &[stacker_sk], &[signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -2512,13 +2522,13 @@ fn follower_bootup() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); let tenure_count = 5; let inter_blocks_per_tenure = 9; // setup sender + recipient for some test stx transfers @@ -2573,7 +2583,7 @@ fn follower_bootup() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -2768,7 +2778,6 @@ fn stack_stx_burn_op_integration_test() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.burnchain.satoshis_per_byte = 2; naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -2779,6 +2788,8 @@ fn stack_stx_burn_op_integration_test() { let signer_sk_2 = Secp256k1PrivateKey::new(); let signer_addr_2 = tests::to_addr(&signer_sk_2); + let mut signers = TestSigners::new(vec![signer_sk_1.clone()]); + let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -2819,7 +2830,7 @@ fn stack_stx_burn_op_integration_test() { &blocks_processed, &[stacker_sk], &[signer_sk_1], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); @@ -3203,7 +3214,6 @@ fn forked_tenure_is_ignored() { return; } - let signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(10); let sender_sk = Secp256k1PrivateKey::new(); @@ -3217,6 +3227,7 @@ fn forked_tenure_is_ignored() { ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); naka_conf.add_initial_balance( PrincipalData::from(sender_signer_addr.clone()).to_string(), @@ -3259,7 +3270,7 @@ fn forked_tenure_is_ignored() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index b8d20ea76d..615df08d24 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -30,9 +30,8 @@ use stacks::chainstate::stacks::boot::{SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOT use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::miner::TransactionEvent; use stacks::chainstate::stacks::{ - StacksPrivateKey, StacksTransaction, ThresholdSignature, TransactionAnchorMode, - TransactionAuth, TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, - TransactionVersion, + StacksPrivateKey, StacksTransaction, TransactionAnchorMode, TransactionAuth, + TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, }; use stacks::util_lib::strings::StacksString; use stacks_common::bitvec::BitVec; From 57c0db2e07acd4a62f72e5e8236d7fdc4abaa23e Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 15 May 2024 13:09:26 -0700 Subject: [PATCH 0036/1400] feat: update sign_coordinator and blind_signer for new message types --- .../stacks-node/src/nakamoto_node/miner.rs | 52 +++++++- .../src/nakamoto_node/sign_coordinator.rs | 113 ++++++++++++++++-- .../src/tests/nakamoto_integrations.rs | 38 ++++-- 3 files changed, 174 insertions(+), 29 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 6ee63ef0fe..42009d5380 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -321,7 +321,7 @@ impl BlockMinerThread { })?; *attempts += 1; - let signature = coordinator.begin_sign( + let signature = coordinator.begin_sign_v1( new_block, burn_block_height, *attempts, @@ -339,10 +339,15 @@ impl BlockMinerThread { fn gather_signatures( &mut self, new_block: &mut NakamotoBlock, - _burn_block_height: u64, - _stackerdbs: &mut StackerDBs, - _attempts: &mut u64, + burn_block_height: u64, + stackerdbs: &mut StackerDBs, + attempts: &mut u64, ) -> Result<(RewardSet, Vec), NakamotoNodeError> { + let Some(miner_privkey) = self.config.miner.mining_key else { + return Err(NakamotoNodeError::MinerConfigurationFailed( + "No mining key configured, cannot mine", + )); + }; let sort_db = SortitionDB::open( &self.config.get_burn_db_file_path(), true, @@ -356,6 +361,15 @@ impl BlockMinerThread { .expect("FATAL: could not retrieve chain tip") .expect("FATAL: could not retrieve chain tip"); + let reward_cycle = self + .burnchain + .pox_constants + .block_height_to_reward_cycle( + self.burnchain.first_block_height, + self.burn_block.block_height, + ) + .expect("FATAL: building on a burn block that is before the first burn block"); + let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { Ok(Some(x)) => x, Ok(None) => { @@ -376,8 +390,34 @@ impl BlockMinerThread { )); }; - // TODO: collect signatures from signers - return Ok((reward_set, vec![])); + let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); + let mut coordinator = SignCoordinator::new( + &reward_set, + reward_cycle, + miner_privkey_as_scalar, + Point::new(), + &stackerdbs, + &self.config, + ) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to initialize the signing coordinator. Cannot mine! {e:?}" + )) + })?; + + *attempts += 1; + let signature = coordinator.begin_sign_v0( + new_block, + burn_block_height, + *attempts, + &tip, + &self.burnchain, + &sort_db, + &stackerdbs, + &self.globals.counters, + )?; + + return Ok((reward_set, signature)); } fn get_stackerdb_contract_and_slots( diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 764ae60c3c..49204166ab 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -17,7 +17,8 @@ use std::sync::mpsc::Receiver; use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; -use libsigner::v1::messages::{MessageSlotID, SignerMessage}; +use libsigner::v0::messages::SignerMessage as SignerMessageV0; +use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -28,6 +29,7 @@ use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::stackerdb::StackerDBs; +use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; @@ -140,10 +142,10 @@ fn get_signer_commitments( ); continue; }; - let Ok(SignerMessage::DkgResults { + let Ok(SignerMessageV1::DkgResults { aggregate_key, party_polynomials, - }) = SignerMessage::consensus_deserialize(&mut signer_data.as_slice()) + }) = SignerMessageV1::consensus_deserialize(&mut signer_data.as_slice()) else { warn!( "Failed to parse DKG result, will look for results from other signers."; @@ -314,12 +316,12 @@ impl SignCoordinator { .expect("FATAL: tried to initialize WSTS coordinator before first burn block height") } - fn send_signers_message( + fn send_signers_message( message_key: &Scalar, sortdb: &SortitionDB, tip: &BlockSnapshot, stackerdbs: &StackerDBs, - message: SignerMessage, + message: M, is_mainnet: bool, miners_session: &mut StackerDBSession, ) -> Result<(), String> { @@ -363,7 +365,7 @@ impl SignCoordinator { } #[cfg_attr(test, mutants::skip)] - pub fn begin_sign( + pub fn begin_sign_v1( &mut self, block: &NakamotoBlock, burn_block_height: u64, @@ -397,7 +399,7 @@ impl SignCoordinator { "Failed to start signing round in FIRE coordinator: {e:?}" )) })?; - Self::send_signers_message( + Self::send_signers_message::( &self.message_key, sortdb, burn_tip, @@ -483,11 +485,11 @@ impl SignCoordinator { let packets: Vec<_> = messages .into_iter() .filter_map(|msg| match msg { - SignerMessage::DkgResults { .. } - | SignerMessage::BlockResponse(_) - | SignerMessage::EncryptedSignerState(_) - | SignerMessage::Transactions(_) => None, - SignerMessage::Packet(packet) => { + SignerMessageV1::DkgResults { .. } + | SignerMessageV1::BlockResponse(_) + | SignerMessageV1::EncryptedSignerState(_) + | SignerMessageV1::Transactions(_) => None, + SignerMessageV1::Packet(packet) => { debug!("Received signers packet: {packet:?}"); if !packet.verify(&self.wsts_public_keys, &coordinator_pk) { warn!("Failed to verify StackerDB packet: {packet:?}"); @@ -548,7 +550,7 @@ impl SignCoordinator { } } for msg in outbound_msgs { - match Self::send_signers_message( + match Self::send_signers_message::( &self.message_key, sortdb, burn_tip, @@ -573,4 +575,89 @@ impl SignCoordinator { "Timed out waiting for group signature".into(), )) } + + pub fn begin_sign_v0( + &mut self, + block: &NakamotoBlock, + burn_block_height: u64, + block_attempt: u64, + burn_tip: &BlockSnapshot, + burnchain: &Burnchain, + sortdb: &SortitionDB, + stackerdbs: &StackerDBs, + counters: &Counters, + ) -> Result, NakamotoNodeError> { + let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); + let sign_iter_id = block_attempt; + let reward_cycle_id = burnchain + .block_height_to_reward_cycle(burn_tip.block_height) + .expect("FATAL: tried to initialize coordinator before first burn block height"); + self.coordinator.current_sign_id = sign_id; + self.coordinator.current_sign_iter_id = sign_iter_id; + + let block_proposal = BlockProposal { + block: block.clone(), + burn_height: burn_block_height, + reward_cycle: reward_cycle_id, + }; + + let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); + Self::send_signers_message::( + &self.message_key, + sortdb, + burn_tip, + &stackerdbs, + block_proposal_message, + self.is_mainnet, + &mut self.miners_session, + ) + .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; + counters.bump_naka_proposed_blocks(); + #[cfg(test)] + { + // In test mode, short-circuit waiting for the signers if the TEST_SIGNING + // channel has been created. This allows integration tests for the stacks-node + // independent of the stacks-signer. + if let Some(signatures) = + crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() + { + debug!("Short-circuiting waiting for signers, using test signature"); + return Ok(signatures); + } + } + + let Some(ref mut receiver) = self.receiver else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Failed to obtain the StackerDB event receiver".into(), + )); + }; + + let start_ts = Instant::now(); + while start_ts.elapsed() <= self.signing_round_timeout { + let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { + Ok(event) => event, + Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { + continue; + } + Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "StackerDB event receiver disconnected".into(), + )) + } + }; + + let is_signer_event = + event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); + if !is_signer_event { + debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); + continue; + } + + // TODO: get messages from signers + } + + Err(NakamotoNodeError::SignerSignatureError( + "Timed out waiting for group signature".into(), + )) + } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 54dfbd28ec..66a0627b67 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -27,7 +27,8 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::ClarityVersion; use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; -use libsigner::v1::messages::SignerMessage; +use libsigner::v0::messages::SignerMessage as SignerMessageV0; +use libsigner::v1::messages::SignerMessage as SignerMessageV1; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use rand::RngCore; use stacks::burnchains::{MagicBytes, Txid}; @@ -318,24 +319,41 @@ pub fn get_latest_block_proposal( let proposed_block = { let miner_contract_id = boot_code_id(MINERS_NAME, false); let mut miners_stackerdb = StackerDBSession::new(&conf.node.rpc_bind, miner_contract_id); - let message: SignerMessage = miners_stackerdb + let message: SignerMessageV0 = miners_stackerdb .get_latest(miner_slot_id.start) .expect("Failed to get latest chunk from the miner slot ID") .expect("No chunk found"); - let SignerMessage::Packet(packet) = message else { - panic!("Expected a signer message packet. Got {message:?}"); + let SignerMessageV0::BlockProposal(block_proposal) = message else { + panic!("Expected a signer message block proposal. Got {message:?}"); }; - let Message::NonceRequest(nonce_request) = packet.msg else { - panic!("Expected a nonce request. Got {:?}", packet.msg); - }; - let block_proposal = - BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()) - .expect("Failed to deserialize block proposal"); + // TODO: use v1 message types behind epoch gate + // get_block_proposal_msg_v1(&mut miners_stackerdb, miner_slot_id.start); block_proposal.block }; Ok(proposed_block) } +#[allow(dead_code)] +fn get_block_proposal_msg_v1( + miners_stackerdb: &mut StackerDBSession, + slot_id: u32, +) -> NakamotoBlock { + let message: SignerMessageV1 = miners_stackerdb + .get_latest(slot_id) + .expect("Failed to get latest chunk from the miner slot ID") + .expect("No chunk found"); + let SignerMessageV1::Packet(packet) = message else { + panic!("Expected a signer message packet. Got {message:?}"); + }; + let Message::NonceRequest(nonce_request) = packet.msg else { + panic!("Expected a nonce request. Got {:?}", packet.msg); + }; + let block_proposal = + BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()) + .expect("Failed to deserialize block proposal"); + block_proposal.block +} + pub fn read_and_sign_block_proposal( conf: &Config, signers: &TestSigners, From da6dad1d54c69622f98e426f6737401ad664a1eb Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 16 May 2024 13:28:16 -0700 Subject: [PATCH 0037/1400] fix: code review comments - use `vec![]` instead of `Vec::new()` or `Vec::with_capacity` - Add a constant for the signer threshold - Update threshold calculation - Fix some unsafe unwraps / math - Refactor how `TestSigner` signs a block using a reward set --- stackslib/src/chainstate/nakamoto/mod.rs | 105 +++++++++-------- .../src/chainstate/nakamoto/test_signers.rs | 70 ++++++------ .../src/chainstate/nakamoto/tests/mod.rs | 106 ++++++++++++++++-- stackslib/src/core/mod.rs | 3 + stackslib/src/net/relay.rs | 3 +- stackslib/src/net/tests/download/nakamoto.rs | 6 +- .../stacks-node/src/nakamoto_node/miner.rs | 6 +- .../src/tests/nakamoto_integrations.rs | 2 +- testnet/stacks-node/src/tests/signer/v1.rs | 4 +- 9 files changed, 199 insertions(+), 106 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 79e8cc81ee..d8e67b3bd4 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -31,7 +31,6 @@ use lazy_static::{__Deref, lazy_static}; use rusqlite::blob::Blob; use rusqlite::types::{FromSql, FromSqlError}; use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql, NO_PARAMS}; -use serde_json::Value as SerdeValue; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::bitvec::BitVec; use stacks_common::codec::{ @@ -93,7 +92,7 @@ use crate::clarity_vm::clarity::{ ClarityInstance, ClarityTransactionConnection, Error as ClarityError, PreCommitClarityBlock, }; use crate::clarity_vm::database::SortitionDBRef; -use crate::core::BOOT_BLOCK_HASH; +use crate::core::{BOOT_BLOCK_HASH, NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD}; use crate::net::stackerdb::{StackerDBConfig, MINER_SLOT_COUNT}; use crate::net::Error as net_error; use crate::util_lib::boot; @@ -183,7 +182,7 @@ lazy_static! { -- miner's signature over the block miner_signature TEXT NOT NULL, -- signers' signatures over the block - signer_signature BLOB NOT NULL, + signer_signature TEXT NOT NULL, -- bitvec capturing stacker participation in signature signer_bitvec TEXT NOT NULL, -- The following fields are not part of either the StacksHeaderInfo struct @@ -333,14 +332,9 @@ impl FromRow for NakamotoBlockHeader { let state_index_root = row.get("state_index_root")?; let miner_signature = row.get("miner_signature")?; let signer_bitvec = row.get("signer_bitvec")?; - let signer_signature: SerdeValue = row.get_unwrap("signer_signature"); - let signer_signature = signer_signature - .as_array() - .ok_or(DBError::Corruption)? - .iter() - .cloned() - .map(serde_json::from_value::) - .collect::, _>>()?; + let signer_signature_json: String = row.get("signer_signature")?; + let signer_signature: Vec = + serde_json::from_str(&signer_signature_json).map_err(|_e| DBError::ParseError)?; Ok(NakamotoBlockHeader { version, @@ -532,16 +526,19 @@ impl NakamotoBlockHeader { // `last_index` is used to prevent out-of-order signatures let mut last_index = None; - let total_weight = signers.iter().map(|s| s.weight).sum::(); + let total_weight = signers.iter().map(|s| s.weight).fold(0, |w, acc| { + acc.checked_add(w) + .expect("FATAL: Total signer weight > u32::MAX") + }); // HashMap of let signers_by_pk = signers .iter() .enumerate() - .map(|(i, signer)| (signer.signing_key, (signer.clone(), i))) + .map(|(i, signer)| (&signer.signing_key, (signer, i))) .collect::>(); - for signature in &self.signer_signature { + for signature in self.signer_signature.iter() { let public_key = Secp256k1PublicKey::recover_to_pubkey(message.bits(), signature) .map_err(|_| { ChainstateError::InvalidStacksBlock(format!( @@ -550,40 +547,58 @@ impl NakamotoBlockHeader { )) })?; - let (signer, signer_index) = signers_by_pk - .get(public_key.to_bytes().as_slice()) - .ok_or_else(|| { - ChainstateError::InvalidStacksBlock(format!( - "Public key {} not found in the reward set", - public_key.to_hex() - )) - })?; + let mut public_key_bytes = [0u8; 33]; + public_key_bytes.copy_from_slice(&public_key.to_bytes_compressed()[..]); + + let (signer, signer_index) = signers_by_pk.get(&public_key_bytes).ok_or_else(|| { + ChainstateError::InvalidStacksBlock(format!( + "Public key {} not found in the reward set", + public_key.to_hex() + )) + })?; // Enforce order of signatures - match last_index { - Some(index) if index >= *signer_index => { + if let Some(index) = last_index.as_ref() { + if *index >= *signer_index { return Err(ChainstateError::InvalidStacksBlock( "Signatures are out of order".to_string(), )); } - _ => last_index = Some(*signer_index), + } else { + last_index = Some(*signer_index); } - total_weight_signed += signer.weight; + total_weight_signed = total_weight_signed + .checked_add(signer.weight) + .expect("FATAL: overflow while computing signer set threshold"); } - // Calculate 70% of total weight as the threshold - let threshold = (total_weight as f64 * 7_f64 / 10_f64).ceil() as u32; + let threshold = Self::compute_voting_weight_threshold(total_weight)?; if total_weight_signed < threshold { - return Err(ChainstateError::InvalidStacksBlock( - "Not enough signatures".to_string(), - )); + return Err(ChainstateError::InvalidStacksBlock(format!( + "Not enough signatures. Needed at least {} but got {}", + threshold, total_weight_signed + ))); } return Ok(()); } + pub fn compute_voting_weight_threshold(total_weight: u32) -> Result { + let ceil = if (total_weight as u64 * 7) % 10 == 0 { + 0 + } else { + 1 + }; + u32::try_from((total_weight as u64 * NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD) / 10 + ceil) + .map_err(|_| { + ChainstateError::InvalidStacksBlock( + "Overflow when computing nakamoto block approval threshold".to_string(), + ) + }) + } + /// Make an "empty" header whose block data needs to be filled in. /// This is used by the miner code. pub fn from_parent_empty( @@ -602,7 +617,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::with_capacity(SIGNERS_MAX_LIST_SIZE), + signer_signature: vec![], signer_bitvec: BitVec::ones(bitvec_len) .expect("BUG: bitvec of length-1 failed to construct"), } @@ -619,7 +634,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::with_capacity(SIGNERS_MAX_LIST_SIZE), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), } } @@ -635,7 +650,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::with_capacity(SIGNERS_MAX_LIST_SIZE), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), } } @@ -1784,7 +1799,7 @@ impl NakamotoChainState { db_handle: &mut SortitionHandleConn, staging_db_tx: &NakamotoStagingBlocksTx, headers_conn: &Connection, - _aggregate_public_key: &Point, + _aggregate_public_key: Option<&Point>, reward_set: RewardSet, ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); @@ -1847,22 +1862,12 @@ impl NakamotoChainState { // return Err(ChainstateError::InvalidStacksBlock(msg)); // } - // TODO: epoch gate to verify signatures vec if let Err(e) = block.header.verify_signer_signatures(&reward_set) { - let reward_set_keys = reward_set - .clone() - .signers - .unwrap() - .iter() - .map(|s| to_hex(&s.signing_key)) - .collect::>(); warn!("Received block, but the signer signatures are invalid"; "block_id" => %block.block_id(), "error" => ?e, - "signer_keys" => ?reward_set_keys ); - let msg = format!("Received block, but the signer signatures are invalid"); - return Err(ChainstateError::InvalidStacksBlock(msg)); + return Err(e); } // if we pass all the tests, then along the way, we will have verified (in @@ -2344,8 +2349,12 @@ impl NakamotoChainState { let vrf_proof_bytes = vrf_proof.map(|proof| proof.to_hex()); - let signer_signature = serde_json::to_string(&header.signer_signature) - .expect("Unable to serialize signer signatures"); + let signer_signature = serde_json::to_string(&header.signer_signature).map_err(|_| { + ChainstateError::InvalidStacksBlock(format!( + "Failed to serialize signer signature for block {}", + block_hash + )) + })?; let args: &[&dyn ToSql] = &[ &u64_to_sql(*stacks_block_height)?, diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index f5c61b2f7d..a7e521c155 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -219,26 +219,32 @@ impl TestSigners { block.header.signer_signature = signer_signature; } - /// Sign a NakamotoBlock and maintain the order + /// Sign a NakamotoBlock and maintain the order and membership /// of the reward set signers in the resulting signatures. /// /// If any of [`Self::signer_keys`] are not in the reward set, their signatures - /// will be ignored. + /// will not be included. pub fn sign_block_with_reward_set(&self, block: &mut NakamotoBlock, reward_set: &RewardSet) { - let signatures = self.generate_block_signatures(block); - let reordered_signatures = self.reorder_signatures(signatures, reward_set); - block.header.signer_signature = reordered_signatures; + let signatures = self.generate_ordered_signatures(block, reward_set); + block.header.signer_signature = signatures; } - /// Sign a Nakamoto block and generate a vec of signatures + /// Sign a Nakamoto block and generate a vec of signatures. The signatures will + /// be ordered by the signer's public keys, but will not be checked against the + /// reward set. fn generate_block_signatures(&self, block: &NakamotoBlock) -> Vec { let msg = block.header.signer_signature_hash().0; - self.signer_keys - .iter() - .map(|key| key.sign(&msg).unwrap()) - .collect::>() + let mut keys = self.signer_keys.clone(); + keys.sort_by(|a, b| { + let a = Secp256k1PublicKey::from_private(a).to_bytes_compressed(); + let b = Secp256k1PublicKey::from_private(b).to_bytes_compressed(); + a.cmp(&b) + }); + keys.iter().map(|key| key.sign(&msg).unwrap()).collect() } + /// Sign a Nakamoto block using the aggregate key. + /// NB: this function is current unused. fn sign_block_with_aggregate_key(&mut self, block: &NakamotoBlock) -> ThresholdSignature { let mut rng = rand_core::OsRng::default(); let msg = block.header.signer_signature_hash().0; @@ -255,18 +261,24 @@ impl TestSigners { ThresholdSignature(signature) } - /// Reorder a list of signatures to match the order of the reward set. - pub fn reorder_signatures( + /// Generate an list of signatures for a block. Only + /// signers in the reward set will be included. + pub fn generate_ordered_signatures( &self, - signatures: Vec, + block: &NakamotoBlock, reward_set: &RewardSet, ) -> Vec { - let test_signer_keys = &self + let msg = block.header.signer_signature_hash().0; + + let test_signers_by_pk = self .signer_keys .iter() .cloned() - .map(|key| Secp256k1PublicKey::from_private(&key).to_bytes_compressed()) - .collect::>(); + .map(|s| { + let pk = Secp256k1PublicKey::from_private(&s); + (pk.to_bytes_compressed(), s) + }) + .collect::>(); let reward_set_keys = &reward_set .clone() @@ -276,19 +288,14 @@ impl TestSigners { .map(|s| s.signing_key.to_vec()) .collect::>(); - let signature_keys_map = test_signer_keys - .iter() - .cloned() - .zip(signatures.iter().cloned()) - .collect::>(); - - let mut reordered_signatures = Vec::with_capacity(reward_set_keys.len()); + let mut signatures = Vec::with_capacity(reward_set_keys.len()); let mut missing_keys = 0; for key in reward_set_keys { - if let Some(signature) = signature_keys_map.get(key) { - reordered_signatures.push(signature.clone()); + if let Some(signer_key) = test_signers_by_pk.get(key) { + let signature = signer_key.sign(&msg).unwrap(); + signatures.push(signature); } else { missing_keys += 1; } @@ -300,18 +307,7 @@ impl TestSigners { ); } - reordered_signatures - } - - // Sort [`Self::signer_keys`] by their compressed public key - pub fn sorted_signer_keys(&self) -> Vec { - let mut keys = self.signer_keys.clone(); - keys.sort_by(|a, b| { - let a = Secp256k1PublicKey::from_private(a).to_bytes_compressed(); - let b = Secp256k1PublicKey::from_private(b).to_bytes_compressed(); - a.cmp(&b) - }); - keys + signatures } // Generate and assign a new aggregate public key diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 99f68fadf6..dd36004ff4 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -206,7 +206,7 @@ pub fn test_nakamoto_first_tenure_block_syntactic_validation() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -812,7 +812,7 @@ pub fn test_load_store_update_nakamoto_blocks() { tx_merkle_root: nakamoto_tx_merkle_root_2, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -851,7 +851,7 @@ pub fn test_load_store_update_nakamoto_blocks() { tx_merkle_root: nakamoto_tx_merkle_root_3, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -1529,7 +1529,7 @@ fn test_nakamoto_block_static_verification() { tx_merkle_root: nakamoto_tx_merkle_root, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; nakamoto_header.sign_miner(&private_key).unwrap(); @@ -1548,7 +1548,7 @@ fn test_nakamoto_block_static_verification() { tx_merkle_root: nakamoto_tx_merkle_root_bad_ch, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; nakamoto_header_bad_ch.sign_miner(&private_key).unwrap(); @@ -1567,7 +1567,7 @@ fn test_nakamoto_block_static_verification() { tx_merkle_root: nakamoto_tx_merkle_root_bad_miner_sig, state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; nakamoto_header_bad_miner_sig @@ -1720,7 +1720,7 @@ pub fn test_get_highest_nakamoto_tenure() { tx_merkle_root: Sha512Trunc256Sum([0x00; 32]), state_index_root: TrieHash([0x00; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; let tenure_change = TenureChangePayload { @@ -2020,7 +2020,7 @@ fn test_make_miners_stackerdb_config() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; let block = NakamotoBlock { @@ -2848,7 +2848,6 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { assert!(filtered_txs.contains(&txs.first().expect("failed to get first tx"))); } -#[cfg(test)] pub mod nakamoto_block_signatures { use super::*; @@ -2871,11 +2870,69 @@ pub mod nakamoto_block_signatures { weight: *w, } }) - .collect::>(), + .collect(), ); reward_set } + #[test] + // Test that signatures succeed with exactly 70% of the votes + pub fn test_exactly_enough_votes() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 35), + (Secp256k1PrivateKey::default(), 35), + (Secp256k1PrivateKey::default(), 30), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block with the first two signers + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .take(2) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + header + .verify_signer_signatures(&reward_set) + .expect("Failed to verify signatures"); + } + + #[test] + /// Test that signatures fail with just under 70% of the votes + pub fn test_just_not_enough_votes() { + let signers = vec![ + (Secp256k1PrivateKey::default(), 3500), + (Secp256k1PrivateKey::default(), 3499), + (Secp256k1PrivateKey::default(), 3001), + ]; + let reward_set = make_reward_set(signers.clone()); + + let mut header = NakamotoBlockHeader::empty(); + + // Sign the block with the first two signers + let message = header.signer_signature_hash().0; + let signer_signature = signers + .iter() + .take(2) + .map(|(s, _)| s.sign(&message).expect("Failed to sign block sighash")) + .collect::>(); + + header.signer_signature = signer_signature; + + match header.verify_signer_signatures(&reward_set) { + Ok(_) => panic!("Expected insufficient signatures to fail"), + Err(ChainstateError::InvalidStacksBlock(msg)) => { + assert!(msg.contains("Not enough signatures")); + } + _ => panic!("Expected InvalidStacksBlock error"), + } + } + #[test] /// Base success case - 3 signers of equal weight, all signing the block pub fn test_nakamoto_block_verify_signatures() { @@ -3147,4 +3204,33 @@ pub mod nakamoto_block_signatures { _ => panic!("Expected InvalidStacksBlock error"), } } + + #[test] + pub fn test_compute_voting_weight_threshold() { + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(100_u32).unwrap(), + 70_u32, + ); + + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(10_u32).unwrap(), + 7_u32, + ); + + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(3000_u32).unwrap(), + 2100_u32, + ); + + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(4000_u32).unwrap(), + 2800_u32, + ); + + // Round-up check + assert_eq!( + NakamotoBlockHeader::compute_voting_weight_threshold(511_u32).unwrap(), + 358_u32, + ); + } } diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 9aeb98eefe..d47edc75ec 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -193,6 +193,9 @@ pub const POX_V3_MAINNET_EARLY_UNLOCK_HEIGHT: u32 = pub const POX_V3_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = (BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT as u32) + 1; +// The threshold % of weighted votes on a block to approve it in Nakamoto +pub const NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD: u64 = 7; + /// Burn block height at which the ASTRules::PrecheckSize becomes the default behavior on mainnet pub const AST_RULES_PRECHECK_SIZE: u64 = 752000; // on or about Aug 30 2022 diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 33fa3ebc12..7f4f1847a9 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -763,8 +763,7 @@ impl Relayer { sort_handle, &staging_db_tx, headers_conn, - // &aggregate_public_key, - &Point::new(), + None, reward_set, )?; staging_db_tx.commit()?; diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 02aafdfa1b..1e76cd1853 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -103,7 +103,7 @@ fn test_nakamoto_tenure_downloader() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -171,7 +171,7 @@ fn test_nakamoto_tenure_downloader() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; @@ -192,7 +192,7 @@ fn test_nakamoto_tenure_downloader() { tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), state_index_root: TrieHash([0x08; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 42009d5380..3ed642c9cd 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -198,7 +198,7 @@ impl BlockMinerThread { }; new_block.header.signer_signature = signer_signature; - if let Err(e) = self.broadcast(new_block.clone(), &Point::new(), reward_set) { + if let Err(e) = self.broadcast(new_block.clone(), None, reward_set) { warn!("Error accepting own block: {e:?}. Will try mining again."); continue; } else { @@ -538,7 +538,7 @@ impl BlockMinerThread { fn broadcast( &self, block: NakamotoBlock, - aggregate_public_key: &Point, + aggregate_public_key: Option<&Point>, reward_set: RewardSet, ) -> Result<(), ChainstateError> { #[cfg(test)] @@ -576,7 +576,7 @@ impl BlockMinerThread { &mut sortition_handle, &staging_tx, headers_conn, - &aggregate_public_key, + aggregate_public_key, reward_set, )?; staging_tx.commit()?; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 66a0627b67..11c78596f8 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2248,7 +2248,7 @@ fn miner_writes_proposed_block_to_stackerdb() { let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let mut proposed_zero_block = proposed_block.clone(); - proposed_zero_block.header.signer_signature = Vec::::new(); + proposed_zero_block.header.signer_signature = vec![]; let proposed_zero_block_hash = format!("0x{}", proposed_zero_block.header.block_hash()); coord_channel diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 615df08d24..f7e6674599 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -528,7 +528,7 @@ fn sign_request_rejected() { tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; let mut block1 = NakamotoBlock { @@ -555,7 +555,7 @@ fn sign_request_rejected() { tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), state_index_root: TrieHash([0x08; 32]), miner_signature: MessageSignature::empty(), - signer_signature: Vec::::new(), + signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), }; let mut block2 = NakamotoBlock { From ebf59a353a01ed4343b4aea992f5da24e0105029 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 16 May 2024 14:47:49 -0700 Subject: [PATCH 0038/1400] Fix test to wait for signers to register to prevent flakiness Signed-off-by: Jacinta Ferrant --- stacks-signer/src/lib.rs | 22 ++---- stacks-signer/src/runloop.rs | 87 ++++++++++----------- stacks-signer/src/v0/signer.rs | 7 +- stacks-signer/src/v1/signer.rs | 18 ++--- stackslib/src/net/stackerdb/db.rs | 9 --- testnet/stacks-node/src/tests/signer/mod.rs | 49 ++++++++++++ testnet/stacks-node/src/tests/signer/v0.rs | 69 +++++++++++++--- testnet/stacks-node/src/tests/signer/v1.rs | 11 ++- 8 files changed, 176 insertions(+), 96 deletions(-) diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index af0e8781a0..2c303eefd0 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -41,9 +41,9 @@ use std::sync::mpsc::{channel, Receiver, Sender}; use config::GlobalConfig; use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait}; +use runloop::SignerResult; use slog::slog_info; use stacks_common::info; -use wsts::state_machine::OperationResult; use crate::client::StacksClient; use crate::config::SignerConfig; @@ -62,7 +62,7 @@ pub trait Signer: Debug + Display { &mut self, stacks_client: &StacksClient, event: Option<&SignerEvent>, - res: Sender>, + res: Sender>, current_reward_cycle: u64, ); /// Process a command @@ -75,17 +75,11 @@ pub trait Signer: Debug + Display { } /// A wrapper around the running signer type for the signer -pub type RunningSigner = - libsigner::RunningSigner, Vec, T>; +pub type RunningSigner = libsigner::RunningSigner, Vec, T>; /// The wrapper for the runloop signer type -type RunLoopSigner = libsigner::Signer< - RunLoopCommand, - Vec, - RunLoop, - SignerEventReceiver, - T, ->; +type RunLoopSigner = + libsigner::Signer, RunLoop, SignerEventReceiver, T>; /// The spawned signer pub struct SpawnedSigner + Send, T: SignerEventTrait> { @@ -94,19 +88,19 @@ pub struct SpawnedSigner + Send, T: SignerEventTrait> { /// The command sender for interacting with the running signer pub cmd_send: Sender, /// The result receiver for interacting with the running signer - pub res_recv: Receiver>, + pub res_recv: Receiver>, /// Phantom data for the signer type _phantom: std::marker::PhantomData, } impl + Send, T: SignerEventTrait> SpawnedSigner { /// Stop the signer thread and return the final state - pub fn stop(self) -> Option> { + pub fn stop(self) -> Option> { self.running_signer.stop() } /// Wait for the signer to terminate, and get the final state. WARNING: This will hang forever if the event receiver stop signal was never sent/no error occurred. - pub fn join(self) -> Option> { + pub fn join(self) -> Option> { self.running_signer.join() } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index abc9cb8254..43bd7977ad 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -18,7 +18,6 @@ use std::fmt::Debug; use std::sync::mpsc::Sender; use std::time::Duration; -use blockstack_lib::burnchains::PoxConstants; use blockstack_lib::chainstate::stacks::boot::SIGNERS_NAME; use blockstack_lib::util_lib::boot::boot_code_id; use clarity::codec::StacksMessageCodec; @@ -34,6 +33,26 @@ use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID, S use crate::config::{GlobalConfig, SignerConfig}; use crate::Signer as SignerTrait; +/// The signer result that can be sent across threads +pub enum SignerResult { + /// The signer has received a status check + StatusCheck(State), + /// The signer has completed an operation + OperationResult(OperationResult), +} + +impl From for SignerResult { + fn from(result: OperationResult) -> Self { + SignerResult::OperationResult(result) + } +} + +impl From for SignerResult { + fn from(state: State) -> Self { + SignerResult::StatusCheck(state) + } +} + /// Which signer operation to perform #[derive(PartialEq, Clone, Debug)] pub enum SignerCommand { @@ -99,16 +118,6 @@ impl RewardCycleInfo { blocks_mined / self.reward_cycle_length } - /// Check if the provided burnchain block height is in the prepare phase - pub fn is_in_prepare_phase(&self, burnchain_block_height: u64) -> bool { - PoxConstants::static_is_in_prepare_phase( - self.first_burnchain_block_height, - self.reward_cycle_length, - self.prepare_phase_block_length, - burnchain_block_height, - ) - } - /// Check if the provided burnchain block height is in the prepare phase of the next cycle pub fn is_in_next_prepare_phase(&self, burnchain_block_height: u64) -> bool { let effective_height = burnchain_block_height - self.first_burnchain_block_height; @@ -341,6 +350,14 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo info!("Received a new burnchain block height ({current_burn_block_height}) in the prepare phase of the next reward cycle ({next_reward_cycle}). Checking for signer registration..."); self.refresh_signer_config(next_reward_cycle); } + } else { + debug!("Received a new burnchain block height ({current_burn_block_height}) but not in prepare phase."; + "reward_cycle" => reward_cycle_info.reward_cycle, + "reward_cycle_length" => reward_cycle_info.reward_cycle_length, + "prepare_phase_block_length" => reward_cycle_info.prepare_phase_block_length, + "first_burnchain_block_height" => reward_cycle_info.first_burnchain_block_height, + "last_burnchain_block_height" => reward_cycle_info.last_burnchain_block_height, + ); } self.cleanup_stale_signers(current_reward_cycle); if self.stacks_signers.is_empty() { @@ -367,7 +384,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo } impl, T: StacksMessageCodec + Clone + Send + Debug> - SignerRunLoop, RunLoopCommand, T> for RunLoop + SignerRunLoop, RunLoopCommand, T> for RunLoop { fn set_event_timeout(&mut self, timeout: Duration) { self.config.event_timeout = timeout; @@ -381,8 +398,8 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> &mut self, event: Option>, cmd: Option, - res: Sender>, - ) -> Option> { + res: Sender>, + ) -> Option> { debug!( "Running one pass for the signer. state={:?}, cmd={cmd:?}, event={event:?}", self.state @@ -409,14 +426,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> .as_ref() .expect("FATAL: cannot be an initialized signer with no reward cycle info.") .reward_cycle; - if self.state == State::NoRegisteredSigners { - let next_reward_cycle = current_reward_cycle.saturating_add(1); - if let Some(event) = event { - info!("Signer is not registered for the current reward cycle ({current_reward_cycle}). Reward set is not yet determined or signer is not registered for the upcoming reward cycle ({next_reward_cycle})."); - warn!("Ignoring event: {event:?}"); - } - return None; - } for signer in self.stacks_signers.values_mut() { signer.process_event( &self.stacks_client, @@ -431,6 +440,17 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> self.commands.pop_front(), ); } + if self.state == State::NoRegisteredSigners && event.is_some() { + let next_reward_cycle = current_reward_cycle.saturating_add(1); + info!("Signer is not registered for the current reward cycle ({current_reward_cycle}). Reward set is not yet determined or signer is not registered for the upcoming reward cycle ({next_reward_cycle})."); + } + // This is the only event that we respond to from the outer signer runloop + if let Some(SignerEvent::StatusCheck) = event { + info!("Signer status check requested: {:?}.", self.state); + if let Err(e) = res.send(vec![self.state.into()]) { + error!("Failed to send status check result: {e}."); + } + } None } } @@ -500,49 +520,26 @@ mod tests { last_burnchain_block_height, }; assert!(reward_cycle_info.is_in_reward_cycle(first_burnchain_block_height)); - assert!(!reward_cycle_info.is_in_prepare_phase(first_burnchain_block_height)); - assert!(reward_cycle_info.is_in_reward_cycle(last_burnchain_block_height)); - assert!(!reward_cycle_info.is_in_prepare_phase(last_burnchain_block_height)); - assert!(!reward_cycle_info .is_in_reward_cycle(first_burnchain_block_height.wrapping_add(reward_cycle_length))); - assert!(!reward_cycle_info - .is_in_prepare_phase(!first_burnchain_block_height.wrapping_add(reward_cycle_length))); assert!(reward_cycle_info.is_in_reward_cycle( first_burnchain_block_height .wrapping_add(reward_cycle_length) .wrapping_sub(1) )); - assert!(reward_cycle_info.is_in_prepare_phase( - first_burnchain_block_height - .wrapping_add(reward_cycle_length) - .wrapping_sub(1) - )); assert!(reward_cycle_info.is_in_reward_cycle( first_burnchain_block_height.wrapping_add(reward_cycle_phase_block_length) )); - assert!(!reward_cycle_info.is_in_prepare_phase( - first_burnchain_block_height.wrapping_add(reward_cycle_phase_block_length) - )); - assert!(reward_cycle_info.is_in_reward_cycle(first_burnchain_block_height.wrapping_add(1))); - assert!( - !reward_cycle_info.is_in_prepare_phase(first_burnchain_block_height.wrapping_add(1)) - ); assert!(reward_cycle_info.is_in_reward_cycle( first_burnchain_block_height .wrapping_add(reward_cycle_phase_block_length) .wrapping_add(1) )); - assert!(reward_cycle_info.is_in_prepare_phase( - first_burnchain_block_height - .wrapping_add(reward_cycle_phase_block_length) - .wrapping_add(1) - )); } #[test] diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 57fdf8a6bc..aed67700f0 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -24,11 +24,10 @@ use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::{debug, error, warn}; -use wsts::state_machine::OperationResult; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; -use crate::runloop::RunLoopCommand; +use crate::runloop::{RunLoopCommand, SignerResult}; use crate::signerdb::{BlockInfo, SignerDb}; use crate::Signer as SignerTrait; @@ -79,7 +78,7 @@ impl SignerTrait for Signer { &mut self, stacks_client: &StacksClient, event: Option<&SignerEvent>, - _res: Sender>, + _res: Sender>, current_reward_cycle: u64, ) { let event_parity = match event { @@ -120,7 +119,7 @@ impl SignerTrait for Signer { } } Some(SignerEvent::StatusCheck) => { - debug!("{self}: Received a status check event.") + debug!("{self}: Received a status check event."); } Some(SignerEvent::NewBurnBlock(height)) => { debug!("{self}: Receved a new burn block event for block height {height}") diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index ee93448932..f9d9e0083a 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -55,7 +55,7 @@ use wsts::v2; use super::stackerdb_manager::StackerDBManager; use crate::client::{ClientError, SignerSlotID, StacksClient}; use crate::config::SignerConfig; -use crate::runloop::{RunLoopCommand, SignerCommand}; +use crate::runloop::{RunLoopCommand, SignerCommand, SignerResult}; use crate::signerdb::{BlockInfo, SignerDb}; use crate::v1::coordinator::CoordinatorSelector; use crate::Signer as SignerTrait; @@ -162,7 +162,7 @@ impl SignerTrait for Signer { &mut self, stacks_client: &StacksClient, event: Option<&SignerEvent>, - res: Sender>, + res: Sender>, current_reward_cycle: u64, ) { let event_parity = match event { @@ -338,7 +338,7 @@ impl Signer { pub fn read_dkg_stackerdb_messages( &mut self, stacks_client: &StacksClient, - res: Sender>, + res: Sender>, current_reward_cycle: u64, ) -> Result<(), ClientError> { if self.state != State::Uninitialized { @@ -598,7 +598,7 @@ impl Signer { &mut self, stacks_client: &StacksClient, block_validate_response: &BlockValidateResponse, - res: Sender>, + res: Sender>, current_reward_cycle: u64, ) { let mut block_info = match block_validate_response { @@ -690,7 +690,7 @@ impl Signer { fn handle_signer_messages( &mut self, stacks_client: &StacksClient, - res: Sender>, + res: Sender>, messages: &[SignerMessage], current_reward_cycle: u64, ) { @@ -733,7 +733,7 @@ impl Signer { fn handle_packets( &mut self, stacks_client: &StacksClient, - res: Sender>, + res: Sender>, packets: &[Packet], current_reward_cycle: u64, ) { @@ -1398,11 +1398,11 @@ impl Signer { /// Send any operation results across the provided channel fn send_operation_results( &mut self, - res: Sender>, + res: Sender>, operation_results: Vec, ) { let nmb_results = operation_results.len(); - match res.send(operation_results) { + match res.send(operation_results.into_iter().map(|r| r.into()).collect()) { Ok(_) => { debug!("{self}: Successfully sent {nmb_results} operation result(s)") } @@ -1432,7 +1432,7 @@ impl Signer { pub fn refresh_dkg( &mut self, stacks_client: &StacksClient, - res: Sender>, + res: Sender>, current_reward_cycle: u64, ) -> Result<(), ClientError> { // First attempt to retrieve the aggregate key from the contract. diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 3ec091d0bf..6cdebb69d9 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -303,16 +303,7 @@ impl<'a> StackerDBTx<'a> { ) -> Result<(), net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let mut total_slots_read = 0u32; - debug!( - "Reconfiguring {} slots of StackerDB: {:?}", - slots.len(), - smart_contract - ); for (principal, slot_count) in slots.iter() { - debug!( - "Reconfigure StackerDB slot: ({}, {})", - &principal, slot_count - ); total_slots_read = total_slots_read .checked_add(*slot_count) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index e5e8f54f94..d40073bcbc 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -48,6 +48,7 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum}; use stacks_signer::client::{SignerSlotID, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; +use stacks_signer::runloop::{SignerResult, State}; use stacks_signer::{Signer, SpawnedSigner}; use wsts::curve::point::Point; use wsts::state_machine::PublicKeys; @@ -147,6 +148,54 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Vec { + debug!("Waiting for Status..."); + let now = std::time::Instant::now(); + let mut states = Vec::with_capacity(self.spawned_signers.len()); + for signer in self.spawned_signers.iter() { + let old_len = states.len(); + loop { + assert!( + now.elapsed() < timeout, + "Timed out waiting for state checks" + ); + let results = signer + .res_recv + .recv_timeout(timeout) + .expect("failed to recv state results"); + for result in results { + match result { + SignerResult::OperationResult(_operation) => { + panic!("Recieved an operation result."); + } + SignerResult::StatusCheck(state) => { + states.push(state); + } + } + } + if states.len() > old_len { + break; + } + } + } + debug!("Finished waiting for state checks!"); + states + } + fn nmb_blocks_to_reward_set_calculation(&mut self) -> u64 { let prepare_phase_len = self .running_nodes diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 799bb08f6e..7d7bebcec5 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -27,12 +27,14 @@ use stacks::libstackerdb::StackerDBChunkData; use stacks::types::chainstate::StacksPrivateKey; use stacks::util_lib::boot::boot_code_id; use stacks_signer::client::{SignerSlotID, StackerDB}; +use stacks_signer::runloop::State; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::tests::nakamoto_integrations::boot_to_epoch_3_reward_set; +use crate::tests::neon_integrations::next_block_and_wait; use crate::BurnchainController; impl SignerTest { @@ -45,6 +47,53 @@ impl SignerTest { &self.signer_stacks_private_keys, &mut self.running_nodes.btc_regtest_controller, ); + debug!("Waiting for signer set calculation."); + let mut reward_set_calculated = false; + let short_timeout = Duration::from_secs(30); + let now = std::time::Instant::now(); + // Make sure the signer set is calculated before continuing or signers may not + // recognize that they are registered signers in the subsequent burn block event + let reward_cycle = self.get_current_reward_cycle() + 1; + while !reward_set_calculated { + let reward_set = self + .stacks_client + .get_reward_set_signers(reward_cycle) + .expect("Failed to check if reward set is calculated"); + reward_set_calculated = reward_set.is_some(); + if reward_set_calculated { + debug!("Signer set: {:?}", reward_set.unwrap()); + } + std::thread::sleep(Duration::from_secs(1)); + assert!( + now.elapsed() < short_timeout, + "Timed out waiting for reward set calculation" + ); + } + debug!("Signer set calculated"); + + // Manually consume one more block to ensure signers refresh their state + debug!("Waiting for signers to initialize."); + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + let now = std::time::Instant::now(); + loop { + self.send_status_request(); + let states = self.wait_for_states(short_timeout); + if states + .iter() + .all(|state| state == &State::RegisteredSigners) + { + break; + } + assert!( + now.elapsed() < short_timeout, + "Timed out waiting for signers to be registered" + ); + std::thread::sleep(Duration::from_secs(1)); + } + debug!("Singers initialized"); self.run_until_epoch_3_boundary(); } @@ -81,18 +130,6 @@ fn block_proposal_rejection() { signer_test.boot_to_epoch_3(); let short_timeout = Duration::from_secs(30); - let reward_cycle = signer_test.get_current_reward_cycle(); - - let signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - assert_eq!(signer_slot_ids.len(), num_signers); - - // Wait for the signers to be ready for the proposal - std::thread::sleep(Duration::from_secs(5)); - info!("------------------------- Send Block Proposal To Signers -------------------------"); let miners_contract_id = boot_code_id(MINERS_NAME, false); let mut session = StackerDBSession::new( @@ -108,6 +145,7 @@ fn block_proposal_rejection() { .running_nodes .btc_regtest_controller .get_headers_height(); + let reward_cycle = signer_test.get_current_reward_cycle(); let message = SignerMessage::BlockProposal(BlockProposal { block, burn_height, @@ -145,6 +183,13 @@ fn block_proposal_rejection() { SignerSlotID(0), // We are just reading so again, don't care about index. ); + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + let messages: Vec = StackerDB::get_messages( stackerdb .get_session_mut(&MessageSlotID::BlockResponse) diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 83a03fee00..e90f5b1b4c 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -44,7 +44,7 @@ use stacks_common::types::chainstate::{ use stacks_common::util::hash::{hex_bytes, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use stacks_signer::client::{SignerSlotID, StacksClient}; -use stacks_signer::runloop::{RunLoopCommand, SignerCommand}; +use stacks_signer::runloop::{RunLoopCommand, SignerCommand, SignerResult}; use stacks_signer::v1::coordinator::CoordinatorSelector; use stacks_signer::v1::stackerdb_manager::StackerDBManager; use stacks_signer::v1::SpawnedSigner; @@ -210,11 +210,16 @@ impl SignerTest { .expect("failed to recv dkg results"); for result in results { match result { - OperationResult::Dkg(point) => { + SignerResult::OperationResult(OperationResult::Dkg(point)) => { info!("Received aggregate_group_key {point}"); aggregate_public_key = Some(point); } - other => panic!("{}", operation_panic_message(&other)), + SignerResult::OperationResult(other) => { + panic!("{}", operation_panic_message(&other)) + } + SignerResult::StatusCheck(state) => { + panic!("Received status check result: {:?}", state); + } } } if aggregate_public_key.is_some() || dkg_now.elapsed() > timeout { From b18e6601c246f4cc687bde30646fd42b6ac7767f Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 17 May 2024 16:29:31 +0300 Subject: [PATCH 0039/1400] add link to documentation --- stacks-signer/src/main.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 8ce737308d..0b5f767e00 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -85,7 +85,8 @@ fn spawn_running_signer(path: &PathBuf) -> SpawnedSigner { "Reminder: The signer is primarily designed for use with a local or subnet network stacks node. \ It's important to exercise caution if you are communicating with an external node, \ as this could potentially expose sensitive data or functionalities to security risks \ - if additional proper security checks are not integrated in place." + if additional proper security checks are not integrated in place. \ + For more information, check [the documentation here](https://docs.stacks.co/nakamoto-upgrade/signing-and-stacking/faq#what-should-the-networking-setup-for-my-signer-look-like)." ); info!("Starting signer with config: {}", config); let (cmd_send, cmd_recv) = channel(); From b1c08162711277cd408745ee6aae0d619ea5f9d3 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 17 May 2024 17:09:32 +0300 Subject: [PATCH 0040/1400] update to latest dev with warn signer network --- stacks-signer/src/v1/mod.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/v1/mod.rs b/stacks-signer/src/v1/mod.rs index 7c2477cf2d..4d02ea4258 100644 --- a/stacks-signer/src/v1/mod.rs +++ b/stacks-signer/src/v1/mod.rs @@ -25,8 +25,8 @@ use std::sync::mpsc::{channel, Receiver, Sender}; use libsigner::v1::messages::SignerMessage; use libsigner::SignerEventReceiver; -use slog::slog_info; -use stacks_common::info; +use slog::{slog_info, slog_warn}; +use stacks_common::{info, warn}; use wsts::state_machine::OperationResult; use crate::config::GlobalConfig; @@ -53,6 +53,14 @@ pub struct SpawnedSigner { impl From for SpawnedSigner { fn from(config: GlobalConfig) -> Self { let endpoint = config.endpoint; + warn!( + "Reminder: The signer is primarily designed for use with a local or subnet network stacks node. \ + It's important to exercise caution if you are communicating with an external node, \ + as this could potentially expose sensitive data or functionalities to security risks \ + if additional proper security checks are not integrated in place. \ + For more information, check \ + [the documentation here](https://docs.stacks.co/nakamoto-upgrade/signing-and-stacking/faq#what-should-the-networking-setup-for-my-signer-look-like)." + ); info!("Starting signer with config: {}", config); let (cmd_send, cmd_recv) = channel(); let (res_send, res_recv) = channel(); From 396bf427eca25ddbfdfaf1ac927406a92851d78c Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 17 May 2024 17:17:27 +0300 Subject: [PATCH 0041/1400] update link, no markdown supported on stderr out --- stacks-signer/src/v1/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/v1/mod.rs b/stacks-signer/src/v1/mod.rs index 4d02ea4258..21e7be7e73 100644 --- a/stacks-signer/src/v1/mod.rs +++ b/stacks-signer/src/v1/mod.rs @@ -58,8 +58,8 @@ impl From for SpawnedSigner { It's important to exercise caution if you are communicating with an external node, \ as this could potentially expose sensitive data or functionalities to security risks \ if additional proper security checks are not integrated in place. \ - For more information, check \ - [the documentation here](https://docs.stacks.co/nakamoto-upgrade/signing-and-stacking/faq#what-should-the-networking-setup-for-my-signer-look-like)." + For more information, check the documentation at \ + https://docs.stacks.co/nakamoto-upgrade/signing-and-stacking/faq#what-should-the-networking-setup-for-my-signer-look-like." ); info!("Starting signer with config: {}", config); let (cmd_send, cmd_recv) = channel(); From e29ab98bad462fcfc2a11d0c04d827eadad96258 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 17 May 2024 10:26:52 -0400 Subject: [PATCH 0042/1400] refactor: add method `clarity_uses_tip_burn_block` --- clarity/src/vm/database/clarity_db.rs | 12 +++++++++--- clarity/src/vm/tests/mod.rs | 4 ++-- stacks-common/src/types/mod.rs | 7 +++++++ .../src/chainstate/stacks/boot/contract_tests.rs | 4 ++-- 4 files changed, 20 insertions(+), 7 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 4c419dcb2a..03e85ae176 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -866,8 +866,11 @@ impl<'a> ClarityDatabase<'a> { pub fn get_current_burnchain_block_height(&mut self) -> Result { let cur_stacks_height = self.store.get_current_block_height(); - // In epoch 2, we can only access the burn block associated with the last block - if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { + // Before epoch 3.0, we can only access the burn block associated with the last block + if !self + .get_clarity_epoch_version()? + .clarity_uses_tip_burn_block() + { if cur_stacks_height == 0 { return Ok(self.burn_state_db.get_burn_start_height()); }; @@ -926,7 +929,10 @@ impl<'a> ClarityDatabase<'a> { /// In Epoch 3+: /// 1. Get the SortitionId of the current Stacks tip fn get_sortition_id_for_stacks_tip(&mut self) -> Result> { - if self.get_clarity_epoch_version()? < StacksEpochId::Epoch30 { + if !self + .get_clarity_epoch_version()? + .clarity_uses_tip_burn_block() + { let current_stacks_height = self.get_current_block_height(); if current_stacks_height < 1 { diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 715c205475..2c6f23ef42 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -166,7 +166,7 @@ impl MemoryEnvironmentGenerator { db.begin(); db.set_clarity_epoch_version(epoch).unwrap(); db.commit().unwrap(); - if epoch >= StacksEpochId::Epoch30 { + if epoch.clarity_uses_tip_burn_block() { db.begin(); db.set_tenure_height(1).unwrap(); db.commit().unwrap(); @@ -185,7 +185,7 @@ impl TopLevelMemoryEnvironmentGenerator { db.begin(); db.set_clarity_epoch_version(epoch).unwrap(); db.commit().unwrap(); - if epoch >= StacksEpochId::Epoch30 { + if epoch.clarity_uses_tip_burn_block() { db.begin(); db.set_tenure_height(1).unwrap(); db.commit().unwrap(); diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 0ed0187ad7..a1b90ce428 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -148,6 +148,13 @@ impl StacksEpochId { StacksEpochId::Epoch30 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, } } + + /// Returns whether or not this epoch uses the tip for reading burn block + /// info in Clarity (3.0+ behavior) or should use the parent block's burn + /// block (behavior before 3.0). + pub fn clarity_uses_tip_burn_block(&self) -> bool { + self >= &StacksEpochId::Epoch30 + } } impl std::fmt::Display for StacksEpochId { diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index a308e5b339..1c0e3f4299 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -173,7 +173,7 @@ impl ClarityTestSim { let cur_epoch = Self::check_and_bump_epoch(&mut store, &headers_db, &burn_db); let mut db = store.as_clarity_db(&headers_db, &burn_db); - if cur_epoch >= StacksEpochId::Epoch30 { + if cur_epoch.clarity_uses_tip_burn_block() { db.begin(); db.set_tenure_height(self.tenure_height as u32) .expect("FAIL: unable to set tenure height in Clarity database"); @@ -227,7 +227,7 @@ impl ClarityTestSim { debug!("Execute block in epoch {}", &cur_epoch); let mut db = store.as_clarity_db(&headers_db, &burn_db); - if cur_epoch >= StacksEpochId::Epoch30 { + if cur_epoch.clarity_uses_tip_burn_block() { db.begin(); db.set_tenure_height(self.tenure_height as u32) .expect("FAIL: unable to set tenure height in Clarity database"); From 3fd8272e2f1b6a7e91cee57a01c79fb99236964b Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 17 May 2024 10:41:37 -0400 Subject: [PATCH 0043/1400] chore: Update references to `.cargo/config.toml` --- README.md | 2 +- docs/profiling.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3f91b1a9f2..4e5b7860e6 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ _Note on building_: you may set `RUSTFLAGS` to build binaries for your native cp RUSTFLAGS="-Ctarget-cpu=native" ``` -or uncomment these lines in `./cargo/config`: +or uncomment these lines in `./cargo/config.toml`: ``` # [build] diff --git a/docs/profiling.md b/docs/profiling.md index 25f821d2c9..832b3d4457 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -224,7 +224,7 @@ $ sudo sed -i "$ a kernel.kptr_restrict = 0" /etc/sysctl.conf $ sysctl --system ``` -Note that you need to uncomment the following in `.cargo/config` (see [flamegraph-rs](https://github.com/flamegraph-rs/flamegraph) for details) +Note that you need to uncomment the following in `.cargo/config.toml` (see [flamegraph-rs](https://github.com/flamegraph-rs/flamegraph) for details) ``` [target.x86_64-unknown-linux-gnu] From f1d88d418a990f4192a373460b22fcf101221ec5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 17 May 2024 10:43:17 -0700 Subject: [PATCH 0044/1400] CRC: downgrade log in block proposal rewad cycle mismatch and update comment Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index aed67700f0..682c1433c1 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -201,8 +201,8 @@ impl Signer { ) { debug!("{self}: Received a block proposal: {block_proposal:?}"); if block_proposal.reward_cycle != self.reward_cycle { - // We are not signing for this reward cycle. Reject the block - warn!( + // We are not signing for this reward cycle. Ignore the block. + debug!( "{self}: Received a block proposal for a different reward cycle. Ignore it."; "requested_reward_cycle" => block_proposal.reward_cycle ); From 56121f229608040698ae6e491717b74380e47f65 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 17 May 2024 10:50:41 -0700 Subject: [PATCH 0045/1400] Block proposal test confirmed broken on develop Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 3f2d4d9119..87fe5a8f09 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -85,12 +85,12 @@ jobs: - tests::signer::v0::block_proposal_rejection - tests::signer::v1::dkg - tests::signer::v1::sign_request_rejected - - tests::signer::v1::block_proposal - tests::signer::v1::filter_bad_transactions - tests::signer::v1::delayed_dkg # TODO: enable these once v1 signer is fixed # - tests::signer::v1::mine_2_nakamoto_reward_cycles # - tests::signer::v1::sign_after_signer_reboot + # - tests::signer::v1::block_proposal - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights # Do not run this one until we figure out why it fails in CI From e0f905246aa070d2ba2d0765872824ecf8ec03da Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 17 May 2024 15:13:50 -0400 Subject: [PATCH 0046/1400] chore: fix compile-time issue --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 11c78596f8..f46d9a3878 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3509,7 +3509,7 @@ fn check_block_heights() { return; } - let signers = TestSigners::default(); + let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -3589,7 +3589,7 @@ fn check_block_heights() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); From 9627fecf1d4f8ea0909c4a6dd0c1d49ed668b3ec Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 16 May 2024 09:13:20 -0500 Subject: [PATCH 0047/1400] feat: sortition state tracking in signer --- libsigner/src/events.rs | 7 +- .../src/deps_common/bitcoin/util/hash.rs | 19 + stacks-signer/src/chainstate.rs | 375 +++++++++++++ stacks-signer/src/client/mod.rs | 6 + stacks-signer/src/client/stacks_client.rs | 84 ++- stacks-signer/src/lib.rs | 3 + stacks-signer/src/signerdb.rs | 24 +- stackslib/src/burnchains/burnchain.rs | 2 +- .../src/net/api/get_tenures_fork_info.rs | 361 ++++++++++++ stackslib/src/net/api/getsortition.rs | 380 +++++++++++++ stackslib/src/net/api/mod.rs | 4 + .../net/api/tests/get_tenures_fork_info.rs | 63 +++ stackslib/src/net/api/tests/getsortition.rs | 88 +++ stackslib/src/net/api/tests/mod.rs | 2 + .../stacks-node/src/nakamoto_node/miner.rs | 1 + .../src/tests/nakamoto_integrations.rs | 521 +++++++++++++++++- 16 files changed, 1923 insertions(+), 17 deletions(-) create mode 100644 stacks-signer/src/chainstate.rs create mode 100644 stackslib/src/net/api/get_tenures_fork_info.rs create mode 100644 stackslib/src/net/api/getsortition.rs create mode 100644 stackslib/src/net/api/tests/get_tenures_fork_info.rs create mode 100644 stackslib/src/net/api/tests/getsortition.rs diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index c603db7f0b..4de6b80513 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -39,8 +39,11 @@ use stacks_common::codec::{ StacksMessageCodec, }; pub use stacks_common::consts::SIGNER_SLOTS_PER_USER; -use stacks_common::types::chainstate::StacksPublicKey; -use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksPublicKey, +}; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; +use stacks_common::util::HexError; use tiny_http::{ Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, }; diff --git a/stacks-common/src/deps_common/bitcoin/util/hash.rs b/stacks-common/src/deps_common/bitcoin/util/hash.rs index 3e9186bd92..daa1de3360 100644 --- a/stacks-common/src/deps_common/bitcoin/util/hash.rs +++ b/stacks-common/src/deps_common/bitcoin/util/hash.rs @@ -29,6 +29,7 @@ use crate::deps_common::bitcoin::network::encodable::{ConsensusDecodable, Consen use crate::deps_common::bitcoin::network::serialize::{ self, BitcoinHash, RawEncoder, SimpleEncoder, }; +use crate::util::hash::bytes_to_hex; use crate::util::uint::Uint256; use crate::util::HexError; @@ -49,6 +50,24 @@ impl_array_newtype!(Ripemd160Hash, u8, 20); /// A Bitcoin hash160, 20-bytes, computed from x as RIPEMD160(SHA256(x)) pub struct Hash160([u8; 20]); impl_array_newtype!(Hash160, u8, 20); +impl_byte_array_rusqlite_only!(Hash160); + +impl Hash160 { + /// Convert the Hash160 inner bytes to a non-prefixed hex string + pub fn to_hex(&self) -> String { + bytes_to_hex(&self.0) + } + + /// Try to instantiate a Hash160 using the exact inner bytes of the hash. + pub fn from_bytes(bytes: &[u8]) -> Option { + let mut return_bytes = [0; 20]; + if bytes.len() != return_bytes.len() { + return None; + } + return_bytes.copy_from_slice(bytes); + Some(Self(return_bytes)) + } +} impl Default for Sha256dEncoder { fn default() -> Self { diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs new file mode 100644 index 0000000000..ee03b6df85 --- /dev/null +++ b/stacks-signer/src/chainstate.rs @@ -0,0 +1,375 @@ +use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use blockstack_lib::chainstate::stacks::TenureChangePayload; +use blockstack_lib::net::api::getsortition::SortitionInfo; +use slog::{slog_info, slog_warn}; +use stacks_common::types::chainstate::{ConsensusHash, StacksPublicKey}; +use stacks_common::util::hash::Hash160; +use stacks_common::{info, warn}; + +use crate::client::{ClientError, StacksClient}; +use crate::signerdb::SignerDb; + +/// Captures this signer's current view of a sortition's miner. +#[derive(PartialEq, Eq)] +pub enum SortitionMinerStatus { + /// The signer thinks this sortition's miner is invalid, and hasn't signed any blocks for them. + InvalidatedBeforeFirstBlock, + /// The signer thinks this sortition's miner is invalid, but already signed one or more blocks for them. + InvalidatedAfterFirstBlock, + /// The signer thinks this sortition's miner is valid + Valid, +} + +/// Captures the Stacks sortition related state for +/// a successful sortition +pub struct SortitionState { + /// The miner's pub key hash + pub miner_pkh: Hash160, + /// If known already, the public key which hashes to `miner_pkh` + pub miner_pubkey: Option, + /// the last burn block in this fork which had a sortition + pub prior_sortition: ConsensusHash, + /// the committed to parent tenure ID + pub parent_tenure_id: ConsensusHash, + /// this sortition's consensus hash + pub consensus_hash: ConsensusHash, + /// did the miner in this sortition do something + /// to become invalidated as a miner? + pub invalidated: SortitionMinerStatus, +} + +/// The signer's current view of the stacks chain's sortition +/// state +pub struct SortitionsView { + /// the prior successful sortition (this corresponds to the "prior" miner slot) + pub last_sortition: Option, + /// the current successful sortition (this corresponds to the "current" miner slot) + pub cur_sortition: Option, + /// is the view fresh? + pub fresh: bool, + /// the hash at which the sortitions view was last fetched + pub latest_consensus_hash: Option, +} + +impl TryFrom for SortitionState { + type Error = ClientError; + fn try_from(value: SortitionInfo) -> Result { + Ok(Self { + miner_pkh: value + .miner_pk_hash160 + .ok_or_else(|| ClientError::UnexpectedSortitionInfo)?, + miner_pubkey: None, + prior_sortition: value + .last_sortition_ch + .ok_or_else(|| ClientError::UnexpectedSortitionInfo)?, + consensus_hash: value.consensus_hash, + parent_tenure_id: value + .stacks_parent_ch + .ok_or_else(|| ClientError::UnexpectedSortitionInfo)?, + invalidated: SortitionMinerStatus::Valid, + }) + } +} + +enum ProposedBy<'a> { + LastSortition(&'a SortitionState), + CurrentSortition(&'a SortitionState), +} + +impl<'a> ProposedBy<'a> { + pub fn state(&self) -> &SortitionState { + match self { + ProposedBy::LastSortition(ref x) => x, + ProposedBy::CurrentSortition(ref x) => x, + } + } +} + +impl SortitionsView { + /// Initialize an empty sortitions view struct -- it will refresh() before + /// checking any proposals. + pub fn new() -> Self { + Self { + last_sortition: None, + cur_sortition: None, + fresh: false, + latest_consensus_hash: None, + } + } + + /// Apply checks from the SortitionsView on the block proposal. + /// + pub fn check_proposal( + &mut self, + client: &StacksClient, + signer_db: &SignerDb, + block: &NakamotoBlock, + block_pk: &StacksPublicKey, + ) -> Result { + self.refresh_view(client)?; + let block_pkh = Hash160::from_data(&block_pk.to_bytes_compressed()); + let Some(proposed_by) = self + .cur_sortition + .as_ref() + .and_then(|cur_sortition| { + if block.header.consensus_hash == cur_sortition.consensus_hash { + Some(ProposedBy::CurrentSortition(cur_sortition)) + } else { + None + } + }) + .or_else(|| { + self.last_sortition.as_ref().and_then(|last_sortition| { + if block.header.consensus_hash == last_sortition.consensus_hash { + Some(ProposedBy::LastSortition(last_sortition)) + } else { + None + } + }) + }) + else { + warn!( + "Miner block proposal has consensus hash that is neither the current or last sortition. Considering invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "current_sortition_consensus_hash" => ?self.cur_sortition.as_ref().map(|x| x.consensus_hash), + "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), + ); + return Ok(false); + }; + + if proposed_by.state().miner_pkh != block_pkh { + warn!( + "Miner block proposal pubkey does not match the winning pubkey hash for its sortition. Considering invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_pubkey" => &block_pk.to_hex(), + "proposed_block_pubkey_hash" => %block_pkh, + "sortition_winner_pubkey_hash" => %proposed_by.state().miner_pkh, + ); + return Ok(false); + } + + // check that this miner is the most recent sortition + match proposed_by { + ProposedBy::CurrentSortition(sortition) => { + if sortition.invalidated != SortitionMinerStatus::Valid { + warn!( + "Current miner behaved improperly, this signer views the miner as invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + } + ProposedBy::LastSortition(_last_sortition) => { + if let Some(cur_sortition) = &self.cur_sortition { + // should only consider blocks from the last sortition if the new sortition was invalidated + // before we signed their first block. + if cur_sortition.invalidated + != SortitionMinerStatus::InvalidatedBeforeFirstBlock + { + warn!( + "Miner block proposal is from last sortition winner, when the new sortition winner is still valid. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + } + } + }; + + if let Some(tenure_change) = block.get_tenure_change_tx_payload() { + // in tenure changes, we need to check: + // (1) if the tenure change confirms the expected parent block (i.e., + // the last block we signed in the parent tenure) + // (2) if the parent tenure was a valid choice + let confirms_expected_parent = + Self::check_tenure_change_block_confirmation(tenure_change, block, signer_db)?; + if !confirms_expected_parent { + return Ok(false); + } + // now, we have to check if the parent tenure was a valid choice. + let is_valid_parent_tenure = + Self::check_parent_tenure_choice(proposed_by.state(), block, client)?; + if !is_valid_parent_tenure { + return Ok(false); + } + let last_in_tenure = signer_db + .get_last_signed_block_in_tenure(&block.header.consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; + if last_in_tenure.is_some() { + warn!( + "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + } else { + // check if the new block confirms the last block in the current tenure + let confirms_latest_in_tenure = + Self::confirms_known_blocks_in(block, &block.header.consensus_hash, signer_db)?; + if !confirms_latest_in_tenure { + return Ok(false); + } + } + + if let Some(tenure_extend) = block.get_tenure_extend_tx_payload() { + // in tenure extends, we need to check: + // (1) if this is the most recent sortition, an extend is allowed if it changes the burnchain view + // (2) if this is the most recent sortition, an extend is allowed if enough time has passed to refresh the block limit + let changed_burn_view = + tenure_extend.burn_view_consensus_hash != proposed_by.state().consensus_hash; + let enough_time_passed = Self::tenure_time_passed_block_lim()?; + if !changed_burn_view || !enough_time_passed { + warn!( + "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + } + + Ok(true) + } + + fn check_parent_tenure_choice( + sortition_state: &SortitionState, + block: &NakamotoBlock, + client: &StacksClient, + ) -> Result { + // if the parent tenure is the last sortition, it is a valid choice. + // if the parent tenure is a reorg, then all of the reorged sortitions + // must either have produced zero blocks _or_ produced their first block + // very close to the burn block transition. + if sortition_state.prior_sortition == sortition_state.parent_tenure_id { + return Ok(true); + } + info!( + "Most recent miner's tenure does not build off the prior sortition, checking if this is valid behavior"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + + let tenures_reorged = client.get_tenure_forking_info( + &sortition_state.parent_tenure_id, + &sortition_state.prior_sortition, + )?; + if tenures_reorged.len() == 0 { + warn!("Miner is not building off of most recent tenure, but stacks node was unable to return information about the relevant sortitions. Marking miner invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); + } + for tenure in tenures_reorged.iter() { + if tenure.first_block_mined.is_some() { + // TODO: must check if the first block was poorly timed. + warn!( + "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already mined blocks."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "parent_tenure" => %sortition_state.parent_tenure_id, + "last_sortition" => %sortition_state.prior_sortition, + "violating_tenure_id" => %tenure.consensus_hash, + "violating_tenure_first_block_id" => ?tenure.first_block_mined, + ); + return Ok(false); + } + } + + return Ok(true); + } + + fn check_tenure_change_block_confirmation( + tenure_change: &TenureChangePayload, + block: &NakamotoBlock, + signer_db: &SignerDb, + ) -> Result { + // in tenure changes, we need to check: + // (1) if the tenure change confirms the expected parent block (i.e., + // the last block we signed in the parent tenure) + // (2) if the parent tenure was a valid choice + Self::confirms_known_blocks_in(block, &tenure_change.prev_tenure_consensus_hash, signer_db) + } + + fn confirms_known_blocks_in( + block: &NakamotoBlock, + tenure: &ConsensusHash, + signer_db: &SignerDb, + ) -> Result { + let Some(last_known_block) = signer_db + .get_last_signed_block_in_tenure(tenure) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))? + else { + info!( + "Have not signed off on any blocks in the parent tenure, assuming block confirmation is correct"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "tenure" => %tenure, + ); + return Ok(true); + }; + if block.header.chain_length > last_known_block.block.header.chain_length { + return Ok(true); + } else { + warn!( + "Miner block proposal's tenure change transaction does not confirm as many blocks as we expect in the parent tenure"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_chain_length" => block.header.chain_length, + "expected_at_least" => last_known_block.block.header.chain_length + 1, + ); + return Ok(false); + } + } + + /// Has the current tenure lasted long enough to extend the block limit? + pub fn tenure_time_passed_block_lim() -> Result { + // TODO + return Ok(false); + } + + /// If necessary, fetch a new view of the recent sortitions + pub fn refresh_view(&mut self, client: &StacksClient) -> Result<(), ClientError> { + if self.fresh { + return Ok(()); + } + let latest_state = client.get_latest_sortition()?; + let latest_ch = latest_state.consensus_hash.clone(); + + // figure out what cur_sortition will be set to. + // if the latest sortition wasn't successful, query the last one that was. + let latest_success = if latest_state.was_sortition { + latest_state + } else { + info!("Latest state wasn't a sortition: {latest_state:?}"); + let last_sortition_ch = latest_state + .last_sortition_ch + .as_ref() + .ok_or_else(|| ClientError::NoSortitionOnChain)?; + client.get_sortition(last_sortition_ch)? + }; + + // now, figure out what `last_sortition` will be set to. + let last_sortition = latest_success + .last_sortition_ch + .as_ref() + .map(|ch| client.get_sortition(ch)) + .transpose()?; + + self.cur_sortition = Some(SortitionState::try_from(latest_success)?); + self.last_sortition = last_sortition + .map(SortitionState::try_from) + .transpose() + .ok() + .flatten(); + self.fresh = true; + self.latest_consensus_hash = Some(latest_ch); + + Ok(()) + } +} diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 3ce771309e..26ce5f05f5 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -86,6 +86,12 @@ pub enum ClientError { /// Invalid response from the stacks node #[error("Invalid response from the stacks node: {0}")] InvalidResponse(String), + /// A successful sortition has not occurred yet + #[error("The Stacks chain has not processed any successful sortitions yet")] + NoSortitionOnChain, + /// A successful sortition's info response should be parseable into a SortitionState + #[error("A successful sortition's info response should be parseable into a SortitionState")] + UnexpectedSortitionInfo, } /// Retry a function F with an exponential backoff and notification on transient failure diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index b4b5d8a3a1..38a8f78d1e 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -26,9 +26,13 @@ use blockstack_lib::chainstate::stacks::{ TransactionSpendingCondition, TransactionVersion, }; use blockstack_lib::net::api::callreadonly::CallReadOnlyResponse; +use blockstack_lib::net::api::get_tenures_fork_info::{ + TenureForkingInfo, RPC_TENURE_FORKING_INFO_PATH, +}; use blockstack_lib::net::api::getaccount::AccountEntryResponse; use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::RPCPoxInfoData; +use blockstack_lib::net::api::getsortition::{SortitionInfo, RPC_SORTITION_INFO_PATH}; use blockstack_lib::net::api::getstackers::GetStackersResponse; use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; use blockstack_lib::net::api::postfeerate::{FeeRateEstimateRequestBody, RPCFeeEstimateResponse}; @@ -38,12 +42,14 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use reqwest::header::AUTHORIZATION; use serde_json::json; -use slog::slog_debug; +use slog::{slog_debug, slog_warn}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; -use stacks_common::debug; -use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; +use stacks_common::types::chainstate::{ + ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, +}; use stacks_common::types::StacksEpochId; +use stacks_common::{debug, warn}; use wsts::curve::point::{Compressed, Point}; use crate::client::{retry_with_exponential_backoff, ClientError}; @@ -358,6 +364,65 @@ impl StacksClient { self.get_account_entry(address).map(|entry| entry.nonce) } + /// Get information about the tenures between `chosen_parent` and `last_sortition` + pub fn get_tenure_forking_info( + &self, + chosen_parent: &ConsensusHash, + last_sortition: &ConsensusHash, + ) -> Result, ClientError> { + let send_request = || { + self.stacks_node_client + .get(self.tenure_forking_info_path(chosen_parent, last_sortition)) + .send() + .map_err(backoff::Error::transient) + }; + let response = retry_with_exponential_backoff(send_request)?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let tenures = response.json()?; + Ok(tenures) + } + + /// Get the sortition information for the latest sortition + pub fn get_latest_sortition(&self) -> Result { + let send_request = || { + self.stacks_node_client + .get(self.sortition_info_path()) + .send() + .map_err(|e| { + warn!("Signer failed to request latest sortition"; "err" => ?e); + e + }) + }; + let response = send_request()?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let sortition_info = response.json()?; + Ok(sortition_info) + } + + /// Get the sortition information for a given sortition + pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { + let send_request = || { + self.stacks_node_client + .get(self.sortition_info_path()) + .query(&[("consensus", ch.to_hex().as_str())]) + .send() + .map_err(|e| { + warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); + e + }) + }; + let response = send_request()?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let sortition_info = response.json()?; + Ok(sortition_info) + } + /// Get the current peer info data from the stacks node pub fn get_peer_info(&self) -> Result { debug!("Getting stacks node info..."); @@ -649,6 +714,19 @@ impl StacksClient { format!("{}/v2/block_proposal", self.http_origin) } + fn sortition_info_path(&self) -> String { + format!("{}{RPC_SORTITION_INFO_PATH}", self.http_origin) + } + + fn tenure_forking_info_path(&self, start: &ConsensusHash, stop: &ConsensusHash) -> String { + format!( + "{}{RPC_TENURE_FORKING_INFO_PATH}/{}/{}", + self.http_origin, + start.to_hex(), + stop.to_hex() + ) + } + fn core_info_path(&self) -> String { format!("{}/v2/info", self.http_origin) } diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index af0e8781a0..a6856bb732 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -20,6 +20,9 @@ Usage documentation can be found in the [README](https://github.com/Trust-Machin // You should have received a copy of the GNU General Public License // along with this program. If not, see . +/// This module stores chainstate information about Stacks, SortitionDB for +/// tracking by the signer. +pub mod chainstate; /// The cli module for the signer binary pub mod cli; /// The signer client for communicating with stackerdb/stacks nodes diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 8c6b3ba187..5ef24d1c87 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -25,6 +25,7 @@ use rusqlite::{params, Connection, Error as SqliteError, OpenFlags, NO_PARAMS}; use serde::{Deserialize, Serialize}; use slog::slog_debug; use stacks_common::debug; +use stacks_common::types::chainstate::ConsensusHash; use stacks_common::util::hash::Sha512Trunc256Sum; use wsts::net::NonceRequest; @@ -88,6 +89,9 @@ CREATE TABLE IF NOT EXISTS blocks ( reward_cycle INTEGER NOT NULL, signer_signature_hash TEXT NOT NULL, block_info TEXT NOT NULL, + consensus_hash TEXT NOT NULL, + signed_over INTEGER NOT NULL, + stacks_height INTEGER NOT NULL, burn_block_height INTEGER NOT NULL, PRIMARY KEY (reward_cycle, signer_signature_hash) )"; @@ -173,6 +177,17 @@ impl SignerDb { try_deserialize(result) } + /// Return the last signed block in a tenure (identified by its consensus hash) + pub fn get_last_signed_block_in_tenure( + &self, + tenure: &ConsensusHash, + ) -> Result, DBError> { + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ? AND signed_over = 1 ORDER BY stacks_height DESC LIMIT 1"; + let result: Option = query_row(&self.db, query, &[tenure])?; + + try_deserialize(result) + } + /// Insert a block into the database. /// `hash` is the `signer_signature_hash` of the block. pub fn insert_block(&mut self, block_info: &BlockInfo) -> Result<(), DBError> { @@ -196,8 +211,13 @@ impl SignerDb { ); self.db .execute( - "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info) VALUES (?1, ?2, ?3, ?4)", - params![u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), &block_json], + "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)", + params![ + u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), &block_json, + signed_over, + u64_to_sql(block_info.block.header.chain_length)?, + block_info.block.header.consensus_hash.to_hex(), + ], )?; Ok(()) diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 0247a54512..ac2d6a6b8c 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -1078,7 +1078,7 @@ impl Burnchain { /// Hand off the block to the ChainsCoordinator _and_ process the sortition /// *only* to be used by legacy stacks node interfaces, like the Helium node - pub fn process_block_and_sortition_deprecated( + fn process_block_and_sortition_deprecated( db: &mut SortitionDB, burnchain_db: &mut BurnchainDB, burnchain: &Burnchain, diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs new file mode 100644 index 0000000000..13ed91810e --- /dev/null +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -0,0 +1,361 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::{to_hex, Hash160}; +use stacks_common::util::HexError; +use {serde, serde_json}; + +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::net::api::getblock_v3::NakamotoBlockStream; +use crate::net::http::{ + parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, + HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +pub static RPC_TENURE_FORKING_INFO_PATH: &str = "/v3/tenures_fork_info"; + +static DEPTH_LIMIT: usize = 10; + +/// Struct for information about a tenure that is used to determine whether +/// or not the tenure should have been validly forked. +#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +pub struct TenureForkingInfo { + /// The burnchain header hash of the block that triggered this event. + #[serde(with = "prefix_hex")] + pub burn_block_hash: BurnchainHeaderHash, + /// The burn height of the block that triggered this event. + pub burn_block_height: u64, + /// This sortition ID of the block that triggered this event. This incorporates + /// PoX forking information and the burn block hash to obtain an identifier that is + /// unique across PoX forks and burnchain forks. + #[serde(with = "prefix_hex")] + pub sortition_id: SortitionId, + /// The parent of this burn block's Sortition ID + #[serde(with = "prefix_hex")] + pub parent_sortition_id: SortitionId, + /// The consensus hash of the block that triggered this event. This incorporates + /// PoX forking information and burn op information to obtain an identifier that is + /// unique across PoX forks and burnchain forks. + #[serde(with = "prefix_hex")] + pub consensus_hash: ConsensusHash, + /// Boolean indicating whether or not there was a succesful sortition (i.e. a winning + /// block or miner was chosen). + pub was_sortition: bool, + /// If the sortition occurred, and a block was mined during the tenure, this is the + /// tenure's block. + #[serde(with = "prefix_opt_hex")] + pub first_block_mined: Option, +} + +mod prefix_opt_hex { + pub fn serialize( + val: &Option, + s: S, + ) -> Result { + match val { + Some(ref some_val) => { + let val_str = format!("0x{some_val}"); + s.serialize_some(&val_str) + } + None => s.serialize_none(), + } + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result, D::Error> { + let opt_inst_str: Option = serde::Deserialize::deserialize(d)?; + let Some(inst_str) = opt_inst_str else { + return Ok(None); + }; + let Some(hex_str) = inst_str.get(2..) else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + let val = T::try_from(&hex_str).map_err(serde::de::Error::custom)?; + Ok(Some(val)) + } +} + +mod prefix_hex { + pub fn serialize( + val: &T, + s: S, + ) -> Result { + s.serialize_str(&format!("0x{val}")) + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result { + let inst_str: String = serde::Deserialize::deserialize(d)?; + let Some(hex_str) = inst_str.get(2..) else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + T::try_from(&hex_str).map_err(serde::de::Error::custom) + } +} + +trait HexDeser: Sized { + fn try_from(hex: &str) -> Result; +} + +macro_rules! impl_hex_deser { + ($thing:ident) => { + impl HexDeser for $thing { + fn try_from(hex: &str) -> Result { + $thing::from_hex(hex) + } + } + }; +} + +impl_hex_deser!(BurnchainHeaderHash); +impl_hex_deser!(StacksBlockId); +impl_hex_deser!(SortitionId); +impl_hex_deser!(ConsensusHash); +impl_hex_deser!(BlockHeaderHash); +impl_hex_deser!(Hash160); + +#[derive(Clone, Default)] +pub struct GetTenuresForkInfo { + pub start_sortition: Option, + pub stop_sortition: Option, +} + +/// Decode the HTTP request +impl HttpRequest for GetTenuresForkInfo { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!( + r#"^{RPC_TENURE_FORKING_INFO_PATH}/(?P[0-9a-f]{{40}})/(?P[0-9a-f]{{40}})$"# + )) + .unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let req_contents = HttpRequestContents::new().query_string(query); + + let start_str = captures + .name("start") + .ok_or_else(|| { + Error::DecodeError("Failed to match path to start_sortition group".to_string()) + })? + .as_str(); + let stop_str = captures + .name("stop") + .ok_or_else(|| { + Error::DecodeError("Failed to match path to stop_sortition group".to_string()) + })? + .as_str(); + let start_sortition = ConsensusHash::from_hex(start_str).map_err(|_| { + Error::DecodeError("Invalid path: unparseable consensus hash".to_string()) + })?; + let stop_sortition = ConsensusHash::from_hex(stop_str).map_err(|_| { + Error::DecodeError("Invalid path: unparseable consensus hash".to_string()) + })?; + self.start_sortition = Some(start_sortition); + self.stop_sortition = Some(stop_sortition); + + Ok(req_contents) + } + + fn metrics_identifier(&self) -> &str { + RPC_TENURE_FORKING_INFO_PATH + } +} + +impl TenureForkingInfo { + fn from_snapshot( + sn: &BlockSnapshot, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + ) -> Result { + let first_block_mined = if !sn.sortition { + None + } else { + // is this a nakamoto sortition? + let epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sn.block_height)?.ok_or_else( + || { + warn!( + "Failed to lookup stacks epoch for processed snapshot height {}", + sn.block_height + ); + ChainError::InvalidChainstateDB + }, + )?; + if epoch.epoch_id < StacksEpochId::Epoch30 { + StacksChainState::get_stacks_block_header_info_by_consensus_hash( + chainstate.db(), + &sn.consensus_hash, + )? + .map(|header| header.index_block_hash()) + } else { + NakamotoChainState::get_nakamoto_tenure_start_block_header( + chainstate.db(), + &sn.consensus_hash, + )? + .map(|header| header.index_block_hash()) + } + }; + Ok(TenureForkingInfo { + burn_block_hash: sn.burn_header_hash.clone(), + burn_block_height: sn.block_height, + sortition_id: sn.sortition_id.clone(), + parent_sortition_id: sn.parent_sortition_id.clone(), + consensus_hash: sn.consensus_hash.clone(), + was_sortition: sn.sortition, + first_block_mined, + }) + } +} + +impl RPCRequestHandler for GetTenuresForkInfo { + /// Reset internal state + fn restart(&mut self) { + self.start_sortition = None; + self.stop_sortition = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let result = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + let start_from = self + .stop_sortition + .clone() + .ok_or_else(|| ChainError::NoSuchBlockError)?; + let recurse_end = self + .start_sortition + .clone() + .ok_or_else(|| ChainError::NoSuchBlockError)?; + let mut results = vec![]; + let mut cursor = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &start_from)? + .ok_or_else(|| ChainError::NoSuchBlockError)?; + results.push(TenureForkingInfo::from_snapshot( + &cursor, sortdb, chainstate, + )?); + let handle = sortdb.index_handle(&cursor.sortition_id); + let mut depth = 0; + while depth < DEPTH_LIMIT && cursor.consensus_hash != recurse_end { + depth += 1; + cursor = handle.get_last_snapshot_with_sortition(cursor.block_height)?; + results.push(TenureForkingInfo::from_snapshot( + &cursor, sortdb, chainstate, + )?); + } + + Ok(results) + }); + + let tenures = match result { + Ok(tenures) => tenures, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!( + "Could not find snapshot {:?}\n", + &self.stop_sortition + )), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!( + "Failed to load snapshots for range ({:?}, {:?}]: {:?}\n", + &self.start_sortition, &self.stop_sortition, &e + ); + warn!("{msg}"); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::JSON, + ); + + Ok(( + resp_preamble, + HttpResponseContents::try_from_json(&tenures)?, + )) + } +} + +impl HttpResponse for GetTenuresForkInfo { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let tenures_info: Vec = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(tenures_info)?) + } +} diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs new file mode 100644 index 0000000000..a4fba89fb5 --- /dev/null +++ b/stackslib/src/net/api/getsortition.rs @@ -0,0 +1,380 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::{to_hex, Hash160}; +use stacks_common::util::HexError; +use {serde, serde_json}; + +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::net::api::getblock_v3::NakamotoBlockStream; +use crate::net::http::{ + parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, + HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Debug, Clone, PartialEq)] +pub enum QuerySpecifier { + ConsensusHash(ConsensusHash), + BurnchainHeaderHash(BurnchainHeaderHash), + BlockHeight(u64), + Latest, +} + +pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortition"; + +/// Struct for sortition information returned via the GetSortition API call +#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +pub struct SortitionInfo { + /// The burnchain header hash of the block that triggered this event. + #[serde(with = "prefix_hex")] + pub burn_block_hash: BurnchainHeaderHash, + /// The burn height of the block that triggered this event. + pub burn_block_height: u64, + /// This sortition ID of the block that triggered this event. This incorporates + /// PoX forking information and the burn block hash to obtain an identifier that is + /// unique across PoX forks and burnchain forks. + #[serde(with = "prefix_hex")] + pub sortition_id: SortitionId, + /// The parent of this burn block's Sortition ID + #[serde(with = "prefix_hex")] + pub parent_sortition_id: SortitionId, + /// The consensus hash of the block that triggered this event. This incorporates + /// PoX forking information and burn op information to obtain an identifier that is + /// unique across PoX forks and burnchain forks. + #[serde(with = "prefix_hex")] + pub consensus_hash: ConsensusHash, + /// Boolean indicating whether or not there was a succesful sortition (i.e. a winning + /// block or miner was chosen). + pub was_sortition: bool, + /// If sortition occurred, and the miner's VRF key registration + /// associated a nakamoto mining pubkey with their commit, this + /// will contain the Hash160 of that mining key. + #[serde(with = "prefix_opt_hex")] + pub miner_pk_hash160: Option, + /// If sortition occurred, this will be the consensus hash of the burn block corresponding + /// to the winning block commit's parent block ptr. In 3.x, this is the consensus hash of + /// the tenure that this new burn block's miner will be building off of. + #[serde(with = "prefix_opt_hex")] + pub stacks_parent_ch: Option, + /// If sortition occurred, this will be the consensus hash of the most recent sortition before + /// this one. + #[serde(with = "prefix_opt_hex")] + pub last_sortition_ch: Option, + #[serde(with = "prefix_opt_hex")] + /// In Stacks 2.x, this is the winning block. + /// In Stacks 3.x, this is the first block of the parent tenure. + pub committed_block_hash: Option, +} + +mod prefix_opt_hex { + pub fn serialize( + val: &Option, + s: S, + ) -> Result { + match val { + Some(ref some_val) => { + let val_str = format!("0x{some_val}"); + s.serialize_some(&val_str) + } + None => s.serialize_none(), + } + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result, D::Error> { + let opt_inst_str: Option = serde::Deserialize::deserialize(d)?; + let Some(inst_str) = opt_inst_str else { + return Ok(None); + }; + let Some(hex_str) = inst_str.get(2..) else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + let val = T::try_from(&hex_str).map_err(serde::de::Error::custom)?; + Ok(Some(val)) + } +} + +mod prefix_hex { + pub fn serialize( + val: &T, + s: S, + ) -> Result { + s.serialize_str(&format!("0x{val}")) + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result { + let inst_str: String = serde::Deserialize::deserialize(d)?; + let Some(hex_str) = inst_str.get(2..) else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + T::try_from(&hex_str).map_err(serde::de::Error::custom) + } +} + +trait HexDeser: Sized { + fn try_from(hex: &str) -> Result; +} + +macro_rules! impl_hex_deser { + ($thing:ident) => { + impl HexDeser for $thing { + fn try_from(hex: &str) -> Result { + $thing::from_hex(hex) + } + } + }; +} + +impl_hex_deser!(BurnchainHeaderHash); +impl_hex_deser!(SortitionId); +impl_hex_deser!(ConsensusHash); +impl_hex_deser!(BlockHeaderHash); +impl_hex_deser!(Hash160); + +impl TryFrom<(&String, &String)> for QuerySpecifier { + type Error = Error; + + fn try_from(value: (&String, &String)) -> Result { + let hex_str = if value.1.starts_with("0x") { + &value.1[2..] + } else { + value.1.as_str() + }; + match value.0.as_str() { + "consensus" => Ok(Self::ConsensusHash( + ConsensusHash::from_hex(hex_str).map_err(|e| Error::DecodeError(e.to_string()))?, + )), + "burn" => Ok(Self::BurnchainHeaderHash( + BurnchainHeaderHash::from_hex(hex_str) + .map_err(|e| Error::DecodeError(e.to_string()))?, + )), + "burn_height" => Ok(Self::BlockHeight( + value + .1 + .parse::() + .map_err(|e| Error::DecodeError(e.to_string()))?, + )), + other => Err(Error::DecodeError(format!("Unknown query param: {other}"))), + } + } +} + +#[derive(Clone)] +pub struct GetSortitionHandler { + pub query: QuerySpecifier, +} + +impl GetSortitionHandler { + pub fn new() -> Self { + Self { + query: QuerySpecifier::Latest, + } + } +} +/// Decode the HTTP request +impl HttpRequest for GetSortitionHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!("^{RPC_SORTITION_INFO_PATH}$")).unwrap() + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let req_contents = HttpRequestContents::new().query_string(query); + if req_contents.get_query_args().len() > 1 { + return Err(Error::DecodeError( + "May only supply up to one query argument".into(), + )); + } + self.query = QuerySpecifier::Latest; + for (key, value) in req_contents.get_query_args().iter() { + self.query = QuerySpecifier::try_from((key, value))?; + } + + Ok(req_contents) + } + + fn metrics_identifier(&self) -> &str { + RPC_SORTITION_INFO_PATH + } +} + +impl RPCRequestHandler for GetSortitionHandler { + /// Reset internal state + fn restart(&mut self) { + self.query = QuerySpecifier::Latest; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let result = + node.with_node_state(|_network, sortdb, _chainstate, _mempool, _rpc_args| { + let query_result = match self.query { + QuerySpecifier::Latest => { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).map(Some) + }, + QuerySpecifier::ConsensusHash(ref consensus_hash) => { + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) + }, + QuerySpecifier::BurnchainHeaderHash(ref burn_hash) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot(burn_hash) + }, + QuerySpecifier::BlockHeight(burn_height) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot_by_height(burn_height) + }, + }; + let sortition_sn = query_result? + .ok_or_else(|| ChainError::NoSuchBlockError)?; + + let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = if !sortition_sn.sortition { + (None, None, None, None) + } else { + let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? + .ok_or_else(|| { + error!( + "Failed to load block commit from Sortition DB for snapshot with a winning block txid"; + "sortition_id" => %sortition_sn.sortition_id, + "txid" => %sortition_sn.winning_block_txid, + ); + ChainError::NoSuchBlockError + })?; + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let stacks_parent_sn = handle.get_block_snapshot_by_height(block_commit.parent_block_ptr.into())? + .ok_or_else(|| { + warn!( + "Failed to load the snapshot of the winning block commits parent"; + "sortition_id" => %sortition_sn.sortition_id, + "txid" => %sortition_sn.winning_block_txid, + ); + ChainError::NoSuchBlockError + })?; + + // try to figure out what the last snapshot in this fork was with a successful + // sortition. + // optimization heuristic: short-circuit the load if its just `stacks_parent_sn` + let last_sortition_ch = if sortition_sn.num_sortitions == stacks_parent_sn.num_sortitions + 1 { + stacks_parent_sn.consensus_hash.clone() + } else { + // we actually need to perform the marf lookup + let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; + last_sortition.consensus_hash + }; + + (sortition_sn.miner_pk_hash.clone(), Some(stacks_parent_sn.consensus_hash), Some(block_commit.block_header_hash), + Some(last_sortition_ch)) + }; + + Ok(SortitionInfo { + burn_block_hash: sortition_sn.burn_header_hash, + burn_block_height: sortition_sn.block_height, + sortition_id: sortition_sn.sortition_id, + parent_sortition_id: sortition_sn.parent_sortition_id, + consensus_hash: sortition_sn.consensus_hash, + was_sortition: sortition_sn.sortition, + miner_pk_hash160, + stacks_parent_ch, + last_sortition_ch, + committed_block_hash, + }) + }); + + let block = match result { + Ok(block) => block, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("Could not find snapshot {:?}\n", &self.query)), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load snapshot for {:?}: {:?}\n", &self.query, &e); + warn!("{msg}"); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let preamble = HttpResponsePreamble::ok_json(&preamble); + let result = HttpResponseContents::try_from_json(&block)?; + Ok((preamble, result)) + } +} + +impl HttpResponse for GetSortitionHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let sortition_info: SortitionInfo = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(sortition_info)?) + } +} diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index f1af0a9e60..58425b4955 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -32,6 +32,7 @@ use crate::net::Error as NetError; use crate::stacks_common::codec::StacksMessageCodec; pub mod callreadonly; +pub mod get_tenures_fork_info; pub mod getaccount; pub mod getattachment; pub mod getattachmentsinv; @@ -50,6 +51,7 @@ pub mod getmicroblocks_indexed; pub mod getmicroblocks_unconfirmed; pub mod getneighbors; pub mod getpoxinfo; +pub mod getsortition; pub mod getstackerdbchunk; pub mod getstackerdbmetadata; pub mod getstackers; @@ -127,6 +129,8 @@ impl StacksHttp { self.register_rpc_endpoint(poststackerdbchunk::RPCPostStackerDBChunkRequestHandler::new()); self.register_rpc_endpoint(posttransaction::RPCPostTransactionRequestHandler::new()); self.register_rpc_endpoint(getstackers::GetStackersRequestHandler::default()); + self.register_rpc_endpoint(getsortition::GetSortitionHandler::new()); + self.register_rpc_endpoint(get_tenures_fork_info::GetTenuresForkInfo::default()); } } diff --git a/stackslib/src/net/api/tests/get_tenures_fork_info.rs b/stackslib/src/net/api/tests/get_tenures_fork_info.rs new file mode 100644 index 0000000000..6c9e552759 --- /dev/null +++ b/stackslib/src/net/api/tests/get_tenures_fork_info.rs @@ -0,0 +1,63 @@ +use std::collections::BTreeMap; +use std::fmt::Display; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash}; +use stacks_common::types::net::PeerHost; + +use crate::net::api::get_tenures_fork_info::GetTenuresForkInfo; +use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier}; +use crate::net::connection::ConnectionOptions; +use crate::net::http::{HttpRequestPreamble, HttpVersion}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble}; +use crate::net::Error as NetError; + +fn make_preamble(start: &T, stop: &R) -> HttpRequestPreamble { + HttpRequestPreamble { + version: HttpVersion::Http11, + verb: "GET".into(), + path_and_query_str: format!("/v3/tenures_fork_info/{start}/{stop}"), + host: PeerHost::DNS("localhost".into(), 0), + content_type: None, + content_length: Some(0), + keep_alive: false, + headers: BTreeMap::new(), + } +} + +#[test] +fn test_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let mut handler = GetTenuresForkInfo::default(); + + let tests = vec![ + ( + make_preamble(&ConsensusHash([0; 20]), &ConsensusHash([255; 20])), + Ok((ConsensusHash([0; 20]), ConsensusHash([255; 20]))), + ), + ( + make_preamble(&BurnchainHeaderHash([0; 32]), &ConsensusHash([255; 20])), + Err(NetError::NotFoundError), + ), + ( + make_preamble(&ConsensusHash([255; 20]), &BurnchainHeaderHash([0; 32])), + Err(NetError::NotFoundError), + ), + ]; + + for (inp, expected_result) in tests.into_iter() { + handler.restart(); + let parsed_request = http.handle_try_parse_request(&mut handler, &inp, &[]); + match expected_result { + Ok((start, stop)) => { + assert!(parsed_request.is_ok()); + assert_eq!(&handler.start_sortition, &Some(start)); + assert_eq!(&handler.stop_sortition, &Some(stop)); + } + Err(e) => { + assert_eq!(e, parsed_request.unwrap_err()); + } + } + } +} diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs new file mode 100644 index 0000000000..40cfaf53cf --- /dev/null +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -0,0 +1,88 @@ +use std::collections::BTreeMap; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash}; +use stacks_common::types::net::PeerHost; + +use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier}; +use crate::net::connection::ConnectionOptions; +use crate::net::http::{Error as HttpError, HttpRequestPreamble, HttpVersion}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble}; +use crate::net::Error as NetError; + +fn make_preamble(query: &str) -> HttpRequestPreamble { + HttpRequestPreamble { + version: HttpVersion::Http11, + verb: "GET".into(), + path_and_query_str: format!("/v3/sortition{query}"), + host: PeerHost::DNS("localhost".into(), 0), + content_type: None, + content_length: Some(0), + keep_alive: false, + headers: BTreeMap::new(), + } +} + +#[test] +fn test_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let mut handler = GetSortitionHandler::new(); + + let tests = vec![ + (make_preamble(""), Ok(QuerySpecifier::Latest)), + ( + make_preamble("?consensus=deadbeef00deadbeef01deadbeef02deadbeef03"), + Ok(QuerySpecifier::ConsensusHash( + ConsensusHash::from_hex("deadbeef00deadbeef01deadbeef02deadbeef03").unwrap(), + )), + ), + ( + make_preamble("?burn=00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff"), + Ok(QuerySpecifier::BurnchainHeaderHash( + BurnchainHeaderHash::from_hex( + "00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff", + ) + .unwrap(), + )), + ), + ( + make_preamble("?burn_height=100"), + Ok(QuerySpecifier::BlockHeight(100)), + ), + ( + make_preamble("?burn_height=a1be"), + Err(HttpError::DecodeError( + "invalid digit found in string".into(), + )), + ), + ( + make_preamble("?burn=a1be0000"), + Err(HttpError::DecodeError("bad length 8 for hex string".into())), + ), + ( + make_preamble("?consensus=a1be0000"), + Err(HttpError::DecodeError("bad length 8 for hex string".into())), + ), + ( + make_preamble("?burn_height=20&consensus=deadbeef00deadbeef01deadbeef02deadbeef03"), + Err(HttpError::DecodeError( + "May only supply up to one query argument".into(), + )), + ), + ]; + + for (inp, expected_result) in tests.into_iter() { + handler.restart(); + let parsed_request = http.handle_try_parse_request(&mut handler, &inp, &[]); + match expected_result { + Ok(query) => { + assert!(parsed_request.is_ok()); + assert_eq!(&handler.query, &query); + } + Err(e) => { + assert_eq!(NetError::Http(e), parsed_request.unwrap_err()); + } + } + } +} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index ce67147a9e..591a12131c 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -51,6 +51,7 @@ use crate::net::{ }; mod callreadonly; +mod get_tenures_fork_info; mod getaccount; mod getattachment; mod getattachmentsinv; @@ -69,6 +70,7 @@ mod getmicroblocks_indexed; mod getmicroblocks_unconfirmed; mod getneighbors; mod getpoxinfo; +mod getsortition; mod getstackerdbchunk; mod getstackerdbmetadata; mod getstxtransfercost; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3ed642c9cd..a757233e8e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -893,6 +893,7 @@ impl BlockMinerThread { block.header.block_hash(), block.txs.len(); "signer_sighash" => %block.header.signer_signature_hash(), + "consensus_hash" => %block.header.consensus_hash, ); self.event_dispatcher.process_mined_nakamoto_block_event( diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f46d9a3878..caeee26fae 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::collections::{HashMap, HashSet}; +use std::net::ToSocketAddrs; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::{Arc, Mutex}; @@ -39,14 +40,18 @@ use stacks::chainstate::burn::operations::{ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::miner::NakamotoBlockBuilder; use stacks::chainstate::nakamoto::test_signers::TestSigners; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; use stacks::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; -use stacks::chainstate::stacks::{StacksTransaction, TransactionPayload}; +use stacks::chainstate::stacks::{ + SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangePayload, + TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, + TransactionPublicKeyEncoding, TransactionSpendingCondition, TransactionVersion, +}; use stacks::core::{ StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -60,7 +65,6 @@ use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, }; use stacks::util::hash::hex_bytes; -use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, @@ -71,11 +75,14 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::{CHAIN_ID_TESTNET, STACKS_EPOCH_MAX}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, + TrieHash, }; use stacks_common::types::StacksPublicKeyBuffer; -use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; +use stacks_signer::client::ClientError; +use stacks_signer::signerdb::{BlockInfo, SignerDb}; use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; @@ -309,7 +316,7 @@ pub fn blind_signer( pub fn get_latest_block_proposal( conf: &Config, sortdb: &SortitionDB, -) -> Result { +) -> Result<(NakamotoBlock, StacksPublicKey), String> { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let miner_pubkey = StacksPublicKey::from_private(&conf.get_miner_config().mining_key.unwrap()); let miner_slot_id = NakamotoChainState::get_miner_slot(&sortdb, &tip, &miner_pubkey) @@ -330,7 +337,7 @@ pub fn get_latest_block_proposal( // get_block_proposal_msg_v1(&mut miners_stackerdb, miner_slot_id.start); block_proposal.block }; - Ok(proposed_block) + Ok((proposed_block, miner_pubkey)) } #[allow(dead_code)] @@ -371,7 +378,7 @@ pub fn read_and_sign_block_proposal( .known_selected_anchor_block_owned() .expect("Expected a reward set"); - let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?; + let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?.0; let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let signer_sig_hash = proposed_block.header.signer_signature_hash(); @@ -2244,7 +2251,8 @@ fn miner_writes_proposed_block_to_stackerdb() { let sortdb = naka_conf.get_burnchain().open_sortition_db(true).unwrap(); let proposed_block = get_latest_block_proposal(&naka_conf, &sortdb) - .expect("Expected to find a proposed block in the StackerDB"); + .expect("Expected to find a proposed block in the StackerDB") + .0; let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let mut proposed_zero_block = proposed_block.clone(); @@ -3929,3 +3937,498 @@ fn check_block_heights() { run_loop_thread.join().unwrap(); } + +use stacks_signer::chainstate::SortitionsView; + +#[test] +#[ignore] +fn signer_chainstate() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 200; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * 20, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let mut sortitions_view = SortitionsView::new(); + + // query for prometheus metrics + #[cfg(feature = "monitoring_prom")] + { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); + assert!(res.contains(&expected_result)); + } + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted.clone()); + + let socket = naka_conf + .node + .rpc_bind + .to_socket_addrs() + .unwrap() + .next() + .unwrap(); + let signer_client = stacks_signer::client::StacksClient::new( + StacksPrivateKey::from_seed(&[0, 1, 2, 3]), + socket, + naka_conf + .connection_options + .block_proposal_token + .clone() + .unwrap_or("".into()), + false, + ); + + // there hasn't been a successful nakamoto sortition yet, so expect an error + assert!( + matches!( + sortitions_view.refresh_view(&signer_client).unwrap_err(), + ClientError::UnexpectedSortitionInfo + ), + "Sortitions view should fail to refresh if there are no successful nakamoto sortitions yet", + ); + + // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + let mut signer_db = + SignerDb::new(format!("{}/signer_db_path", naka_conf.node.working_dir)).unwrap(); + + // Mine some nakamoto tenures + // track the last tenure's first block and subsequent blocks so we can + // check that they get rejected by the sortitions_view + let mut last_tenures_proposals: Option<(StacksPublicKey, NakamotoBlock, Vec)> = + None; + // hold the first and last blocks of the first tenure. we'll use this to submit reorging proposals + let mut first_tenure_blocks: Option> = None; + for i in 0..5 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + sortitions_view.fresh = false; + sortitions_view.refresh_view(&signer_client).unwrap(); + + // check the prior tenure's proposals again, confirming that the sortitions_view + // will reject them. + if let Some((ref miner_pk, ref prior_tenure_first, ref prior_tenure_interims)) = + last_tenures_proposals + { + let valid = sortitions_view + .check_proposal(&signer_client, &signer_db, prior_tenure_first, miner_pk) + .unwrap(); + assert!( + !valid, + "Sortitions view should reject proposals from prior tenure" + ); + for block in prior_tenure_interims.iter() { + let valid = sortitions_view + .check_proposal(&signer_client, &signer_db, block, miner_pk) + .unwrap(); + assert!( + !valid, + "Sortitions view should reject proposals from prior tenure" + ); + } + } + + let proposal = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); + + let valid = sortitions_view + .check_proposal(&signer_client, &signer_db, &proposal.0, &proposal.1) + .unwrap(); + + assert!( + valid, + "Nakamoto integration test produced invalid block proposal" + ); + let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_block_height) + .unwrap(); + signer_db + .insert_block(&BlockInfo { + block: proposal.0.clone(), + burn_block_height, + reward_cycle, + vote: None, + valid: Some(true), + nonce_request: None, + signed_over: true, + }) + .unwrap(); + + let before = proposals_submitted.load(Ordering::SeqCst); + + // submit a tx to trigger an intermediate block + let sender_nonce = i; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + let timer = Instant::now(); + while proposals_submitted.load(Ordering::SeqCst) <= before { + thread::sleep(Duration::from_millis(5)); + if timer.elapsed() > Duration::from_secs(20) { + panic!("Timed out waiting for nakamoto miner to produce intermediate block"); + } + } + + // an intermediate block was produced. check the proposed block + let proposal_interim = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); + + let valid = sortitions_view + .check_proposal( + &signer_client, + &signer_db, + &proposal_interim.0, + &proposal_interim.1, + ) + .unwrap(); + + assert!( + valid, + "Nakamoto integration test produced invalid block proposal" + ); + // force the view to refresh and check again + + sortitions_view.fresh = false; + let valid = sortitions_view + .check_proposal( + &signer_client, + &signer_db, + &proposal_interim.0, + &proposal_interim.1, + ) + .unwrap(); + + assert!( + valid, + "Nakamoto integration test produced invalid block proposal" + ); + + signer_db + .insert_block(&BlockInfo { + block: proposal_interim.0.clone(), + burn_block_height, + reward_cycle, + vote: None, + valid: Some(true), + nonce_request: None, + signed_over: true, + }) + .unwrap(); + + if first_tenure_blocks.is_none() { + first_tenure_blocks = Some(vec![proposal.0.clone(), proposal_interim.0.clone()]); + } + last_tenures_proposals = Some((proposal.1, proposal.0, vec![proposal_interim.0])); + } + + // now we'll check some specific cases of invalid proposals + // Case: the block doesn't confirm the prior blocks that have been signed. + let last_tenure = &last_tenures_proposals.as_ref().unwrap().1.clone(); + let last_tenure_header = &last_tenure.header; + let miner_sk = naka_conf.miner.mining_key.clone().unwrap(); + let miner_pk = StacksPublicKey::from_private(&miner_sk); + let mut sibling_block_header = NakamotoBlockHeader { + version: 1, + chain_length: last_tenure_header.chain_length, + burn_spent: last_tenure_header.burn_spent, + consensus_hash: last_tenure_header.consensus_hash.clone(), + parent_block_id: last_tenure_header.block_id(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), + state_index_root: TrieHash([0; 32]), + miner_signature: MessageSignature([0; 65]), + signer_signature: Vec::new(), + signer_bitvec: BitVec::ones(1).unwrap(), + }; + sibling_block_header.sign_miner(&miner_sk).unwrap(); + + let sibling_block = NakamotoBlock { + header: sibling_block_header, + txs: vec![], + }; + + assert!( + !sortitions_view + .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .unwrap(), + "A sibling of a previously approved block must be rejected." + ); + + // Case: the block contains a tenure change, but blocks have already + // been signed in this tenure + let mut sibling_block_header = NakamotoBlockHeader { + version: 1, + chain_length: last_tenure_header.chain_length, + burn_spent: last_tenure_header.burn_spent, + consensus_hash: last_tenure_header.consensus_hash.clone(), + parent_block_id: last_tenure_header.parent_block_id.clone(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), + state_index_root: TrieHash([0; 32]), + miner_signature: MessageSignature([0; 65]), + signer_signature: Vec::new(), + signer_bitvec: BitVec::ones(1).unwrap(), + }; + sibling_block_header.sign_miner(&miner_sk).unwrap(); + + let sibling_block = NakamotoBlock { + header: sibling_block_header, + txs: vec![ + StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 1, + auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig( + SinglesigSpendingCondition { + hash_mode: SinglesigHashMode::P2PKH, + signer: Hash160([0; 20]), + nonce: 0, + tx_fee: 0, + key_encoding: TransactionPublicKeyEncoding::Compressed, + signature: MessageSignature([0; 65]), + }, + )), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TenureChange( + last_tenure.get_tenure_change_tx_payload().unwrap().clone(), + ), + }, + last_tenure.txs[1].clone(), + ], + }; + + assert!( + !sortitions_view + .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .unwrap(), + "A sibling of a previously approved block must be rejected." + ); + + // Case: the block contains a tenure change, but it doesn't confirm all the blocks of the parent tenure + let reorg_to_block = first_tenure_blocks.as_ref().unwrap().first().unwrap(); + let mut sibling_block_header = NakamotoBlockHeader { + version: 1, + chain_length: reorg_to_block.header.chain_length + 1, + burn_spent: reorg_to_block.header.burn_spent, + consensus_hash: last_tenure_header.consensus_hash.clone(), + parent_block_id: reorg_to_block.block_id(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), + state_index_root: TrieHash([0; 32]), + miner_signature: MessageSignature([0; 65]), + signer_signature: Vec::new(), + signer_bitvec: BitVec::ones(1).unwrap(), + }; + sibling_block_header.sign_miner(&miner_sk).unwrap(); + + let sibling_block = NakamotoBlock { + header: sibling_block_header.clone(), + txs: vec![ + StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 1, + auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig( + SinglesigSpendingCondition { + hash_mode: SinglesigHashMode::P2PKH, + signer: Hash160([0; 20]), + nonce: 0, + tx_fee: 0, + key_encoding: TransactionPublicKeyEncoding::Compressed, + signature: MessageSignature([0; 65]), + }, + )), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TenureChange(TenureChangePayload { + tenure_consensus_hash: sibling_block_header.consensus_hash.clone(), + prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(), + burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(), + previous_tenure_end: reorg_to_block.block_id(), + previous_tenure_blocks: 1, + cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound, + pubkey_hash: Hash160::from_node_public_key(&miner_pk), + }), + }, + last_tenure.txs[1].clone(), + ], + }; + + assert!( + !sortitions_view + .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .unwrap(), + "A sibling of a previously approved block must be rejected." + ); + + // Case: the block contains a tenure change, but the parent tenure is a reorg + let reorg_to_block = first_tenure_blocks.as_ref().unwrap().last().unwrap(); + // make the sortition_view *think* that our block commit pointed at this old tenure + sortitions_view + .cur_sortition + .as_mut() + .map(|sortition_state| { + sortition_state.parent_tenure_id = reorg_to_block.header.consensus_hash.clone() + }); + let mut sibling_block_header = NakamotoBlockHeader { + version: 1, + chain_length: reorg_to_block.header.chain_length + 1, + burn_spent: reorg_to_block.header.burn_spent, + consensus_hash: last_tenure_header.consensus_hash.clone(), + parent_block_id: reorg_to_block.block_id(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), + state_index_root: TrieHash([0; 32]), + miner_signature: MessageSignature([0; 65]), + signer_signature: Vec::new(), + signer_bitvec: BitVec::ones(1).unwrap(), + }; + sibling_block_header.sign_miner(&miner_sk).unwrap(); + + let sibling_block = NakamotoBlock { + header: sibling_block_header.clone(), + txs: vec![ + StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 1, + auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig( + SinglesigSpendingCondition { + hash_mode: SinglesigHashMode::P2PKH, + signer: Hash160([0; 20]), + nonce: 0, + tx_fee: 0, + key_encoding: TransactionPublicKeyEncoding::Compressed, + signature: MessageSignature([0; 65]), + }, + )), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TenureChange(TenureChangePayload { + tenure_consensus_hash: sibling_block_header.consensus_hash.clone(), + prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(), + burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(), + previous_tenure_end: reorg_to_block.block_id(), + previous_tenure_blocks: 1, + cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound, + pubkey_hash: Hash160::from_node_public_key(&miner_pk), + }), + }, + last_tenure.txs[1].clone(), + ], + }; + + assert!( + !sortitions_view + .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .unwrap(), + "A sibling of a previously approved block must be rejected." + ); + + sortitions_view.fresh = false; + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} From 401d2d3814f1fffd0d1252176805431a4989902f Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 20 May 2024 10:35:59 -0700 Subject: [PATCH 0048/1400] crc: docstring, dead_code macro --- stackslib/src/chainstate/nakamoto/test_signers.rs | 1 + testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index a7e521c155..50a8e063a6 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -245,6 +245,7 @@ impl TestSigners { /// Sign a Nakamoto block using the aggregate key. /// NB: this function is current unused. + #[allow(dead_code)] fn sign_block_with_aggregate_key(&mut self, block: &NakamotoBlock) -> ThresholdSignature { let mut rng = rand_core::OsRng::default(); let msg = block.header.signer_signature_hash().0; diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 49204166ab..c0f42e7820 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -576,6 +576,10 @@ impl SignCoordinator { )) } + /// Start gathering signatures for a Nakamoto block. + /// This function begins by sending a `BlockProposal` message + /// to the signers, and then waits for the signers to respond + /// with their signatures. pub fn begin_sign_v0( &mut self, block: &NakamotoBlock, @@ -653,7 +657,7 @@ impl SignCoordinator { continue; } - // TODO: get messages from signers + // TODO: get messages from signers (#4775) } Err(NakamotoNodeError::SignerSignatureError( From 650c86cf8925a69f37432ea45b9f97522c7fc13d Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 20 May 2024 16:59:36 -0700 Subject: [PATCH 0049/1400] feat: gather v0 block signatures from stackerdb --- .github/workflows/bitcoin-tests.yml | 1 + .../stacks-node/src/nakamoto_node/miner.rs | 4 +- .../src/nakamoto_node/sign_coordinator.rs | 168 ++++++++++++++++-- testnet/stacks-node/src/tests/signer/mod.rs | 71 ++++++-- testnet/stacks-node/src/tests/signer/v0.rs | 86 ++++++++- testnet/stacks-node/src/tests/signer/v1.rs | 7 +- 6 files changed, 294 insertions(+), 43 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 87fe5a8f09..74b4074b8e 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -83,6 +83,7 @@ jobs: - tests::nakamoto_integrations::follower_bootup - tests::nakamoto_integrations::forked_tenure_is_ignored - tests::signer::v0::block_proposal_rejection + - tests::signer::v0::miner_gather_signatures - tests::signer::v1::dkg - tests::signer::v1::sign_request_rejected - tests::signer::v1::filter_bad_transactions diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3ed642c9cd..29631cdec0 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -310,7 +310,7 @@ impl BlockMinerThread { &reward_set, reward_cycle, miner_privkey_as_scalar, - aggregate_public_key, + Some(aggregate_public_key), &stackerdbs, &self.config, ) @@ -395,7 +395,7 @@ impl BlockMinerThread { &reward_set, reward_cycle, miner_privkey_as_scalar, - Point::new(), + None, &stackerdbs, &self.config, ) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index c0f42e7820..3cf1c6d144 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -13,17 +13,18 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::BTreeMap; use std::sync::mpsc::Receiver; use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; -use libsigner::v0::messages::SignerMessage as SignerMessageV0; +use libsigner::v0::messages::{BlockResponse, SignerMessage as SignerMessageV0}; use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::BlockSnapshot; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; @@ -65,6 +66,8 @@ pub struct SignCoordinator { is_mainnet: bool, miners_session: StackerDBSession, signing_round_timeout: Duration, + signer_entries: HashMap, + weight_threshold: u32, pub next_signer_bitvec: BitVec<4000>, } @@ -122,6 +125,7 @@ impl NakamotoSigningParams { } } +#[allow(dead_code)] fn get_signer_commitments( is_mainnet: bool, reward_set: &[NakamotoSignerEntry], @@ -196,9 +200,10 @@ impl SignCoordinator { reward_set: &RewardSet, reward_cycle: u64, message_key: Scalar, - aggregate_public_key: Point, + aggregate_public_key: Option, stackerdb_conn: &StackerDBs, config: &Config, + // v1: bool, ) -> Result { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { @@ -250,6 +255,32 @@ impl SignCoordinator { ..Default::default() }; + let total_weight = + reward_set_signers + .iter() + .cloned() + .map(|s| s.weight) + .fold(0, |w, acc| { + acc.checked_add(w) + .expect("FATAL: Total signer weight > u32::MAX") + }); + + let threshold = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)?; + + let signer_public_keys = reward_set_signers + .iter() + .cloned() + .enumerate() + .map(|(idx, signer)| { + let Ok(slot_id) = u32::try_from(idx) else { + return Err(ChainstateError::InvalidStacksBlock( + "Signer index exceeds u32".into(), + )); + }; + Ok((slot_id, signer)) + }) + .collect::, ChainstateError>>()?; + let mut coordinator: FireCoordinator = FireCoordinator::new(coord_config); #[cfg(test)] { @@ -272,25 +303,31 @@ impl SignCoordinator { miners_session, signing_round_timeout: config.miner.wait_on_signers.clone(), next_signer_bitvec, + signer_entries: signer_public_keys, + weight_threshold: threshold, }; - sign_coordinator - .coordinator - .set_aggregate_public_key(Some(aggregate_public_key)); + if let Some(aggregate_public_key) = aggregate_public_key { + sign_coordinator + .coordinator + .set_aggregate_public_key(Some(aggregate_public_key)); + } return Ok(sign_coordinator); } } - let party_polynomials = get_signer_commitments( - is_mainnet, - reward_set_signers.as_slice(), - stackerdb_conn, - reward_cycle, - &aggregate_public_key, - )?; - if let Err(e) = coordinator - .set_key_and_party_polynomials(aggregate_public_key.clone(), party_polynomials) - { - warn!("Failed to set a valid set of party polynomials"; "error" => %e); - }; + if let Some(aggregate_public_key) = aggregate_public_key { + let party_polynomials = get_signer_commitments( + is_mainnet, + reward_set_signers.as_slice(), + stackerdb_conn, + reward_cycle, + &aggregate_public_key, + )?; + if let Err(e) = coordinator + .set_key_and_party_polynomials(aggregate_public_key.clone(), party_polynomials) + { + warn!("Failed to set a valid set of party polynomials"; "error" => %e); + }; + } let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); if replaced_other { @@ -306,6 +343,8 @@ impl SignCoordinator { miners_session, signing_round_timeout: config.miner.wait_on_signers.clone(), next_signer_bitvec, + signer_entries: signer_public_keys, + weight_threshold: threshold, }) } @@ -606,6 +645,9 @@ impl SignCoordinator { }; let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); + debug!("Sending block proposal message to signers"; + "signer_signature_hash" => ?&block.header.signer_signature_hash().0, + ); Self::send_signers_message::( &self.message_key, sortdb, @@ -636,6 +678,13 @@ impl SignCoordinator { )); }; + let mut total_weight_signed: u32 = 0; + let mut gathered_signatures = BTreeMap::new(); + + info!("SignCoordinator: beginning to watch for block signatures."; + "threshold" => self.weight_threshold, + ); + let start_ts = Instant::now(); while start_ts.elapsed() <= self.signing_round_timeout { let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { @@ -657,7 +706,88 @@ impl SignCoordinator { continue; } - // TODO: get messages from signers (#4775) + let modified_slots = &event.modified_slots.clone(); + + // Update `next_signers_bitvec` with the slots that were modified in the event + modified_slots.iter().for_each(|chunk| { + if let Ok(slot_id) = chunk.slot_id.try_into() { + match &self.next_signer_bitvec.set(slot_id, true) { + Err(e) => { + warn!("Failed to set bitvec for next signer: {e:?}"); + } + _ => (), + }; + } else { + error!("FATAL: slot_id greater than u16, which should never happen."); + } + }); + + let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { + warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); + }) else { + continue; + }; + let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { + debug!("Received signer event other than a signer message. Ignoring."); + continue; + }; + if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { + debug!("Received signer event for other reward cycle. Ignoring."); + continue; + }; + let slot_ids = modified_slots + .iter() + .map(|chunk| chunk.slot_id) + .collect::>(); + + debug!("SignCoordinator: Received messages from signers"; + "count" => messages.len(), + "slot_ids" => ?slot_ids, + "threshold" => self.weight_threshold + ); + + for (message, slot_id) in messages.into_iter().zip(slot_ids) { + match message { + SignerMessageV0::BlockResponse(BlockResponse::Accepted(( + response_hash, + signature, + ))) => { + let block_sighash = block.header.signer_signature_hash(); + if block_sighash != response_hash { + warn!( + "Processed signature but didn't validate over the expected block. Returning error."; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "slot_id" => slot_id, + ); + continue; + } + debug!("SignCoordinator: Received valid signature from signer"; "slot_id" => slot_id, "signature" => %signature); + let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { + return Err(NakamotoNodeError::SignerSignatureError( + "Signer entry not found".into(), + )); + }; + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + debug!("SignCoordinator: Total weight signed: {total_weight_signed}"); + gathered_signatures.insert(slot_id, signature); + } + SignerMessageV0::BlockResponse(BlockResponse::Rejected(_)) => { + debug!("Received rejected block response. Ignoring."); + } + SignerMessageV0::BlockProposal(_) => { + debug!("Received block proposal message. Ignoring."); + } + } + } + + // After gathering all signatures, return them if we've hit the threshold + if total_weight_signed >= self.weight_threshold { + info!("SignCoordinator: Received enough signatures. Continuing."); + return Ok(gathered_signatures.values().cloned().collect()); + } } Err(NakamotoNodeError::SignerSignatureError( diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index d40073bcbc..31d2dabc11 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -38,10 +38,11 @@ use clarity::boot_util::boot_code_id; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; -use stacks::chainstate::stacks::boot::SIGNERS_NAME; +use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::BlockValidateResponse; +use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; @@ -243,7 +244,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest MinedNakamotoBlockEvent { let new_block = self.mine_nakamoto_block(timeout); let signer_sighash = new_block.signer_signature_hash.clone(); - let signature = self.wait_for_confirmed_block(&signer_sighash, timeout); + let signature = self.wait_for_confirmed_block_v1(&signer_sighash, timeout); assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes())); new_block } @@ -275,15 +276,51 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest ThresholdSignature { + let block_obj = self.wait_for_confirmed_block_with_hash(block_signer_sighash, timeout); + let signer_signature_hex = block_obj.get("signer_signature").unwrap().as_str().unwrap(); + let signer_signature_bytes = hex_bytes(&signer_signature_hex[2..]).unwrap(); + let signer_signature = + ThresholdSignature::consensus_deserialize(&mut signer_signature_bytes.as_slice()) + .unwrap(); + signer_signature + } + + /// Wait for a confirmed block and return a list of individual + /// signer signatures + fn wait_for_confirmed_block_v0( + &mut self, + block_signer_sighash: &Sha512Trunc256Sum, + timeout: Duration, + ) -> Vec { + let block_obj = self.wait_for_confirmed_block_with_hash(block_signer_sighash, timeout); + block_obj + .get("signer_signature") + .unwrap() + .as_array() + .expect("Expected signer_signature to be an array") + .iter() + .cloned() + .map(serde_json::from_value::) + .collect::, _>>() + .expect("Unable to deserialize array of MessageSignature") + } + + /// Wait for a confirmed block and return a list of individual + /// signer signatures + fn wait_for_confirmed_block_with_hash( + &mut self, + block_signer_sighash: &Sha512Trunc256Sum, + timeout: Duration, + ) -> serde_json::Map { let t_start = Instant::now(); while t_start.elapsed() <= timeout { let blocks = test_observer::get_blocks(); - if let Some(signature) = blocks.iter().find_map(|block_json| { + if let Some(block) = blocks.iter().find_map(|block_json| { let block_obj = block_json.as_object().unwrap(); let sighash = block_obj // use the try operator because non-nakamoto blocks @@ -294,16 +331,9 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest PublicKeys { - let entries = self - .stacks_client - .get_reward_set_signers(reward_cycle) - .unwrap() - .unwrap(); + let entries = self.get_reward_set_signers(reward_cycle); let entries = SignerEntries::parse(false, &entries).unwrap(); entries.public_keys } + /// Get the signers for the given reward cycle + pub fn get_reward_set_signers(&self, reward_cycle: u64) -> Vec { + self.stacks_client + .get_reward_set_signers(reward_cycle) + .unwrap() + .unwrap() + } + #[allow(dead_code)] fn get_signer_metrics(&self) -> String { #[cfg(feature = "monitoring_prom")] diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7d7bebcec5..55115c5f18 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -14,6 +14,7 @@ // along with this program. If not, see . use std::env; +use std::sync::atomic::Ordering; use std::time::Duration; use libsigner::v0::messages::{ @@ -25,6 +26,8 @@ use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::codec::StacksMessageCodec; use stacks::libstackerdb::StackerDBChunkData; use stacks::types::chainstate::StacksPrivateKey; +use stacks::types::PublicKey; +use stacks::util::secp256k1::Secp256k1PublicKey; use stacks::util_lib::boot::boot_code_id; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::runloop::State; @@ -33,7 +36,7 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; -use crate::tests::nakamoto_integrations::boot_to_epoch_3_reward_set; +use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; use crate::tests::neon_integrations::next_block_and_wait; use crate::BurnchainController; @@ -96,6 +99,27 @@ impl SignerTest { debug!("Singers initialized"); self.run_until_epoch_3_boundary(); + + let (vrfs_submitted, commits_submitted) = ( + self.running_nodes.vrfs_submitted.clone(), + self.running_nodes.commits_submitted.clone(), + ); + info!("Submitting 1 BTC block for miner VRF key registration"); + // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + info!("Successfully triggered first block to wake up the miner runloop."); + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + info!("Ready to mine Nakamoto blocks!"); } } @@ -212,3 +236,63 @@ fn block_proposal_rejection() { } signer_test.shutdown(); } + +// Basic test to ensure that miners are able to gather block responses +// from signers and create blocks. +#[test] +#[ignore] +fn miner_gather_signatures() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let mut signer_test: SignerTest = SignerTest::new(num_signers); + signer_test.boot_to_epoch_3(); + let timeout = Duration::from_secs(30); + + info!("------------------------- Try mining one block -------------------------"); + signer_test.mine_nakamoto_block(timeout); + + // Verify that the signers accepted the proposed block, sending back a validate ok response + let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(timeout); + let message = proposed_signer_signature_hash.0; + + info!("------------------------- Test Block Signed -------------------------"); + // Verify that the signers signed the proposed block + let signature = + signer_test.wait_for_confirmed_block_v0(&proposed_signer_signature_hash, timeout); + + info!("Got {} signatures", signature.len()); + + assert_eq!(signature.len(), num_signers); + + let reward_cycle = signer_test.get_current_reward_cycle(); + let signers = signer_test.get_reward_set_signers(reward_cycle); + + // Verify that the signers signed the proposed block + + let all_signed = signers.iter().zip(signature).all(|(signer, signature)| { + let stacks_public_key = Secp256k1PublicKey::from_slice(signer.signing_key.as_slice()) + .expect("Failed to convert signing key to StacksPublicKey"); + + // let valid = stacks_public_key.verify(message, signature); + let valid = stacks_public_key + .verify(&message, &signature) + .expect("Failed to verify signature"); + if !valid { + error!( + "Failed to verify signature for signer: {:?}", + stacks_public_key + ); + } + valid + }); + assert!(all_signed); +} diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 66855514e7..30f499caae 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -879,7 +879,8 @@ fn block_proposal() { info!("------------------------- Test Block Signed -------------------------"); // Verify that the signers signed the proposed block - let signature = signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, timeout); + let signature = + signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, timeout); assert!(signature .0 .verify(&key, proposed_signer_signature_hash.as_bytes())); @@ -1098,7 +1099,7 @@ fn sign_after_signer_reboot() { signer_test.mine_nakamoto_block(timeout); let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout); let signature = - signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, short_timeout); + signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); assert!( signature.verify(&key, proposed_signer_signature_hash.0.as_slice()), @@ -1119,7 +1120,7 @@ fn sign_after_signer_reboot() { let last_block = signer_test.mine_nakamoto_block(timeout); let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout); let frost_signature = - signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, short_timeout); + signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); // Check that the latest block's bitvec is all 1's assert_eq!( From 6cfbb17664d9d4ad7a50a0a624bebd4c603bb092 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 20 May 2024 14:30:16 -0400 Subject: [PATCH 0050/1400] refactor: implement `BurnStateDB` for `SortitionHandleConn` Use `SortitionHandleConn` instead of `SortitionDBConn`. This change required propagation through many locations. --- stackslib/src/chainstate/burn/db/sortdb.rs | 15 ++- stackslib/src/chainstate/coordinator/mod.rs | 4 +- stackslib/src/chainstate/coordinator/tests.rs | 64 ++++----- .../chainstate/nakamoto/coordinator/mod.rs | 4 +- stackslib/src/chainstate/nakamoto/miner.rs | 10 +- .../src/chainstate/nakamoto/tests/mod.rs | 2 +- .../src/chainstate/nakamoto/tests/node.rs | 12 +- stackslib/src/chainstate/stacks/boot/mod.rs | 50 +++---- .../src/chainstate/stacks/boot/pox_2_tests.rs | 10 +- .../src/chainstate/stacks/boot/pox_3_tests.rs | 40 +++--- .../src/chainstate/stacks/boot/pox_4_tests.rs | 42 +++--- .../chainstate/stacks/boot/signers_tests.rs | 2 +- stackslib/src/chainstate/stacks/db/blocks.rs | 12 +- .../src/chainstate/stacks/db/unconfirmed.rs | 92 +++++++------ stackslib/src/chainstate/stacks/miner.rs | 14 +- .../src/chainstate/stacks/tests/accounting.rs | 66 ++++----- .../stacks/tests/block_construction.rs | 76 ++++++----- .../stacks/tests/chain_histories.rs | 32 ++--- stackslib/src/chainstate/stacks/tests/mod.rs | 8 +- stackslib/src/clarity_vm/database/mod.rs | 17 ++- .../src/clarity_vm/tests/epoch_switch.rs | 2 +- stackslib/src/core/mempool.rs | 2 +- stackslib/src/main.rs | 6 +- stackslib/src/net/api/callreadonly.rs | 96 ++++++------- stackslib/src/net/api/getaccount.rs | 126 +++++++++--------- stackslib/src/net/api/getconstantval.rs | 32 +++-- stackslib/src/net/api/getcontractabi.rs | 20 +-- stackslib/src/net/api/getcontractsrc.rs | 50 +++---- stackslib/src/net/api/getdatavar.rs | 44 +++--- .../src/net/api/getistraitimplemented.rs | 56 ++++---- stackslib/src/net/api/getmapentry.rs | 58 ++++---- stackslib/src/net/api/getpoxinfo.rs | 2 +- stackslib/src/net/api/postblock_proposal.rs | 4 +- stackslib/src/net/api/tests/mod.rs | 10 +- stackslib/src/net/mod.rs | 4 +- stackslib/src/net/p2p.rs | 2 +- stackslib/src/net/relay.rs | 42 +++--- stackslib/src/net/stackerdb/config.rs | 2 +- stackslib/src/net/tests/download/epoch2x.rs | 6 +- .../stacks-node/src/nakamoto_node/miner.rs | 38 +++--- testnet/stacks-node/src/neon_node.rs | 8 +- testnet/stacks-node/src/node.rs | 2 +- testnet/stacks-node/src/run_loop/helium.rs | 16 +-- testnet/stacks-node/src/tenure.rs | 4 +- testnet/stacks-node/src/tests/epoch_21.rs | 4 +- testnet/stacks-node/src/tests/epoch_22.rs | 4 +- testnet/stacks-node/src/tests/epoch_24.rs | 4 +- .../src/tests/nakamoto_integrations.rs | 18 +-- .../src/tests/neon_integrations.rs | 21 ++- 49 files changed, 680 insertions(+), 575 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e3802d6ec1..eb49daa50a 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2595,6 +2595,19 @@ impl<'a> SortitionHandleConn<'a> { } } } + + pub fn get_reward_set_payouts_at( + &self, + sortition_id: &SortitionId, + ) -> Result<(Vec, u128), db_error> { + let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; + let args: &[&dyn ToSql] = &[sortition_id]; + let pox_addrs_json: String = query_row(self, sql, args)?.ok_or(db_error::NotFoundError)?; + + let pox_addrs: (Vec, u128) = + serde_json::from_str(&pox_addrs_json).expect("FATAL: failed to decode pox payout JSON"); + Ok(pox_addrs) + } } // Connection methods @@ -2616,7 +2629,7 @@ impl SortitionDB { Ok(index_tx) } - /// Make an indexed connectino + /// Make an indexed connection pub fn index_conn<'a>(&'a self) -> SortitionDBConn<'a> { SortitionDBConn::new( &self.marf, diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 96eae44641..2836ec7b4c 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -3274,7 +3274,7 @@ impl< if let Some(ref mut estimator) = self.cost_estimator { let stacks_epoch = self .sortition_db - .index_conn() + .index_handle_at_tip() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); estimator.notify_block( @@ -3288,7 +3288,7 @@ impl< if let Some(ref mut estimator) = self.fee_estimator { let stacks_epoch = self .sortition_db - .index_conn() + .index_handle_at_tip() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); if let Err(e) = diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index d5073c8f85..a76f047725 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -667,7 +667,7 @@ fn make_genesis_block_with_recipients( ) .unwrap(); - let iconn = sort_db.index_conn(); + let iconn = sort_db.index_handle_at_tip(); let mut miner_epoch_info = builder.pre_epoch_begin(state, &iconn, true).unwrap(); let ast_rules = miner_epoch_info.ast_rules.clone(); let mut epoch_tx = builder @@ -922,7 +922,7 @@ fn make_stacks_block_with_input( let total_burn = parents_sortition.total_burn; - let iconn = sort_db.index_conn(); + let iconn = sort_db.index_handle_at_tip(); let mut builder = StacksBlockBuilder::make_regtest_block_builder( burnchain, @@ -1286,7 +1286,7 @@ fn missed_block_commits_2_05() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -1636,7 +1636,7 @@ fn missed_block_commits_2_1() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -1981,7 +1981,7 @@ fn late_block_commits_2_1() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -2154,7 +2154,7 @@ fn test_simple_setup() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -2464,7 +2464,7 @@ fn test_sortition_with_reward_set() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -2709,7 +2709,7 @@ fn test_sortition_with_burner_reward_set() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -2916,7 +2916,7 @@ fn test_pox_btc_ops() { let mut chainstate = get_chainstate(path); let (stacker_balance, burn_height) = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -3001,7 +3001,7 @@ fn test_pox_btc_ops() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -3219,7 +3219,7 @@ fn test_stx_transfer_btc_ops() { let mut chainstate = get_chainstate(path); let (sender_balance, burn_height) = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -3234,7 +3234,7 @@ fn test_stx_transfer_btc_ops() { let (recipient_balance, burn_height) = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -3348,7 +3348,7 @@ fn test_stx_transfer_btc_ops() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -3691,13 +3691,13 @@ fn test_delegate_stx_btc_ops() { ); let first_delegation_info = get_delegation_info_pox_2( &mut chainstate, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &parent_tip, &first_del, ); let second_delegation_info = get_delegation_info_pox_2( &mut chainstate, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &parent_tip, &second_del, ); @@ -3744,7 +3744,7 @@ fn test_delegate_stx_btc_ops() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -3988,7 +3988,7 @@ fn test_initial_coinbase_reward_distributions() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -4156,7 +4156,7 @@ fn test_epoch_switch_cost_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn.with_clarity_db_readonly(|db| db .get_stacks_epoch(burn_block_height as u32) @@ -4176,7 +4176,7 @@ fn test_epoch_switch_cost_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4193,7 +4193,7 @@ fn test_epoch_switch_cost_contract_instantiation() { // check that costs-2 contract DNE before epoch 2.05, and that it does exist after let does_costs_2_contract_exist = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4360,7 +4360,7 @@ fn test_epoch_switch_pox_2_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn.with_clarity_db_readonly(|db| db .get_stacks_epoch(burn_block_height as u32) @@ -4381,7 +4381,7 @@ fn test_epoch_switch_pox_2_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4398,7 +4398,7 @@ fn test_epoch_switch_pox_2_contract_instantiation() { // check that pox-2 contract DNE before epoch 2.1, and that it does exist after let does_pox_2_contract_exist = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4569,7 +4569,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn.with_clarity_db_readonly(|db| db .get_stacks_epoch(burn_block_height as u32) @@ -4590,7 +4590,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4607,7 +4607,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { // check that pox-3 contract DNE before epoch 2.4, and that it does exist after let does_pox_3_contract_exist = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| { @@ -4855,7 +4855,7 @@ fn atlas_stop_start() { // check that the bns contract exists let does_bns_contract_exist = chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_clarity_db_readonly(|db| db.get_contract(&boot_code_id("bns", false))) @@ -5180,7 +5180,7 @@ fn test_epoch_verify_active_pox_contract() { // Query the pox.clar contract to ensure the total stacked amount is as expected let amount_locked_pox_1_res = get_total_stacked_info( &mut chainstate, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &parent_tip, curr_reward_cycle, false, @@ -5214,7 +5214,7 @@ fn test_epoch_verify_active_pox_contract() { // Query the pox-2.clar contract to ensure the total stacked amount is as expected let amount_locked_pox_2_res = get_total_stacked_info( &mut chainstate, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &parent_tip, curr_reward_cycle, true, @@ -5516,7 +5516,7 @@ fn test_sortition_with_sunset() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -5864,7 +5864,7 @@ fn test_sortition_with_sunset_and_epoch_switch() { assert_eq!( chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| conn .with_readonly_clarity_env( @@ -6774,7 +6774,7 @@ fn eval_at_chain_tip(chainstate_path: &str, sort_db: &SortitionDB, eval: &str) - let mut chainstate = get_chainstate(chainstate_path); chainstate .with_read_only_clarity_tx( - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), |conn| { conn.with_readonly_clarity_env( diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index f399615c80..abb89e1839 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -656,7 +656,7 @@ impl< if let Some(ref mut estimator) = self.cost_estimator { let stacks_epoch = self .sortition_db - .index_conn() + .index_handle_at_tip() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); estimator.notify_block( @@ -670,7 +670,7 @@ impl< if let Some(ref mut estimator) = self.fee_estimator { let stacks_epoch = self .sortition_db - .index_conn() + .index_handle_at_tip() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); if let Err(e) = estimator.notify_block(&block_receipt, &stacks_epoch.block_limit) { diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index ab9ae6a5f9..da1a7af570 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -41,7 +41,9 @@ use stacks_common::util::hash::{Hash160, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; use crate::burnchains::{PrivateKey, PublicKey}; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleTx}; +use crate::chainstate::burn::db::sortdb::{ + SortitionDB, SortitionDBConn, SortitionHandleConn, SortitionHandleTx, +}; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; use crate::chainstate::nakamoto::{ @@ -221,7 +223,7 @@ impl NakamotoBlockBuilder { pub fn load_tenure_info<'a>( &self, chainstate: &'a mut StacksChainState, - burn_dbconn: &'a SortitionDBConn, + burn_dbconn: &'a SortitionHandleConn, cause: Option, ) -> Result, Error> { debug!("Nakamoto miner tenure begin"); @@ -297,7 +299,7 @@ impl NakamotoBlockBuilder { /// yet known). pub fn tenure_begin<'a, 'b>( &mut self, - burn_dbconn: &'a SortitionDBConn, + burn_dbconn: &'a SortitionHandleConn, info: &'b mut MinerTenureInfo<'a>, ) -> Result, Error> { let SetupBlockResult { @@ -396,7 +398,7 @@ impl NakamotoBlockBuilder { pub fn build_nakamoto_block( // not directly used; used as a handle to open other chainstates chainstate_handle: &StacksChainState, - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, mempool: &mut MemPoolDB, // Stacks header we're building off of. parent_stacks_header: &StacksHeaderInfo, diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index f8d048aaf1..3cd7d2dde5 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -128,7 +128,7 @@ pub fn get_account( chainstate .with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &tip.index_block_hash(), |clarity_conn| { StacksChainState::get_account(clarity_conn, &addr.to_account_principal()) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index b2b275a0e1..ab338cc90e 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -560,9 +560,13 @@ impl TestStacksNode { tenure_change = None; coinbase = None; - let (mut nakamoto_block, size, cost) = - Self::make_nakamoto_block_from_txs(builder, chainstate, &sortdb.index_conn(), txs) - .unwrap(); + let (mut nakamoto_block, size, cost) = Self::make_nakamoto_block_from_txs( + builder, + chainstate, + &sortdb.index_handle_at_tip(), + txs, + ) + .unwrap(); miner.sign_nakamoto_block(&mut nakamoto_block); let tenure_sn = @@ -638,7 +642,7 @@ impl TestStacksNode { pub fn make_nakamoto_block_from_txs( mut builder: NakamotoBlockBuilder, chainstate_handle: &StacksChainState, - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, mut txs: Vec, ) -> Result<(NakamotoBlock, u64, ExecutionCost), ChainstateError> { use clarity::vm::ast::ASTRules; diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index d3e8a494de..e6e02eaab5 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -569,7 +569,7 @@ impl StacksChainState { boot_contract_name: &str, code: &str, ) -> Result { - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let dbconn = self.state_index.sqlite_conn(); self.clarity_state .eval_read_only( @@ -631,7 +631,7 @@ impl StacksChainState { let cost_track = LimitedCostTracker::new_free(); let sender = PrincipalData::Standard(StandardPrincipalData::transient()); let result = self - .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { + .maybe_read_only_clarity_tx(&sortdb.index_handle_at_tip(), tip, |clarity_tx| { clarity_tx.with_readonly_clarity_env( mainnet, chain_id, @@ -1668,7 +1668,7 @@ pub mod test { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let value = peer.chainstate().clarity_eval_read_only( &iconn, &stacks_block_id, @@ -1696,7 +1696,7 @@ pub mod test { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let value = peer.chainstate().clarity_eval_read_only( &iconn, &stacks_block_id, @@ -1842,9 +1842,11 @@ pub mod test { SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - StacksChainState::get_account(clarity_tx, addr) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &stacks_block_id, + |clarity_tx| StacksChainState::get_account(clarity_tx, addr), + ) .unwrap() }); account @@ -1856,9 +1858,11 @@ pub mod test { SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - StacksChainState::get_contract(clarity_tx, addr).unwrap() - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &stacks_block_id, + |clarity_tx| StacksChainState::get_contract(clarity_tx, addr).unwrap(), + ) .unwrap() }); contract_opt @@ -2799,7 +2803,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -2926,7 +2930,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3012,7 +3016,7 @@ pub mod test { let block_builder = StacksBlockBuilder::make_regtest_block_builder(&burnchain, &parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); - let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); + let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_handle_at_tip(), block_txs).unwrap(); (anchored_block, vec![]) }); @@ -3118,7 +3122,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3229,7 +3233,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3447,7 +3451,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3705,7 +3709,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3980,7 +3984,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -4224,7 +4228,7 @@ pub mod test { let block_builder = StacksBlockBuilder::make_regtest_block_builder(&burnchain, &parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); - let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); + let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_handle_at_tip(), block_txs).unwrap(); (anchored_block, vec![]) }); @@ -4397,7 +4401,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -4696,7 +4700,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -5277,7 +5281,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -5646,7 +5650,7 @@ pub mod test { } let block_builder = StacksBlockBuilder::make_regtest_block_builder(&burnchain, &parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); - let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); + let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_handle_at_tip(), block_txs).unwrap(); if tenure_id == 2 { // block should be all the transactions diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 2c47f0ec0b..b5f1859355 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -667,7 +667,7 @@ where { with_sortdb(peer, |ref mut c, ref sortdb| { let headers_db = HeadersDBConn(c.state_index.sqlite_conn()); - let burn_db = sortdb.index_conn(); + let burn_db = sortdb.index_handle_at_tip(); let mut read_only_clar = c .clarity_state .read_only_connection(tip, &headers_db, &burn_db); @@ -3794,7 +3794,7 @@ fn test_get_pox_addrs() { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3896,7 +3896,7 @@ fn test_get_pox_addrs() { let addrs_and_payout = with_sortdb(&mut peer, |ref mut chainstate, ref mut sortdb| { let addrs = chainstate .maybe_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &tip_index_block, |clarity_tx| { clarity_tx @@ -4091,7 +4091,7 @@ fn test_stack_with_segwit() { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -4193,7 +4193,7 @@ fn test_stack_with_segwit() { let addrs_and_payout = with_sortdb(&mut peer, |ref mut chainstate, ref mut sortdb| { let addrs = chainstate .maybe_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &tip_index_block, |clarity_tx| { clarity_tx diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index f0c7a9ef75..3134b4773a 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -3348,24 +3348,28 @@ fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec, u128) { let burn_height = tip.block_height - 1; let addrs_and_payout = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { let addrs = chainstate - .maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip_index_block, |clarity_tx| { - clarity_tx - .with_readonly_clarity_env( - false, - 0x80000000, - ClarityVersion::Clarity2, - PrincipalData::Standard(StandardPrincipalData::transient()), - None, - LimitedCostTracker::new_free(), - |env| { - env.eval_read_only( - &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", &burn_height), - ) - }, - ) - .unwrap() - }) + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip_index_block, + |clarity_tx| { + clarity_tx + .with_readonly_clarity_env( + false, + 0x80000000, + ClarityVersion::Clarity2, + PrincipalData::Standard(StandardPrincipalData::transient()), + None, + LimitedCostTracker::new_free(), + |env| { + env.eval_read_only( + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? pox-addrs u{})", &burn_height), + ) + }, + ) + .unwrap() + }, + ) .unwrap(); addrs }) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index be7675c700..16143c98f2 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -833,24 +833,28 @@ fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec, u128) { let burn_height = tip.block_height - 1; let addrs_and_payout = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { let addrs = chainstate - .maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip_index_block, |clarity_tx| { - clarity_tx - .with_readonly_clarity_env( - false, - 0x80000000, - ClarityVersion::Clarity2, - PrincipalData::Standard(StandardPrincipalData::transient()), - None, - LimitedCostTracker::new_free(), - |env| { - env.eval_read_only( - &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", &burn_height), - ) - }, - ) - .unwrap() - }) + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip_index_block, + |clarity_tx| { + clarity_tx + .with_readonly_clarity_env( + false, + 0x80000000, + ClarityVersion::Clarity2, + PrincipalData::Standard(StandardPrincipalData::transient()), + None, + LimitedCostTracker::new_free(), + |env| { + env.eval_read_only( + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? pox-addrs u{})", &burn_height), + ) + }, + ) + .unwrap() + }, + ) .unwrap(); addrs }) @@ -2945,7 +2949,7 @@ fn verify_signer_key_sig( ) -> Value { let result: Value = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &latest_block, |clarity_tx| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &latest_block, |clarity_tx| { clarity_tx .with_readonly_clarity_env( false, diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index a97a0c1e09..67fffd878a 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -483,7 +483,7 @@ pub fn readonly_call_with_sortdb( args: Vec, ) -> Value { chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), tip, |connection| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), tip, |connection| { connection .with_readonly_clarity_env( false, diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index dd70fcfb01..53e71b1321 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -10263,7 +10263,7 @@ pub mod test { ); let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -10515,7 +10515,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -11071,7 +11071,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -11236,7 +11236,7 @@ pub mod test { let tip_hash = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); let account = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &tip_hash, |conn| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &tip_hash, |conn| { StacksChainState::get_account(conn, &addr.to_account_principal()) }) .unwrap(); @@ -11394,7 +11394,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -11919,7 +11919,7 @@ pub mod test { let tip_hash = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); let account = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &tip_hash, |conn| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &tip_hash, |conn| { StacksChainState::get_account(conn, &addr.to_account_principal()) }) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 92d32dd038..0e3ae3ae88 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -754,7 +754,7 @@ mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx], ) .unwrap(); @@ -777,7 +777,7 @@ mod test { // build 1-block microblock stream let microblocks = { let sortdb = peer.sortdb.take().unwrap(); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); peer.chainstate() .reload_unconfirmed_state(&sort_iconn, canonical_tip.clone()) @@ -852,18 +852,21 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); let recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }) + .with_read_only_unconfirmed_clarity_tx( + &sortdb.index_handle_at_tip(), + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }, + ) .unwrap() .unwrap(); peer.sortdb = Some(sortdb); @@ -876,13 +879,17 @@ mod test { let sortdb = peer.sortdb.take().unwrap(); let confirmed_recv_balance = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &canonical_tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &canonical_tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }, + ) .unwrap(); peer.sortdb = Some(sortdb); @@ -984,7 +991,7 @@ mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx], ) .unwrap(); @@ -1007,9 +1014,9 @@ mod test { // build microblock stream iteratively, and test balances at each additional microblock let sortdb = peer.sortdb.take().unwrap(); let microblocks = { - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); let mut microblock_builder = StacksMicroblockBuilder::new( @@ -1083,18 +1090,21 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); let recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }) + .with_read_only_unconfirmed_clarity_tx( + &sortdb.index_handle_at_tip(), + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }, + ) .unwrap() .unwrap(); peer.sortdb = Some(sortdb); @@ -1110,13 +1120,17 @@ mod test { let sortdb = peer.sortdb.take().unwrap(); let confirmed_recv_balance = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &canonical_tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &canonical_tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }, + ) .unwrap(); peer.sortdb = Some(sortdb); @@ -1270,7 +1284,7 @@ mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx, anchored_tx], ) .unwrap(); @@ -1297,7 +1311,7 @@ mod test { Relayer::refresh_unconfirmed(&mut inner_node.chainstate, &mut sortdb); let microblock = { - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut microblock_builder = StacksMicroblockBuilder::resume_unconfirmed( &mut inner_node.chainstate, &sort_iconn, @@ -1386,12 +1400,12 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); let db_recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx(&sortdb.index_conn(), |clarity_tx| { + .with_read_only_unconfirmed_clarity_tx(&sortdb.index_handle_at_tip(), |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { clarity_db .get_account_stx_balance(&recv_addr.into()) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index f718a9fb36..450060d3a8 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -39,7 +39,9 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; use stacks_common::util::vrf::*; use crate::burnchains::{Burnchain, PrivateKey, PublicKey}; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleTx}; +use crate::chainstate::burn::db::sortdb::{ + SortitionDB, SortitionDBConn, SortitionHandleConn, SortitionHandleTx, +}; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; use crate::chainstate::stacks::address::StacksAddressExtensions; @@ -1803,7 +1805,7 @@ impl StacksBlockBuilder { pub fn pre_epoch_begin<'a>( &mut self, chainstate: &'a mut StacksChainState, - burn_dbconn: &'a SortitionDBConn, + burn_dbconn: &'a SortitionHandleConn, confirm_microblocks: bool, ) -> Result, Error> { debug!( @@ -1912,7 +1914,7 @@ impl StacksBlockBuilder { /// returned ClarityTx object. pub fn epoch_begin<'a, 'b>( &mut self, - burn_dbconn: &'a SortitionDBConn, + burn_dbconn: &'a SortitionHandleConn, info: &'b mut MinerEpochInfo<'a>, ) -> Result<(ClarityTx<'b, 'b>, ExecutionCost), Error> { let SetupBlockResult { @@ -1974,7 +1976,7 @@ impl StacksBlockBuilder { pub fn make_anchored_block_from_txs( builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, txs: Vec, ) -> Result<(StacksBlock, u64, ExecutionCost), Error> { Self::make_anchored_block_and_microblock_from_txs( @@ -1993,7 +1995,7 @@ impl StacksBlockBuilder { pub fn make_anchored_block_and_microblock_from_txs( mut builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, mut txs: Vec, mut mblock_txs: Vec, ) -> Result<(StacksBlock, u64, ExecutionCost, Option), Error> { @@ -2385,7 +2387,7 @@ impl StacksBlockBuilder { /// returns the assembled block, and the consumed execution budget. pub fn build_anchored_block( chainstate_handle: &StacksChainState, // not directly used; used as a handle to open other chainstates - burn_dbconn: &SortitionDBConn, + burn_dbconn: &SortitionHandleConn, mempool: &mut MemPoolDB, parent_stacks_header: &StacksHeaderInfo, // Stacks header we're building off of total_burn: u64, // the burn so far on the burnchain (i.e. from the last burnchain block) diff --git a/stackslib/src/chainstate/stacks/tests/accounting.rs b/stackslib/src/chainstate/stacks/tests/accounting.rs index 8d65e40a4e..69dddd742c 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -228,7 +228,7 @@ fn test_bad_microblock_fees_pre_v210() { anchored_txs.push(stx_transfer); } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -551,7 +551,7 @@ fn test_bad_microblock_fees_fix_transition() { anchored_txs.push(stx_transfer); } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -907,7 +907,7 @@ fn test_get_block_info_v210() { anchored_txs.push(stx_transfer); } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -1029,7 +1029,7 @@ fn test_get_block_info_v210() { peer .chainstate() .with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &stacks_block_id, |clarity_tx| { let list_val = clarity_tx.with_readonly_clarity_env( @@ -1296,7 +1296,7 @@ fn test_get_block_info_v210_no_microblocks() { ) .unwrap(); - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, @@ -1333,7 +1333,7 @@ fn test_get_block_info_v210_no_microblocks() { peer .chainstate() .with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &stacks_block_id, |clarity_tx| { let list_val = clarity_tx.with_readonly_clarity_env( @@ -1678,7 +1678,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { anchored_txs.push(stx_transfer); } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -1803,7 +1803,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { peer .chainstate() .with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &stacks_block_id, |clarity_tx| { let list_val = clarity_tx.with_readonly_clarity_env( @@ -1911,29 +1911,33 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { // reported correctly. let recipient_balance = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - let recipient_balance_val = clarity_tx - .with_readonly_clarity_env( - false, - CHAIN_ID_TESTNET, - ClarityVersion::Clarity2, - PrincipalData::parse("SP3Q4A5WWZ80REGBN0ZXNE540ECJ9JZ4A765Q5K2Q").unwrap(), - None, - LimitedCostTracker::new_free(), - |env| { - if pay_to_contract { - env.eval_raw(&format!( - "(stx-get-balance '{}.{})", - &addr_anchored, contract_name - )) - } else { - env.eval_raw(&format!("(stx-get-balance '{})", &addr_recipient)) - } - }, - ) - .unwrap(); - recipient_balance_val.expect_u128().unwrap() - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &stacks_block_id, + |clarity_tx| { + let recipient_balance_val = clarity_tx + .with_readonly_clarity_env( + false, + CHAIN_ID_TESTNET, + ClarityVersion::Clarity2, + PrincipalData::parse("SP3Q4A5WWZ80REGBN0ZXNE540ECJ9JZ4A765Q5K2Q").unwrap(), + None, + LimitedCostTracker::new_free(), + |env| { + if pay_to_contract { + env.eval_raw(&format!( + "(stx-get-balance '{}.{})", + &addr_anchored, contract_name + )) + } else { + env.eval_raw(&format!("(stx-get-balance '{})", &addr_recipient)) + } + }, + ) + .unwrap(); + recipient_balance_val.expect_u128().unwrap() + }, + ) .unwrap(); // N.B. `stx-get-balance` will reflect one more block-reward than `get-block-info? diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index ae428af15f..9abd98acb1 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -130,7 +130,7 @@ fn test_build_anchored_blocks_empty() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -254,7 +254,7 @@ fn test_build_anchored_blocks_stx_transfers_single() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -391,7 +391,7 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -557,7 +557,7 @@ fn test_build_anchored_blocks_stx_transfers_multi() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -712,7 +712,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); let coinbase_tx = make_coinbase(miner, tenure_id); - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -968,7 +968,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); let coinbase_tx = make_coinbase(miner, tenure_id); - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let (parent_mblock_stream, mblock_pubkey_hash) = { if tenure_id > 0 { chainstate @@ -1250,7 +1250,7 @@ fn test_build_anchored_blocks_incrementing_nonces() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -1498,7 +1498,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -1652,7 +1652,7 @@ fn test_build_anchored_blocks_multiple_chaintips() { StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), mempool_to_use, &parent_tip, tip.total_burn, @@ -1759,7 +1759,7 @@ fn test_build_anchored_blocks_empty_chaintips() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -1966,7 +1966,7 @@ fn test_build_anchored_blocks_too_expensive_transactions() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -2133,7 +2133,7 @@ fn test_build_anchored_blocks_invalid() { let coinbase_tx = make_coinbase(miner, tenure_id as usize); let mut anchored_block = StacksBlockBuilder::build_anchored_block( - chainstate, &sortdb.index_conn(), &mut mempool, &parent_tip, tip.total_burn, vrf_proof, Hash160([tenure_id as u8; 20]), &coinbase_tx, BlockBuilderSettings::max_value(), None, &burnchain, + chainstate, &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, vrf_proof, Hash160([tenure_id as u8; 20]), &coinbase_tx, BlockBuilderSettings::max_value(), None, &burnchain, ).unwrap(); if tenure_id == bad_block_tenure { @@ -2403,7 +2403,7 @@ fn test_build_anchored_blocks_bad_nonces() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -2531,7 +2531,7 @@ fn test_build_microblock_stream_forks() { // produce the microblock stream for the parent, which this tenure's anchor // block will confirm. - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); chainstate .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) @@ -2654,7 +2654,7 @@ fn test_build_microblock_stream_forks() { let (anchored_block, block_size, block_execution_cost) = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -2858,7 +2858,7 @@ fn test_build_microblock_stream_forks_with_descendants() { // produce the microblock stream for the parent, which this tenure's anchor // block will confirm. - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); chainstate .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) @@ -3081,7 +3081,7 @@ fn test_build_microblock_stream_forks_with_descendants() { let (anchored_block, block_size, block_execution_cost) = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, parent_tip.anchored_header.as_stacks_epoch2().unwrap().total_work.burn + 1000, @@ -3186,15 +3186,19 @@ fn test_build_microblock_stream_forks_with_descendants() { test_debug!("Check {} in {} for report", &reporter_addr, &chain_tip); peer.with_db_state(|ref mut sortdb, ref mut chainstate, _, _| { chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { - // the key at height 1 should be reported as poisoned - let report = StacksChainState::get_poison_microblock_report(clarity_tx, 1) - .unwrap() - .unwrap(); - assert_eq!(report.0, reporter_addr); - assert_eq!(report.1, seq); - Ok(()) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &chain_tip, + |clarity_tx| { + // the key at height 1 should be reported as poisoned + let report = StacksChainState::get_poison_microblock_report(clarity_tx, 1) + .unwrap() + .unwrap(); + assert_eq!(report.0, reporter_addr); + assert_eq!(report.1, seq); + Ok(()) + }, + ) .unwrap() }) .unwrap(); @@ -3659,7 +3663,7 @@ fn test_contract_call_across_clarity_versions() { } } - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); let builder = StacksBlockBuilder::make_block_builder( &burnchain, @@ -3700,7 +3704,7 @@ fn test_contract_call_across_clarity_versions() { let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); peer.chainstate().with_read_only_clarity_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &stacks_block_id, |clarity_tx| { for tenure_id in 1..num_blocks { @@ -3919,7 +3923,7 @@ fn test_is_tx_problematic() { if let Err(ChainstateError::ProblematicTransaction(txid)) = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), contract_spends_too_much_tx.clone()] ) { assert_eq!(txid, contract_spends_too_much_txid); @@ -4096,7 +4100,7 @@ fn test_is_tx_problematic() { if let Err(ChainstateError::ProblematicTransaction(txid)) = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), spend_too_much.clone()] ) { assert_eq!(txid, spend_too_much.txid()); @@ -4146,7 +4150,7 @@ fn test_is_tx_problematic() { let err = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), runtime_checkerror_problematic.clone()] ); @@ -4198,7 +4202,7 @@ fn test_is_tx_problematic() { if let Err(ChainstateError::ProblematicTransaction(txid)) = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), runtime_checkerror_problematic.clone()] ) { assert_eq!(txid, runtime_checkerror_problematic.txid()); @@ -4229,7 +4233,7 @@ fn test_is_tx_problematic() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -4392,7 +4396,7 @@ fn mempool_incorporate_pox_unlocks() { // this will be the height of the block that includes this new tenure let my_height = first_stacks_block_height + 1 + tenure_id; - let available_balance = chainstate.with_read_only_clarity_tx(&sortdb.index_conn(), &parent_tip.index_block_hash(), |clarity_tx| { + let available_balance = chainstate.with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &parent_tip.index_block_hash(), |clarity_tx| { clarity_tx.with_clarity_db_readonly(|db| { let burn_block_height = db.get_current_burnchain_block_height().unwrap() as u64; let v1_unlock_height = db.get_v1_unlock_height(); @@ -4472,7 +4476,7 @@ fn mempool_incorporate_pox_unlocks() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, @@ -4612,7 +4616,7 @@ fn test_fee_order_mismatch_nonce_order() { let anchored_block = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, tip.total_burn, diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index cc2fe940b1..21671d00c0 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -150,7 +150,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -336,7 +336,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -483,7 +483,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -531,7 +531,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -820,7 +820,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -868,7 +868,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1085,7 +1085,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1134,7 +1134,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1433,7 +1433,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1478,7 +1478,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1680,7 +1680,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1728,7 +1728,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -1988,7 +1988,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -2033,7 +2033,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -2235,7 +2235,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); @@ -2283,7 +2283,7 @@ where builder.chain_tip.stacks_block_height, ); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 22a331b193..71d1a7c019 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -1316,9 +1316,11 @@ pub fn get_stacks_account(peer: &mut TestPeer, addr: &PrincipalData) -> StacksAc let stacks_block_id = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); let acct = chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - StacksChainState::get_account(clarity_tx, addr) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &stacks_block_id, + |clarity_tx| StacksChainState::get_account(clarity_tx, addr), + ) .unwrap(); Ok(acct) }) diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 410c59ba81..b15324f96f 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -428,14 +428,21 @@ impl SortitionDBRef for SortitionHandleTx<'_> { } } -impl SortitionDBRef for SortitionDBConn<'_> { +impl SortitionDBRef for SortitionHandleConn<'_> { fn get_pox_start_cycle_info( &self, sortition_id: &SortitionId, parent_stacks_block_burn_ht: u64, cycle_index: u64, ) -> Result, ChainstateError> { - let mut handle = self.as_handle(sortition_id); + let readonly_marf = self + .index + .reopen_readonly() + .expect("BUG: failure trying to get a read-only interface into the sortition db."); + let mut context = self.context.clone(); + context.chain_tip = sortition_id.clone(); + let mut handle = SortitionHandleConn::new(&readonly_marf, context); + get_pox_start_cycle_info(&mut handle, parent_stacks_block_burn_ht, cycle_index) } @@ -578,7 +585,7 @@ impl BurnStateDB for SortitionHandleTx<'_> { } } -impl BurnStateDB for SortitionDBConn<'_> { +impl BurnStateDB for SortitionHandleConn<'_> { fn get_tip_burn_block_height(&self) -> Option { let tip = SortitionDB::get_canonical_burn_chain_tip(self.conn()).ok()?; tip.block_height.try_into().ok() @@ -601,8 +608,6 @@ impl BurnStateDB for SortitionDBConn<'_> { height: u32, sortition_id: &SortitionId, ) -> Option { - let db_handle = SortitionHandleConn::open_reader(self, &sortition_id).ok()?; - let current_height = match self.get_burn_block_height(sortition_id) { None => { return None; @@ -614,7 +619,7 @@ impl BurnStateDB for SortitionDBConn<'_> { return None; } - match db_handle.get_block_snapshot_by_height(height as u64) { + match self.get_block_snapshot_by_height(height as u64) { Ok(Some(x)) => Some(x.burn_header_hash), _ => return None, } diff --git a/stackslib/src/clarity_vm/tests/epoch_switch.rs b/stackslib/src/clarity_vm/tests/epoch_switch.rs index af305f1055..25d01c4905 100644 --- a/stackslib/src/clarity_vm/tests/epoch_switch.rs +++ b/stackslib/src/clarity_vm/tests/epoch_switch.rs @@ -130,7 +130,7 @@ fn test_vm_epoch_switch() { // impl BurnStateDB for SortitionHandleConn { - let burndb = db.index_conn(); + let burndb = db.index_handle_at_tip(); test_burnstatedb_epoch(&burndb, start_height, end_height, 8, 12, 16); } diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index a1135989a4..f3d301b0dc 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -322,7 +322,7 @@ impl MemPoolAdmitter { tx_size: u64, ) -> Result<(), MemPoolRejection> { chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &self.cur_consensus_hash, &self.cur_block, tx, diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index bd441cc023..9a643dd8b3 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -641,7 +641,7 @@ simulating a miner. let result = StacksBlockBuilder::build_anchored_block( &chain_state, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &mut mempool_db, &parent_header, chain_tip.total_burn, @@ -1179,7 +1179,7 @@ simulating a miner. // simulate the p2p refreshing itself // update p2p's read-only view of the unconfirmed state p2p_chainstate - .refresh_unconfirmed_state(&p2p_new_sortition_db.index_conn()) + .refresh_unconfirmed_state(&p2p_new_sortition_db.index_handle_at_tip()) .expect("Failed to open unconfirmed Clarity state"); sleep_ms(100); @@ -1522,7 +1522,7 @@ simulating a miner. let result = StacksBlockBuilder::build_anchored_block( &chain_state, - &sort_db.index_conn(), + &sort_db.index_handle_at_tip(), &mut mempool_db, &parent_header, chain_tip.total_burn, diff --git a/stackslib/src/net/api/callreadonly.rs b/stackslib/src/net/api/callreadonly.rs index eb07206772..dc24de1ae4 100644 --- a/stackslib/src/net/api/callreadonly.rs +++ b/stackslib/src/net/api/callreadonly.rs @@ -234,52 +234,56 @@ impl RPCRequestHandler for RPCCallReadOnlyRequestHandler { cost_limit.write_length = 0; cost_limit.write_count = 0; - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - let epoch = clarity_tx.get_epoch(); - let cost_track = clarity_tx - .with_clarity_db_readonly(|clarity_db| { - LimitedCostTracker::new_mid_block( - mainnet, chain_id, cost_limit, clarity_db, epoch, - ) - }) - .map_err(|_| { - ClarityRuntimeError::from(InterpreterError::CostContractLoadFailure) - })?; - - let clarity_version = clarity_tx - .with_analysis_db_readonly(|analysis_db| { - analysis_db.get_clarity_version(&contract_identifier) - }) - .map_err(|_| { - ClarityRuntimeError::from(CheckErrors::NoSuchContract(format!( - "{}", - &contract_identifier - ))) - })?; - - clarity_tx.with_readonly_clarity_env( - mainnet, - chain_id, - clarity_version, - sender, - sponsor, - cost_track, - |env| { - // we want to execute any function as long as no actual writes are made as - // opposed to be limited to purely calling `define-read-only` functions, - // so use `read_only = false`. This broadens the number of functions that - // can be called, and also circumvents limitations on `define-read-only` - // functions that can not use `contrac-call?`, even when calling other - // read-only functions - env.execute_contract( - &contract_identifier, - function.as_str(), - &args, - false, - ) - }, - ) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + let epoch = clarity_tx.get_epoch(); + let cost_track = clarity_tx + .with_clarity_db_readonly(|clarity_db| { + LimitedCostTracker::new_mid_block( + mainnet, chain_id, cost_limit, clarity_db, epoch, + ) + }) + .map_err(|_| { + ClarityRuntimeError::from(InterpreterError::CostContractLoadFailure) + })?; + + let clarity_version = clarity_tx + .with_analysis_db_readonly(|analysis_db| { + analysis_db.get_clarity_version(&contract_identifier) + }) + .map_err(|_| { + ClarityRuntimeError::from(CheckErrors::NoSuchContract(format!( + "{}", + &contract_identifier + ))) + })?; + + clarity_tx.with_readonly_clarity_env( + mainnet, + chain_id, + clarity_version, + sender, + sponsor, + cost_track, + |env| { + // we want to execute any function as long as no actual writes are made as + // opposed to be limited to purely calling `define-read-only` functions, + // so use `read_only = false`. This broadens the number of functions that + // can be called, and also circumvents limitations on `define-read-only` + // functions that can not use `contrac-call?`, even when calling other + // read-only functions + env.execute_contract( + &contract_identifier, + function.as_str(), + &args, + false, + ) + }, + ) + }, + ) }); // decode the response diff --git a/stackslib/src/net/api/getaccount.rs b/stackslib/src/net/api/getaccount.rs index 83a39f3031..cbd4338ac6 100644 --- a/stackslib/src/net/api/getaccount.rs +++ b/stackslib/src/net/api/getaccount.rs @@ -146,76 +146,80 @@ impl RPCRequestHandler for RPCGetAccountRequestHandler { let account_opt_res = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let key = ClarityDatabase::make_key_for_account_balance(&account); - let burn_block_height = - clarity_db.get_current_burnchain_block_height().ok()? as u64; - let v1_unlock_height = clarity_db.get_v1_unlock_height(); - let v2_unlock_height = clarity_db.get_v2_unlock_height().ok()?; - let v3_unlock_height = clarity_db.get_v3_unlock_height().ok()?; - let (balance, balance_proof) = if with_proof { - clarity_db - .get_data_with_proof::(&key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .unwrap_or_else(|| (STXBalance::zero(), Some("".into()))) - } else { - clarity_db - .get_data::(&key) - .ok() - .flatten() - .map(|a| (a, None)) - .unwrap_or_else(|| (STXBalance::zero(), None)) - }; + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let key = ClarityDatabase::make_key_for_account_balance(&account); + let burn_block_height = + clarity_db.get_current_burnchain_block_height().ok()? as u64; + let v1_unlock_height = clarity_db.get_v1_unlock_height(); + let v2_unlock_height = clarity_db.get_v2_unlock_height().ok()?; + let v3_unlock_height = clarity_db.get_v3_unlock_height().ok()?; + let (balance, balance_proof) = if with_proof { + clarity_db + .get_data_with_proof::(&key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| (STXBalance::zero(), Some("".into()))) + } else { + clarity_db + .get_data::(&key) + .ok() + .flatten() + .map(|a| (a, None)) + .unwrap_or_else(|| (STXBalance::zero(), None)) + }; - let key = ClarityDatabase::make_key_for_account_nonce(&account); - let (nonce, nonce_proof) = if with_proof { - clarity_db - .get_data_with_proof(&key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .unwrap_or_else(|| (0, Some("".into()))) - } else { - clarity_db - .get_data(&key) - .ok() - .flatten() - .map(|a| (a, None)) - .unwrap_or_else(|| (0, None)) - }; + let key = ClarityDatabase::make_key_for_account_nonce(&account); + let (nonce, nonce_proof) = if with_proof { + clarity_db + .get_data_with_proof(&key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| (0, Some("".into()))) + } else { + clarity_db + .get_data(&key) + .ok() + .flatten() + .map(|a| (a, None)) + .unwrap_or_else(|| (0, None)) + }; - let unlocked = balance - .get_available_balance_at_burn_block( + let unlocked = balance + .get_available_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + v3_unlock_height, + ) + .ok()?; + + let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height, - ) - .ok()?; - - let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( - burn_block_height, - v1_unlock_height, - v2_unlock_height, - v3_unlock_height, - ); + ); - let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); - let locked = format!("0x{}", to_hex(&locked.to_be_bytes())); + let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); + let locked = format!("0x{}", to_hex(&locked.to_be_bytes())); - Some(AccountEntryResponse { - balance, - locked, - unlock_height, - nonce, - balance_proof, - nonce_proof, + Some(AccountEntryResponse { + balance, + locked, + unlock_height, + nonce, + balance_proof, + nonce_proof, + }) }) - }) - }) + }, + ) }); let account = if let Ok(Some(account)) = account_opt_res { diff --git a/stackslib/src/net/api/getconstantval.rs b/stackslib/src/net/api/getconstantval.rs index f9b2881ac5..4b3068dd5d 100644 --- a/stackslib/src/net/api/getconstantval.rs +++ b/stackslib/src/net/api/getconstantval.rs @@ -144,20 +144,24 @@ impl RPCRequestHandler for RPCGetConstantValRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let contract = clarity_db.get_contract(&contract_identifier).ok()?; - - let cst = contract - .contract_context - .lookup_variable(constant_name.as_str())? - .serialize_to_hex() - .ok()?; - - let data = format!("0x{cst}"); - Some(ConstantValResponse { data }) - }) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let contract = clarity_db.get_contract(&contract_identifier).ok()?; + + let cst = contract + .contract_context + .lookup_variable(constant_name.as_str())? + .serialize_to_hex() + .ok()?; + + let data = format!("0x{cst}"); + Some(ConstantValResponse { data }) + }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getcontractabi.rs b/stackslib/src/net/api/getcontractabi.rs index 7fc38433e7..35914de9e9 100644 --- a/stackslib/src/net/api/getcontractabi.rs +++ b/stackslib/src/net/api/getcontractabi.rs @@ -132,14 +132,18 @@ impl RPCRequestHandler for RPCGetContractAbiRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - let epoch = clarity_tx.get_epoch(); - clarity_tx.with_analysis_db_readonly(|db| { - db.load_contract(&contract_identifier, &epoch) - .ok()? - .map(|contract| contract.contract_interface) - }) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + let epoch = clarity_tx.get_epoch(); + clarity_tx.with_analysis_db_readonly(|db| { + db.load_contract(&contract_identifier, &epoch) + .ok()? + .map(|contract| contract.contract_interface) + }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getcontractsrc.rs b/stackslib/src/net/api/getcontractsrc.rs index 32963f5319..1c20bffd1b 100644 --- a/stackslib/src/net/api/getcontractsrc.rs +++ b/stackslib/src/net/api/getcontractsrc.rs @@ -140,30 +140,34 @@ impl RPCRequestHandler for RPCGetContractSrcRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|db| { - let source = db.get_contract_src(&contract_identifier)?; - let contract_commit_key = make_contract_hash_key(&contract_identifier); - let (contract_commit, proof) = if with_proof { - db.get_data_with_proof::(&contract_commit_key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? - } else { - db.get_data::(&contract_commit_key) - .ok() - .flatten() - .map(|a| (a, None))? - }; - - let publish_height = contract_commit.block_height; - Some(ContractSrcResponse { - source, - publish_height, - marf_proof: proof, + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|db| { + let source = db.get_contract_src(&contract_identifier)?; + let contract_commit_key = make_contract_hash_key(&contract_identifier); + let (contract_commit, proof) = if with_proof { + db.get_data_with_proof::(&contract_commit_key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? + } else { + db.get_data::(&contract_commit_key) + .ok() + .flatten() + .map(|a| (a, None))? + }; + + let publish_height = contract_commit.block_height; + Some(ContractSrcResponse { + source, + publish_height, + marf_proof: proof, + }) }) - }) - }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getdatavar.rs b/stackslib/src/net/api/getdatavar.rs index f624f3ca58..124fb4856f 100644 --- a/stackslib/src/net/api/getdatavar.rs +++ b/stackslib/src/net/api/getdatavar.rs @@ -154,26 +154,30 @@ impl RPCRequestHandler for RPCGetDataVarRequestHandler { ); let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let (value_hex, marf_proof): (String, _) = if with_proof { - clarity_db - .get_data_with_proof(&key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? - } else { - clarity_db - .get_data(&key) - .ok() - .flatten() - .map(|a| (a, None))? - }; - - let data = format!("0x{}", value_hex); - Some(DataVarResponse { data, marf_proof }) - }) - }) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_data_with_proof(&key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? + } else { + clarity_db + .get_data(&key) + .ok() + .flatten() + .map(|a| (a, None))? + }; + + let data = format!("0x{}", value_hex); + Some(DataVarResponse { data, marf_proof }) + }) + }, + ) }); let data_resp = match data_opt { diff --git a/stackslib/src/net/api/getistraitimplemented.rs b/stackslib/src/net/api/getistraitimplemented.rs index 16b1e2fd33..aac4079074 100644 --- a/stackslib/src/net/api/getistraitimplemented.rs +++ b/stackslib/src/net/api/getistraitimplemented.rs @@ -160,34 +160,38 @@ impl RPCRequestHandler for RPCGetIsTraitImplementedRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|db| { - let analysis = db - .load_contract_analysis(&contract_identifier) - .ok() - .flatten()?; - if analysis.implemented_traits.contains(&trait_id) { - Some(GetIsTraitImplementedResponse { - is_implemented: true, - }) - } else { - let trait_defining_contract = db - .load_contract_analysis(&trait_id.contract_identifier) + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|db| { + let analysis = db + .load_contract_analysis(&contract_identifier) .ok() .flatten()?; - let trait_definition = - trait_defining_contract.get_defined_trait(&trait_id.name)?; - let is_implemented = analysis - .check_trait_compliance( - &db.get_clarity_epoch_version().ok()?, - &trait_id, - trait_definition, - ) - .is_ok(); - Some(GetIsTraitImplementedResponse { is_implemented }) - } - }) - }) + if analysis.implemented_traits.contains(&trait_id) { + Some(GetIsTraitImplementedResponse { + is_implemented: true, + }) + } else { + let trait_defining_contract = db + .load_contract_analysis(&trait_id.contract_identifier) + .ok() + .flatten()?; + let trait_definition = + trait_defining_contract.get_defined_trait(&trait_id.name)?; + let is_implemented = analysis + .check_trait_compliance( + &db.get_clarity_epoch_version().ok()?, + &trait_id, + trait_definition, + ) + .is_ok(); + Some(GetIsTraitImplementedResponse { is_implemented }) + } + }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getmapentry.rs b/stackslib/src/net/api/getmapentry.rs index b5db5af041..5d0cd7504f 100644 --- a/stackslib/src/net/api/getmapentry.rs +++ b/stackslib/src/net/api/getmapentry.rs @@ -183,34 +183,38 @@ impl RPCRequestHandler for RPCGetMapEntryRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { - chainstate.maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - let (value_hex, marf_proof): (String, _) = if with_proof { - clarity_db - .get_data_with_proof(&key) - .ok() - .flatten() - .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) - .unwrap_or_else(|| { - test_debug!("No value for '{}' in {}", &key, tip); - (none_response, Some("".into())) - }) - } else { - clarity_db - .get_data(&key) - .ok() - .flatten() - .map(|a| (a, None)) - .unwrap_or_else(|| { - test_debug!("No value for '{}' in {}", &key, tip); - (none_response, None) - }) - }; + chainstate.maybe_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + let (value_hex, marf_proof): (String, _) = if with_proof { + clarity_db + .get_data_with_proof(&key) + .ok() + .flatten() + .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) + .unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (none_response, Some("".into())) + }) + } else { + clarity_db + .get_data(&key) + .ok() + .flatten() + .map(|a| (a, None)) + .unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (none_response, None) + }) + }; - let data = format!("0x{}", value_hex); - MapEntryResponse { data, marf_proof } - }) - }) + let data = format!("0x{}", value_hex); + MapEntryResponse { data, marf_proof } + }) + }, + ) }); let data_resp = match data_resp { diff --git a/stackslib/src/net/api/getpoxinfo.rs b/stackslib/src/net/api/getpoxinfo.rs index 9e3cd906d4..c3de3ab0da 100644 --- a/stackslib/src/net/api/getpoxinfo.rs +++ b/stackslib/src/net/api/getpoxinfo.rs @@ -190,7 +190,7 @@ impl RPCPoxInfoData { + 1; let data = chainstate - .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { + .maybe_read_only_clarity_tx(&sortdb.index_handle_at_tip(), tip, |clarity_tx| { clarity_tx.with_readonly_clarity_env( mainnet, chain_id, diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 2669c64356..0423d5c57b 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -36,7 +36,7 @@ use stacks_common::util::retry::BoundReader; use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::Txid; -use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleConn}; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; @@ -206,7 +206,7 @@ impl NakamotoBlockProposal { }); } - let burn_dbconn = sortdb.index_conn(); + let burn_dbconn: SortitionHandleConn = sortdb.index_handle_at_tip(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; let mut db_handle = sortdb.index_handle(&sort_tip); let expected_burn_opt = diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index ce67147a9e..5f9dfef7f8 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -454,7 +454,7 @@ impl<'a> TestRPC<'a> { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![tx_coinbase_signed.clone(), tx_contract_signed.clone()], ) .unwrap(); @@ -477,7 +477,7 @@ impl<'a> TestRPC<'a> { let sortdb = peer_1.sortdb.take().unwrap(); Relayer::setup_unconfirmed_state(peer_1.chainstate(), &sortdb).unwrap(); let mblock = { - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut microblock_builder = StacksMicroblockBuilder::new( stacks_block.block_hash(), consensus_hash.clone(), @@ -529,11 +529,11 @@ impl<'a> TestRPC<'a> { let sortdb2 = peer_2.sortdb.take().unwrap(); peer_1 .chainstate() - .reload_unconfirmed_state(&sortdb1.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb1.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); peer_2 .chainstate() - .reload_unconfirmed_state(&sortdb2.index_conn(), canonical_tip.clone()) + .reload_unconfirmed_state(&sortdb2.index_handle_at_tip(), canonical_tip.clone()) .unwrap(); peer_1.sortdb = Some(sortdb1); peer_2.sortdb = Some(sortdb2); @@ -732,7 +732,7 @@ impl<'a> TestRPC<'a> { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![tx_coinbase_signed.clone()], ) .unwrap(); diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index bd064774c5..6ab6b7a302 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3531,7 +3531,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), block_txs, ) .unwrap(); @@ -3742,7 +3742,7 @@ pub mod test { |mut builder, ref mut miner, ref sortdb| { let (mut miner_chainstate, _) = StacksChainState::open(false, network_id, &chainstate_path, None).unwrap(); - let sort_iconn = sortdb.index_conn(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index f853bb795a..c1318e6647 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5892,7 +5892,7 @@ impl PeerNetwork { return false; } let stacks_epoch = match sortdb - .index_conn() + .index_handle_at_tip() .get_stacks_epoch(burnchain_tip.block_height as u32) { Some(epoch) => epoch, diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 7236ef76e4..6f7ad30057 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1840,7 +1840,7 @@ impl Relayer { &canonical_consensus_hash, &canonical_block_hash ); let processed_unconfirmed_state = - chainstate.reload_unconfirmed_state(&sortdb.index_conn(), canonical_tip)?; + chainstate.reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip)?; Ok(processed_unconfirmed_state) } @@ -4043,15 +4043,19 @@ pub mod test { StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); let cur_nonce = stacks_node .chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_nonce( - &spending_account.origin_address().unwrap().into(), - ) - .unwrap() - }) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &chain_tip, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_nonce( + &spending_account.origin_address().unwrap().into(), + ) + .unwrap() + }) + }, + ) .unwrap(); test_debug!( @@ -5415,7 +5419,7 @@ pub mod test { let block = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone()], ) .unwrap() @@ -5482,7 +5486,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone(), bad_tx.clone()], ) { @@ -5504,7 +5508,7 @@ pub mod test { let bad_block = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx.clone()], ) .unwrap(); @@ -5521,7 +5525,7 @@ pub mod test { let merkle_tree = MerkleTree::::new(&txid_vecs); bad_block.header.tx_merkle_root = merkle_tree.root(); - let sort_ic = sortdb.index_conn(); + let sort_ic = sortdb.index_handle_at_tip(); chainstate .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) .unwrap(); @@ -5806,7 +5810,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx], ) .unwrap(); @@ -5985,7 +5989,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx, versioned_contract], ) .unwrap(); @@ -6172,7 +6176,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx], ) .unwrap(); @@ -6212,7 +6216,7 @@ pub mod test { let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); match node.chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, @@ -6262,7 +6266,7 @@ pub mod test { let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); match node.chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index f2d8521ae4..de642b98bb 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -493,7 +493,7 @@ impl StackerDBConfig { let cur_epoch = SortitionDB::get_stacks_epoch(sortition_db.conn(), burn_tip.block_height)? .expect("FATAL: no epoch defined"); - let dbconn = sortition_db.index_conn(); + let dbconn = sortition_db.index_handle_at_tip(); // check the target contract let res = chainstate.with_read_only_clarity_tx(&dbconn, &chain_tip_hash, |clarity_tx| { diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index 5e9ea0daf2..bc242dc246 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -624,7 +624,7 @@ fn make_contract_call_transaction( let chain_tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); let cur_nonce = chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { + .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &chain_tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { clarity_db .get_account_nonce(&spending_account.origin_address().unwrap().into()) @@ -807,7 +807,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { StacksBlockBuilder::make_anchored_block_and_microblock_from_txs( builder, chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), vec![coinbase_tx, stack_tx], vec![mblock_tx], ) @@ -1424,7 +1424,7 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc let (anchored_block, block_size, block_execution_cost) = StacksBlockBuilder::build_anchored_block( chainstate, - &sortdb.index_conn(), + &sortdb.index_handle_at_tip(), &mut mempool, &parent_tip, parent_tip diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index d6edd79963..a7e78bc37d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -410,21 +410,25 @@ impl BlockMinerThread { // Get all nonces for the signers from clarity DB to use to validate transactions let account_nonces = chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &stacks_block_id, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - addresses - .iter() - .map(|address| { - ( - address.clone(), - clarity_db - .get_account_nonce(&address.clone().into()) - .unwrap_or(0), - ) - }) - .collect::>() - }) - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_tip(), + &stacks_block_id, + |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + addresses + .iter() + .map(|address| { + ( + address.clone(), + clarity_db + .get_account_nonce(&address.clone().into()) + .unwrap_or(0), + ) + }) + .collect::>() + }) + }, + ) .unwrap_or_default(); let mut filtered_transactions: HashMap = HashMap::new(); for (_slot, signer_message) in signer_messages { @@ -757,7 +761,7 @@ impl BlockMinerThread { // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, - &burn_db.index_conn(), + &burn_db.index_handle_at_tip(), &mut mem_pool, &parent_block_info.stacks_parent_header, &self.burn_block.consensus_hash, @@ -933,7 +937,7 @@ impl ParentStacksBlockInfo { let principal = miner_address.into(); let account = chain_state .with_read_only_clarity_tx( - &burn_db.index_conn(), + &burn_db.index_handle_at_tip(), &stacks_tip_header.index_block_hash(), |conn| StacksChainState::get_account(conn, &principal), ) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index c4eff65ff0..ac379b86e7 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -753,7 +753,7 @@ impl MicroblockMinerThread { .epoch_id; let mint_result = { - let ic = sortdb.index_conn(); + let ic = sortdb.index_handle_at_tip(); let mut microblock_miner = match StacksMicroblockBuilder::resume_unconfirmed( chainstate, &ic, @@ -2352,7 +2352,7 @@ impl BlockMinerThread { } let (anchored_block, _, _) = match StacksBlockBuilder::build_anchored_block( &chain_state, - &burn_db.index_conn(), + &burn_db.index_handle_at_tip(), &mut mem_pool, &parent_block_info.stacks_parent_header, parent_block_info.parent_block_total_burn, @@ -2382,7 +2382,7 @@ impl BlockMinerThread { // try again match StacksBlockBuilder::build_anchored_block( &chain_state, - &burn_db.index_conn(), + &burn_db.index_handle_at_tip(), &mut mem_pool, &parent_block_info.stacks_parent_header, parent_block_info.parent_block_total_burn, @@ -4047,7 +4047,7 @@ impl ParentStacksBlockInfo { let principal = miner_address.into(); let account = chain_state .with_read_only_clarity_tx( - &burn_db.index_conn(), + &burn_db.index_handle_at_tip(), &StacksBlockHeader::make_index_block_hash(mine_tip_ch, mine_tip_bh), |conn| StacksChainState::get_account(conn, &principal), ) diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 77117a6822..ba5b7e204e 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -891,7 +891,7 @@ impl Node { let mut fee_estimator = self.config.make_fee_estimator(); let stacks_epoch = db - .index_conn() + .index_handle_at_tip() .get_stacks_epoch_by_epoch_id(&processed_block.evaluated_epoch) .expect("Could not find a stacks epoch."); if let Some(estimator) = cost_estimator.as_mut() { diff --git a/testnet/stacks-node/src/run_loop/helium.rs b/testnet/stacks-node/src/run_loop/helium.rs index c7212d4132..4c81867369 100644 --- a/testnet/stacks-node/src/run_loop/helium.rs +++ b/testnet/stacks-node/src/run_loop/helium.rs @@ -89,11 +89,11 @@ impl RunLoop { let _ = burnchain.sortdb_mut(); // Run the tenure, keep the artifacts - let artifacts_from_1st_tenure = match first_tenure.run(&burnchain.sortdb_ref().index_conn()) - { - Some(res) => res, - None => panic!("Error while running 1st tenure"), - }; + let artifacts_from_1st_tenure = + match first_tenure.run(&burnchain.sortdb_ref().index_handle_at_tip()) { + Some(res) => res, + None => panic!("Error while running 1st tenure"), + }; // Tenures are instantiating their own chainstate, so that nodes can keep a clean chainstate, // while having the option of running multiple tenures concurrently and try different strategies. @@ -136,7 +136,7 @@ impl RunLoop { &burnchain_tip, &chain_tip, &mut self.node.chain_state, - &burnchain.sortdb_ref().index_conn(), + &burnchain.sortdb_ref().index_handle_at_tip(), ); // If the node we're looping on won the sortition, initialize and configure the next tenure @@ -160,7 +160,7 @@ impl RunLoop { &chain_tip, &mut tenure, ); - tenure.run(&burnchain.sortdb_ref().index_conn()) + tenure.run(&burnchain.sortdb_ref().index_handle_at_tip()) } None => None, }; @@ -214,7 +214,7 @@ impl RunLoop { &burnchain_tip, &chain_tip, &mut self.node.chain_state, - &burnchain.sortdb_ref().index_conn(), + &burnchain.sortdb_ref().index_handle_at_tip(), ); } }; diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index fd7683f569..5dd67cddab 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -5,7 +5,7 @@ use std::time::{Duration, Instant}; use stacks::burnchains::PoxConstants; #[cfg(test)] use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::burn::db::sortdb::SortitionDBConn; +use stacks::chainstate::burn::db::sortdb::SortitionHandleConn; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::BlockBuilderSettings; use stacks::chainstate::stacks::{ @@ -72,7 +72,7 @@ impl<'a> Tenure { } } - pub fn run(&mut self, burn_dbconn: &SortitionDBConn) -> Option { + pub fn run(&mut self, burn_dbconn: &SortitionHandleConn) -> Option { info!("Node starting new tenure with VRF {:?}", self.vrf_seed); let duration_left: u128 = self.config.burnchain.commit_anchor_block_within as u128; diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 14db80f0b1..947eb633ee 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -5143,7 +5143,7 @@ fn test_v1_unlock_height_with_current_stackers() { let sortdb = btc_regtest_controller.sortdb_mut(); for height in 211..tip_info.burn_block_height { - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -5423,7 +5423,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { let sortdb = btc_regtest_controller.sortdb_mut(); for height in 211..tip_info.burn_block_height { - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 4e387d6304..f3c48adc86 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -400,7 +400,7 @@ fn disable_pox() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -1069,7 +1069,7 @@ fn pox_2_unlock_all() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 2cc9868dc6..2394e93621 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -493,7 +493,7 @@ fn fix_to_pox_contract() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -1213,7 +1213,7 @@ fn verify_auto_unlock_behavior() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_conn(); + let iconn = sortdb.index_handle_at_tip(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 7804781c11..c8fc6c80f3 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1915,7 +1915,7 @@ fn block_proposal_api_endpoint() { ) .expect("Failed to build Nakamoto block"); - let burn_dbconn = btc_regtest_controller.sortdb_ref().index_conn(); + let burn_dbconn = btc_regtest_controller.sortdb_ref().index_handle_at_tip(); let mut miner_tenure_info = builder .load_tenure_info(&mut chainstate, &burn_dbconn, tenure_cause) .unwrap(); @@ -3582,8 +3582,8 @@ fn check_block_heights() { "get-heights", vec![], ); - let heights0 = heights0_value.expect_tuple().unwrap(); - info!("Heights from pre-epoch 3.0: {}", heights0); + let preheights = heights0_value.expect_tuple().unwrap(); + info!("Heights from pre-epoch 3.0: {}", preheights); // first block wakes up the run loop, wait until a key registration has been submitted. next_block_and(&mut btc_regtest_controller, 60, || { @@ -3601,7 +3601,7 @@ fn check_block_heights() { let info = get_chain_info_result(&naka_conf).unwrap(); println!("Chain info: {:?}", info); - let mut last_burn_block_height = info.burn_block_height as u128; + let mut last_burn_block_height; let mut last_stacks_block_height = info.stacks_tip_height as u128; let mut last_tenure_height = last_stacks_block_height as u128; @@ -3615,14 +3615,8 @@ fn check_block_heights() { let heights0 = heights0_value.expect_tuple().unwrap(); info!("Heights from epoch 3.0 start: {}", heights0); assert_eq!( - heights0 - .get("burn-block-height") - .unwrap() - .clone() - .expect_u128() - .unwrap() - + 3, - last_burn_block_height, + heights0.get("burn-block-height"), + preheights.get("burn-block-height"), "Burn block height should match" ); assert_eq!( diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 5c683f66fe..aecc0f9c12 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -3463,12 +3463,15 @@ fn microblock_fork_poison_integration_test() { .unwrap(); chainstate - .reload_unconfirmed_state(&btc_regtest_controller.sortdb_ref().index_conn(), tip_hash) + .reload_unconfirmed_state( + &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + tip_hash, + ) .unwrap(); let first_microblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller.sortdb_ref().index_conn(), + &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), consensus_hash, stacks_block.clone(), vec![unconfirmed_tx], @@ -3718,12 +3721,15 @@ fn microblock_integration_test() { .unwrap(); chainstate - .reload_unconfirmed_state(&btc_regtest_controller.sortdb_ref().index_conn(), tip_hash) + .reload_unconfirmed_state( + &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + tip_hash, + ) .unwrap(); let first_microblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller.sortdb_ref().index_conn(), + &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), consensus_hash, stacks_block.clone(), vec![unconfirmed_tx], @@ -9134,7 +9140,10 @@ fn use_latest_tip_integration_test() { // Initialize the unconfirmed state. chainstate - .reload_unconfirmed_state(&btc_regtest_controller.sortdb_ref().index_conn(), tip_hash) + .reload_unconfirmed_state( + &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + tip_hash, + ) .unwrap(); // Make microblock with two transactions. @@ -9157,7 +9166,7 @@ fn use_latest_tip_integration_test() { let mblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller.sortdb_ref().index_conn(), + &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), consensus_hash, stacks_block.clone(), vec_tx, From efdd01cdb96c8cf9fcd47c6fc221069da6b45fdd Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 21 May 2024 17:15:47 +0300 Subject: [PATCH 0051/1400] Add untilBurnHt check inside `RevokeDelegateStxCommand` --- .../tests/pox-4/pox_RevokeDelegateStxCommand.ts | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts index 1c30e3d569..17b0e3e3c1 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts @@ -15,6 +15,7 @@ import { Cl, someCV, tupleCV } from "@stacks/transactions"; * * Constraints for running this command include: * - The `Stacker` has to currently be delegating. + * - The `Stacker`'s delegation must not be expired */ export class RevokeDelegateStxCommand implements PoxCommand { readonly wallet: Wallet; @@ -31,10 +32,12 @@ export class RevokeDelegateStxCommand implements PoxCommand { check(model: Readonly): boolean { // Constraints for running this command include: // - The Stacker has to currently be delegating. - + // - The Stacker's delegation must not be expired + const stacker = model.stackers.get(this.wallet.stxAddress)!; return ( model.stackingMinimum > 0 && - model.stackers.get(this.wallet.stxAddress)!.hasDelegated === true + stacker.hasDelegated === true && + stacker.delegatedUntilBurnHt > model.burnBlockHeight ); } From 194ad768d27be17bdd066677131fab528bc016cc Mon Sep 17 00:00:00 2001 From: BowTiedRadone <92028479+BowTiedRadone@users.noreply.github.com> Date: Tue, 21 May 2024 18:14:54 +0300 Subject: [PATCH 0052/1400] Update docs according to suggestion Co-authored-by: Nikos Baxevanis --- .../tests/pox-4/pox_RevokeDelegateStxCommand.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts index 17b0e3e3c1..ca70eb7f0f 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts @@ -15,7 +15,7 @@ import { Cl, someCV, tupleCV } from "@stacks/transactions"; * * Constraints for running this command include: * - The `Stacker` has to currently be delegating. - * - The `Stacker`'s delegation must not be expired + * - The `Stacker`'s delegation must not be expired. */ export class RevokeDelegateStxCommand implements PoxCommand { readonly wallet: Wallet; From ca5a85301622f8cab833a4cce4a42e8625c284ee Mon Sep 17 00:00:00 2001 From: BowTiedRadone <92028479+BowTiedRadone@users.noreply.github.com> Date: Tue, 21 May 2024 18:15:09 +0300 Subject: [PATCH 0053/1400] Update comment according to suggestion Co-authored-by: Nikos Baxevanis --- .../tests/pox-4/pox_RevokeDelegateStxCommand.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts index ca70eb7f0f..54e4806757 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts @@ -32,7 +32,7 @@ export class RevokeDelegateStxCommand implements PoxCommand { check(model: Readonly): boolean { // Constraints for running this command include: // - The Stacker has to currently be delegating. - // - The Stacker's delegation must not be expired + // - The Stacker's delegation must not be expired. const stacker = model.stackers.get(this.wallet.stxAddress)!; return ( model.stackingMinimum > 0 && From 66e3cdfb0a80ade46b55bcb849f3bffd84ceb117 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 21 May 2024 20:22:33 +0300 Subject: [PATCH 0054/1400] display only on debug failed connection with private ips --- stackslib/src/net/neighbors/walk.rs | 21 ++++++--------------- stackslib/src/net/stackerdb/sync.rs | 6 +----- 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/stackslib/src/net/neighbors/walk.rs b/stackslib/src/net/neighbors/walk.rs index 71fdd8c1de..478f5c0e3d 100644 --- a/stackslib/src/net/neighbors/walk.rs +++ b/stackslib/src/net/neighbors/walk.rs @@ -1012,21 +1012,12 @@ impl NeighborWalk { continue; } Err(e) => { - if na.addrbytes.is_in_private_range() { - debug!( - "{:?}: Failed to connect to {:?}: {:?}", - network.get_local_peer(), - &nk, - &e - ); - } else { - info!( - "{:?}: Failed to connect to {:?}: {:?}", - network.get_local_peer(), - &nk, - &e - ); - } + debug!( + "{:?}: Failed to connect to {:?}: {:?}", + network.get_local_peer(), + &nk, + &e + ); continue; } } diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 001479163f..02390211bc 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -673,11 +673,7 @@ impl StackerDBSync { self.num_attempted_connections += 1; } Err(_e) => { - if naddr.addrbytes.is_in_private_range() { - debug!("Failed to begin session with {:?}: {:?}", &naddr, &_e); - } else { - info!("Failed to begin session with {:?}: {:?}", &naddr, &_e); - } + debug!("Failed to begin session with {:?}: {:?}", &naddr, &_e); } } } From f72ecc8c30ffafb457d41fb1ffac0399253ae4a7 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 21 May 2024 11:06:58 -0700 Subject: [PATCH 0055/1400] feat: update metrics in v0 signer, add tests --- stacks-signer/src/monitoring/server.rs | 7 ++++++- stacks-signer/src/v0/signer.rs | 13 ++++++++++--- testnet/stacks-node/Cargo.toml | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 17 +++++++++++++++++ 4 files changed, 34 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/monitoring/server.rs b/stacks-signer/src/monitoring/server.rs index 9cecd41ed7..ffde008c9f 100644 --- a/stacks-signer/src/monitoring/server.rs +++ b/stacks-signer/src/monitoring/server.rs @@ -95,7 +95,12 @@ impl MonitoringServer { public_key, format!("http://{}", config.node_host), ); - server.update_metrics()?; + if let Err(e) = server.update_metrics() { + warn!( + "Monitoring: Error updating metrics when starting server: {:?}", + e + ); + }; server.main_loop() } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 682c1433c1..e5471053ae 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -239,6 +239,7 @@ impl Signer { "block_id" => %block_proposal.block.block_id(), ); let block_info = BlockInfo::from(block_proposal.clone()); + crate::monitoring::increment_block_proposals_received(); stacks_client .submit_block_for_validation(block_info.block.clone()) .unwrap_or_else(|e| { @@ -311,11 +312,17 @@ impl Signer { }; // Submit a proposal response to the .signers contract for miners debug!("{self}: Broadcasting a block response to stacks node: {response:?}"); - if let Err(e) = self + match self .stackerdb - .send_message_with_retry::(response.into()) + .send_message_with_retry::(response.clone().into()) { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + Ok(_) => { + let accepted = matches!(response, BlockResponse::Accepted(..)); + crate::monitoring::increment_block_responses_sent(accepted); + } + Err(e) => { + warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + } } self.signer_db .insert_block(&block_info) diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index bceb484cd7..42f4f858b7 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -62,7 +62,7 @@ name = "stacks-events" path = "src/stacks_events.rs" [features] -monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom"] +monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom"] slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"] prod-genesis-chainstate = [] default = [] diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 55115c5f18..85b971a426 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -295,4 +295,21 @@ fn miner_gather_signatures() { valid }); assert!(all_signed); + + // Test prometheus metrics response + #[cfg(feature = "monitoring_prom")] + { + let metrics_response = signer_test.get_signer_metrics(); + + // Because 5 signers are running in the same process, the prometheus metrics + // are incremented once for every signer. This is why we expect the metric to be + // `5`, even though there is only one block proposed. + let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); + assert!(metrics_response.contains(&expected_result)); + let expected_result = format!( + "stacks_signer_block_responses_sent{{response_type=\"accepted\"}} {}", + num_signers + ); + assert!(metrics_response.contains(&expected_result)); + } } From 690b9bcbbfa573d50702bb549176427d4c8255c4 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 21 May 2024 21:27:54 +0300 Subject: [PATCH 0056/1400] fix cause typo, add checker strict and drop table --- stackslib/src/chainstate/nakamoto/tenure.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index c9e5c0cf59..078e2514bf 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -119,6 +119,9 @@ use crate::util_lib::db::{ }; pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#" + -- Drop the existing table if it exists + DROP TABLE IF EXISTS nakamoto_tenures; + CREATE TABLE nakamoto_tenures ( -- consensus hash of start-tenure block (i.e. the consensus hash of the sortition in which the miner's block-commit -- was mined) @@ -129,7 +132,7 @@ pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#" burn_view_consensus_hash TEXT NOT NULL, -- whether or not this tenure was triggered by a sortition (as opposed to a tenure-extension). -- this is equal to the `cause` field in a TenureChange - cause INETGER NOT NULL, + cause INTEGER NOT NULL, -- block hash of start-tenure block block_hash TEXT NOT NULL, -- block ID of this start block (this is the StacksBlockId of the above tenure_id_consensus_hash and block_hash) @@ -144,9 +147,10 @@ pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#" num_blocks_confirmed INTEGER NOT NULL, -- this is the ith tenure transaction in its respective Nakamoto chain history. tenure_index INTEGER NOT NULL, - + -- schema version field + schema_version INTEGER NOT NULL DEFAULT 1, PRIMARY KEY(burn_view_consensus_hash,tenure_index) - ); + ) STRICT; CREATE INDEX nakamoto_tenures_by_block_id ON nakamoto_tenures(block_id); CREATE INDEX nakamoto_tenures_by_tenure_id ON nakamoto_tenures(tenure_id_consensus_hash); CREATE INDEX nakamoto_tenures_by_block_and_consensus_hashes ON nakamoto_tenures(tenure_id_consensus_hash,block_hash); From aa9baf977c5edfa6b28e47d7f9896e1f3a84c2ef Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 21 May 2024 11:57:18 -0700 Subject: [PATCH 0057/1400] fix: move reward_set loading to top-level BlockMinerThread --- .../stacks-node/src/nakamoto_node/miner.rs | 57 +++++++++---------- 1 file changed, 27 insertions(+), 30 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 29631cdec0..09a70b1178 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -155,6 +155,27 @@ impl BlockMinerThread { let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) .expect("FATAL: failed to connect to stacker DB"); + let sort_db = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + + let tip = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + &self.burn_block.consensus_hash, + ) + .expect("FATAL: could not retrieve chain tip") + .expect("FATAL: could not retrieve chain tip"); + + let reward_set = sort_db + .get_preprocessed_reward_set_of(&tip.sortition_id) + .expect("FATAL: Error fetching reward set") + .expect("FATAL: No reward set found for miner") + .known_selected_anchor_block_owned() + .expect("FATAL: No reward set found for miner"); + let mut attempts = 0; // now, actually run this tenure loop { @@ -182,11 +203,12 @@ impl BlockMinerThread { }; if let Some(mut new_block) = new_block { - let (reward_set, signer_signature) = match self.gather_signatures( + let signer_signature = match self.gather_signatures( &mut new_block, self.burn_block.block_height, &mut stackerdbs, &mut attempts, + &reward_set, ) { Ok(x) => x, Err(e) => { @@ -198,7 +220,7 @@ impl BlockMinerThread { }; new_block.header.signer_signature = signer_signature; - if let Err(e) = self.broadcast(new_block.clone(), None, reward_set) { + if let Err(e) = self.broadcast(new_block.clone(), None, reward_set.clone()) { warn!("Error accepting own block: {e:?}. Will try mining again."); continue; } else { @@ -221,12 +243,6 @@ impl BlockMinerThread { self.mined_blocks.push(new_block); } - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .expect("FATAL: could not open sortition DB"); let wait_start = Instant::now(); while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); @@ -342,7 +358,8 @@ impl BlockMinerThread { burn_block_height: u64, stackerdbs: &mut StackerDBs, attempts: &mut u64, - ) -> Result<(RewardSet, Vec), NakamotoNodeError> { + reward_set: &RewardSet, + ) -> Result, NakamotoNodeError> { let Some(miner_privkey) = self.config.miner.mining_key else { return Err(NakamotoNodeError::MinerConfigurationFailed( "No mining key configured, cannot mine", @@ -370,26 +387,6 @@ impl BlockMinerThread { ) .expect("FATAL: building on a burn block that is before the first burn block"); - let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { - Ok(Some(x)) => x, - Ok(None) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "No reward set found. Cannot initialize miner coordinator.".into(), - )); - } - Err(e) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" - ))); - } - }; - - let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Current reward cycle did not select a reward set. Cannot mine!".into(), - )); - }; - let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); let mut coordinator = SignCoordinator::new( &reward_set, @@ -417,7 +414,7 @@ impl BlockMinerThread { &self.globals.counters, )?; - return Ok((reward_set, signature)); + return Ok(signature); } fn get_stackerdb_contract_and_slots( From 050418cfa3018c3d842bbec2a99899c4905dc7e3 Mon Sep 17 00:00:00 2001 From: Nikos Baxevanis Date: Wed, 22 May 2024 00:08:29 +0200 Subject: [PATCH 0058/1400] feat(pox-4-tests): add check function delegate to PoxCommand-derived types - Added import for StackStxSigCommand_Err and StackStxAuthCommand_Err - Added StackStxAuthCommand_Err with a custom check function delegate to PoxCommands - Added StackStxSigCommand_Err with a custom check function delegate to PoxCommands This allows the check function to be parameterized, reducing the need for copy-pasting classes. Note: This is a very work in progress. --- .../tests/pox-4/pox_Commands.ts | 62 +++++++ .../pox-4/pox_StackStxAuthCommand_Err.ts | 172 ++++++++++++++++++ .../tests/pox-4/pox_StackStxSigCommand_Err.ts | 170 +++++++++++++++++ 3 files changed, 404 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts index ba7043d5ec..44ff2c8ea0 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts @@ -3,7 +3,9 @@ import { Real, Stacker, Stub, StxAddress, Wallet } from "./pox_CommandModel"; import { GetStackingMinimumCommand } from "./pox_GetStackingMinimumCommand"; import { GetStxAccountCommand } from "./pox_GetStxAccountCommand"; import { StackStxSigCommand } from "./pox_StackStxSigCommand"; +import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; import { StackStxAuthCommand } from "./pox_StackStxAuthCommand"; +import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { DelegateStxCommand } from "./pox_DelegateStxCommand"; import { DelegateStackStxCommand } from "./pox_DelegateStackStxCommand"; import { Simnet } from "@hirosystems/clarinet-sdk"; @@ -83,6 +85,36 @@ export function PoxCommands( r.margin, ) ), + // StackStxAuthCommand_Err + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this: StackStxAuthCommand_Err, model: Readonly): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + console.log("I in StackStxAuthCommand_Err stacker", stacker); + return ( + model.stackingMinimum > 0 && !stacker.isStacking && + !stacker.hasDelegated + ); + }, + 123, + ) + ), // StackExtendAuthCommand fc .record({ @@ -105,6 +137,36 @@ export function PoxCommands( r.currentCycle, ), ), + // StackStxSigCommand_Err + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this: StackStxSigCommand_Err, model: Readonly): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + console.log("I in StackStxSigCommand_Err stacker", stacker); + return ( + model.stackingMinimum > 0 && !stacker.isStacking && + !stacker.hasDelegated + ); + }, + 123, + ) + ), // StackExtendSigCommand fc .record({ diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts new file mode 100644 index 0000000000..ad310fef9a --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts @@ -0,0 +1,172 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { assert, expect } from "vitest"; +import { + Cl, + ClarityType, + ClarityValue, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; + +type CheckFunc = ( + this: StackStxAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackStxAuthCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly authId: number; + readonly period: number; + readonly margin: number; + + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackStxAuthCommand_Err` to lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param authId - Unique auth-id for the authorization. + * @param period - Number of reward cycles to lock uSTX. + * @param margin - Multiplier for minimum required uSTX to stack so that each + * Stacker locks a different amount of uSTX across test runs. + * @param checkFunc - A function to check constraints for running this command. + */ + constructor( + wallet: Wallet, + authId: number, + period: number, + margin: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.authId = authId; + this.period = period; + this.margin = margin; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. For our tests, we will use the minimum amount of uSTX to be stacked + // in the given reward cycle multiplied by the margin, which is a randomly + // generated number passed to the constructor of this class. + const maxAmount = model.stackingMinimum * this.margin; + + const { result: setAuthorization } = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.period), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-stx"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + expect(setAuthorization).toBeOk(Cl.bool(true)); + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + // The amount of uSTX to be locked in the reward cycle. For this test, we + // will use the maximum amount of uSTX that can be used (per tx) with this + // signer key. + const amountUstx = maxAmount; + + // Act + const stackStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-stx", + [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight), + // (lock-period uint) + Cl.uint(this.period), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + const { result: rewardCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight)], + this.wallet.stxAddress, + ); + assert(isClarityType(rewardCycle, ClarityType.UInt)); + + const { result: unlockBurnHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(Number(rewardCycle.value) + this.period + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); + + // Assert + expect(stackStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-stx-auth", + "lock-amount", + amountUstx.toString(), + "period", + this.period.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-stx auth auth-id ${this.authId} and period ${this.period}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts new file mode 100644 index 0000000000..4c5f7ce149 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts @@ -0,0 +1,170 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { assert, expect } from "vitest"; +import { + Cl, + ClarityType, + ClarityValue, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; + +type CheckFunc = ( + this: StackStxSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackStxSigCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly authId: number; + readonly period: number; + readonly margin: number; + + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackStxSigCommand` to lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param authId - Unique auth-id for the authorization. + * @param period - Number of reward cycles to lock uSTX. + * @param margin - Multiplier for minimum required uSTX to stack so that each + * Stacker locks a different amount of uSTX across test runs. + * @param checkFunc - A function to check constraints for running this command. + */ + constructor( + wallet: Wallet, + authId: number, + period: number, + margin: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.authId = authId; + this.period = period; + this.margin = margin; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + const currentRewCycle = currentCycle(real.network); + + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. For our tests, we will use the minimum amount of uSTX to be stacked + // in the given reward cycle multiplied by the margin, which is a randomly + // generated number passed to the constructor of this class. + const maxAmount = model.stackingMinimum * this.margin; + + const signerSig = this.wallet.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.wallet.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For `stack-stx` and `stack-extend`, this refers to the reward cycle + // where the transaction is confirmed. For `stack-aggregation-commit`, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle, + // For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + // this refers to `extend-count`. For `stack-aggregation-commit`, this is + // `u1`. + period: this.period, + // A string representing the function where this authorization is valid. + // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: Pox4SignatureTopic.StackStx, + // The PoX address that can be used with this signer key. + poxAddress: this.wallet.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: maxAmount, + }); + + // The amount of uSTX to be locked in the reward cycle. For this test, we + // will use the maximum amount of uSTX that can be used (per tx) with this + // signer key. + const amountUstx = maxAmount; + + // Act + const stackStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-stx", + [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight), + // (lock-period uint) + Cl.uint(this.period), + // (signer-sig (optional (buff 65))) + Cl.some(Cl.bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + const { result: rewardCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight)], + this.wallet.stxAddress, + ); + assert(isClarityType(rewardCycle, ClarityType.UInt)); + + const { result: unlockBurnHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(Number(rewardCycle.value) + this.period + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); + + // Assert + expect(stackStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-stx-sig", + "lock-amount", + amountUstx.toString(), + "period", + this.period.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-stx sig auth-id ${this.authId} and period ${this.period}`; + } +} From 6f02aeb64414e057d93a44e00ea6795aaf7940d3 Mon Sep 17 00:00:00 2001 From: Nikos Baxevanis Date: Wed, 22 May 2024 08:27:17 +0200 Subject: [PATCH 0059/1400] feat(pox-4-tests): add check function delegate to PoxCommand-derived types - Separate success paths from failure paths to keep pox_Commands.ts focused on success cases only. This prevents the file from growing with out-of-scope cases. Note: This is a work in progress. --- .../tests/pox-4/err_Commands.ts | 74 +++++++++++++++++++ .../tests/pox-4/pox-4.stateful-prop.test.ts | 8 +- .../tests/pox-4/pox_Commands.ts | 70 +----------------- .../pox-4/pox_StackStxAuthCommand_Err.ts | 2 +- .../tests/pox-4/pox_StackStxSigCommand_Err.ts | 2 +- 5 files changed, 86 insertions(+), 70 deletions(-) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts new file mode 100644 index 0000000000..2e4259f740 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -0,0 +1,74 @@ +import fc from "fast-check"; +import { PoxCommand, Stacker, Stub, StxAddress, Wallet } from "./pox_CommandModel"; +import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; +import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; +import { Simnet } from "@hirosystems/clarinet-sdk"; + +export function ErrCommands( + wallets: Map, + stackers: Map, + network: Simnet, +): fc.Arbitrary[] { + const cmds = [ + // StackStxAuthCommand_Err + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this: StackStxAuthCommand_Err, model: Readonly): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + return ( + model.stackingMinimum > 0 && !stacker.isStacking && + !stacker.hasDelegated + ); + }, + 123, + ) + ), + // StackStxSigCommand_Err + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this: StackStxSigCommand_Err, model: Readonly): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + return ( + model.stackingMinimum > 0 && !stacker.isStacking && + !stacker.hasDelegated + ); + }, + 123, + ) + ), + ]; + + return cmds; +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index 15f4d4ddc0..29c57187b3 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -16,6 +16,7 @@ import { StackingClient } from "@stacks/stacking"; import fc from "fast-check"; import { PoxCommands } from "./pox_Commands.ts"; +import { ErrCommands } from "./err_Commands.ts"; import fs from "fs"; import path from "path"; @@ -139,9 +140,14 @@ it("statefully interacts with PoX-4", async () => { simnet.setEpoch("3.0"); + const successPath = PoxCommands(model.wallets, model.stackers, sut.network); + const failurePath = ErrCommands(model.wallets, model.stackers, sut.network); + fc.assert( fc.property( - PoxCommands(model.wallets, model.stackers, sut.network), + // More on size: https://github.com/dubzzz/fast-check/discussions/2978 + // More on cmds: https://github.com/dubzzz/fast-check/discussions/3026 + fc.commands(successPath.concat(failurePath), { size: "xsmall" }), (cmds) => { const initialState = () => ({ model: model, real: sut }); fc.modelRun(initialState, cmds); diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts index 44ff2c8ea0..d44ef23b22 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts @@ -1,11 +1,9 @@ import fc from "fast-check"; -import { Real, Stacker, Stub, StxAddress, Wallet } from "./pox_CommandModel"; +import { PoxCommand, Stacker, StxAddress, Wallet } from "./pox_CommandModel"; import { GetStackingMinimumCommand } from "./pox_GetStackingMinimumCommand"; import { GetStxAccountCommand } from "./pox_GetStxAccountCommand"; import { StackStxSigCommand } from "./pox_StackStxSigCommand"; -import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; import { StackStxAuthCommand } from "./pox_StackStxAuthCommand"; -import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { DelegateStxCommand } from "./pox_DelegateStxCommand"; import { DelegateStackStxCommand } from "./pox_DelegateStackStxCommand"; import { Simnet } from "@hirosystems/clarinet-sdk"; @@ -29,7 +27,7 @@ export function PoxCommands( wallets: Map, stackers: Map, network: Simnet, -): fc.Arbitrary>> { +): fc.Arbitrary[] { const cmds = [ // GetStackingMinimumCommand fc.record({ @@ -85,36 +83,6 @@ export function PoxCommands( r.margin, ) ), - // StackStxAuthCommand_Err - fc.record({ - wallet: fc.constantFrom(...wallets.values()), - authId: fc.nat(), - period: fc.integer({ min: 1, max: 12 }), - margin: fc.integer({ min: 1, max: 9 }), - }).map(( - r: { - wallet: Wallet; - authId: number; - period: number; - margin: number; - }, - ) => - new StackStxAuthCommand_Err( - r.wallet, - r.authId, - r.period, - r.margin, - function (this: StackStxAuthCommand_Err, model: Readonly): boolean { - const stacker = model.stackers.get(this.wallet.stxAddress)!; - console.log("I in StackStxAuthCommand_Err stacker", stacker); - return ( - model.stackingMinimum > 0 && !stacker.isStacking && - !stacker.hasDelegated - ); - }, - 123, - ) - ), // StackExtendAuthCommand fc .record({ @@ -137,36 +105,6 @@ export function PoxCommands( r.currentCycle, ), ), - // StackStxSigCommand_Err - fc.record({ - wallet: fc.constantFrom(...wallets.values()), - authId: fc.nat(), - period: fc.integer({ min: 1, max: 12 }), - margin: fc.integer({ min: 1, max: 9 }), - }).map(( - r: { - wallet: Wallet; - authId: number; - period: number; - margin: number; - }, - ) => - new StackStxSigCommand_Err( - r.wallet, - r.authId, - r.period, - r.margin, - function (this: StackStxSigCommand_Err, model: Readonly): boolean { - const stacker = model.stackers.get(this.wallet.stxAddress)!; - console.log("I in StackStxSigCommand_Err stacker", stacker); - return ( - model.stackingMinimum > 0 && !stacker.isStacking && - !stacker.hasDelegated - ); - }, - 123, - ) - ), // StackExtendSigCommand fc .record({ @@ -511,9 +449,7 @@ export function PoxCommands( ), ]; - // More on size: https://github.com/dubzzz/fast-check/discussions/2978 - // More on cmds: https://github.com/dubzzz/fast-check/discussions/3026 - return fc.commands(cmds, { size: "xsmall" }); + return cmds; } export const REWARD_CYCLE_LENGTH = 1050; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts index ad310fef9a..e1d0a2e113 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts @@ -26,7 +26,6 @@ export class StackStxAuthCommand_Err implements PoxCommand { readonly authId: number; readonly period: number; readonly margin: number; - readonly checkFunc: CheckFunc; readonly errorCode: number; @@ -39,6 +38,7 @@ export class StackStxAuthCommand_Err implements PoxCommand { * @param margin - Multiplier for minimum required uSTX to stack so that each * Stacker locks a different amount of uSTX across test runs. * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. */ constructor( wallet: Wallet, diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts index 4c5f7ce149..db6af5c5ba 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts @@ -26,7 +26,6 @@ export class StackStxSigCommand_Err implements PoxCommand { readonly authId: number; readonly period: number; readonly margin: number; - readonly checkFunc: CheckFunc; readonly errorCode: number; @@ -39,6 +38,7 @@ export class StackStxSigCommand_Err implements PoxCommand { * @param margin - Multiplier for minimum required uSTX to stack so that each * Stacker locks a different amount of uSTX across test runs. * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. */ constructor( wallet: Wallet, From b3e09c4435143898137cff8109e6795134aa2bfd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 22 May 2024 09:01:22 -0400 Subject: [PATCH 0060/1400] CRC: general improvements Signed-off-by: Jacinta Ferrant --- libsigner/src/libsigner.rs | 2 +- libsigner/src/v0/messages.rs | 4 ++-- libsigner/src/v1/messages.rs | 4 ++-- stacks-signer/src/client/stackerdb.rs | 6 +++--- stacks-signer/src/config.rs | 10 ++++++++-- stacks-signer/src/lib.rs | 2 +- stacks-signer/src/v0/signer.rs | 19 ++++++++++--------- stacks-signer/src/v1/signer.rs | 19 ++++++++++--------- 8 files changed, 37 insertions(+), 29 deletions(-) diff --git a/libsigner/src/libsigner.rs b/libsigner/src/libsigner.rs index e08aa20daa..43d8e5b687 100644 --- a/libsigner/src/libsigner.rs +++ b/libsigner/src/libsigner.rs @@ -68,7 +68,7 @@ pub trait MessageSlotID: Sized + Eq + Hash + Debug + Copy { /// The contract identifier for the message slot in stacker db fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier; /// All possible Message Slot values - fn all() -> Vec; + fn all() -> &'static [Self]; } /// A trait for signer messages used in signer communciation diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 177e293dd4..4d32253f2e 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -74,8 +74,8 @@ impl MessageSlotIDTrait for MessageSlotID { fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) } - fn all() -> Vec { - MessageSlotID::ALL.to_vec() + fn all() -> &'static [Self] { + MessageSlotID::ALL } } diff --git a/libsigner/src/v1/messages.rs b/libsigner/src/v1/messages.rs index 0ec6ee7614..c345cc3e13 100644 --- a/libsigner/src/v1/messages.rs +++ b/libsigner/src/v1/messages.rs @@ -104,8 +104,8 @@ impl MessageSlotIDTrait for MessageSlotID { fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) } - fn all() -> Vec { - MessageSlotID::ALL.to_vec() + fn all() -> &'static [Self] { + MessageSlotID::ALL } } diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 9dd3ea5be7..e5ccb5a89f 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -52,7 +52,7 @@ pub struct StackerDB { reward_cycle: u64, } -impl From<&SignerConfig> for StackerDB { +impl From<&SignerConfig> for StackerDB { fn from(config: &SignerConfig) -> Self { Self::new( &config.node_host, @@ -64,7 +64,7 @@ impl From<&SignerConfig> for StackerDB { } } -impl StackerDB { +impl StackerDB { /// Create a new StackerDB client pub fn new( host: &str, @@ -77,7 +77,7 @@ impl StackerDB { for msg_id in M::all() { let session = StackerDBSession::new(host, msg_id.stacker_db_contract(is_mainnet, reward_cycle)); - signers_message_stackerdb_sessions.insert(msg_id, session); + signers_message_stackerdb_sessions.insert(*msg_id, session); } Self { diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index f36ae91c26..76000d58a8 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::fmt::Display; +use std::fmt::{Debug, Display}; use std::fs; use std::net::{SocketAddr, ToSocketAddrs}; use std::path::PathBuf; @@ -152,7 +152,7 @@ pub struct SignerConfig { } /// The parsed configuration for the signer -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct GlobalConfig { /// endpoint to the stacks node pub node_host: String, @@ -396,6 +396,12 @@ impl Display for GlobalConfig { } } +impl Debug for GlobalConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.config_to_log_string()) + } +} + /// Helper function for building a signer config for each provided signer private key #[allow(clippy::too_many_arguments)] pub fn build_signer_config_tomls( diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 2c303eefd0..78b6f8249a 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -120,7 +120,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner let runloop = RunLoop::new(config); let mut signer: RunLoopSigner = libsigner::Signer::new(runloop, ev, cmd_recv, res_send); - let running_signer = signer.spawn(endpoint).unwrap(); + let running_signer = signer.spawn(endpoint).expect("Failed to spawn signer"); SpawnedSigner { running_signer, cmd_send, diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 682c1433c1..bad520a6ce 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -96,18 +96,23 @@ impl SignerTrait for Signer { return; } debug!("{self}: Processing event: {event:?}"); + let Some(event) = event else { + // No event. Do nothing. + debug!("{self}: No event received"); + return; + }; match event { - Some(SignerEvent::BlockValidationResponse(block_validate_response)) => { + SignerEvent::BlockValidationResponse(block_validate_response) => { debug!("{self}: Received a block proposal result from the stacks node..."); self.handle_block_validate_response(block_validate_response) } - Some(SignerEvent::SignerMessages(_signer_set, messages)) => { + SignerEvent::SignerMessages(_signer_set, messages) => { debug!( "{self}: Received {} messages from the other signers. Ignoring...", messages.len() ); } - Some(SignerEvent::MinerMessages(messages, _)) => { + SignerEvent::MinerMessages(messages, _) => { debug!( "{self}: Received {} messages from the miner", messages.len(); @@ -118,16 +123,12 @@ impl SignerTrait for Signer { } } } - Some(SignerEvent::StatusCheck) => { + SignerEvent::StatusCheck => { debug!("{self}: Received a status check event."); } - Some(SignerEvent::NewBurnBlock(height)) => { + SignerEvent::NewBurnBlock(height) => { debug!("{self}: Receved a new burn block event for block height {height}") } - None => { - // No event. Do nothing. - debug!("{self}: No event received") - } } } diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index f9d9e0083a..e053786058 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -192,8 +192,13 @@ impl SignerTrait for Signer { } self.refresh_coordinator(); debug!("{self}: Processing event: {event:?}"); + let Some(event) = event else { + // No event. Do nothing. + debug!("{self}: No event received"); + return; + }; match event { - Some(SignerEvent::BlockValidationResponse(block_validate_response)) => { + SignerEvent::BlockValidationResponse(block_validate_response) => { debug!("{self}: Received a block proposal result from the stacks node..."); self.handle_block_validate_response( stacks_client, @@ -202,7 +207,7 @@ impl SignerTrait for Signer { current_reward_cycle, ) } - Some(SignerEvent::SignerMessages(signer_set, messages)) => { + SignerEvent::SignerMessages(signer_set, messages) => { if *signer_set != self.stackerdb_manager.get_signer_set() { debug!("{self}: Received a signer message for a reward cycle that does not belong to this signer. Ignoring..."); return; @@ -213,7 +218,7 @@ impl SignerTrait for Signer { ); self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); } - Some(SignerEvent::MinerMessages(messages, miner_key)) => { + SignerEvent::MinerMessages(messages, miner_key) => { let miner_key = PublicKey::try_from(miner_key.to_bytes_compressed().as_slice()) .expect("FATAL: could not convert from StacksPublicKey to PublicKey"); self.miner_key = Some(miner_key); @@ -229,16 +234,12 @@ impl SignerTrait for Signer { ); self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); } - Some(SignerEvent::StatusCheck) => { + SignerEvent::StatusCheck => { debug!("{self}: Received a status check event.") } - Some(SignerEvent::NewBurnBlock(height)) => { + SignerEvent::NewBurnBlock(height) => { debug!("{self}: Receved a new burn block event for block height {height}") } - None => { - // No event. Do nothing. - debug!("{self}: No event received") - } } } From 04e7fe683289f95fa852ad800b3014b9fdd5939b Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Wed, 22 May 2024 18:32:24 +0300 Subject: [PATCH 0061/1400] Remove command tracking from the command's `run` method The command run tracking will be added to the command's `check` method. --- .../tests/pox-4/pox_StackStxAuthCommand_Err.ts | 1 - .../tests/pox-4/pox_StackStxSigCommand_Err.ts | 1 - 2 files changed, 2 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts index e1d0a2e113..35212e0320 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts @@ -59,7 +59,6 @@ export class StackStxAuthCommand_Err implements PoxCommand { check = (model: Readonly): boolean => this.checkFunc.call(this, model); run(model: Stub, real: Real): void { - model.trackCommandRun(this.constructor.name); const currentRewCycle = currentCycle(real.network); // The maximum amount of uSTX that can be used (per tx) with this signer diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts index db6af5c5ba..58092109f0 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts @@ -59,7 +59,6 @@ export class StackStxSigCommand_Err implements PoxCommand { check = (model: Readonly): boolean => this.checkFunc.call(this, model); run(model: Stub, real: Real): void { - model.trackCommandRun(this.constructor.name); const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); const burnBlockHeight = Number( cvToValue(burnBlockHeightCV as ClarityValue), From a243d3bdf4c8cd51001edf048c359d6ce0a58ae0 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Wed, 22 May 2024 18:36:52 +0300 Subject: [PATCH 0062/1400] Pass the incremented burn height when calling `stack-stx` If not passed incremented, the call will result in an `ERR_INVALID_START_BURN_HEIGHT` when being sent at the limit between 2 cycles. --- .../tests/pox-4/pox_StackStxAuthCommand_Err.ts | 2 +- .../tests/pox-4/pox_StackStxSigCommand_Err.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts index 35212e0320..6889e89917 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts @@ -112,7 +112,7 @@ export class StackStxAuthCommand_Err implements PoxCommand { // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) poxAddressToTuple(this.wallet.btcAddress), // (start-burn-ht uint) - Cl.uint(burnBlockHeight), + Cl.uint(burnBlockHeight + 1), // (lock-period uint) Cl.uint(this.period), // (signer-sig (optional (buff 65))) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts index 58092109f0..f9c2cdc8d4 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts @@ -110,7 +110,7 @@ export class StackStxSigCommand_Err implements PoxCommand { // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) poxAddressToTuple(this.wallet.btcAddress), // (start-burn-ht uint) - Cl.uint(burnBlockHeight), + Cl.uint(burnBlockHeight + 1), // (lock-period uint) Cl.uint(this.period), // (signer-sig (optional (buff 65))) From 7a0c1fd96e3f8ba4392720501b252f343ca9c582 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Wed, 22 May 2024 19:01:19 +0300 Subject: [PATCH 0063/1400] Add the unhappy path cases for `StackStxXCommand_Err` This commit: - adds 6 unhappy path cases for the `stack-stx` PoX-4 method, 3 for each signing method (authorization or signature) - adds a dictionary that contains the PoX-4 error names and the error codes - adds the command run tracking inside the `check` method, resulting in displaying all the paths hit and the number of times. --- .../tests/pox-4/err_Commands.ts | 207 ++++++++++++++++-- 1 file changed, 194 insertions(+), 13 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 2e4259f740..08a911e68a 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -1,16 +1,27 @@ import fc from "fast-check"; -import { PoxCommand, Stacker, Stub, StxAddress, Wallet } from "./pox_CommandModel"; +import { + PoxCommand, + Stacker, + Stub, + StxAddress, + Wallet, +} from "./pox_CommandModel"; import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { Simnet } from "@hirosystems/clarinet-sdk"; +const POX_4_ERRORS = { + ERR_STACKING_ALREADY_STACKED: 3, + ERR_STACKING_ALREADY_DELEGATED: 20, +}; + export function ErrCommands( wallets: Map, stackers: Map, network: Simnet, ): fc.Arbitrary[] { const cmds = [ - // StackStxAuthCommand_Err + // StackStxAuthCommand_Err_Stacking_Already_Stacked_1 fc.record({ wallet: fc.constantFrom(...wallets.values()), authId: fc.nat(), @@ -29,17 +40,102 @@ export function ErrCommands( r.authId, r.period, r.margin, - function (this: StackStxAuthCommand_Err, model: Readonly): boolean { + function ( + this: StackStxAuthCommand_Err, + model: Readonly, + ): boolean { const stacker = model.stackers.get(this.wallet.stxAddress)!; - return ( - model.stackingMinimum > 0 && !stacker.isStacking && + if ( + model.stackingMinimum > 0 && + stacker.isStacking && !stacker.hasDelegated - ); + ) { + model.trackCommandRun( + "StackStxAuthCommand_Err_Stacking_Already_Stacked_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, + ) + ), + // StackStxAuthCommand_Err_Stacking_Already_Stacked_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function ( + this: StackStxAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.hasDelegated + ) { + model.trackCommandRun( + "StackStxAuthCommand_Err_Stacking_Already_Stacked_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, + ) + ), + // StackStxAuthCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function ( + this: StackStxAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + !stacker.isStacking && + stacker.hasDelegated + ) { + model.trackCommandRun( + "StackStxAuthCommand_Err_Stacking_Already_Delegated", + ); + return true; + } else return false; }, - 123, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ) ), - // StackStxSigCommand_Err + // StackStxSigCommand_Err_Stacking_Already_Stacked_1 fc.record({ wallet: fc.constantFrom(...wallets.values()), authId: fc.nat(), @@ -58,14 +154,99 @@ export function ErrCommands( r.authId, r.period, r.margin, - function (this: StackStxSigCommand_Err, model: Readonly): boolean { + function ( + this: StackStxSigCommand_Err, + model: Readonly, + ): boolean { const stacker = model.stackers.get(this.wallet.stxAddress)!; - return ( - model.stackingMinimum > 0 && !stacker.isStacking && + if ( + model.stackingMinimum > 0 && + stacker.isStacking && !stacker.hasDelegated - ); + ) { + model.trackCommandRun( + "StackStxSigCommand_Err_Stacking_Already_Stacked_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, + ) + ), + // StackStxSigCommand_Err_Stacking_Already_Stacked_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function ( + this: StackStxSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.hasDelegated + ) { + model.trackCommandRun( + "StackStxSigCommand_Err_Stacking_Already_Stacked_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, + ) + ), + // StackStxSigCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function ( + this: StackStxSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + !stacker.isStacking && + stacker.hasDelegated + ) { + model.trackCommandRun( + "StackStxSigCommand_Err_Stacking_Already_Delegated", + ); + return true; + } else return false; }, - 123, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ) ), ]; From bd159e349616ce9312ba84a81afd657564d4985a Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Wed, 22 May 2024 19:02:27 +0300 Subject: [PATCH 0064/1400] Remove `StackStxXCommand.ts` from statistics They needed to be excluded as we have removed the command run tracking from the run method. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index 29c57187b3..d5f7245c25 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -109,7 +109,9 @@ it("statefully interacts with PoX-4", async () => { // commands are run at least once. const statistics = fs.readdirSync(path.join(__dirname)).filter((file) => file.startsWith("pox_") && file.endsWith(".ts") && - file !== "pox_CommandModel.ts" && file !== "pox_Commands.ts" + file !== "pox_CommandModel.ts" && file !== "pox_Commands.ts" && + file !== "pox_StackStxAuthCommand_Err.ts" && + file !== "pox_StackStxSigCommand_Err.ts" ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. // This is the initial state of the model. @@ -155,7 +157,7 @@ it("statefully interacts with PoX-4", async () => { ), { // Defines the number of test iterations to run; default is 100. - numRuns: 1000, + numRuns: 20000, // Adjusts the level of detail in test reports. Default is 0 (minimal). // At level 2, reports include extensive details, helpful for deep // debugging. This includes not just the failing case and its seed, but From 6cd331a44071b592c32d8bc91150dc2744e4274a Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 22 May 2024 20:11:01 +0300 Subject: [PATCH 0065/1400] migrate db to schema_2, fix typo and remove unused import --- stackslib/src/chainstate/nakamoto/mod.rs | 8 +++- .../src/chainstate/nakamoto/signer_set.rs | 1 - stackslib/src/chainstate/nakamoto/tenure.rs | 44 +++++++++++++++++-- stackslib/src/chainstate/stacks/db/mod.rs | 11 ++++- 4 files changed, 55 insertions(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 2cdf93eef5..bcc03cdeca 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -76,7 +76,7 @@ use crate::chainstate::burn::operations::{LeaderBlockCommitOp, LeaderKeyRegister use crate::chainstate::burn::{BlockSnapshot, SortitionHash}; use crate::chainstate::coordinator::{BlockEventDispatcher, Error}; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; -use crate::chainstate::nakamoto::tenure::NAKAMOTO_TENURES_SCHEMA; +use crate::chainstate::nakamoto::tenure::{NAKAMOTO_TENURES_SCHEMA_1, NAKAMOTO_TENURES_SCHEMA_2}; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::{POX_4_NAME, SIGNERS_UPDATE_STATE}; use crate::chainstate::stacks::db::{DBConfig as ChainstateConfig, StacksChainState}; @@ -144,7 +144,7 @@ lazy_static! { reward_set TEXT NOT NULL, PRIMARY KEY (index_block_hash) );"#.into(), - NAKAMOTO_TENURES_SCHEMA.into(), + NAKAMOTO_TENURES_SCHEMA_1.into(), r#" -- Table for Nakamoto block headers CREATE TABLE nakamoto_block_headers ( @@ -216,6 +216,10 @@ lazy_static! { UPDATE db_config SET version = "4"; "#.into(), ]; + + pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_2: Vec = vec![ + NAKAMOTO_TENURES_SCHEMA_2.into() + ]; } /// Matured miner reward schedules diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index e776ca41db..a7e8df6ed0 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -58,7 +58,6 @@ use crate::chainstate::burn::operations::{ }; use crate::chainstate::burn::{BlockSnapshot, SortitionHash}; use crate::chainstate::coordinator::{BlockEventDispatcher, Error}; -use crate::chainstate::nakamoto::tenure::NAKAMOTO_TENURES_SCHEMA; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::{ PoxVersions, RawRewardSetEntry, RewardSet, BOOT_TEST_POX_4_AGG_KEY_CONTRACT, diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 078e2514bf..fde669760d 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -118,8 +118,45 @@ use crate::util_lib::db::{ FromRow, }; -pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#" - -- Drop the existing table if it exists +pub static NAKAMOTO_TENURES_SCHEMA_1: &'static str = r#" + CREATE TABLE nakamoto_tenures ( + -- consensus hash of start-tenure block (i.e. the consensus hash of the sortition in which the miner's block-commit + -- was mined) + tenure_id_consensus_hash TEXT NOT NULL, + -- consensus hash of the previous tenure's start-tenure block + prev_tenure_id_consensus_hash TEXT NOT NULL, + -- consensus hash of the last-processed sortition + burn_view_consensus_hash TEXT NOT NULL, + -- whether or not this tenure was triggered by a sortition (as opposed to a tenure-extension). + -- this is equal to the `cause` field in a TenureChange + cause INETGER NOT NULL, + -- block hash of start-tenure block + block_hash TEXT NOT NULL, + -- block ID of this start block (this is the StacksBlockId of the above tenure_id_consensus_hash and block_hash) + block_id TEXT NOT NULL, + -- this field is the total number of _sortition-induced_ tenures in the chain history (including this tenure), + -- as of the _end_ of this block. A tenure can contain multiple TenureChanges; if so, then this + -- is the height of the _sortition-induced_ TenureChange that created it. + coinbase_height INTEGER NOT NULL, + -- number of blocks this tenure. + -- * for tenure-changes induced by sortitions, this is the number of blocks in the previous tenure + -- * for tenure-changes induced by extension, this is the number of blocks in the current tenure so far. + num_blocks_confirmed INTEGER NOT NULL, + -- this is the ith tenure transaction in its respective Nakamoto chain history. + tenure_index INTEGER NOT NULL, + + PRIMARY KEY(burn_view_consensus_hash,tenure_index) + ); + CREATE INDEX nakamoto_tenures_by_block_id ON nakamoto_tenures(block_id); + CREATE INDEX nakamoto_tenures_by_tenure_id ON nakamoto_tenures(tenure_id_consensus_hash); + CREATE INDEX nakamoto_tenures_by_block_and_consensus_hashes ON nakamoto_tenures(tenure_id_consensus_hash,block_hash); + CREATE INDEX nakamoto_tenures_by_burn_view_consensus_hash ON nakamoto_tenures(burn_view_consensus_hash); + CREATE INDEX nakamoto_tenures_by_tenure_index ON nakamoto_tenures(tenure_index); + CREATE INDEX nakamoto_tenures_by_parent ON nakamoto_tenures(tenure_id_consensus_hash,prev_tenure_id_consensus_hash); +"#; + +pub static NAKAMOTO_TENURES_SCHEMA_2: &'static str = r#" + -- Drop the nakamoto_tenures table if it exists DROP TABLE IF EXISTS nakamoto_tenures; CREATE TABLE nakamoto_tenures ( @@ -147,8 +184,7 @@ pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#" num_blocks_confirmed INTEGER NOT NULL, -- this is the ith tenure transaction in its respective Nakamoto chain history. tenure_index INTEGER NOT NULL, - -- schema version field - schema_version INTEGER NOT NULL DEFAULT 1, + PRIMARY KEY(burn_view_consensus_hash,tenure_index) ) STRICT; CREATE INDEX nakamoto_tenures_by_block_id ON nakamoto_tenures(block_id); diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 374fc11ae1..f10a87dccc 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -53,7 +53,7 @@ use crate::chainstate::burn::operations::{ use crate::chainstate::burn::{ConsensusHash, ConsensusHashExtensions}; use crate::chainstate::nakamoto::{ HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, - NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, + NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, NAKAMOTO_CHAINSTATE_SCHEMA_2, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -668,7 +668,7 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &'static str = "4"; +pub const CHAINSTATE_VERSION: &'static str = "5"; const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[ "PRAGMA foreign_keys = ON;", @@ -1079,6 +1079,13 @@ impl StacksChainState { tx.execute_batch(cmd)?; } } + "4" => { + // migrate to nakamoto 2 + info!("Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { + tx.execute_batch(cmd)?; + } + } _ => { error!( "Invalid chain state database: expected version = {}, got {}", From 2416c4b78858ae7e43feb1a80c9439e194f769d0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 15:39:03 -0400 Subject: [PATCH 0066/1400] feat: better API for querying preprocessed reward sets (e.g. by reward cycle ID, by sortition ID, etc.) --- stackslib/src/chainstate/burn/db/sortdb.rs | 190 +++++++++++++++++---- 1 file changed, 155 insertions(+), 35 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e3802d6ec1..eee027b72b 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -83,7 +83,6 @@ use crate::core::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, STACKS_EPOCH_MAX, }; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; -use crate::net::Error as NetError; use crate::util_lib::db::{ db_mkdirs, get_ancestor_block_hash, opt_u64_to_sql, query_count, query_row, query_row_columns, query_row_panic, query_rows, sql_pragma, table_exists, tx_begin_immediate, tx_busy_handler, @@ -3542,20 +3541,104 @@ impl SortitionDB { sortition_id: &SortitionId, rc_info: &RewardCycleInfo, ) -> Result<(), db_error> { - let sql = "INSERT INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; + let sql = "REPLACE INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; let rc_json = serde_json::to_string(rc_info).map_err(db_error::SerializationError)?; let args: &[&dyn ToSql] = &[sortition_id, &rc_json]; sort_tx.execute(sql, args)?; Ok(()) } + /// Get the prepare phase start sortition ID of a reward cycle + fn inner_get_prepare_phase_start_sortition_id_for_reward_cycle( + index_conn: &SortitionDBConn, + pox_constants: &PoxConstants, + first_block_height: u64, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result { + let prepare_phase_start = pox_constants + .reward_cycle_to_block_height(first_block_height, reward_cycle_id) + .saturating_sub(pox_constants.prepare_length.into()); + + let first_sortition = get_ancestor_sort_id(index_conn, prepare_phase_start, tip)? + .ok_or_else(|| { + error!( + "Could not find prepare phase start ancestor while fetching reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_start_height" => prepare_phase_start + ); + db_error::NotFoundError + })?; + Ok(first_sortition) + } + + pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( + &self, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result { + Self::inner_get_prepare_phase_start_sortition_id_for_reward_cycle( + &self.index_conn(), + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) + } + + /// Get the reward set for a reward cycle, given the reward cycle tip. + /// Return the reward cycle info for this reward cycle + fn inner_get_preprocessed_reward_set_for_reward_cycle( + index_conn: &SortitionDBConn, + pox_constants: &PoxConstants, + first_block_height: u64, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result<(RewardCycleInfo, SortitionId), db_error> { + let first_sortition = Self::inner_get_prepare_phase_start_sortition_id_for_reward_cycle( + index_conn, + pox_constants, + first_block_height, + tip, + reward_cycle_id, + )?; + info!("Fetching preprocessed reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_start_sortition_id" => %first_sortition, + ); + + Ok(( + Self::get_preprocessed_reward_set(index_conn, &first_sortition)? + .ok_or(db_error::NotFoundError)?, + first_sortition, + )) + } + + pub fn get_preprocessed_reward_set_for_reward_cycle( + &self, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result<(RewardCycleInfo, SortitionId), db_error> { + Self::inner_get_preprocessed_reward_set_for_reward_cycle( + &self.index_conn(), + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) + } + /// Figure out the reward cycle for `tip` and lookup the preprocessed /// reward set (if it exists) for the active reward cycle during `tip` - pub fn get_preprocessed_reward_set_of( - &self, + fn inner_get_preprocessed_reward_set_of( + index_conn: &SortitionDBConn, + pox_constants: &PoxConstants, + first_block_height: u64, tip: &SortitionId, - ) -> Result, db_error> { - let tip_sn = SortitionDB::get_block_snapshot(self.conn(), tip)?.ok_or_else(|| { + ) -> Result { + let tip_sn = SortitionDB::get_block_snapshot(index_conn, tip)?.ok_or_else(|| { error!( "Could not find snapshot for sortition while fetching reward set"; "tip_sortition_id" => %tip, @@ -3563,38 +3646,30 @@ impl SortitionDB { db_error::NotFoundError })?; - let reward_cycle_id = self - .pox_constants - .block_height_to_reward_cycle(self.first_block_height, tip_sn.block_height) + let reward_cycle_id = pox_constants + .block_height_to_reward_cycle(first_block_height, tip_sn.block_height) .expect("FATAL: stored snapshot with block height < first_block_height"); - let prepare_phase_start = self - .pox_constants - .reward_cycle_to_block_height(self.first_block_height, reward_cycle_id) - .saturating_sub(self.pox_constants.prepare_length.into()); + Self::inner_get_preprocessed_reward_set_for_reward_cycle( + index_conn, + pox_constants, + first_block_height, + tip, + reward_cycle_id, + ) + .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) + } - let first_sortition = get_ancestor_sort_id( + pub fn get_preprocessed_reward_set_of( + &self, + tip: &SortitionId, + ) -> Result { + Ok(Self::inner_get_preprocessed_reward_set_of( &self.index_conn(), - prepare_phase_start, - &tip_sn.sortition_id, - )? - .ok_or_else(|| { - error!( - "Could not find prepare phase start ancestor while fetching reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_start_height" => prepare_phase_start - ); - db_error::NotFoundError - })?; - - info!("Fetching preprocessed reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_start_sortition_id" => %first_sortition, - ); - - Self::get_preprocessed_reward_set(self.conn(), &first_sortition) + &self.pox_constants, + self.first_block_height, + tip, + )?) } /// Get a pre-processed reawrd set. @@ -3617,7 +3692,7 @@ impl SortitionDB { } pub fn get_preprocessed_reward_set_size(&self, tip: &SortitionId) -> Option { - let Ok(Some(reward_info)) = &self.get_preprocessed_reward_set_of(&tip) else { + let Ok(reward_info) = &self.get_preprocessed_reward_set_of(&tip) else { return None; }; let Some(reward_set) = reward_info.known_selected_anchor_block() else { @@ -3842,6 +3917,46 @@ impl<'a> SortitionDBConn<'a> { serde_json::from_str(&pox_addrs_json).expect("FATAL: failed to decode pox payout JSON"); Ok(pox_addrs) } + + pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( + &self, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result { + SortitionDB::inner_get_prepare_phase_start_sortition_id_for_reward_cycle( + self, + &self.context.pox_constants, + self.context.first_block_height, + tip, + reward_cycle_id, + ) + } + + pub fn get_preprocessed_reward_set_for_reward_cycle( + &self, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result<(RewardCycleInfo, SortitionId), db_error> { + SortitionDB::inner_get_preprocessed_reward_set_for_reward_cycle( + self, + &self.context.pox_constants, + self.context.first_block_height, + tip, + reward_cycle_id, + ) + } + + pub fn get_preprocessed_reward_set_of( + &self, + tip: &SortitionId, + ) -> Result { + SortitionDB::inner_get_preprocessed_reward_set_of( + self, + &self.context.pox_constants, + self.context.first_block_height, + tip, + ) + } } // High-level functions used by ChainsCoordinator @@ -4559,12 +4674,14 @@ impl SortitionDB { Ok(ret) } + /// DO NOT CALL FROM CONSENSUS CODE pub fn index_handle_at_tip<'a>(&'a self) -> SortitionHandleConn<'a> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); self.index_handle(&sortition_id) } /// Open a tx handle at the burn chain tip + /// DO NOT CALL FROM CONSENSUS CODE pub fn tx_begin_at_tip<'a>(&'a mut self) -> SortitionHandleTx<'a> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); self.tx_handle_begin(&sortition_id).unwrap() @@ -4574,6 +4691,7 @@ impl SortitionDB { /// Returns Ok(Some(tip info)) on success /// Returns Ok(None) if there are no Nakamoto blocks in this tip /// Returns Err(..) on other DB error + /// DO NOT CALL FROM CONSENSUS CODE pub fn get_canonical_nakamoto_tip_hash_and_height( conn: &Connection, tip: &BlockSnapshot, @@ -4598,6 +4716,7 @@ impl SortitionDB { } /// Get the canonical Stacks chain tip -- this gets memoized on the canonical burn chain tip. + /// DO NOT CALL FROM CONSENSUS CODE pub fn get_canonical_stacks_chain_tip_hash_and_height( conn: &Connection, ) -> Result<(ConsensusHash, BlockHeaderHash, u64), db_error> { @@ -4625,6 +4744,7 @@ impl SortitionDB { } /// Get the canonical Stacks chain tip -- this gets memoized on the canonical burn chain tip. + /// DO NOT CALL FROM CONSENSUS CODE pub fn get_canonical_stacks_chain_tip_hash( conn: &Connection, ) -> Result<(ConsensusHash, BlockHeaderHash), db_error> { From 753f9c97b7f605c76bb2517ffa59f2bf237ef45c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 15:39:38 -0400 Subject: [PATCH 0067/1400] chore: fix 4813 by re-trying to store a reward set if it has 'selected-and-unknown' status --- stackslib/src/chainstate/coordinator/mod.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 96eae44641..4c201bfb2c 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -120,7 +120,7 @@ impl NewBurnchainBlockStatus { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct RewardCycleInfo { pub reward_cycle: u64, pub anchor_status: PoxAnchorBlockStatus, @@ -845,9 +845,21 @@ pub fn get_reward_cycle_info( .expect("FATAL: no start-of-prepare-phase sortition"); let mut tx = sort_db.tx_begin()?; - if SortitionDB::get_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id)? - .is_none() - { + let preprocessed_reward_set = + SortitionDB::get_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id)?; + let need_to_store = if let Some(reward_cycle_info) = preprocessed_reward_set { + // overwrite if we have an unknown anchor block + !reward_cycle_info.is_reward_info_known() + } else { + true + }; + if need_to_store { + test_debug!( + "Store preprocessed reward set for cycle {} (prepare start sortition {}): {:?}", + prev_reward_cycle, + &first_prepare_sn.sortition_id, + &reward_cycle_info + ); SortitionDB::store_preprocessed_reward_set( &mut tx, &first_prepare_sn.sortition_id, From ad1094bd0b377e1e69ddcefb24f880dd0024ccdd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 15:40:24 -0400 Subject: [PATCH 0068/1400] chore: blow away aggregate key verification code (it won't be used until a later date, and it's in the git history so we can fetch it later) --- stackslib/src/chainstate/nakamoto/mod.rs | 104 +---------------------- 1 file changed, 1 insertion(+), 103 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d8e67b3bd4..21e2022f95 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -496,16 +496,6 @@ impl NakamotoBlockHeader { Ok(()) } - /// Verify the block header against an aggregate public key - pub fn verify_threshold_signer( - &self, - signer_aggregate: &Point, - signature: &ThresholdSignature, - ) -> bool { - let message = self.signer_signature_hash().0; - signature.verify(signer_aggregate, &message) - } - /// Verify the block header against the list of signer signatures /// /// Validate against: @@ -1799,7 +1789,6 @@ impl NakamotoChainState { db_handle: &mut SortitionHandleConn, staging_db_tx: &NakamotoStagingBlocksTx, headers_conn: &Connection, - _aggregate_public_key: Option<&Point>, reward_set: RewardSet, ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); @@ -1847,21 +1836,6 @@ impl NakamotoChainState { return Ok(false); }; - // TODO: epoch gate to verify aggregate signature - // let schnorr_signature = &block.header.signer_signature.0; - // if !db_handle.expects_signer_signature( - // &block.header.consensus_hash, - // schnorr_signature, - // &block.header.signer_signature_hash().0, - // aggregate_public_key, - // )? { - // let msg = format!( - // "Received block, but the signer signature does not match the active stacking cycle" - // ); - // warn!("{}", msg; "aggregate_key" => %aggregate_public_key); - // return Err(ChainstateError::InvalidStacksBlock(msg)); - // } - if let Err(e) = block.header.verify_signer_signatures(&reward_set) { warn!("Received block, but the signer signatures are invalid"; "block_id" => %block.block_id(), @@ -1881,83 +1855,6 @@ impl NakamotoChainState { Ok(true) } - /// Get the aggregate public key for the given block from the signers-voting contract - pub(crate) fn load_aggregate_public_key( - sortdb: &SortitionDB, - sort_handle: &SH, - chainstate: &mut StacksChainState, - for_burn_block_height: u64, - at_block_id: &StacksBlockId, - warn_if_not_found: bool, - ) -> Result { - // Get the current reward cycle - let Some(rc) = sort_handle.pox_constants().block_height_to_reward_cycle( - sort_handle.first_burn_block_height(), - for_burn_block_height, - ) else { - // This should be unreachable, but we'll return an error just in case. - let msg = format!( - "BUG: Failed to determine reward cycle of burn block height: {}.", - for_burn_block_height - ); - warn!("{msg}"); - return Err(ChainstateError::InvalidStacksBlock(msg)); - }; - - test_debug!( - "get-approved-aggregate-key at block {}, cycle {}", - at_block_id, - rc - ); - match chainstate.get_aggregate_public_key_pox_4(sortdb, at_block_id, rc)? { - Some(key) => Ok(key), - None => { - // this can happen for a whole host of reasons - if warn_if_not_found { - warn!( - "Failed to get aggregate public key"; - "block_id" => %at_block_id, - "reward_cycle" => rc, - ); - } - Err(ChainstateError::InvalidStacksBlock( - "Failed to get aggregate public key".into(), - )) - } - } - } - - /// Get the aggregate public key for a block. - /// TODO: The block at which the aggregate public key is queried needs to be better defined. - /// See https://github.com/stacks-network/stacks-core/issues/4109 - pub fn get_aggregate_public_key( - chainstate: &mut StacksChainState, - sortdb: &SortitionDB, - sort_handle: &SH, - block: &NakamotoBlock, - ) -> Result { - let block_sn = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &block.header.consensus_hash)? - .ok_or(ChainstateError::DBError(DBError::NotFoundError))?; - let aggregate_key_block_header = - Self::get_canonical_block_header(chainstate.db(), sortdb)?.unwrap(); - let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), block_sn.block_height)? - .ok_or(ChainstateError::InvalidStacksBlock( - "Failed to get epoch ID".into(), - ))? - .epoch_id; - - let aggregate_public_key = Self::load_aggregate_public_key( - sortdb, - sort_handle, - chainstate, - block_sn.block_height, - &aggregate_key_block_header.index_block_hash(), - epoch_id >= StacksEpochId::Epoch30, - )?; - Ok(aggregate_public_key) - } - /// Return the total ExecutionCost consumed during the tenure up to and including /// `block` pub fn get_total_tenure_cost_at( @@ -2112,6 +2009,7 @@ impl NakamotoChainState { } /// Load the canonical Stacks block header (either epoch-2 rules or Nakamoto) + /// DO NOT CALL FROM CONSENSUS CODE pub fn get_canonical_block_header( chainstate_conn: &Connection, sortdb: &SortitionDB, From da60d066825c06f8afe21bcb6e533a05e03b4fff Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:29:30 -0400 Subject: [PATCH 0069/1400] chore: API sync --- .../chainstate/nakamoto/coordinator/tests.rs | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 14ba87292f..63651b3946 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -636,7 +636,7 @@ fn test_nakamoto_chainstate_getters() { // no tenures yet assert!( - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_tx.sqlite()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_tx) .unwrap() .is_none() ); @@ -769,7 +769,7 @@ fn test_nakamoto_chainstate_getters() { // we now have a tenure, and it confirms the last epoch2 block let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_tx.sqlite()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_tx) .unwrap() .unwrap(); assert_eq!(highest_tenure.coinbase_height, 12); @@ -797,7 +797,7 @@ fn test_nakamoto_chainstate_getters() { .is_some()); assert!(NakamotoChainState::check_tenure_continuity( chainstate.db(), - sort_tx.sqlite(), + &sort_tx, &blocks[0].header.consensus_hash, &blocks[1].header, ) @@ -969,7 +969,7 @@ fn test_nakamoto_chainstate_getters() { // we now have a new highest tenure let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_tx.sqlite()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_tx) .unwrap() .unwrap(); assert_eq!(highest_tenure.coinbase_height, 13); @@ -994,14 +994,14 @@ fn test_nakamoto_chainstate_getters() { .is_none()); assert!(NakamotoChainState::check_tenure_continuity( chainstate.db(), - sort_tx.sqlite(), + &sort_tx, &new_blocks[0].header.consensus_hash, &new_blocks[1].header, ) .unwrap()); assert!(!NakamotoChainState::check_tenure_continuity( chainstate.db(), - sort_tx.sqlite(), + &sort_tx, &blocks[0].header.consensus_hash, &new_blocks[1].header, ) @@ -1613,7 +1613,7 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); (tenure, tip) @@ -1705,7 +1705,7 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); (tenure, tip) @@ -1800,7 +1800,7 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); (tenure, tip) @@ -2001,7 +2001,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); (tenure, tip) From 67038a22890990c529b5df82dc168b0718765eb5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:29:40 -0400 Subject: [PATCH 0070/1400] chore: API sync --- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 21e2022f95..bae7fd3436 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2826,7 +2826,7 @@ impl NakamotoChainState { // this block is mined in the ongoing tenure. if !Self::check_tenure_continuity( chainstate_tx, - burn_dbconn.sqlite(), + burn_dbconn, &parent_ch, &block.header, )? { From 127165ad7659550120e229692606d1b82b822310 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:29:50 -0400 Subject: [PATCH 0071/1400] fix: search for the highest tenure from the block-processor's given sortition tip. Do not attempt to get the canonical stacks or burnchain tips. --- stackslib/src/chainstate/nakamoto/tenure.rs | 27 +++++++++++++-------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index c9e5c0cf59..b68950a875 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -556,17 +556,24 @@ impl NakamotoChainState { } /// Get the highest non-empty processed tenure on the canonical sortition history. - pub fn get_highest_nakamoto_tenure( + pub fn get_highest_nakamoto_tenure( headers_conn: &Connection, - sortdb_conn: &Connection, + sortdb_conn: &SH, ) -> Result, ChainstateError> { - // find the tenure for the Stacks chain tip - let (tip_ch, tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb_conn)?; - if tip_ch == FIRST_BURNCHAIN_CONSENSUS_HASH || tip_bhh == FIRST_STACKS_BLOCK_HASH { - // no chain tip, so no tenure - return Ok(None); + // NOTE: we do a *search* here in case the canonical Stacks pointer stored on the canonical + // sortition gets invalidated through a reorg. + let mut cursor = SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &sortdb_conn.tip())? + .ok_or(ChainstateError::NoSuchBlockError)?; + loop { + if let Some(tenure) = Self::get_highest_nakamoto_tenure_change_by_tenure_id( + headers_conn, + &cursor.consensus_hash, + )? { + return Ok(Some(tenure)); + } + cursor = SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &cursor.parent_sortition_id)? + .ok_or(ChainstateError::NoSuchBlockError)?; } - Self::get_nakamoto_tenure_change_by_tenure_id(headers_conn, &tip_ch) } /// Verify that a tenure change tx is a valid first-ever tenure change. It must connect to an @@ -857,9 +864,9 @@ impl NakamotoChainState { /// /// Returns Ok(bool) to indicate whether or not this block is in the same tenure as its parent. /// Returns Err(..) on DB error - pub(crate) fn check_tenure_continuity( + pub(crate) fn check_tenure_continuity( headers_conn: &Connection, - sortdb_conn: &Connection, + sortdb_conn: &SH, parent_ch: &ConsensusHash, block_header: &NakamotoBlockHeader, ) -> Result { From 9bc3d55b254a6cf0ea46c00f5730415c1307d548 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:30:18 -0400 Subject: [PATCH 0072/1400] feat: add a way to synthesize a reward set from a list of signers --- .../src/chainstate/nakamoto/test_signers.rs | 44 +++++++++++++++++-- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index a7e521c155..1a52b0a2c2 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -30,7 +30,9 @@ use rand::{CryptoRng, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; -use stacks_common::types::chainstate::{BlockHeaderHash, SortitionId, StacksBlockId, VRFSeed}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, +}; use stacks_common::util::hash::Hash160; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; @@ -52,6 +54,7 @@ use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::PoxAddress; +use crate::chainstate::stacks::boot::{NakamotoSignerEntry, PoxStartCycleInfo}; use crate::chainstate::stacks::db::*; use crate::chainstate::stacks::miner::*; use crate::chainstate::stacks::{ @@ -205,9 +208,6 @@ impl TestSigners { self.generate_aggregate_key(cycle); } - // TODO: epoch gate for aggregated signatures - // let signer_signature = self.sign_block_with_aggregate_key(&block); - let signer_signature = self.generate_block_signatures(&block); test_debug!( @@ -229,6 +229,42 @@ impl TestSigners { block.header.signer_signature = signatures; } + /// Synthesize a reward set from the signer for the purposes of signing and verifying blocks + /// later on + pub fn synthesize_reward_set(&self) -> RewardSet { + let mut signer_entries = vec![]; + let mut pox_addrs = vec![]; + for key in self.signer_keys.iter() { + let signing_key_vec = Secp256k1PublicKey::from_private(key).to_bytes_compressed(); + let mut signing_key = [0u8; 33]; + signing_key[0..33].copy_from_slice(&signing_key_vec[0..33]); + + let nakamoto_signer_entry = NakamotoSignerEntry { + signing_key, + stacked_amt: 100_000_000_000, + weight: 1, + }; + let pox_addr = PoxAddress::Standard( + StacksAddress { + version: AddressHashMode::SerializeP2PKH.to_version_testnet(), + bytes: Hash160::from_data(&nakamoto_signer_entry.signing_key), + }, + Some(AddressHashMode::SerializeP2PKH), + ); + signer_entries.push(nakamoto_signer_entry); + pox_addrs.push(pox_addr); + } + + RewardSet { + rewarded_addresses: pox_addrs, + start_cycle_state: PoxStartCycleInfo { + missed_reward_slots: vec![], + }, + signers: Some(signer_entries), + pox_ustx_threshold: Some(100_000_000_000), + } + } + /// Sign a Nakamoto block and generate a vec of signatures. The signatures will /// be ordered by the signer's public keys, but will not be checked against the /// reward set. From b8e85e38e808e305b1ba73ec12a2d2aafec402ee Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:30:46 -0400 Subject: [PATCH 0073/1400] chore: move unused code for loading the aggregate public key into a test module, where it is still required for test coverage --- .../src/chainstate/nakamoto/tests/mod.rs | 87 ++++++++++++++++++- 1 file changed, 84 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index dd36004ff4..9aab60ab9b 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -92,8 +92,89 @@ use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::Error as db_error; use crate::util_lib::strings::StacksString; +/// WSTS aggregate public keys are not shipping immediately in Nakamoto, but there is still a lot +/// of test coverage for it. The code here is preserved to keep these tests working until WSTS's +/// coordinator implementaiton is ready. +impl NakamotoChainState { + /// Get the aggregate public key for the given block from the signers-voting contract + pub(crate) fn load_aggregate_public_key( + sortdb: &SortitionDB, + sort_handle: &SH, + chainstate: &mut StacksChainState, + for_burn_block_height: u64, + at_block_id: &StacksBlockId, + warn_if_not_found: bool, + ) -> Result { + // Get the current reward cycle + let Some(rc) = sort_handle.pox_constants().block_height_to_reward_cycle( + sort_handle.first_burn_block_height(), + for_burn_block_height, + ) else { + // This should be unreachable, but we'll return an error just in case. + let msg = format!( + "BUG: Failed to determine reward cycle of burn block height: {}.", + for_burn_block_height + ); + warn!("{msg}"); + return Err(ChainstateError::InvalidStacksBlock(msg)); + }; + + test_debug!( + "get-approved-aggregate-key at block {}, cycle {}", + at_block_id, + rc + ); + match chainstate.get_aggregate_public_key_pox_4(sortdb, at_block_id, rc)? { + Some(key) => Ok(key), + None => { + // this can happen for a whole host of reasons + if warn_if_not_found { + warn!( + "Failed to get aggregate public key"; + "block_id" => %at_block_id, + "reward_cycle" => rc, + ); + } + Err(ChainstateError::InvalidStacksBlock( + "Failed to get aggregate public key".into(), + )) + } + } + } + + /// Get the aggregate public key for a block. + /// TODO: The block at which the aggregate public key is queried needs to be better defined. + /// See https://github.com/stacks-network/stacks-core/issues/4109 + pub fn get_aggregate_public_key( + chainstate: &mut StacksChainState, + sortdb: &SortitionDB, + sort_handle: &SH, + block: &NakamotoBlock, + ) -> Result { + let block_sn = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &block.header.consensus_hash)? + .ok_or(ChainstateError::DBError(db_error::NotFoundError))?; + let aggregate_key_block_header = + Self::get_canonical_block_header(chainstate.db(), sortdb)?.unwrap(); + let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), block_sn.block_height)? + .ok_or(ChainstateError::InvalidStacksBlock( + "Failed to get epoch ID".into(), + ))? + .epoch_id; + + let aggregate_public_key = Self::load_aggregate_public_key( + sortdb, + sort_handle, + chainstate, + block_sn.block_height, + &aggregate_key_block_header.index_block_hash(), + epoch_id >= StacksEpochId::Epoch30, + )?; + Ok(aggregate_public_key) + } +} + impl<'a> NakamotoStagingBlocksConnRef<'a> { - #[cfg(test)] pub fn get_all_blocks_in_tenure( &self, tenure_id_consensus_hash: &ConsensusHash, @@ -1764,7 +1845,7 @@ pub fn test_get_highest_nakamoto_tenure() { &stacks_ch, &stacks_bhh, stacks_height ); let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); @@ -1802,7 +1883,7 @@ pub fn test_get_highest_nakamoto_tenure() { // new tip doesn't include the last two tenures let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); let last_tenure_change = &all_tenure_changes[2]; From 41c50fa3e8b631d2f325e6497f2e0f02683e048c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:31:13 -0400 Subject: [PATCH 0074/1400] chore: API sync --- stackslib/src/chainstate/nakamoto/tests/node.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 354bacb7af..fc425d0580 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -583,7 +583,6 @@ impl TestStacksNode { let reward_set = sortdb .get_preprocessed_reward_set_of(&sort_tip) .expect("Failed to get reward cycle info") - .expect("Failed to get reward cycle info") .known_selected_anchor_block_owned() .expect("Expected a reward set"); From f1d46570ae5e26d298bf437db6c4519cb7f63750 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:31:22 -0400 Subject: [PATCH 0075/1400] chore: fmt --- stackslib/src/chainstate/stacks/boot/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index d3e8a494de..01ca39be4a 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -40,7 +40,9 @@ use serde::Deserialize; use stacks_common::address::AddressHashMode; use stacks_common::codec::StacksMessageCodec; use stacks_common::types; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockId}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, StacksAddress, StacksBlockId, StacksPublicKey, +}; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; use wsts::curve::point::{Compressed, Point}; use wsts::curve::scalar::Scalar; From ffb572e7e6b176f0160744633372401d3f4496bf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:31:30 -0400 Subject: [PATCH 0076/1400] chore: doc epoch2-specific behavior --- stackslib/src/net/chat.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 52622d1e59..3037ac60d1 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -1628,7 +1628,7 @@ impl ConversationP2P { .map_err(|e| net_error::from(e))?; if cfg!(test) { - // make *sure* the behavior stays the same + // make *sure* the behavior stays the same in epoch 2 let original_blocks_inv_data: BlocksInvData = chainstate.get_blocks_inventory(&block_hashes)?; From f2fac094da51753a6b452e742808a8e0a215e7eb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:31:42 -0400 Subject: [PATCH 0077/1400] chore: use reward sets instead of aggregate public keys --- .../net/download/nakamoto/download_state_machine.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 77cf64dba6..c95dc6d5f3 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -38,9 +38,11 @@ use crate::chainstate::burn::db::sortdb::{ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, }; use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::RewardCycleInfo; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, }; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ Error as chainstate_error, StacksBlockHeader, TenureChangePayload, @@ -861,6 +863,7 @@ impl NakamotoDownloadStateMachine { "Peer {} has no inventory for reward cycle {}", naddr, reward_cycle ); + test_debug!("Peer {} has the following inventory data: {:?}", naddr, inv); continue; }; for (i, wt) in wanted_tenures.iter().enumerate() { @@ -1152,14 +1155,14 @@ impl NakamotoDownloadStateMachine { fn update_tenure_downloaders( &mut self, count: usize, - agg_public_keys: &BTreeMap>, + current_reward_sets: &BTreeMap, ) { self.tenure_downloads.make_tenure_downloaders( &mut self.tenure_download_schedule, &mut self.available_tenures, &mut self.tenure_block_ids, count, - agg_public_keys, + current_reward_sets, ) } @@ -1435,7 +1438,7 @@ impl NakamotoDownloadStateMachine { sortdb, sort_tip, chainstate, - &network.aggregate_public_keys, + &network.current_reward_sets, ) else { neighbor_rpc.add_dead(network, &naddr); continue; @@ -1500,7 +1503,7 @@ impl NakamotoDownloadStateMachine { max_count: usize, ) -> HashMap> { // queue up more downloaders - self.update_tenure_downloaders(max_count, &network.aggregate_public_keys); + self.update_tenure_downloaders(max_count, &network.current_reward_sets); // run all downloaders let new_blocks = self.tenure_downloads.run(network, &mut self.neighbor_rpc); From 3fff20e2f36f95fd6b61f17e3d8cd75d8ac9418f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:32:01 -0400 Subject: [PATCH 0078/1400] chore: use reward set signature verification --- .../download/nakamoto/tenure_downloader.rs | 95 +++++++++---------- 1 file changed, 46 insertions(+), 49 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 340fa717fd..a3586602e6 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -31,7 +31,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ @@ -41,6 +40,7 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, }; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ Error as chainstate_error, StacksBlockHeader, TenureChangePayload, @@ -129,8 +129,8 @@ impl fmt::Display for NakamotoTenureDownloadState { /// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); /// in this case, the end-block is the start-block of the ongoing tenure. /// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse -/// order. As blocks are found, their signer signatures will be validated against the aggregate -/// public key for this tenure; their hash-chain continuity will be validated against the start +/// order. As blocks are found, their signer signatures will be validated against the signer +/// public keys for this tenure; their hash-chain continuity will be validated against the start /// and end block hashes; their quantity will be validated against the tenure-change transaction /// in the end-block. /// @@ -149,10 +149,10 @@ pub struct NakamotoTenureDownloader { pub tenure_end_block_id: StacksBlockId, /// Address of who we're asking for blocks pub naddr: NeighborAddress, - /// Aggregate public key that signed the start-block of this tenure - pub start_aggregate_public_key: Point, - /// Aggregate public key that signed the end-block of this tenure - pub end_aggregate_public_key: Point, + /// Signer public keys that signed the start-block of this tenure, in reward cycle order + pub start_signer_keys: RewardSet, + /// Signer public keys that signed the end-block of this tenure + pub end_signer_keys: RewardSet, /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with /// this state machine. pub idle: bool, @@ -178,8 +178,8 @@ impl NakamotoTenureDownloader { tenure_start_block_id: StacksBlockId, tenure_end_block_id: StacksBlockId, naddr: NeighborAddress, - start_aggregate_public_key: Point, - end_aggregate_public_key: Point, + start_signer_keys: RewardSet, + end_signer_keys: RewardSet, ) -> Self { test_debug!( "Instantiate downloader to {} for tenure {}", @@ -191,8 +191,8 @@ impl NakamotoTenureDownloader { tenure_start_block_id, tenure_end_block_id, naddr, - start_aggregate_public_key, - end_aggregate_public_key, + start_signer_keys, + end_signer_keys, idle: false, state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), tenure_start_block: None, @@ -243,19 +243,18 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - // TODO: epoch-gated verify threshold or vec of signatures - // if !tenure_start_block - // .header - // .verify_threshold_signer(&self.start_aggregate_public_key) - // { - // // signature verification failed - // warn!("Invalid tenure-start block: bad signer signature"; - // "tenure_id" => %self.tenure_id_consensus_hash, - // "block.header.block_id" => %tenure_start_block.header.block_id(), - // "start_aggregate_public_key" => %self.start_aggregate_public_key, - // "state" => %self.state); - // return Err(NetError::InvalidMessage); - // } + if let Err(e) = tenure_start_block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + // signature verification failed + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } debug!( "Accepted tenure-start block for tenure {} block={}", @@ -370,19 +369,18 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - // TODO: epoch-gated verify threshold or vec of signatures - // if !tenure_end_block - // .header - // .verify_threshold_signer(&self.end_aggregate_public_key) - // { - // // bad signature - // warn!("Invalid tenure-end block: bad signer signature"; - // "tenure_id" => %self.tenure_id_consensus_hash, - // "block.header.block_id" => %tenure_end_block.header.block_id(), - // "end_aggregate_public_key" => %self.end_aggregate_public_key, - // "state" => %self.state); - // return Err(NetError::InvalidMessage); - // } + if let Err(e) = tenure_end_block + .header + .verify_signer_signatures(&self.end_signer_keys) + { + // bad signature + warn!("Invalid tenure-end block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } // extract the needful -- need the tenure-change payload (which proves that the tenure-end // block is the tenure-start block for the next tenure) and the parent block ID (which is @@ -472,18 +470,17 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidMessage); } - // TODO: epoch-gated verify threshold or vec of signatures - // if !block - // .header - // .verify_threshold_signer(&self.start_aggregate_public_key) - // { - // warn!("Invalid block: bad signer signature"; - // "tenure_id" => %self.tenure_id_consensus_hash, - // "block.header.block_id" => %block.header.block_id(), - // "start_aggregate_public_key" => %self.start_aggregate_public_key, - // "state" => %self.state); - // return Err(NetError::InvalidMessage); - // } + if let Err(e) = block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } expected_block_id = &block.header.parent_block_id; count += 1; From 0318fb28735bb562b6c45b778edf5b927a51d4b9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:32:19 -0400 Subject: [PATCH 0079/1400] chore: use reward set signature verification --- .../nakamoto/tenure_downloader_set.rs | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 357b588e8a..0100eb0ecd 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -31,16 +31,17 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, }; use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, }; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ Error as chainstate_error, StacksBlockHeader, TenureChangePayload, @@ -418,7 +419,7 @@ impl NakamotoTenureDownloaderSet { available: &mut HashMap>, tenure_block_ids: &HashMap, count: usize, - agg_public_keys: &BTreeMap>, + current_reward_cycles: &BTreeMap, ) { test_debug!("schedule: {:?}", schedule); test_debug!("available: {:?}", &available); @@ -479,19 +480,25 @@ impl NakamotoTenureDownloaderSet { test_debug!("Neighbor {} does not serve tenure {}", &naddr, ch); continue; }; - let Some(Some(start_agg_pubkey)) = agg_public_keys.get(&tenure_info.start_reward_cycle) + let Some(Some(start_reward_set)) = current_reward_cycles + .get(&tenure_info.start_reward_cycle) + .map(|cycle_info| cycle_info.known_selected_anchor_block()) else { test_debug!( - "Cannot fetch tenure-start block due to no known aggregate public key: {:?}", + "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", + tenure_info.start_reward_cycle, &tenure_info ); schedule.pop_front(); continue; }; - let Some(Some(end_agg_pubkey)) = agg_public_keys.get(&tenure_info.end_reward_cycle) + let Some(Some(end_reward_set)) = current_reward_cycles + .get(&tenure_info.end_reward_cycle) + .map(|cycle_info| cycle_info.known_selected_anchor_block()) else { test_debug!( - "Cannot fetch tenure-end block due to no known aggregate public key: {:?}", + "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", + tenure_info.end_reward_cycle, &tenure_info ); schedule.pop_front(); @@ -499,12 +506,10 @@ impl NakamotoTenureDownloaderSet { }; test_debug!( - "Download tenure {} (start={}, end={}) with aggregate keys {}, {} (rc {},{})", + "Download tenure {} (start={}, end={}) (rc {},{})", &ch, &tenure_info.start_block_id, &tenure_info.end_block_id, - &start_agg_pubkey, - &end_agg_pubkey, tenure_info.start_reward_cycle, tenure_info.end_reward_cycle ); @@ -513,8 +518,8 @@ impl NakamotoTenureDownloaderSet { tenure_info.start_block_id.clone(), tenure_info.end_block_id.clone(), naddr.clone(), - start_agg_pubkey.clone(), - end_agg_pubkey.clone(), + start_reward_set.clone(), + end_reward_set.clone(), ); test_debug!("Request tenure {} from neighbor {}", ch, &naddr); From 4f38a047a1c7a2ef4fab77339b70be0abb2d1c8b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:32:33 -0400 Subject: [PATCH 0080/1400] chore: use reward set signature verification --- .../nakamoto/tenure_downloader_unconfirmed.rs | 120 +++++++++--------- 1 file changed, 57 insertions(+), 63 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index 7a22b4ef2b..4d4d4dee47 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -31,16 +31,17 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, }; use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::RewardCycleInfo; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, }; +use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ Error as chainstate_error, StacksBlockHeader, TenureChangePayload, @@ -107,10 +108,10 @@ pub struct NakamotoUnconfirmedTenureDownloader { pub state: NakamotoUnconfirmedDownloadState, /// Address of who we're asking pub naddr: NeighborAddress, - /// Aggregate public key of the highest confirmed tenure - pub confirmed_aggregate_public_key: Option, - /// Aggregate public key of the unconfirmed (ongoing) tenure - pub unconfirmed_aggregate_public_key: Option, + /// reward set of the highest confirmed tenure + pub confirmed_signer_keys: Option, + /// reward set of the unconfirmed (ongoing) tenure + pub unconfirmed_signer_keys: Option, /// Block ID of this node's highest-processed block. /// We will not download any blocks lower than this, if it's set. pub highest_processed_block_id: Option, @@ -133,8 +134,8 @@ impl NakamotoUnconfirmedTenureDownloader { Self { state: NakamotoUnconfirmedDownloadState::GetTenureInfo, naddr, - confirmed_aggregate_public_key: None, - unconfirmed_aggregate_public_key: None, + confirmed_signer_keys: None, + unconfirmed_signer_keys: None, highest_processed_block_id, highest_processed_block_height: None, tenure_tip: None, @@ -185,7 +186,7 @@ impl NakamotoUnconfirmedTenureDownloader { local_sort_tip: &BlockSnapshot, chainstate: &StacksChainState, remote_tenure_tip: RPCGetTenureInfo, - agg_pubkeys: &BTreeMap>, + current_reward_sets: &BTreeMap, ) -> Result<(), NetError> { if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { return Err(NetError::InvalidState); @@ -297,21 +298,24 @@ impl NakamotoUnconfirmedTenureDownloader { ) .expect("FATAL: sortition from before system start"); - // get aggregate public keys for the unconfirmed tenure and highest-complete tenure sortitions - let Some(Some(confirmed_aggregate_public_key)) = - agg_pubkeys.get(&parent_tenure_rc).cloned() + // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions + let Some(Some(confirmed_reward_set)) = current_reward_sets + .get(&parent_tenure_rc) + .map(|cycle_info| cycle_info.known_selected_anchor_block()) else { warn!( - "No aggregate public key for confirmed tenure {} (rc {})", + "No signer public keys for confirmed tenure {} (rc {})", &parent_local_tenure_sn.consensus_hash, parent_tenure_rc ); return Err(NetError::InvalidState); }; - let Some(Some(unconfirmed_aggregate_public_key)) = agg_pubkeys.get(&tenure_rc).cloned() + let Some(Some(unconfirmed_reward_set)) = current_reward_sets + .get(&tenure_rc) + .map(|cycle_info| cycle_info.known_selected_anchor_block()) else { warn!( - "No aggregate public key for confirmed tenure {} (rc {})", + "No signer public keys for unconfirmed tenure {} (rc {})", &local_tenure_sn.consensus_hash, tenure_rc ); return Err(NetError::InvalidState); @@ -339,14 +343,12 @@ impl NakamotoUnconfirmedTenureDownloader { } test_debug!( - "Will validate unconfirmed blocks with ({},{}) and ({},{})", - &confirmed_aggregate_public_key, + "Will validate unconfirmed blocks with reward sets in ({},{})", parent_tenure_rc, - &unconfirmed_aggregate_public_key, tenure_rc ); - self.confirmed_aggregate_public_key = Some(confirmed_aggregate_public_key); - self.unconfirmed_aggregate_public_key = Some(unconfirmed_aggregate_public_key); + self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); + self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); self.tenure_tip = Some(remote_tenure_tip); Ok(()) @@ -370,25 +372,22 @@ impl NakamotoUnconfirmedTenureDownloader { return Err(NetError::InvalidState); }; - // TODO: epoch-gated loading of aggregate key - // let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - // else { - // return Err(NetError::InvalidState); - // }; - - // stacker signature has to match the current aggregate public key - // TODO: epoch-gated verify threshold or vec of signatures - // if !unconfirmed_tenure_start_block - // .header - // .verify_threshold_signer(unconfirmed_aggregate_public_key) - // { - // warn!("Invalid tenure-start block: bad signer signature"; - // "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - // "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - // "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, - // "state" => %self.state); - // return Err(NetError::InvalidMessage); - // } + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + + // stacker signature has to match the current reward set + if let Err(e) = unconfirmed_tenure_start_block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } // block has to match the expected hash if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { @@ -437,11 +436,9 @@ impl NakamotoUnconfirmedTenureDownloader { return Err(NetError::InvalidState); }; - // TODO: epoch-gated load aggregate key - // let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - // else { - // return Err(NetError::InvalidState); - // }; + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; if tenure_blocks.is_empty() { // nothing to do @@ -459,18 +456,17 @@ impl NakamotoUnconfirmedTenureDownloader { "block_id" => %block.header.block_id()); return Err(NetError::InvalidMessage); } - // TODO: epoch-gated verify threshold or vec of signatures - // if !block - // .header - // .verify_threshold_signer(unconfirmed_aggregate_public_key) - // { - // warn!("Invalid block: bad signer signature"; - // "tenure_id" => %tenure_tip.consensus_hash, - // "block.header.block_id" => %block.header.block_id(), - // "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, - // "state" => %self.state); - // return Err(NetError::InvalidMessage); - // } + if let Err(e) = block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } // we may or may not need the tenure-start block for the unconfirmed tenure. But if we // do, make sure it's valid, and it's the last block we receive. @@ -616,12 +612,10 @@ impl NakamotoUnconfirmedTenureDownloader { else { return Err(NetError::InvalidState); }; - let Some(confirmed_aggregate_public_key) = self.confirmed_aggregate_public_key.as_ref() - else { + let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { return Err(NetError::InvalidState); }; - let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - else { + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { return Err(NetError::InvalidState); }; @@ -634,8 +628,8 @@ impl NakamotoUnconfirmedTenureDownloader { unconfirmed_tenure.winning_block_id.clone(), unconfirmed_tenure_start_block.header.block_id(), self.naddr.clone(), - confirmed_aggregate_public_key.clone(), - unconfirmed_aggregate_public_key.clone(), + confirmed_signer_keys.clone(), + unconfirmed_signer_keys.clone(), ) .with_tenure_end_block(unconfirmed_tenure_start_block.clone()); @@ -723,7 +717,7 @@ impl NakamotoUnconfirmedTenureDownloader { sortdb: &SortitionDB, local_sort_tip: &BlockSnapshot, chainstate: &StacksChainState, - agg_pubkeys: &BTreeMap>, + current_reward_sets: &BTreeMap, ) -> Result>, NetError> { match &self.state { NakamotoUnconfirmedDownloadState::GetTenureInfo => { @@ -735,7 +729,7 @@ impl NakamotoUnconfirmedTenureDownloader { local_sort_tip, chainstate, remote_tenure_info, - agg_pubkeys, + current_reward_sets, )?; Ok(None) } From b0100bc078e28f0b5dac26075f3a468bbd76a3fc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:32:45 -0400 Subject: [PATCH 0081/1400] chore: document post-nakamoto usage of aggregate public key --- stackslib/src/net/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index bd064774c5..1cead0306a 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2021,6 +2021,7 @@ pub mod test { /// What services should this peer support? pub services: u16, /// aggregate public key to use + /// (NOTE: will be used post-Nakamoto) pub aggregate_public_key: Option, pub test_stackers: Option>, pub test_signers: Option, From 3de7c89a3cd35d6412e9528576c94e3d1dbed22b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:32:58 -0400 Subject: [PATCH 0082/1400] feat: cache reward sets by reward cycle and sortition ID, instead of aggregate public keys --- stackslib/src/net/p2p.rs | 155 ++++++++++++++++++++++++--------------- 1 file changed, 95 insertions(+), 60 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index f853bb795a..821d4dbc1d 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -36,7 +36,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use {mio, url}; use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; @@ -45,7 +44,7 @@ use crate::chainstate::burn::db::sortdb::{BlockHeaderCache, SortitionDB}; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::{ static_get_canonical_affirmation_map, static_get_heaviest_affirmation_map, - static_get_stacks_tip_affirmation_map, + static_get_stacks_tip_affirmation_map, RewardCycleInfo, }; use crate::chainstate::stacks::boot::MINERS_NAME; use crate::chainstate::stacks::db::StacksChainState; @@ -259,13 +258,10 @@ pub struct PeerNetwork { /// In epoch 2.x, this is the same as the tip block ID /// In nakamoto, this is the block ID of the first block in the current tenure pub tenure_start_block_id: StacksBlockId, - /// The aggregate public keys of each witnessed reward cycle. - /// Only active during epoch 3.x and beyond. - /// Gets refreshed on each new Stacks block arrival, which deals with burnchain forks. - /// Stored in a BTreeMap because we often need to query the last or second-to-last reward cycle - /// aggregate public key, and we need to determine whether or not to load new reward cycles' - /// keys. - pub aggregate_public_keys: BTreeMap>, + /// The reward sets of the current and past reward cycle. + /// Needed to validate blocks, which are signed by a threshold of stackers + pub current_reward_sets: BTreeMap, + pub current_reward_set_ids: BTreeMap, // information about the state of the network's anchor blocks pub heaviest_affirmation_map: AffirmationMap, @@ -476,7 +472,8 @@ impl PeerNetwork { stacks_tip_sn: None, parent_stacks_tip: (ConsensusHash([0x00; 20]), BlockHeaderHash([0x00; 32]), 0), tenure_start_block_id: StacksBlockId([0x00; 32]), - aggregate_public_keys: BTreeMap::new(), + current_reward_sets: BTreeMap::new(), + current_reward_set_ids: BTreeMap::new(), peerdb: peerdb, atlasdb: atlasdb, @@ -5430,58 +5427,100 @@ impl PeerNetwork { )) } - /// Refresh our view of the aggregate public keys - /// Returns a list of (reward-cycle, option(pubkey)) pairs. - /// An option(pubkey) is defined for all reward cycles, but for epochs 2.4 and earlier, it will - /// be None. - fn find_new_aggregate_public_keys( + /// Refresh our view of the last two reward cycles + fn refresh_reward_cycles( &mut self, sortdb: &SortitionDB, tip_sn: &BlockSnapshot, - chainstate: &mut StacksChainState, - stacks_tip_block_id: &StacksBlockId, - ) -> Result)>, net_error> { - let sort_tip_rc = self + ) -> Result<(), net_error> { + let cur_rc = self .burnchain .block_height_to_reward_cycle(tip_sn.block_height) .expect("FATAL: sortition from before system start"); - let next_agg_pubkey_rc = self - .aggregate_public_keys - .last_key_value() - .map(|(rc, _)| rc.saturating_add(1)) - .unwrap_or(0); - let mut new_agg_pubkeys: Vec<_> = (next_agg_pubkey_rc..=sort_tip_rc) - .filter_map(|key_rc| { - let ih = sortdb.index_handle(&tip_sn.sortition_id); - let agg_pubkey_opt = if self.get_current_epoch().epoch_id < StacksEpochId::Epoch25 { - None - } else { - test_debug!( - "Try to get aggregate public key for reward cycle {}", - key_rc + + let prev_rc = cur_rc.saturating_sub(1); + + // keyed by both rc and sortition ID in case there's a bitcoin fork -- we'd want the + // canonical reward set to be loaded + let cur_rc_sortition_id = sortdb + .get_prepare_phase_start_sortition_id_for_reward_cycle(&tip_sn.sortition_id, cur_rc)?; + let prev_rc_sortition_id = sortdb + .get_prepare_phase_start_sortition_id_for_reward_cycle(&tip_sn.sortition_id, prev_rc)?; + + for (rc, sortition_id) in [ + (prev_rc, prev_rc_sortition_id), + (cur_rc, cur_rc_sortition_id), + ] + .into_iter() + { + if let Some(sort_id) = self.current_reward_set_ids.get(&rc) { + if sort_id == &sortition_id { + continue; + } + } + let Ok((reward_cycle_info, reward_cycle_sort_id)) = sortdb + .get_preprocessed_reward_set_for_reward_cycle(&tip_sn.sortition_id, rc) + .map_err(|e| { + warn!( + "Failed to load reward set for cycle {} ({}): {:?}", + rc, &sortition_id, &e ); - NakamotoChainState::load_aggregate_public_key( - sortdb, - &ih, - chainstate, - self.burnchain.reward_cycle_to_block_height(key_rc), - &stacks_tip_block_id, - false, + e + }) + else { + // NOTE: this should never be reached + continue; + }; + if !reward_cycle_info.is_reward_info_known() { + // haven't yet processed the anchor block, so don't store + test_debug!("Reward cycle info for cycle {} at sortition {} expects the PoX anchor block, so will not cache", rc, &reward_cycle_sort_id); + continue; + } + + test_debug!( + "Reward cycle info for cycle {} at sortition {} is {:?}", + rc, + &reward_cycle_sort_id, + &reward_cycle_info + ); + self.current_reward_sets.insert(rc, reward_cycle_info); + self.current_reward_set_ids.insert(rc, reward_cycle_sort_id); + } + + // free memory + if self.current_reward_sets.len() > 3 { + self.current_reward_sets.retain(|old_rc, _| { + if (*old_rc).saturating_add(1) < prev_rc { + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + } + let Some(old_sortition_id) = self.current_reward_set_ids.get(old_rc) else { + // shouldn't happen + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + }; + let Ok(prepare_phase_sort_id) = sortdb + .get_prepare_phase_start_sortition_id_for_reward_cycle( + &tip_sn.sortition_id, + *old_rc, ) - .ok() + else { + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; }; - if agg_pubkey_opt.is_none() { - return None; + if prepare_phase_sort_id != *old_sortition_id { + // non-canonical reward cycle info + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; } - Some((key_rc, agg_pubkey_opt)) - }) - .collect(); - - if new_agg_pubkeys.len() == 0 && self.aggregate_public_keys.len() == 0 { - // special case -- we're before epoch 3.0, so don't waste time doing this again - new_agg_pubkeys.push((sort_tip_rc, None)); + true + }); } - Ok(new_agg_pubkeys) + Ok(()) } /// Refresh view of burnchain, if needed. @@ -5511,14 +5550,13 @@ impl PeerNetwork { != self.burnchain_tip.canonical_stacks_tip_consensus_hash || burnchain_tip_changed || stacks_tip_changed; + + if stacks_tip_changed || burnchain_tip_changed { + self.refresh_reward_cycles(sortdb, &canonical_sn)?; + } + let mut ret: HashMap> = HashMap::new(); - let aggregate_public_keys = self.find_new_aggregate_public_keys( - sortdb, - &canonical_sn, - chainstate, - &new_stacks_tip_block_id, - )?; let (parent_stacks_tip, tenure_start_block_id, stacks_tip_sn) = if stacks_tip_changed { let stacks_tip_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_tip.0)?; @@ -5692,9 +5730,6 @@ impl PeerNetwork { self.stacks_tip = stacks_tip; self.stacks_tip_sn = stacks_tip_sn; self.parent_stacks_tip = parent_stacks_tip; - for (key_rc, agg_pubkey_opt) in aggregate_public_keys { - self.aggregate_public_keys.insert(key_rc, agg_pubkey_opt); - } self.tenure_start_block_id = tenure_start_block_id; Ok(ret) From dc43a273d5414b58dbc3d408a9cb2a57a16029bb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:33:28 -0400 Subject: [PATCH 0083/1400] chore: API sync; delete dead code (it's in the git history now) --- stackslib/src/net/relay.rs | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 7f4f1847a9..0acc48244d 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -30,7 +30,6 @@ use stacks_common::types::chainstate::{BurnchainHeaderHash, PoxId, SortitionId, use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView}; use crate::chainstate::burn::db::sortdb::{ @@ -723,26 +722,11 @@ impl Relayer { ); let config = chainstate.config(); - - // TODO: epoch gate to verify with aggregate key - // let Ok(aggregate_public_key) = - // NakamotoChainState::get_aggregate_public_key(chainstate, &sortdb, sort_handle, &block) - // else { - // warn!("Failed to get aggregate public key. Will not store or relay"; - // "stacks_block_hash" => %block.header.block_hash(), - // "consensus_hash" => %block.header.consensus_hash, - // "burn_height" => block.header.chain_length, - // "sortition_height" => block_sn.block_height, - // ); - // return Ok(false); - // }; - - // TODO: epoch gate to use signatures vec let tip = block_sn.sortition_id; let reward_info = match sortdb.get_preprocessed_reward_set_of(&tip) { - Ok(Some(x)) => x, - Ok(None) => { + Ok(x) => x, + Err(db_error::NotFoundError) => { error!("No RewardCycleInfo found for tip {}", tip); return Err(chainstate_error::PoxNoRewardCycle); } @@ -763,7 +747,6 @@ impl Relayer { sort_handle, &staging_db_tx, headers_conn, - None, reward_set, )?; staging_db_tx.commit()?; From 4d0a1a37e6a8aedb88f353141ac5f6b896a49b2d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:33:48 -0400 Subject: [PATCH 0084/1400] chore: use reward sets instead of aggregate public keys --- stackslib/src/net/tests/download/nakamoto.rs | 125 ++++++++++++++----- 1 file changed, 92 insertions(+), 33 deletions(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 1e76cd1853..47dabd176e 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -92,7 +92,7 @@ fn test_nakamoto_tenure_downloader() { let private_key = StacksPrivateKey::new(); let mut test_signers = TestSigners::new(vec![]); - let aggregate_public_key = test_signers.aggregate_public_key.clone(); + let reward_set = test_signers.synthesize_reward_set(); let tenure_start_header = NakamotoBlockHeader { version: 1, @@ -116,7 +116,6 @@ fn test_nakamoto_tenure_downloader() { cause: TenureChangeCause::BlockFound, pubkey_hash: Hash160([0x02; 20]), }; - use stacks_common::types::net::PeerAddress; let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); @@ -231,8 +230,8 @@ fn test_nakamoto_tenure_downloader() { tenure_start_block.header.block_id(), next_tenure_start_block.header.block_id(), naddr.clone(), - aggregate_public_key.clone(), - aggregate_public_key.clone(), + reward_set.clone(), + reward_set.clone(), ); // must be first block @@ -365,7 +364,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { let tip_ch = peer.network.stacks_tip.0.clone(); let parent_tip_ch = peer.network.parent_stacks_tip.0.clone(); - let agg_pubkeys = peer.network.aggregate_public_keys.clone(); + let current_reward_sets = peer.network.current_reward_sets.clone(); let unconfirmed_tenure = peer .chainstate() @@ -444,10 +443,22 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(tip_block_id)); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); let tenure_tip = RPCGetTenureInfo { consensus_hash: peer.network.stacks_tip.0.clone(), @@ -472,7 +483,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -507,10 +518,22 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(mid_tip_block_id)); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); @@ -537,7 +560,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -596,10 +619,22 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(mid_tip_block_id)); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); @@ -626,7 +661,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -684,10 +719,22 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // serve all of the unconfirmed blocks in one shot. { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); @@ -714,7 +761,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -751,10 +798,22 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // bad block signature { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); @@ -781,7 +840,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); @@ -1091,7 +1150,7 @@ fn test_make_tenure_downloaders() { assert_eq!(tip.block_height, 51); let test_signers = TestSigners::new(vec![]); - let agg_pubkeys = peer.network.aggregate_public_keys.clone(); + let current_reward_sets = peer.network.current_reward_sets.clone(); // test load_wanted_tenures() { @@ -1794,7 +1853,7 @@ fn test_make_tenure_downloaders() { &mut available, &tenure_block_ids, 6, - &agg_pubkeys, + ¤t_reward_sets, ); // made all 6 downloaders @@ -1832,7 +1891,7 @@ fn test_make_tenure_downloaders() { &mut available, &tenure_block_ids, 12, - &agg_pubkeys, + ¤t_reward_sets, ); // only made 4 downloaders got created From 004f3d61c48daa700edc6657aa34fb611504c92f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 16:33:57 -0400 Subject: [PATCH 0085/1400] chore: API sync --- stackslib/src/net/tests/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index a9534b6d29..45ad71590d 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -736,7 +736,7 @@ impl NakamotoBootPlan { let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_db.conn()) + NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) .unwrap() .unwrap(); (tenure, tip) @@ -811,7 +811,7 @@ impl NakamotoBootPlan { let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let tenure = NakamotoChainState::get_highest_nakamoto_tenure( chainstate.db(), - sort_db.conn(), + &sort_db.index_handle_at_tip(), ) .unwrap() .unwrap(); From 6e301e3e77f71b2a1046247cc915276a6963d614 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 17:38:04 -0400 Subject: [PATCH 0086/1400] fix: load the signer reward cycle for the block based on the fact that the block in reward cycle N at reward cycle index 0 was signed by the signers of reward cycle N - 1 --- stackslib/src/chainstate/burn/db/sortdb.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index eee027b72b..7f8153ce84 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3646,8 +3646,12 @@ impl SortitionDB { db_error::NotFoundError })?; + // NOTE: the .saturating_sub(1) is necessary because the reward set is calculated in epoch + // 2.5 and lower at reward cycle index 1, not 0. This correction ensures that the last + // block is checked against the signers who were active just before the new reward set is + // calculated. let reward_cycle_id = pox_constants - .block_height_to_reward_cycle(first_block_height, tip_sn.block_height) + .block_height_to_reward_cycle(first_block_height, tip_sn.block_height.saturating_sub(1)) .expect("FATAL: stored snapshot with block height < first_block_height"); Self::inner_get_preprocessed_reward_set_for_reward_cycle( From 1f1c8dc5b7dec2e7e25c5e21934bee772a795ece Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 17:38:34 -0400 Subject: [PATCH 0087/1400] fix: check the *ongoing* tenure, not the last-started tenure --- .../chainstate/nakamoto/coordinator/tests.rs | 42 +++++++++++-------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 63651b3946..b7c0bb5ba9 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -406,7 +406,7 @@ fn replay_reward_cycle( block.clone(), None, ) - .unwrap(); + .unwrap_or(false); if accepted { test_debug!("Accepted Nakamoto block {block_id}"); peer.coord.handle_new_nakamoto_stacks_block().unwrap(); @@ -1612,10 +1612,12 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let tenure = NakamotoChainState::get_ongoing_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; assert_eq!(highest_tenure.tenure_id_consensus_hash, tip.consensus_hash); @@ -1704,10 +1706,12 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let tenure = NakamotoChainState::get_ongoing_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; assert_eq!(highest_tenure.tenure_id_consensus_hash, tip.consensus_hash); @@ -1799,10 +1803,12 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let tenure = NakamotoChainState::get_ongoing_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; assert_eq!(highest_tenure.tenure_id_consensus_hash, tip.consensus_hash); @@ -2000,10 +2006,12 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let tenure = NakamotoChainState::get_ongoing_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; From a99c09802c9fe306c313c73f607b8a471def2ade Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 17:38:51 -0400 Subject: [PATCH 0088/1400] feat: add query API for the ongoing tenure in a given sortition (be it an extension of a previously-started tenure, or a newly-started tenure), and clarify that the existing API for getting the highest Nakamoto tenure only pertains to the highest *started* tenure (not extended) --- stackslib/src/chainstate/nakamoto/tenure.rs | 52 +++++++++++++++++++-- 1 file changed, 48 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index b68950a875..6238e2905a 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -539,6 +539,17 @@ impl NakamotoChainState { Ok(tenure_opt) } + /// Get the nakamoto tenure by burn view + pub fn get_nakamoto_tenure_change_by_burn_view( + headers_conn: &Connection, + burn_view: &ConsensusHash, + ) -> Result, ChainstateError> { + let sql = "SELECT * FROM nakamoto_tenures WHERE burn_view_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; + let args = rusqlite::params![burn_view]; + let tenure_opt: Option = query_row(headers_conn, sql, args)?; + Ok(tenure_opt) + } + /// Get a nakamoto tenure-change by its tenure ID consensus hash. /// Get the highest such record. It will be the last-processed BlockFound tenure /// for the given sortition consensus hash. @@ -555,7 +566,8 @@ impl NakamotoChainState { Ok(tenure_opt) } - /// Get the highest non-empty processed tenure on the canonical sortition history. + /// Get the highest non-empty processed tenure-change on the canonical sortition history. + /// It will be a BlockFound tenure. pub fn get_highest_nakamoto_tenure( headers_conn: &Connection, sortdb_conn: &SH, @@ -564,16 +576,48 @@ impl NakamotoChainState { // sortition gets invalidated through a reorg. let mut cursor = SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &sortdb_conn.tip())? .ok_or(ChainstateError::NoSuchBlockError)?; - loop { + + // if there's been no activity for more than 2*reward_cycle_length sortitions, then the + // chain is dead anyway + for _ in 0..(2 * sortdb_conn.pox_constants().reward_cycle_length) { if let Some(tenure) = Self::get_highest_nakamoto_tenure_change_by_tenure_id( headers_conn, &cursor.consensus_hash, )? { return Ok(Some(tenure)); } - cursor = SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &cursor.parent_sortition_id)? - .ok_or(ChainstateError::NoSuchBlockError)?; + cursor = + SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &cursor.parent_sortition_id)? + .ok_or(ChainstateError::NoSuchBlockError)?; + } + Ok(None) + } + + /// Get the ongoing tenure (i.e. last tenure-change tx record) from the sortition pointed to by + /// sortdb_conn. + /// It will be a BlockFound or an Extension tenure. + pub fn get_ongoing_nakamoto_tenure( + headers_conn: &Connection, + sortdb_conn: &SH, + ) -> Result, ChainstateError> { + // NOTE: we do a *search* here in case the canonical Stacks pointer stored on the canonical + // sortition gets invalidated through a reorg. + let mut cursor = SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &sortdb_conn.tip())? + .ok_or(ChainstateError::NoSuchBlockError)?; + + // if there's been no activity for more than 2*reward_cycle_length sortitions, then the + // chain is dead anyway + for _ in 0..(2 * sortdb_conn.pox_constants().reward_cycle_length) { + if let Some(tenure) = + Self::get_nakamoto_tenure_change_by_burn_view(headers_conn, &cursor.consensus_hash)? + { + return Ok(Some(tenure)); + } + cursor = + SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &cursor.parent_sortition_id)? + .ok_or(ChainstateError::NoSuchBlockError)?; } + Ok(None) } /// Verify that a tenure change tx is a valid first-ever tenure change. It must connect to an From f930538634200ebe85173049750a60a5a6e293ce Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 17:39:48 -0400 Subject: [PATCH 0089/1400] chore: fmt --- .../src/chainstate/nakamoto/tests/mod.rs | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 9aab60ab9b..abc9cf203b 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -1844,10 +1844,12 @@ pub fn test_get_highest_nakamoto_tenure() { "stacks tip = {},{},{}", &stacks_ch, &stacks_bhh, stacks_height ); - let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let highest_tenure = NakamotoChainState::get_highest_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); let last_tenure_change = last_tenure_change.unwrap(); let last_header = last_header.unwrap(); @@ -1882,10 +1884,12 @@ pub fn test_get_highest_nakamoto_tenure() { ); // new tip doesn't include the last two tenures - let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let highest_tenure = NakamotoChainState::get_highest_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); let last_tenure_change = &all_tenure_changes[2]; let last_header = &all_headers[2]; assert_eq!( From 480f2fd3340e05508e4c7cf1c5f32569865001a8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 17:39:56 -0400 Subject: [PATCH 0090/1400] chore: document tenure-loading behavior --- stackslib/src/net/inv/nakamoto.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index de46d15744..867be5a507 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -72,7 +72,8 @@ pub(crate) struct InvTenureInfo { impl InvTenureInfo { /// Load up cacheable tenure state for a given tenure-ID consensus hash. - /// This only returns Ok(Some(..)) if there was a tenure-change tx for this consensus hash. + /// This only returns Ok(Some(..)) if there was a tenure-change tx for this consensus hash + /// (i.e. it was a BlockFound tenure, not an Extension tenure) pub fn load( chainstate: &StacksChainState, consensus_hash: &ConsensusHash, From d2c2847edb7f1efe2039dd7d610c28b11dbda1f5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 17:40:08 -0400 Subject: [PATCH 0091/1400] chore: fmt --- stackslib/src/net/tests/mod.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 45ad71590d..5e2cb3e6cc 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -735,10 +735,12 @@ impl NakamotoBootPlan { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_db.index_handle_at_tip()) - .unwrap() - .unwrap(); + let tenure = NakamotoChainState::get_highest_nakamoto_tenure( + chainstate.db(), + &sort_db.index_handle_at_tip(), + ) + .unwrap() + .unwrap(); (tenure, tip) }; From b24deed88f1f41276941639c6408570846cff604 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 18:12:20 -0400 Subject: [PATCH 0092/1400] chore: mock aggregate public key until the miner is upgraded to not need it --- .../stacks-node/src/nakamoto_node/miner.rs | 46 +++++++------------ .../src/tests/nakamoto_integrations.rs | 1 - 2 files changed, 17 insertions(+), 30 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3ed642c9cd..2be08d7c12 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -42,7 +42,8 @@ use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::VRFProof; -use wsts::curve::point::Point; +use wsts::curve::ecdsa; +use wsts::curve::point::{Compressed, Point}; use wsts::curve::scalar::Scalar; use super::relayer::RelayerThread; @@ -198,7 +199,7 @@ impl BlockMinerThread { }; new_block.header.signer_signature = signer_signature; - if let Err(e) = self.broadcast(new_block.clone(), None, reward_set) { + if let Err(e) = self.broadcast(new_block.clone(), reward_set) { warn!("Error accepting own block: {e:?}. Will try mining again."); continue; } else { @@ -272,12 +273,7 @@ impl BlockMinerThread { .expect("FATAL: building on a burn block that is before the first burn block"); let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { - Ok(Some(x)) => x, - Ok(None) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "No reward set found. Cannot initialize miner coordinator.".into(), - )); - } + Ok(x) => x, Err(e) => { return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" @@ -291,18 +287,18 @@ impl BlockMinerThread { )); }; - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + let chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); - let sortition_handle = sort_db.index_handle_at_tip(); - let Ok(aggregate_public_key) = NakamotoChainState::get_aggregate_public_key( - &mut chain_state, - &sort_db, - &sortition_handle, - &new_block, - ) else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Failed to obtain the active aggregate public key. Cannot mine!".into(), - )); + + // NOTE: this is a placeholder until the API can be fixed + let aggregate_public_key = { + let key_bytes = [ + 0x03, 0xd3, 0xe1, 0x5a, 0x36, 0xf3, 0x2a, 0x9e, 0x71, 0x31, 0x7f, 0xcb, 0x4a, 0x20, + 0x1b, 0x0c, 0x08, 0xb3, 0xbc, 0xfb, 0xdc, 0x8a, 0xee, 0x2e, 0xe4, 0xd2, 0x69, 0x23, + 0x00, 0x06, 0xb1, 0xa0, 0xcb, + ]; + let ecdsa_pk = ecdsa::PublicKey::try_from(key_bytes.as_slice()).unwrap(); + Point::try_from(&Compressed::from(ecdsa_pk.to_bytes())).unwrap() }; let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); @@ -310,6 +306,7 @@ impl BlockMinerThread { &reward_set, reward_cycle, miner_privkey_as_scalar, + // TODO: placeholder until the signer is working aggregate_public_key, &stackerdbs, &self.config, @@ -371,12 +368,7 @@ impl BlockMinerThread { .expect("FATAL: building on a burn block that is before the first burn block"); let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { - Ok(Some(x)) => x, - Ok(None) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "No reward set found. Cannot initialize miner coordinator.".into(), - )); - } + Ok(x) => x, Err(e) => { return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" @@ -533,12 +525,9 @@ impl BlockMinerThread { Ok(filtered_transactions.into_values().collect()) } - /// TODO: update to utilize `signer_signature` vec instead of the aggregate - /// public key. fn broadcast( &self, block: NakamotoBlock, - aggregate_public_key: Option<&Point>, reward_set: RewardSet, ) -> Result<(), ChainstateError> { #[cfg(test)] @@ -576,7 +565,6 @@ impl BlockMinerThread { &mut sortition_handle, &staging_tx, headers_conn, - aggregate_public_key, reward_set, )?; staging_tx.commit()?; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f46d9a3878..3c7e422e8d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -367,7 +367,6 @@ pub fn read_and_sign_block_proposal( let reward_set = sortdb .get_preprocessed_reward_set_of(&tip.sortition_id) .expect("Failed to get reward cycle info") - .expect("Failed to get reward cycle info") .known_selected_anchor_block_owned() .expect("Expected a reward set"); From bb1aed130af53e820f98d0f130bcd22ce9c48389 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 22 May 2024 18:13:20 -0400 Subject: [PATCH 0093/1400] fix: remove unused var --- testnet/stacks-node/src/nakamoto_node/miner.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 2be08d7c12..a938c4b7be 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -287,9 +287,6 @@ impl BlockMinerThread { )); }; - let chain_state = neon_node::open_chainstate_with_faults(&self.config) - .expect("FATAL: could not open chainstate DB"); - // NOTE: this is a placeholder until the API can be fixed let aggregate_public_key = { let key_bytes = [ From 65a934e454524acbe6f2d3d9db1e87923731dd95 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Thu, 23 May 2024 06:54:01 +0200 Subject: [PATCH 0094/1400] fix typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3f91b1a9f2..436d45705b 100644 --- a/README.md +++ b/README.md @@ -87,7 +87,7 @@ cd testnet/stacks-node cargo run --bin stacks-node -- start --config ./conf/testnet-follower-conf.toml ``` -_On Windows, many tests will fail if the line endings aren't `LF`. Please ensure that you are have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. This is due to the Clarity language currently being sensitive to line endings._ +_On Windows, many tests will fail if the line endings aren't `LF`. Please ensure that you have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. This is due to the Clarity language currently being sensitive to line endings._ Additional testnet documentation is available [here](./docs/testnet.md) and [here](https://docs.stacks.co/docs/nodes-and-miners/miner-testnet) From d79f31239726c181eef290acafc7f82d27176f1a Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Thu, 23 May 2024 06:54:50 +0200 Subject: [PATCH 0095/1400] fix typos --- docs/ci-release.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ci-release.md b/docs/ci-release.md index 4e21ed631d..f7881ba675 100644 --- a/docs/ci-release.md +++ b/docs/ci-release.md @@ -23,7 +23,7 @@ All releases are built via a Github Actions workflow named `CI` ([ci.yml](../.gi - `stacks-core:` - An untagged build of any branch will produce a single image built from source on Debian with glibc: - `stacks-core:` -- A tagged release on a non-default branch will produces: +- A tagged release on a non-default branch will produce: - Docker Alpine image for several architectures tagged with: - `stacks-core:` - Docker Debian image for several architectures tagged with: @@ -83,7 +83,7 @@ There are also 2 different methods in use with regard to running tests: A matrix is used when there are several known tests that need to be run. Partitions (shards) are used when there is a large and unknown number of tests to run (ex: `cargo test` to run all tests). There is also a workflow designed to run tests that are manually triggered: [Standalone Tests](../.github/workflows/standalone-tests.yml). -This workflow requires you to select which test(s) you want to run, which then triggers a reusbale workflow via conditional. For example, selecting "Epoch Tests" will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). Likewise, selecting `Release Tests` will run the same tests as a release workflow. +This workflow requires you to select which test(s) you want to run, which then triggers a reusable workflow via conditional. For example, selecting "Epoch Tests" will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). Likewise, selecting `Release Tests` will run the same tests as a release workflow. Files: From 66f9883858805c9191df8157156ffe8668d9e7d0 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Thu, 23 May 2024 06:55:40 +0200 Subject: [PATCH 0096/1400] fix typos --- docs/rpc-endpoints.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index 6815adfc61..6163f27b75 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -111,7 +111,7 @@ Returns a JSON list containing the following: ``` The `consensus_hash` field identifies the sortition in which the given block was -chosen. The `header` is the raw block header, a a hex string. The +chosen. The `header` is the raw block header, a hex string. The `parent_block_id` is the block ID hash of this block's parent, and can be used as a `?tip=` query parameter to page through deeper and deeper block headers. @@ -143,8 +143,8 @@ Returns JSON data in the form: } ``` -Where balance is the hex encoding of a unsigned 128-bit integer -(big-endian), nonce is a unsigned 64-bit integer, and the proofs are +Where balance is the hex encoding of an unsigned 128-bit integer +(big-endian), nonce is an unsigned 64-bit integer, and the proofs are provided as hex strings. For non-existent accounts, this _does not_ 404, rather it returns an @@ -212,7 +212,7 @@ JSON object _without_ the `proof` field. ### GET /v2/fees/transfer -Get an estimated fee rate for STX transfer transactions. This a a fee rate / byte, and is returned as a JSON integer. +Get an estimated fee rate for STX transfer transactions. This is a fee rate / byte, and is returned as a JSON integer. ### GET /v2/contracts/interface/[Stacks Address]/[Contract Name] @@ -530,6 +530,6 @@ Return metadata about the highest-known tenure, as the following JSON structure: Here, `consensus_hash` identifies the highest-known tenure (which may not be the highest sortition), `reward_cycle` identifies the reward cycle number of this -tenure, `tip_block_id` idenitifies the highest-known block in this tenure, and +tenure, `tip_block_id` identifies the highest-known block in this tenure, and `tip_height` identifies that block's height. From 7f5d3cd68615f8e9518ee19fea8dc7659c871fb0 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 12:00:56 +0300 Subject: [PATCH 0097/1400] Describe how the `delegatedUntilBurnHt` will be updated - This comment will be of help as we are updating the delegatedUntilBurnHt field to support delegations for an indefinite period. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index 15f4d4ddc0..fe1f335176 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -124,6 +124,10 @@ it("statefully interacts with PoX-4", async () => { poolMembers: [], delegatedTo: "", delegatedMaxAmount: 0, + // We initialize delegatedUntilBurnHt to 0. It will be updated + // after successful delegate-stx calls. It's value will be either + // the unwrapped until-burn-ht uint passed to the delegate-stx, + // or undefined for indefinite delegations. delegatedUntilBurnHt: 0, delegatedPoxAddress: "", amountLocked: 0, From 0fe4b35f343d8ec50edab52b4a462e438f46c094 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 12:05:40 +0300 Subject: [PATCH 0098/1400] Enable the none branch for `delegatedUntilBurnHt` This commit: - updates the type of delegatedUntilBurnHt to number | none - updates the generators and the types of the untilBurnHt to support the none branch - updates the untilBurnHt param to support the none branch - adds the undefined check in the comparisons that involve delegatedUntilBurnHt - adds the undefined check in the state refresh comparison that involves delegatedUntilBurnHt --- .../tests/pox-4/pox_CommandModel.ts | 13 ++++++++---- .../tests/pox-4/pox_Commands.ts | 7 +++++-- .../pox-4/pox_DelegateStackExtendCommand.ts | 3 ++- .../pox-4/pox_DelegateStackStxCommand.ts | 3 ++- .../tests/pox-4/pox_DelegateStxCommand.ts | 21 ++++++++++++++----- .../pox-4/pox_RevokeDelegateStxCommand.ts | 10 +++++++-- 6 files changed, 42 insertions(+), 15 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts index 6d4d582b58..cdf211c3ed 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts @@ -76,9 +76,14 @@ export class Stub { // Get the wallet's ex-delegators by comparing their delegatedUntilBurnHt // to the current burn block height (only if the wallet is a delegatee). - const expiredDelegators = wallet.poolMembers.filter((stackerAddress) => - this.stackers.get(stackerAddress)!.delegatedUntilBurnHt < - burnBlockHeight + // If the delegatedUntilBurnHt is undefined, the delegator is considered + // active for an indefinite period (until a revoke-delegate-stx call). + const expiredDelegators = wallet.poolMembers.filter( + (stackerAddress) => + this.stackers.get(stackerAddress)!.delegatedUntilBurnHt !== + undefined && + this.stackers.get(stackerAddress)!.delegatedUntilBurnHt as number < + burnBlockHeight, ); // Get the operator's pool stackers that no longer have partially commited @@ -180,7 +185,7 @@ export type Stacker = { poolMembers: StxAddress[]; delegatedTo: StxAddress; delegatedMaxAmount: number; - delegatedUntilBurnHt: number; + delegatedUntilBurnHt: number | undefined; delegatedPoxAddress: BtcAddress; amountLocked: number; amountUnlocked: number; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts index ba7043d5ec..bafbe38a43 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts @@ -163,13 +163,16 @@ export function PoxCommands( fc.record({ wallet: fc.constantFrom(...wallets.values()), delegateTo: fc.constantFrom(...wallets.values()), - untilBurnHt: fc.integer({ min: 1 }), + untilBurnHt: fc.oneof( + fc.constant(Cl.none()), + fc.integer({ min: 1 }).map((value) => Cl.some(Cl.uint(value))), + ), amount: fc.bigInt({ min: 0n, max: 100_000_000_000_000n }), }).map(( r: { wallet: Wallet; delegateTo: Wallet; - untilBurnHt: number; + untilBurnHt: OptionalCV; amount: bigint; }, ) => diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts index cfd385cf5a..2875551342 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts @@ -83,7 +83,8 @@ export class DelegateStackExtendCommand implements PoxCommand { stackerWallet.hasDelegated === true && stackerWallet.isStacking === true && stackerWallet.delegatedTo === this.operator.stxAddress && - stackerWallet.delegatedUntilBurnHt >= newUnlockHeight && + (stackerWallet.delegatedUntilBurnHt === undefined || + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && stackerWallet.delegatedMaxAmount >= stackedAmount && operatorWallet.poolMembers.includes(this.stacker.stxAddress) && operatorWallet.lockedAddresses.includes(this.stacker.stxAddress) && diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts index 456983807f..e3d9dd25c1 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts @@ -94,7 +94,8 @@ export class DelegateStackStxCommand implements PoxCommand { Number(this.amountUstx) <= stackerWallet.ustxBalance && Number(this.amountUstx) >= model.stackingMinimum && operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt + (stackerWallet.delegatedUntilBurnHt === undefined || + this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts index 4a12b0140d..e70d466c9d 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts @@ -7,7 +7,15 @@ import { } from "./pox_CommandModel.ts"; import { poxAddressToTuple } from "@stacks/stacking"; import { expect } from "vitest"; -import { boolCV, Cl } from "@stacks/transactions"; +import { + boolCV, + Cl, + ClarityType, + cvToValue, + isClarityType, + OptionalCV, + UIntCV, +} from "@stacks/transactions"; /** * The `DelegateStxCommand` delegates STX for stacking within PoX-4. This @@ -22,7 +30,7 @@ import { boolCV, Cl } from "@stacks/transactions"; export class DelegateStxCommand implements PoxCommand { readonly wallet: Wallet; readonly delegateTo: Wallet; - readonly untilBurnHt: number; + readonly untilBurnHt: OptionalCV; readonly amount: bigint; /** @@ -37,7 +45,7 @@ export class DelegateStxCommand implements PoxCommand { constructor( wallet: Wallet, delegateTo: Wallet, - untilBurnHt: number, + untilBurnHt: OptionalCV, amount: bigint, ) { this.wallet = wallet; @@ -74,7 +82,7 @@ export class DelegateStxCommand implements PoxCommand { // (delegate-to principal) Cl.principal(this.delegateTo.stxAddress), // (until-burn-ht (optional uint)) - Cl.some(Cl.uint(this.untilBurnHt)), + this.untilBurnHt, // (pox-addr (optional { version: (buff 1), hashbytes: (buff 32) })) Cl.some(poxAddressToTuple(this.delegateTo.btcAddress)), ], @@ -93,7 +101,10 @@ export class DelegateStxCommand implements PoxCommand { wallet.hasDelegated = true; wallet.delegatedTo = this.delegateTo.stxAddress; wallet.delegatedMaxAmount = amountUstx; - wallet.delegatedUntilBurnHt = this.untilBurnHt; + wallet.delegatedUntilBurnHt = + isClarityType(this.untilBurnHt, ClarityType.OptionalNone) + ? undefined + : Number(cvToValue(this.untilBurnHt).value); wallet.delegatedPoxAddress = this.delegateTo.btcAddress; delegatedWallet.poolMembers.push(this.wallet.stxAddress); diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts index 54e4806757..c39a1a5e42 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts @@ -37,7 +37,8 @@ export class RevokeDelegateStxCommand implements PoxCommand { return ( model.stackingMinimum > 0 && stacker.hasDelegated === true && - stacker.delegatedUntilBurnHt > model.burnBlockHeight + (stacker.delegatedUntilBurnHt === undefined || + stacker.delegatedUntilBurnHt > model.burnBlockHeight) ); } @@ -46,6 +47,9 @@ export class RevokeDelegateStxCommand implements PoxCommand { const wallet = model.stackers.get(this.wallet.stxAddress)!; const operatorWallet = model.stackers.get(wallet.delegatedTo)!; + const expectedUntilBurnHt = wallet.delegatedUntilBurnHt === undefined + ? Cl.none() + : Cl.some(Cl.uint(wallet.delegatedUntilBurnHt)); // Act const revokeDelegateStx = real.network.callPublicFn( @@ -66,7 +70,7 @@ export class RevokeDelegateStxCommand implements PoxCommand { "pox-addr": Cl.some( poxAddressToTuple(wallet.delegatedPoxAddress || ""), ), - "until-burn-ht": Cl.some(Cl.uint(wallet.delegatedUntilBurnHt)), + "until-burn-ht": expectedUntilBurnHt, }), ), ); @@ -76,6 +80,8 @@ export class RevokeDelegateStxCommand implements PoxCommand { // Update model so that we know this wallet is not delegating anymore. // This is important in order to prevent the test from revoking the // delegation multiple times with the same address. + // We update delegatedUntilBurnHt to 0, and not undefined. Undefined + // stands for indefinite delegation. wallet.hasDelegated = false; wallet.delegatedTo = ""; wallet.delegatedUntilBurnHt = 0; From 313431707c5088f0e330175bb61a42259c9633f9 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 12:10:25 +0300 Subject: [PATCH 0099/1400] Pass the incremented `start-burn-height` to the `stack-stx` calls --- .../tests/pox-4/pox_StackStxAuthCommand.ts | 2 +- .../tests/pox-4/pox_StackStxSigCommand.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts index 108f0956b5..0b62f55c42 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts @@ -125,7 +125,7 @@ export class StackStxAuthCommand implements PoxCommand { // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) poxAddressToTuple(this.wallet.btcAddress), // (start-burn-ht uint) - Cl.uint(burnBlockHeight), + Cl.uint(burnBlockHeight + 1), // (lock-period uint) Cl.uint(this.period), // (signer-sig (optional (buff 65))) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts index baa87015a1..100d84a6e0 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts @@ -123,7 +123,7 @@ export class StackStxSigCommand implements PoxCommand { // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) poxAddressToTuple(this.wallet.btcAddress), // (start-burn-ht uint) - Cl.uint(burnBlockHeight), + Cl.uint(burnBlockHeight + 1), // (lock-period uint) Cl.uint(this.period), // (signer-sig (optional (buff 65))) From 2b3a54e42cc384cae4136ff7d95654edc230aaaa Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 13:06:55 +0300 Subject: [PATCH 0100/1400] Use simnet mineBlock inside `StackStxAuthCommand` This update includes the authorization and the function call in the same block. It is needed because otherwise, it can result in issuing the authorization for the wrong reward cycle. --- .../tests/pox-4/pox_StackStxAuthCommand.ts | 70 +++++++++---------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts index 0b62f55c42..53f34ca0bb 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts @@ -15,6 +15,7 @@ import { isClarityType, } from "@stacks/transactions"; import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; /** * The `StackStxAuthCommand` locks STX for stacking within PoX-4. This self-service @@ -80,31 +81,6 @@ export class StackStxAuthCommand implements PoxCommand { // generated number passed to the constructor of this class. const maxAmount = model.stackingMinimum * this.margin; - const { result: setAuthorization } = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "set-signer-key-authorization", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - poxAddressToTuple(this.wallet.btcAddress), - // (period uint) - Cl.uint(this.period), - // (reward-cycle uint) - Cl.uint(currentRewCycle), - // (topic (string-ascii 14)) - Cl.stringAscii("stack-stx"), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.wallet.signerPubKey), - // (allowed bool) - Cl.bool(true), - // (max-amount uint) - Cl.uint(maxAmount), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.wallet.stxAddress, - ); - - expect(setAuthorization).toBeOk(Cl.bool(true)); const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); const burnBlockHeight = Number( cvToValue(burnBlockHeightCV as ClarityValue), @@ -115,11 +91,35 @@ export class StackStxAuthCommand implements PoxCommand { // signer key. const amountUstx = maxAmount; - // Act - const stackStx = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "stack-stx", - [ + // Include the authorization and the `stack-stx` transactions in a single + // block. This way we ensure both the authorization and the stack-stx + // transactions are called during the same reward cycle, so the authorization + // currentRewCycle param is relevant for the upcoming stack-stx call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.period), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-stx"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn("ST000000000000000000002AMW42H.pox-4", "stack-stx", [ // (amount-ustx uint) Cl.uint(amountUstx), // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) @@ -136,9 +136,10 @@ export class StackStxAuthCommand implements PoxCommand { Cl.uint(maxAmount), // (auth-id uint) Cl.uint(this.authId), - ], - this.wallet.stxAddress, - ); + ], this.wallet.stxAddress), + ]); + + expect(block[0].result).toBeOk(Cl.bool(true)); const { result: rewardCycle } = real.network.callReadOnlyFn( "ST000000000000000000002AMW42H.pox-4", @@ -156,8 +157,7 @@ export class StackStxAuthCommand implements PoxCommand { ); assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); - // Assert - expect(stackStx.result).toBeOk( + expect(block[1].result).toBeOk( Cl.tuple({ "lock-amount": Cl.uint(amountUstx), "signer-key": Cl.bufferFromHex(this.wallet.signerPubKey), From aa52fa598cdf9276d9cdae767076ae1dadaed90d Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 15:23:19 +0300 Subject: [PATCH 0101/1400] Use simnet `mineBlock` inside `StackExtendAuthCommand` This update includes the authorization and the function call in the same block. It is needed because otherwise, it can result in issuing the authorization for the wrong reward cycle. --- .../tests/pox-4/pox_StackExtendAuthCommand.ts | 98 ++++++++++--------- 1 file changed, 52 insertions(+), 46 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts index a7dbf49cbb..fa796673ea 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts @@ -7,6 +7,7 @@ import { } from "./pox_Commands"; import { Cl, ClarityType, isClarityType } from "@stacks/transactions"; import { assert, expect } from "vitest"; +import { tx } from "@hirosystems/clarinet-sdk"; export class StackExtendAuthCommand implements PoxCommand { readonly wallet: Wallet; @@ -77,51 +78,6 @@ export class StackExtendAuthCommand implements PoxCommand { const stacker = model.stackers.get(this.wallet.stxAddress)!; - const { result: setAuthorization } = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "set-signer-key-authorization", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - poxAddressToTuple(this.wallet.btcAddress), - // (period uint) - Cl.uint(this.extendCount), - // (reward-cycle uint) - Cl.uint(currentRewCycle), - // (topic (string-ascii 14)) - Cl.stringAscii("stack-extend"), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.wallet.signerPubKey), - // (allowed bool) - Cl.bool(true), - // (max-amount uint) - Cl.uint(stacker.amountLocked), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.wallet.stxAddress, - ); - - expect(setAuthorization).toBeOk(Cl.bool(true)); - const stackExtend = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "stack-extend", - [ - // (extend-count uint) - Cl.uint(this.extendCount), - // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) - poxAddressToTuple(this.wallet.btcAddress), - // (signer-sig (optional (buff 65))) - Cl.none(), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.wallet.signerPubKey), - // (max-amount uint) - Cl.uint(stacker.amountLocked), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.wallet.stxAddress, - ); - const { result: firstExtendCycle } = real.network.callReadOnlyFn( "ST000000000000000000002AMW42H.pox-4", "burn-height-to-reward-cycle", @@ -143,7 +99,57 @@ export class StackExtendAuthCommand implements PoxCommand { const newUnlockHeight = extendedUnlockHeight.value; - expect(stackExtend.result).toBeOk( + // Include the authorization and the `stack-extend` transactions in a single + // block. This way we ensure both the authorization and the stack-extend + // transactions are called during the same reward cycle, so the authorization + // currentRewCycle param is relevant for the upcoming stack-extend call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.extendCount), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-extend"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-extend", + [ + // (extend-count uint) + Cl.uint(this.extendCount), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.wallet.btcAddress), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + ]); + + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeOk( Cl.tuple({ stacker: Cl.principal(this.wallet.stxAddress), "unlock-burn-height": Cl.uint(newUnlockHeight), From f9b4a1af58a2cb52198294c593c37db0319d5cdb Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 16:04:41 +0300 Subject: [PATCH 0102/1400] Use simnet `mineBlock` inside `StackAggregationCommitIndexedAuthCommand` This update includes the authorization and the function call in the same block. It is needed because otherwise, it can result in issuing the authorization for the wrong reward cycle. --- ...tackAggregationCommitIndexedAuthCommand.ts | 98 ++++++++++--------- 1 file changed, 53 insertions(+), 45 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts index dfe7f2beef..cfafccc674 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts @@ -9,6 +9,7 @@ import { poxAddressToTuple } from "@stacks/stacking"; import { expect } from "vitest"; import { Cl } from "@stacks/transactions"; import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; /** * The `StackAggregationCommitIndexedAuthCommand` allows an operator to @@ -65,54 +66,61 @@ export class StackAggregationCommitIndexedAuthCommand implements PoxCommand { const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const committedAmount = operatorWallet.amountToCommit; - const { result: setSignature } = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "set-signer-key-authorization", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - poxAddressToTuple(this.operator.btcAddress), - // (period uint) - Cl.uint(1), - // (reward-cycle uint) - Cl.uint(currentRewCycle + 1), - // (topic (string-ascii 14)) - Cl.stringAscii("agg-commit"), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.operator.signerPubKey), - // (allowed bool) - Cl.bool(true), - // (max-amount uint) - Cl.uint(committedAmount), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.operator.stxAddress, - ); - expect(setSignature).toBeOk(Cl.bool(true)); - // Act - const stackAggregationCommitIndexed = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "stack-aggregation-commit-indexed", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - poxAddressToTuple(this.operator.btcAddress), - // (reward-cycle uint) - Cl.uint(currentRewCycle + 1), - // (signer-sig (optional (buff 65))) - Cl.none(), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.operator.signerPubKey), - // (max-amount uint) - Cl.uint(committedAmount), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.operator.stxAddress, - ); + + // Include the authorization and the `stack-aggregation-commit-indexed` + // transactions in a single block. This way we ensure both the authorization + // and the stack-aggregation-commit-indexed transactions are called during + // the same reward cycle, so the authorization currentRewCycle param is + // relevant for the upcoming stack-aggregation-commit-indexed call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit-indexed", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + ]); // Assert - expect(stackAggregationCommitIndexed.result).toBeOk( + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeOk( Cl.uint(model.nextRewardSetIndex), ); From 9eada5d6aa2c5d8eb1a462ae0b503adde29e0709 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 23 May 2024 16:10:23 +0300 Subject: [PATCH 0103/1400] Use simnet `mineBlock` inside `StackAggregationCommitAuthCommand` This update includes the authorization and the function call in the same block. It is needed because otherwise, it can result in issuing the authorization for the wrong reward cycle. --- .../pox_StackAggregationCommitAuthCommand.ts | 98 ++++++++++--------- 1 file changed, 53 insertions(+), 45 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts index 5312679833..62622f4bd3 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts @@ -9,6 +9,7 @@ import { poxAddressToTuple } from "@stacks/stacking"; import { expect } from "vitest"; import { Cl } from "@stacks/transactions"; import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; /** * The `StackAggregationCommitAuthCommand` allows an operator to commit @@ -60,54 +61,61 @@ export class StackAggregationCommitAuthCommand implements PoxCommand { const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const committedAmount = operatorWallet.amountToCommit; - const { result: setSignature } = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "set-signer-key-authorization", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - poxAddressToTuple(this.operator.btcAddress), - // (period uint) - Cl.uint(1), - // (reward-cycle uint) - Cl.uint(currentRewCycle + 1), - // (topic (string-ascii 14)) - Cl.stringAscii("agg-commit"), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.operator.signerPubKey), - // (allowed bool) - Cl.bool(true), - // (max-amount uint) - Cl.uint(committedAmount), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.operator.stxAddress, - ); - expect(setSignature).toBeOk(Cl.bool(true)); - // Act - const stackAggregationCommit = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "stack-aggregation-commit", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - poxAddressToTuple(this.operator.btcAddress), - // (reward-cycle uint) - Cl.uint(currentRewCycle + 1), - // (signer-sig (optional (buff 65))) - Cl.none(), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.operator.signerPubKey), - // (max-amount uint) - Cl.uint(committedAmount), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.operator.stxAddress, - ); + + // Include the authorization and the `stack-aggregation-commit` transactions + // in a single block. This way we ensure both the authorization and the + // stack-aggregation-commit transactions are called during the same reward + // cycle, so the authorization currentRewCycle param is relevant for the + // upcoming stack-aggregation-commit call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + ]); // Assert - expect(stackAggregationCommit.result).toBeOk(Cl.bool(true)); + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeOk(Cl.bool(true)); operatorWallet.amountToCommit -= committedAmount; operatorWallet.committedRewCycleIndexes.push(model.nextRewardSetIndex); From 20a1136edd814db00e3f29110372dee2c7b95597 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 23 May 2024 09:55:19 -0400 Subject: [PATCH 0104/1400] Run the v0 signer by default instead of v1 Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index d108fb4540..dcabfffc08 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -43,7 +43,7 @@ use stacks_signer::cli::{ RunSignerArgs, StackerDBArgs, }; use stacks_signer::config::GlobalConfig; -use stacks_signer::v1::SpawnedSigner; +use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; From 72b0e2c51c8d21130dd33581f8f5925e32ffd393 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 23 May 2024 14:53:53 -0400 Subject: [PATCH 0105/1400] chore: address PR feedback --- stackslib/src/chainstate/burn/db/sortdb.rs | 10 +++++----- stackslib/src/chainstate/coordinator/mod.rs | 10 +++++----- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 7f8153ce84..2916f3de3c 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -4678,14 +4678,14 @@ impl SortitionDB { Ok(ret) } - /// DO NOT CALL FROM CONSENSUS CODE + /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. pub fn index_handle_at_tip<'a>(&'a self) -> SortitionHandleConn<'a> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); self.index_handle(&sortition_id) } /// Open a tx handle at the burn chain tip - /// DO NOT CALL FROM CONSENSUS CODE + /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. pub fn tx_begin_at_tip<'a>(&'a mut self) -> SortitionHandleTx<'a> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); self.tx_handle_begin(&sortition_id).unwrap() @@ -4695,7 +4695,7 @@ impl SortitionDB { /// Returns Ok(Some(tip info)) on success /// Returns Ok(None) if there are no Nakamoto blocks in this tip /// Returns Err(..) on other DB error - /// DO NOT CALL FROM CONSENSUS CODE + /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. pub fn get_canonical_nakamoto_tip_hash_and_height( conn: &Connection, tip: &BlockSnapshot, @@ -4720,7 +4720,7 @@ impl SortitionDB { } /// Get the canonical Stacks chain tip -- this gets memoized on the canonical burn chain tip. - /// DO NOT CALL FROM CONSENSUS CODE + /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. pub fn get_canonical_stacks_chain_tip_hash_and_height( conn: &Connection, ) -> Result<(ConsensusHash, BlockHeaderHash, u64), db_error> { @@ -4748,7 +4748,7 @@ impl SortitionDB { } /// Get the canonical Stacks chain tip -- this gets memoized on the canonical burn chain tip. - /// DO NOT CALL FROM CONSENSUS CODE + /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. pub fn get_canonical_stacks_chain_tip_hash( conn: &Connection, ) -> Result<(ConsensusHash, BlockHeaderHash), db_error> { diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 4c201bfb2c..3d6487d526 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -854,11 +854,11 @@ pub fn get_reward_cycle_info( true }; if need_to_store { - test_debug!( - "Store preprocessed reward set for cycle {} (prepare start sortition {}): {:?}", - prev_reward_cycle, - &first_prepare_sn.sortition_id, - &reward_cycle_info + debug!( + "Store preprocessed reward set for cycle"; + "reward_cycle" => prev_reward_cycle, + "prepare-start sortition" => %first_prepare_sn.sortition_id, + "reward_cycle_info" => format!("{:?}", &reward_cycle_info) ); SortitionDB::store_preprocessed_reward_set( &mut tx, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index c23551cb60..1656eab594 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2009,7 +2009,7 @@ impl NakamotoChainState { } /// Load the canonical Stacks block header (either epoch-2 rules or Nakamoto) - /// DO NOT CALL FROM CONSENSUS CODE + /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. pub fn get_canonical_block_header( chainstate_conn: &Connection, sortdb: &SortitionDB, From 91f3545ad60c31d320d3e35ae39fac4c85e035db Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 23 May 2024 15:41:44 -0400 Subject: [PATCH 0106/1400] chore: comment on why we need to continuously re-check the preprocessed reward set of the anchor block is not yet known --- stackslib/src/chainstate/coordinator/mod.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 3d6487d526..f34e21d1bd 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -847,6 +847,14 @@ pub fn get_reward_cycle_info( let mut tx = sort_db.tx_begin()?; let preprocessed_reward_set = SortitionDB::get_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id)?; + + // It's possible that we haven't processed the PoX anchor block at the time we have + // processed the burnchain block which commits to it. In this case, the PoX anchor block + // status would be SelectedAndUnknown. However, it's overwhelmingly likely (and in + // Nakamoto, _required_) that the PoX anchor block will be processed shortly thereafter. + // When this happens, we need to _update_ the sortition DB with the newly-processed reward + // set. This code performs this check to determine whether or not we need to store this + // calculated reward set. let need_to_store = if let Some(reward_cycle_info) = preprocessed_reward_set { // overwrite if we have an unknown anchor block !reward_cycle_info.is_reward_info_known() From 6b86b953a11d3d0bc3975f306165c7333ba1c980 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 23 May 2024 12:56:09 -0700 Subject: [PATCH 0107/1400] crc: typecasting and returning errors instead of panic in `coordinate_signature` --- stackslib/src/chainstate/nakamoto/mod.rs | 21 ++++++++------- .../stacks-node/src/nakamoto_node/miner.rs | 27 ++++++++++++++++--- 2 files changed, 35 insertions(+), 13 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index b5215ee6bc..e0dcd9a95c 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -532,11 +532,11 @@ impl NakamotoBlockHeader { }); // HashMap of - let signers_by_pk = signers + let signers_by_pk: HashMap<_, _> = signers .iter() .enumerate() .map(|(i, signer)| (&signer.signing_key, (signer, i))) - .collect::>(); + .collect(); for signature in self.signer_signature.iter() { let public_key = Secp256k1PublicKey::recover_to_pubkey(message.bits(), signature) @@ -585,18 +585,21 @@ impl NakamotoBlockHeader { return Ok(()); } + /// Compute the threshold for the minimum number of signers (by weight) required + /// to approve a Nakamoto block. pub fn compute_voting_weight_threshold(total_weight: u32) -> Result { - let ceil = if (total_weight as u64 * 7) % 10 == 0 { + let threshold = NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD; + let total_weight = u64::from(total_weight); + let ceil = if (total_weight * threshold) % 10 == 0 { 0 } else { 1 }; - u32::try_from((total_weight as u64 * NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD) / 10 + ceil) - .map_err(|_| { - ChainstateError::InvalidStacksBlock( - "Overflow when computing nakamoto block approval threshold".to_string(), - ) - }) + u32::try_from((total_weight * threshold) / 10 + ceil).map_err(|_| { + ChainstateError::InvalidStacksBlock( + "Overflow when computing nakamoto block approval threshold".to_string(), + ) + }) } /// Make an "empty" header whose block data needs to be filled in. diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3ed642c9cd..8c8575b2d3 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -255,13 +255,28 @@ impl BlockMinerThread { true, self.burnchain.pox_constants.clone(), ) - .expect("FATAL: could not open sortition DB"); + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open sortition DB. Cannot mine! {e:?}" + )) + })?; + let tip = SortitionDB::get_block_snapshot_consensus( sort_db.conn(), &new_block.header.consensus_hash, ) - .expect("FATAL: could not retrieve chain tip") - .expect("FATAL: could not retrieve chain tip"); + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to retrieve chain tip: {:?}", + e + )) + }) + .and_then(|result| { + result.ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure("Failed to retrieve chain tip".into()) + }) + })?; + let reward_cycle = self .burnchain .pox_constants @@ -269,7 +284,11 @@ impl BlockMinerThread { self.burnchain.first_block_height, self.burn_block.block_height, ) - .expect("FATAL: building on a burn block that is before the first burn block"); + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Building on a burn block that is before the first burn block".into(), + ) + })?; let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { Ok(Some(x)) => x, From 0658bb907345ed7d0baf2a091972a0d4e6ebe9c4 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 23 May 2024 13:09:15 -0700 Subject: [PATCH 0108/1400] crc: return err instead of fatal when updating next_bitvec --- .../src/nakamoto_node/sign_coordinator.rs | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 3cf1c6d144..081852d783 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -709,18 +709,18 @@ impl SignCoordinator { let modified_slots = &event.modified_slots.clone(); // Update `next_signers_bitvec` with the slots that were modified in the event - modified_slots.iter().for_each(|chunk| { - if let Ok(slot_id) = chunk.slot_id.try_into() { - match &self.next_signer_bitvec.set(slot_id, true) { - Err(e) => { - warn!("Failed to set bitvec for next signer: {e:?}"); - } - _ => (), - }; - } else { - error!("FATAL: slot_id greater than u16, which should never happen."); - } - }); + for chunk in modified_slots.iter() { + let Ok(slot_id) = chunk.slot_id.try_into() else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Unable to modify next_signer_bitvec: slot_id exceeds u16".into(), + )); + }; + if let Err(e) = &self.next_signer_bitvec.set(slot_id, true) { + return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to set bitvec for next signer: {e:?}" + ))); + }; + } let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); From 74f0cdddde3b9d75ad86306080f9ec5947d30dbd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 23 May 2024 16:57:36 -0400 Subject: [PATCH 0109/1400] chore: address stackerdb audit --- stackslib/src/net/mod.rs | 1 + stackslib/src/net/p2p.rs | 1 + stackslib/src/net/stackerdb/config.rs | 35 ++++++++++- stackslib/src/net/stackerdb/mod.rs | 24 ++++--- stackslib/src/net/stackerdb/tests/config.rs | 70 ++++++++++++++++----- 5 files changed, 103 insertions(+), 28 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index bd064774c5..a80ce8bb74 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2540,6 +2540,7 @@ pub mod test { &mut stacks_node.chainstate, &sortdb, old_stackerdb_configs, + config.connection_opts.num_neighbors, ) .expect("Failed to refresh stackerdb configs"); diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index f853bb795a..b28bd3c4cd 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5363,6 +5363,7 @@ impl PeerNetwork { chainstate, sortdb, stacker_db_configs, + self.connection_opts.num_neighbors, )?; Ok(()) } diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index f2d8521ae4..3d2b7e87cc 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -292,6 +292,7 @@ impl StackerDBConfig { contract_id: &QualifiedContractIdentifier, tip: &StacksBlockId, signers: Vec<(StacksAddress, u32)>, + local_max_neighbors: u64, ) -> Result { let value = chainstate.eval_read_only(burn_dbconn, tip, contract_id, "(stackerdb-get-config)")?; @@ -365,11 +366,12 @@ impl StackerDBConfig { )); } - let max_neighbors = config_tuple + let mut max_neighbors = config_tuple .get("max-neighbors") .expect("FATAL: missing 'max-neighbors'") .clone() .expect_u128()?; + if max_neighbors > usize::MAX as u128 { let reason = format!( "Contract {} stipulates a maximum number of neighbors beyond usize::MAX", @@ -382,6 +384,16 @@ impl StackerDBConfig { )); } + if max_neighbors > u128::from(local_max_neighbors) { + warn!( + "Contract {} stipulates a maximum number of neighbors ({}) beyond locally-configured maximum {}; defaulting to locally-configured maximum", + contract_id, + max_neighbors, + local_max_neighbors, + ); + max_neighbors = u128::from(local_max_neighbors); + } + let hint_replicas_list = config_tuple .get("hint-replicas") .expect("FATAL: missing 'hint-replicas'") @@ -435,7 +447,7 @@ impl StackerDBConfig { )); } - if port < 1024 || port > ((u16::MAX - 1) as u128) { + if port < 1024 || port > u128::from(u16::MAX - 1) { let reason = format!( "Contract {} stipulates a port lower than 1024 or above u16::MAX - 1", contract_id @@ -446,11 +458,20 @@ impl StackerDBConfig { reason, )); } + // NOTE: port is now known to be in range [1024, 65535] let mut pubkey_hash_slice = [0u8; 20]; pubkey_hash_slice.copy_from_slice(&pubkey_hash_bytes[0..20]); let peer_addr = PeerAddress::from_slice(&addr_bytes).expect("FATAL: not 16 bytes"); + if peer_addr.is_in_private_range() { + debug!( + "Ignoring private IP address '{}' in hint-replias", + &peer_addr.to_socketaddr(port as u16) + ); + continue; + } + let naddr = NeighborAddress { addrbytes: peer_addr, port: port as u16, @@ -475,6 +496,7 @@ impl StackerDBConfig { chainstate: &mut StacksChainState, sortition_db: &SortitionDB, contract_id: &QualifiedContractIdentifier, + max_neighbors: u64, ) -> Result { let chain_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortition_db)? @@ -542,7 +564,14 @@ impl StackerDBConfig { // evaluate the contract for these two functions let signers = Self::eval_signer_slots(chainstate, &dbconn, contract_id, &chain_tip_hash)?; - let config = Self::eval_config(chainstate, &dbconn, contract_id, &chain_tip_hash, signers)?; + let config = Self::eval_config( + chainstate, + &dbconn, + contract_id, + &chain_tip_hash, + signers, + max_neighbors, + )?; Ok(config) } } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index da3ffa4555..5774ab4817 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -267,6 +267,7 @@ impl StackerDBs { chainstate: &mut StacksChainState, sortdb: &SortitionDB, stacker_db_configs: HashMap, + num_neighbors: u64, ) -> Result, net_error> { let existing_contract_ids = self.get_stackerdb_contract_ids()?; let mut new_stackerdb_configs = HashMap::new(); @@ -288,15 +289,20 @@ impl StackerDBs { }) } else { // attempt to load the config from the contract itself - StackerDBConfig::from_smart_contract(chainstate, &sortdb, &stackerdb_contract_id) - .unwrap_or_else(|e| { - warn!( - "Failed to load StackerDB config"; - "contract" => %stackerdb_contract_id, - "err" => ?e, - ); - StackerDBConfig::noop() - }) + StackerDBConfig::from_smart_contract( + chainstate, + &sortdb, + &stackerdb_contract_id, + num_neighbors, + ) + .unwrap_or_else(|e| { + warn!( + "Failed to load StackerDB config"; + "contract" => %stackerdb_contract_id, + "err" => ?e, + ); + StackerDBConfig::noop() + }) }; // Create the StackerDB replica if it does not exist already if !existing_contract_ids.contains(&stackerdb_contract_id) { diff --git a/stackslib/src/net/stackerdb/tests/config.rs b/stackslib/src/net/stackerdb/tests/config.rs index 9600ed79a8..a075d7b974 100644 --- a/stackslib/src/net/stackerdb/tests/config.rs +++ b/stackslib/src/net/stackerdb/tests/config.rs @@ -133,7 +133,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -152,7 +152,7 @@ fn test_valid_and_invalid_stackerdb_configs() { write_freq: 4, max_writes: 56, hint_replicas: vec![NeighborAddress { - addrbytes: PeerAddress::from_ipv4(127, 0, 0, 1), + addrbytes: PeerAddress::from_ipv4(142, 150, 80, 100), port: 8901, public_key_hash: Hash160::from_hex("0123456789abcdef0123456789abcdef01234567") .unwrap(), @@ -174,7 +174,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -193,7 +193,7 @@ fn test_valid_and_invalid_stackerdb_configs() { write_freq: 4, max_writes: 56, hint_replicas: vec![NeighborAddress { - addrbytes: PeerAddress::from_ipv4(127, 0, 0, 1), + addrbytes: PeerAddress::from_ipv4(142, 150, 80, 100), port: 8901, public_key_hash: Hash160::from_hex("0123456789abcdef0123456789abcdef01234567") .unwrap(), @@ -212,7 +212,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -234,7 +234,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -256,7 +256,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -278,7 +278,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -300,7 +300,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -322,7 +322,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -344,7 +344,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -366,7 +366,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -388,7 +388,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u18446744073709551617, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u8901, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -432,7 +432,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u1, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -454,7 +454,7 @@ fn test_valid_and_invalid_stackerdb_configs() { max-neighbors: u7, hint-replicas: (list { - addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u127 u0 u0 u1), + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u142 u150 u80 u100), port: u65537, public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 }) @@ -462,6 +462,44 @@ fn test_valid_and_invalid_stackerdb_configs() { "#, None, ), + ( + // valid, but private IP and absurd max neighbors are both handled + r#" + (define-public (stackerdb-get-signer-slots) + (ok (list { signer: 'ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B, num-slots: u3 }))) + + (define-public (stackerdb-get-config) + (ok { + chunk-size: u123, + write-freq: u4, + max-writes: u56, + max-neighbors: u1024, + hint-replicas: (list + { + addr: (list u0 u0 u0 u0 u0 u0 u0 u0 u0 u0 u255 u255 u192 u168 u0 u1), + port: u8901, + public-key-hash: 0x0123456789abcdef0123456789abcdef01234567 + }) + })) + "#, + Some(StackerDBConfig { + chunk_size: 123, + signers: vec![( + StacksAddress { + version: 26, + bytes: Hash160::from_hex("b4fdae98b64b9cd6c9436f3b965558966afe890b") + .unwrap(), + }, + 3, + )], + write_freq: 4, + max_writes: 56, + // no neighbors + hint_replicas: vec![], + // max neighbors is truncated + max_neighbors: 32, + }), + ), ]; for (i, (code, _result)) in testcases.iter().enumerate() { @@ -490,7 +528,7 @@ fn test_valid_and_invalid_stackerdb_configs() { ContractName::try_from(format!("test-{}", i)).unwrap(), ); peer.with_db_state(|sortdb, chainstate, _, _| { - match StackerDBConfig::from_smart_contract(chainstate, sortdb, &contract_id) { + match StackerDBConfig::from_smart_contract(chainstate, sortdb, &contract_id, 32) { Ok(config) => { let expected = result .clone() From 4cb4d15a1ddd71afcc56a9a56e62a82e79d94f36 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 23 May 2024 14:42:29 -0700 Subject: [PATCH 0110/1400] feat: load reward set in gather_signatures instead of `run_miner` to prevent panics --- .../stacks-node/src/nakamoto_node/miner.rs | 86 ++++++++++++------- .../src/nakamoto_node/sign_coordinator.rs | 14 --- 2 files changed, 55 insertions(+), 45 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 09a70b1178..618eed7d6c 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -155,27 +155,6 @@ impl BlockMinerThread { let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) .expect("FATAL: failed to connect to stacker DB"); - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .expect("FATAL: could not open sortition DB"); - - let tip = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - &self.burn_block.consensus_hash, - ) - .expect("FATAL: could not retrieve chain tip") - .expect("FATAL: could not retrieve chain tip"); - - let reward_set = sort_db - .get_preprocessed_reward_set_of(&tip.sortition_id) - .expect("FATAL: Error fetching reward set") - .expect("FATAL: No reward set found for miner") - .known_selected_anchor_block_owned() - .expect("FATAL: No reward set found for miner"); - let mut attempts = 0; // now, actually run this tenure loop { @@ -203,12 +182,11 @@ impl BlockMinerThread { }; if let Some(mut new_block) = new_block { - let signer_signature = match self.gather_signatures( + let (reward_set, signer_signature) = match self.gather_signatures( &mut new_block, self.burn_block.block_height, &mut stackerdbs, &mut attempts, - &reward_set, ) { Ok(x) => x, Err(e) => { @@ -243,6 +221,15 @@ impl BlockMinerThread { self.mined_blocks.push(new_block); } + let Ok(sort_db) = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) else { + error!("Failed to open sortition DB. Will try mining again."); + continue; + }; + let wait_start = Instant::now(); while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); @@ -351,15 +338,14 @@ impl BlockMinerThread { Ok((aggregate_public_key, signature)) } - /// Gather signatures from the signers for the block + /// Gather a list of signatures from the signers for the block fn gather_signatures( &mut self, new_block: &mut NakamotoBlock, burn_block_height: u64, stackerdbs: &mut StackerDBs, attempts: &mut u64, - reward_set: &RewardSet, - ) -> Result, NakamotoNodeError> { + ) -> Result<(RewardSet, Vec), NakamotoNodeError> { let Some(miner_privkey) = self.config.miner.mining_key else { return Err(NakamotoNodeError::MinerConfigurationFailed( "No mining key configured, cannot mine", @@ -370,13 +356,47 @@ impl BlockMinerThread { true, self.burnchain.pox_constants.clone(), ) - .expect("FATAL: could not open sortition DB"); + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open sortition DB. Cannot mine! {e:?}" + )) + })?; + let tip = SortitionDB::get_block_snapshot_consensus( sort_db.conn(), &new_block.header.consensus_hash, ) - .expect("FATAL: could not retrieve chain tip") - .expect("FATAL: could not retrieve chain tip"); + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to retrieve chain tip: {:?}", + e + )) + }) + .and_then(|result| { + result.ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure("Failed to retrieve chain tip".into()) + }) + })?; + + let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { + Ok(Some(x)) => x, + Ok(None) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "No reward set found. Cannot initialize miner coordinator.".into(), + )); + } + Err(e) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" + ))); + } + }; + + let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Current reward cycle did not select a reward set. Cannot mine!".into(), + )); + }; let reward_cycle = self .burnchain @@ -385,7 +405,11 @@ impl BlockMinerThread { self.burnchain.first_block_height, self.burn_block.block_height, ) - .expect("FATAL: building on a burn block that is before the first burn block"); + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Building on a burn block that is before the first burn block".into(), + ) + })?; let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); let mut coordinator = SignCoordinator::new( @@ -414,7 +438,7 @@ impl BlockMinerThread { &self.globals.counters, )?; - return Ok(signature); + return Ok((reward_set, signature)); } fn get_stackerdb_contract_and_slots( diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 081852d783..b0b4463d1d 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -708,20 +708,6 @@ impl SignCoordinator { let modified_slots = &event.modified_slots.clone(); - // Update `next_signers_bitvec` with the slots that were modified in the event - for chunk in modified_slots.iter() { - let Ok(slot_id) = chunk.slot_id.try_into() else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Unable to modify next_signer_bitvec: slot_id exceeds u16".into(), - )); - }; - if let Err(e) = &self.next_signer_bitvec.set(slot_id, true) { - return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to set bitvec for next signer: {e:?}" - ))); - }; - } - let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); }) else { From 2275aa877d7524b83929d63ea3f894d37a1e4adb Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 23 May 2024 14:52:32 -0700 Subject: [PATCH 0111/1400] feat: remove aggregate key related stuff from sign coordinator --- .../stacks-node/src/nakamoto_node/miner.rs | 68 +++++-------------- .../src/nakamoto_node/sign_coordinator.rs | 26 +------ 2 files changed, 18 insertions(+), 76 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index f92f3fc60a..8f23bfc82d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -280,19 +280,6 @@ impl BlockMinerThread { }) })?; - let reward_cycle = self - .burnchain - .pox_constants - .block_height_to_reward_cycle( - self.burnchain.first_block_height, - self.burn_block.block_height, - ) - .ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Building on a burn block that is before the first burn block".into(), - ) - })?; - let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { Ok(Some(x)) => x, Ok(None) => { @@ -328,19 +315,14 @@ impl BlockMinerThread { }; let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); - let mut coordinator = SignCoordinator::new( - &reward_set, - reward_cycle, - miner_privkey_as_scalar, - Some(aggregate_public_key), - &stackerdbs, - &self.config, - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to initialize the signing coordinator. Cannot mine! {e:?}" - )) - })?; + let mut coordinator = + SignCoordinator::new(&reward_set, miner_privkey_as_scalar, &self.config).map_err( + |e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to initialize the signing coordinator. Cannot mine! {e:?}" + )) + }, + )?; *attempts += 1; let signature = coordinator.begin_sign_v1( @@ -417,33 +399,15 @@ impl BlockMinerThread { )); }; - let reward_cycle = self - .burnchain - .pox_constants - .block_height_to_reward_cycle( - self.burnchain.first_block_height, - self.burn_block.block_height, - ) - .ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Building on a burn block that is before the first burn block".into(), - ) - })?; - let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); - let mut coordinator = SignCoordinator::new( - &reward_set, - reward_cycle, - miner_privkey_as_scalar, - None, - &stackerdbs, - &self.config, - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to initialize the signing coordinator. Cannot mine! {e:?}" - )) - })?; + let mut coordinator = + SignCoordinator::new(&reward_set, miner_privkey_as_scalar, &self.config).map_err( + |e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to initialize the signing coordinator. Cannot mine! {e:?}" + )) + }, + )?; *attempts += 1; let signature = coordinator.begin_sign_v0( diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b0b4463d1d..0db0ee9e04 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -198,10 +198,7 @@ impl SignCoordinator { /// * `aggregate_public_key` - the active aggregate key for this cycle pub fn new( reward_set: &RewardSet, - reward_cycle: u64, message_key: Scalar, - aggregate_public_key: Option, - stackerdb_conn: &StackerDBs, config: &Config, // v1: bool, ) -> Result { @@ -281,7 +278,7 @@ impl SignCoordinator { }) .collect::, ChainstateError>>()?; - let mut coordinator: FireCoordinator = FireCoordinator::new(coord_config); + let coordinator: FireCoordinator = FireCoordinator::new(coord_config); #[cfg(test)] { // In test mode, short-circuit spinning up the SignCoordinator if the TEST_SIGNING @@ -294,7 +291,7 @@ impl SignCoordinator { if replaced_other { warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); } - let mut sign_coordinator = Self { + let sign_coordinator = Self { coordinator, message_key, receiver: Some(receiver), @@ -306,28 +303,9 @@ impl SignCoordinator { signer_entries: signer_public_keys, weight_threshold: threshold, }; - if let Some(aggregate_public_key) = aggregate_public_key { - sign_coordinator - .coordinator - .set_aggregate_public_key(Some(aggregate_public_key)); - } return Ok(sign_coordinator); } } - if let Some(aggregate_public_key) = aggregate_public_key { - let party_polynomials = get_signer_commitments( - is_mainnet, - reward_set_signers.as_slice(), - stackerdb_conn, - reward_cycle, - &aggregate_public_key, - )?; - if let Err(e) = coordinator - .set_key_and_party_polynomials(aggregate_public_key.clone(), party_polynomials) - { - warn!("Failed to set a valid set of party polynomials"; "error" => %e); - }; - } let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); if replaced_other { From edb3aef02f9474fe5c76595ace7dcf5fb642c17d Mon Sep 17 00:00:00 2001 From: BowTiedRadone <92028479+BowTiedRadone@users.noreply.github.com> Date: Fri, 24 May 2024 12:09:09 +0300 Subject: [PATCH 0112/1400] Fix typo Co-authored-by: Nikos Baxevanis --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index fe1f335176..71d29086ef 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -125,7 +125,7 @@ it("statefully interacts with PoX-4", async () => { delegatedTo: "", delegatedMaxAmount: 0, // We initialize delegatedUntilBurnHt to 0. It will be updated - // after successful delegate-stx calls. It's value will be either + // after successful delegate-stx calls. Its value will be either // the unwrapped until-burn-ht uint passed to the delegate-stx, // or undefined for indefinite delegations. delegatedUntilBurnHt: 0, From 7405ab080c49d8abacbc9cb72364c9754c04032a Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 24 May 2024 14:08:26 +0300 Subject: [PATCH 0113/1400] =?UTF-8?q?Add=20unhappy=20path=20for=C2=A0`revo?= =?UTF-8?q?ke-delegate-stx`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The added unhappy path tries to call revoke-delegate-stx with an address that is not delegating. --- .../tests/pox-4/err_Commands.ts | 30 +++++++++ .../pox-4/pox_RevokeDelegateStxCommand_Err.ts | 66 +++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 08a911e68a..a7e05dc5c7 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -9,10 +9,12 @@ import { import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { Simnet } from "@hirosystems/clarinet-sdk"; +import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, ERR_STACKING_ALREADY_DELEGATED: 20, + ERR_DELEGATION_ALREADY_REVOKED: 34, }; export function ErrCommands( @@ -249,6 +251,34 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ) ), + // RevokeDelegateStxCommand_Err_Delegation_Already_Revoked + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + }).map(( + r: { + wallet: Wallet; + }, + ) => + new RevokeDelegateStxCommand_Err( + r.wallet, + function ( + this: RevokeDelegateStxCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + !stacker.hasDelegated + ) { + model.trackCommandRun( + "RevokeDelegateStxCommand_Err_Delegation_Already_Revoked", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_DELEGATION_ALREADY_REVOKED, + ) + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts new file mode 100644 index 0000000000..60b3439e8e --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts @@ -0,0 +1,66 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +type CheckFunc = ( + this: RevokeDelegateStxCommand_Err, + model: Readonly, +) => boolean; + +export class RevokeDelegateStxCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `RevokeDelegateStxCommand_Err` to revoke delegate uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor(wallet: Wallet, checkFunc: CheckFunc, errorCode: number) { + this.wallet = wallet; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + // Act + const revokeDelegateStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "revoke-delegate-stx", + [], + this.wallet.stxAddress, + ); + + // Assert + expect(revokeDelegateStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "revoke-delegate-stx", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.stxAddress} revoke-delegate-stx`; + } +} From 10d0b9b385b8752db2c66e405dc7e11aded6cd65 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 24 May 2024 14:16:45 +0300 Subject: [PATCH 0114/1400] Remove `RevokeDelegateStxCommand_Err` from statistics The command run tracking was moved inside the command's check function. No need to report the run using the file name anymore. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index d5f7245c25..654c500b6b 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -111,7 +111,8 @@ it("statefully interacts with PoX-4", async () => { file.startsWith("pox_") && file.endsWith(".ts") && file !== "pox_CommandModel.ts" && file !== "pox_Commands.ts" && file !== "pox_StackStxAuthCommand_Err.ts" && - file !== "pox_StackStxSigCommand_Err.ts" + file !== "pox_StackStxSigCommand_Err.ts" && + file !== "pox_RevokeDelegateStxCommand_Err.ts" ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. // This is the initial state of the model. From 0c83016919b30b81984dcdefaead22d762eac7e8 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 24 May 2024 14:21:58 +0300 Subject: [PATCH 0115/1400] =?UTF-8?q?Add=20unhappy=20path=20for=C2=A0`dele?= =?UTF-8?q?gate-stx`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The added unhappy path tries to call delegate-stx with an address that is already delegating. --- .../tests/pox-4/err_Commands.ts | 39 +++++++ .../tests/pox-4/pox_DelegateStxCommand_Err.ts | 104 ++++++++++++++++++ 2 files changed, 143 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index a7e05dc5c7..6a6c3b7028 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -10,6 +10,7 @@ import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { Simnet } from "@hirosystems/clarinet-sdk"; import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err"; +import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, @@ -279,6 +280,44 @@ export function ErrCommands( POX_4_ERRORS.ERR_DELEGATION_ALREADY_REVOKED, ) ), + // DelegateStxCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + delegateTo: fc.constantFrom(...wallets.values()), + untilBurnHt: fc.integer({ min: 1 }), + amount: fc.bigInt({ min: 0n, max: 100_000_000_000_000n }), + }) + .map(( + r: { + wallet: Wallet; + delegateTo: Wallet; + untilBurnHt: number; + amount: bigint; + }, + ) => + new DelegateStxCommand_Err( + r.wallet, + r.delegateTo, + r.untilBurnHt, + r.amount, + function ( + this: DelegateStxCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.hasDelegated + ) { + model.trackCommandRun( + "DelegateStxCommand_Err_Stacking_Already_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, + ) + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand_Err.ts new file mode 100644 index 0000000000..138d99265f --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand_Err.ts @@ -0,0 +1,104 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +type CheckFunc = ( + this: DelegateStxCommand_Err, + model: Readonly, +) => boolean; + +export class DelegateStxCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly delegateTo: Wallet; + readonly untilBurnHt: number; + readonly amount: bigint; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `DelegateStxCommand_Err` to delegate uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param delegateTo - Represents the Delegatee's STX address. + * @param untilBurnHt - The burn block height until the delegation is valid. + * @param amount - The maximum amount the `Stacker` delegates the `Delegatee` + * to stack on his behalf. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + delegateTo: Wallet, + untilBurnHt: number, + amount: bigint, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.delegateTo = delegateTo; + this.untilBurnHt = untilBurnHt; + this.amount = amount; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + // The amount of uSTX delegated by the Stacker to the Delegatee. + // Even if there are no constraints about the delegated amount, + // it will be checked in the future, when calling delegate-stack-stx. + const amountUstx = Number(this.amount); + + // Act + const delegateStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stx", + [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (delegate-to principal) + Cl.principal(this.delegateTo.stxAddress), + // (until-burn-ht (optional uint)) + Cl.some(Cl.uint(this.untilBurnHt)), + // (pox-addr (optional { version: (buff 1), hashbytes: (buff 32) })) + Cl.some(poxAddressToTuple(this.delegateTo.btcAddress)), + ], + this.wallet.stxAddress, + ); + + // Assert + expect(delegateStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "delegate-stx", + "amount", + amountUstx.toString(), + "delegated to", + this.delegateTo.label, + "until", + this.untilBurnHt.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} delegate-stx to ${this.delegateTo.label} until burn ht ${this.untilBurnHt}`; + } +} From 48b9032e6e600d85a9fe8dfe37a0aa447aa1ddff Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 24 May 2024 14:24:20 +0300 Subject: [PATCH 0116/1400] Remove `DelegateStxCommand_Err` from statistics The command run tracking was moved inside the command's check function. No need to report the run using the file name anymore. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index 654c500b6b..04a596bbb6 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -112,7 +112,8 @@ it("statefully interacts with PoX-4", async () => { file !== "pox_CommandModel.ts" && file !== "pox_Commands.ts" && file !== "pox_StackStxAuthCommand_Err.ts" && file !== "pox_StackStxSigCommand_Err.ts" && - file !== "pox_RevokeDelegateStxCommand_Err.ts" + file !== "pox_RevokeDelegateStxCommand_Err.ts" && + file !== "pox_DelegateStxCommand_Err.ts" ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. // This is the initial state of the model. From b38c224b2e0c99e45a8364287aa3d10e18cc2fed Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 24 May 2024 16:26:27 +0300 Subject: [PATCH 0117/1400] Use simnet `mineBlock` inside `StackStxAuthCommand_Err` This commit: - includes the authorization and the function call in the same block. It is needed because otherwise, it can result in issuing the authorization for the wrong reward cycle. - updates the passed start-burn-ht param, different from the StackStxSigCommand. If not doing it like this, the test fails when the command is called at the limit between 2 reward cycles. - removes unnecessary operations: retrieving the reward cycle, retrieving the unlockBurnHeight. --- .../pox-4/pox_StackStxAuthCommand_Err.ts | 102 +++++++----------- 1 file changed, 39 insertions(+), 63 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts index 6889e89917..37f32a5458 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts @@ -6,15 +6,10 @@ import { Wallet, } from "./pox_CommandModel.ts"; import { poxAddressToTuple } from "@stacks/stacking"; -import { assert, expect } from "vitest"; -import { - Cl, - ClarityType, - ClarityValue, - cvToValue, - isClarityType, -} from "@stacks/transactions"; +import { expect } from "vitest"; +import { Cl, ClarityValue, cvToValue } from "@stacks/transactions"; import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; type CheckFunc = ( this: StackStxAuthCommand_Err, @@ -66,53 +61,50 @@ export class StackStxAuthCommand_Err implements PoxCommand { // in the given reward cycle multiplied by the margin, which is a randomly // generated number passed to the constructor of this class. const maxAmount = model.stackingMinimum * this.margin; + const amountUstx = maxAmount; - const { result: setAuthorization } = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "set-signer-key-authorization", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - poxAddressToTuple(this.wallet.btcAddress), - // (period uint) - Cl.uint(this.period), - // (reward-cycle uint) - Cl.uint(currentRewCycle), - // (topic (string-ascii 14)) - Cl.stringAscii("stack-stx"), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.wallet.signerPubKey), - // (allowed bool) - Cl.bool(true), - // (max-amount uint) - Cl.uint(maxAmount), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.wallet.stxAddress, - ); - - expect(setAuthorization).toBeOk(Cl.bool(true)); const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); const burnBlockHeight = Number( cvToValue(burnBlockHeightCV as ClarityValue), ); - // The amount of uSTX to be locked in the reward cycle. For this test, we - // will use the maximum amount of uSTX that can be used (per tx) with this - // signer key. - const amountUstx = maxAmount; - // Act - const stackStx = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "stack-stx", - [ + + // Include the authorization and the `stack-stx` transactions in a single + // block. This way we ensure both the authorization and the stack-stx + // transactions are called during the same reward cycle, so the authorization + // currentRewCycle param is relevant for the upcoming stack-stx call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.period), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-stx"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn("ST000000000000000000002AMW42H.pox-4", "stack-stx", [ // (amount-ustx uint) Cl.uint(amountUstx), // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) poxAddressToTuple(this.wallet.btcAddress), // (start-burn-ht uint) - Cl.uint(burnBlockHeight + 1), + Cl.uint(burnBlockHeight), // (lock-period uint) Cl.uint(this.period), // (signer-sig (optional (buff 65))) @@ -123,28 +115,12 @@ export class StackStxAuthCommand_Err implements PoxCommand { Cl.uint(maxAmount), // (auth-id uint) Cl.uint(this.authId), - ], - this.wallet.stxAddress, - ); - - const { result: rewardCycle } = real.network.callReadOnlyFn( - "ST000000000000000000002AMW42H.pox-4", - "burn-height-to-reward-cycle", - [Cl.uint(burnBlockHeight)], - this.wallet.stxAddress, - ); - assert(isClarityType(rewardCycle, ClarityType.UInt)); - - const { result: unlockBurnHeight } = real.network.callReadOnlyFn( - "ST000000000000000000002AMW42H.pox-4", - "reward-cycle-to-burn-height", - [Cl.uint(Number(rewardCycle.value) + this.period + 1)], - this.wallet.stxAddress, - ); - assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); + ], this.wallet.stxAddress), + ]); // Assert - expect(stackStx.result).toBeErr(Cl.int(this.errorCode)); + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr(Cl.int(this.errorCode)); // Log to console for debugging purposes. This is not necessary for the // test to pass but it is useful for debugging and eyeballing the test. From 3dccf4023a9a9043b5894ce22bc0dcbfd377cfd3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 14:48:47 -0400 Subject: [PATCH 0118/1400] chore: address PR feedback --- .github/workflows/bitcoin-tests.yml | 3 +- stackslib/src/chainstate/burn/db/sortdb.rs | 227 ++++++++---------- stackslib/src/net/p2p.rs | 81 ++++--- .../stacks-node/src/nakamoto_node/miner.rs | 14 +- 4 files changed, 152 insertions(+), 173 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 87fe5a8f09..8e6997b5e1 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -85,7 +85,8 @@ jobs: - tests::signer::v0::block_proposal_rejection - tests::signer::v1::dkg - tests::signer::v1::sign_request_rejected - - tests::signer::v1::filter_bad_transactions + # TODO: enable these once v1 signer is fixed + # - tests::signer::v1::filter_bad_transactions - tests::signer::v1::delayed_dkg # TODO: enable these once v1 signer is fixed # - tests::signer::v1::mine_2_nakamoto_reward_cycles diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 2916f3de3c..8f416b4c39 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3548,128 +3548,45 @@ impl SortitionDB { Ok(()) } - /// Get the prepare phase start sortition ID of a reward cycle - fn inner_get_prepare_phase_start_sortition_id_for_reward_cycle( - index_conn: &SortitionDBConn, - pox_constants: &PoxConstants, - first_block_height: u64, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result { - let prepare_phase_start = pox_constants - .reward_cycle_to_block_height(first_block_height, reward_cycle_id) - .saturating_sub(pox_constants.prepare_length.into()); - - let first_sortition = get_ancestor_sort_id(index_conn, prepare_phase_start, tip)? - .ok_or_else(|| { - error!( - "Could not find prepare phase start ancestor while fetching reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_start_height" => prepare_phase_start - ); - db_error::NotFoundError - })?; - Ok(first_sortition) - } - + /// Wrapper around SortitionDBConn::get_prepare_phase_start_sortition_id_for_reward_cycle(). + /// See that method for details. pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( &self, tip: &SortitionId, reward_cycle_id: u64, ) -> Result { - Self::inner_get_prepare_phase_start_sortition_id_for_reward_cycle( - &self.index_conn(), - &self.pox_constants, - self.first_block_height, - tip, - reward_cycle_id, - ) - } - - /// Get the reward set for a reward cycle, given the reward cycle tip. - /// Return the reward cycle info for this reward cycle - fn inner_get_preprocessed_reward_set_for_reward_cycle( - index_conn: &SortitionDBConn, - pox_constants: &PoxConstants, - first_block_height: u64, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result<(RewardCycleInfo, SortitionId), db_error> { - let first_sortition = Self::inner_get_prepare_phase_start_sortition_id_for_reward_cycle( - index_conn, - pox_constants, - first_block_height, - tip, - reward_cycle_id, - )?; - info!("Fetching preprocessed reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_start_sortition_id" => %first_sortition, - ); - - Ok(( - Self::get_preprocessed_reward_set(index_conn, &first_sortition)? - .ok_or(db_error::NotFoundError)?, - first_sortition, - )) + self.index_conn() + .get_prepare_phase_start_sortition_id_for_reward_cycle( + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) } + /// Wrapper around SortitionDBConn::get_preprocessed_reward_set_for_reward_cycle(). + /// See that method for details. pub fn get_preprocessed_reward_set_for_reward_cycle( &self, tip: &SortitionId, reward_cycle_id: u64, ) -> Result<(RewardCycleInfo, SortitionId), db_error> { - Self::inner_get_preprocessed_reward_set_for_reward_cycle( - &self.index_conn(), - &self.pox_constants, - self.first_block_height, - tip, - reward_cycle_id, - ) - } - - /// Figure out the reward cycle for `tip` and lookup the preprocessed - /// reward set (if it exists) for the active reward cycle during `tip` - fn inner_get_preprocessed_reward_set_of( - index_conn: &SortitionDBConn, - pox_constants: &PoxConstants, - first_block_height: u64, - tip: &SortitionId, - ) -> Result { - let tip_sn = SortitionDB::get_block_snapshot(index_conn, tip)?.ok_or_else(|| { - error!( - "Could not find snapshot for sortition while fetching reward set"; - "tip_sortition_id" => %tip, - ); - db_error::NotFoundError - })?; - - // NOTE: the .saturating_sub(1) is necessary because the reward set is calculated in epoch - // 2.5 and lower at reward cycle index 1, not 0. This correction ensures that the last - // block is checked against the signers who were active just before the new reward set is - // calculated. - let reward_cycle_id = pox_constants - .block_height_to_reward_cycle(first_block_height, tip_sn.block_height.saturating_sub(1)) - .expect("FATAL: stored snapshot with block height < first_block_height"); - - Self::inner_get_preprocessed_reward_set_for_reward_cycle( - index_conn, - pox_constants, - first_block_height, - tip, - reward_cycle_id, - ) - .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) + self.index_conn() + .get_preprocessed_reward_set_for_reward_cycle( + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) } + /// Wrapper around SortitionDBConn::get_preprocessed_reward_set_of(). + /// See that method for details. pub fn get_preprocessed_reward_set_of( &self, tip: &SortitionId, ) -> Result { - Ok(Self::inner_get_preprocessed_reward_set_of( - &self.index_conn(), + Ok(self.index_conn().get_preprocessed_reward_set_of( &self.pox_constants, self.first_block_height, tip, @@ -3695,6 +3612,8 @@ impl SortitionDB { Ok(rc_info) } + /// Get the number of entries in the reward set, given a sortition ID within the reward cycle + /// for which this set is active. pub fn get_preprocessed_reward_set_size(&self, tip: &SortitionId) -> Option { let Ok(reward_info) = &self.get_preprocessed_reward_set_of(&tip) else { return None; @@ -3922,44 +3841,98 @@ impl<'a> SortitionDBConn<'a> { Ok(pox_addrs) } - pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( + /// Figure out the reward cycle for `tip` and lookup the preprocessed + /// reward set (if it exists) for the active reward cycle during `tip`. + /// Returns the reward cycle info on success. + /// Returns Error on DB errors, as well as if the reward set is not yet processed. + pub fn get_preprocessed_reward_set_of( &self, + pox_constants: &PoxConstants, + first_block_height: u64, tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result { - SortitionDB::inner_get_prepare_phase_start_sortition_id_for_reward_cycle( - self, - &self.context.pox_constants, - self.context.first_block_height, + ) -> Result { + let tip_sn = SortitionDB::get_block_snapshot(self, tip)?.ok_or_else(|| { + error!( + "Could not find snapshot for sortition while fetching reward set"; + "tip_sortition_id" => %tip, + ); + db_error::NotFoundError + })?; + + // NOTE: the .saturating_sub(1) is necessary because the reward set is calculated in epoch + // 2.5 and lower at reward cycle index 1, not 0. This correction ensures that the last + // block is checked against the signers who were active just before the new reward set is + // calculated. + let reward_cycle_id = pox_constants + .block_height_to_reward_cycle(first_block_height, tip_sn.block_height.saturating_sub(1)) + .expect("FATAL: stored snapshot with block height < first_block_height"); + + self.get_preprocessed_reward_set_for_reward_cycle( + pox_constants, + first_block_height, tip, reward_cycle_id, ) + .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) } - pub fn get_preprocessed_reward_set_for_reward_cycle( + /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare + /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned + /// sortition will be in the preceding reward cycle) + pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( &self, + pox_constants: &PoxConstants, + first_block_height: u64, tip: &SortitionId, reward_cycle_id: u64, - ) -> Result<(RewardCycleInfo, SortitionId), db_error> { - SortitionDB::inner_get_preprocessed_reward_set_for_reward_cycle( - self, - &self.context.pox_constants, - self.context.first_block_height, - tip, - reward_cycle_id, - ) + ) -> Result { + let prepare_phase_start = pox_constants + .reward_cycle_to_block_height(first_block_height, reward_cycle_id) + .saturating_sub(pox_constants.prepare_length.into()); + + let first_sortition = + get_ancestor_sort_id(self, prepare_phase_start, tip)?.ok_or_else(|| { + error!( + "Could not find prepare phase start ancestor while fetching reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_start_height" => prepare_phase_start + ); + db_error::NotFoundError + })?; + Ok(first_sortition) } - pub fn get_preprocessed_reward_set_of( + /// Get the reward set for a reward cycle, given the reward cycle tip. The reward cycle info + /// will be returned for the reward set in which `tip` belongs (i.e. the reward set calculated + /// in the preceding reward cycle). + /// Return the reward cycle info for this reward cycle, as well as the first prepare-phase + /// sortition ID under which this reward cycle info is stored. + /// Returns Error on DB Error, or if the reward cycle info is not processed yet. + pub fn get_preprocessed_reward_set_for_reward_cycle( &self, + pox_constants: &PoxConstants, + first_block_height: u64, tip: &SortitionId, - ) -> Result { - SortitionDB::inner_get_preprocessed_reward_set_of( - self, - &self.context.pox_constants, - self.context.first_block_height, + reward_cycle_id: u64, + ) -> Result<(RewardCycleInfo, SortitionId), db_error> { + let first_sortition = self.get_prepare_phase_start_sortition_id_for_reward_cycle( + pox_constants, + first_block_height, tip, - ) + reward_cycle_id, + )?; + info!("Fetching preprocessed reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_start_sortition_id" => %first_sortition, + ); + + Ok(( + SortitionDB::get_preprocessed_reward_set(self, &first_sortition)? + .ok_or(db_error::NotFoundError)?, + first_sortition, + )) } } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 821d4dbc1d..d1168abe94 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -261,6 +261,10 @@ pub struct PeerNetwork { /// The reward sets of the current and past reward cycle. /// Needed to validate blocks, which are signed by a threshold of stackers pub current_reward_sets: BTreeMap, + /// The sortition IDs that began the prepare-phases for given reward cycles. This is used to + /// determine whether or not the reward cycle info in `current_reward_sets` is still valid -- a + /// burnchain fork may invalidate them, so the code must check that the sortition ID for the + /// start of the prepare-phase is still canonical. pub current_reward_set_ids: BTreeMap, // information about the state of the network's anchor blocks @@ -5427,6 +5431,47 @@ impl PeerNetwork { )) } + /// Clear out old reward cycles + fn free_old_reward_cycles( + &mut self, + sortdb: &SortitionDB, + tip_sortition_id: &SortitionId, + prev_rc: u64, + ) { + if self.current_reward_sets.len() > 3 { + self.current_reward_sets.retain(|old_rc, _| { + if (*old_rc).saturating_add(1) < prev_rc { + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + } + let Some(old_sortition_id) = self.current_reward_set_ids.get(old_rc) else { + // shouldn't happen + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + }; + let Ok(prepare_phase_sort_id) = sortdb + .get_prepare_phase_start_sortition_id_for_reward_cycle( + &tip_sortition_id, + *old_rc, + ) + else { + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + }; + if prepare_phase_sort_id != *old_sortition_id { + // non-canonical reward cycle info + self.current_reward_set_ids.remove(old_rc); + test_debug!("Drop reward cycle info for cycle {}", old_rc); + return false; + } + true + }); + } + } + /// Refresh our view of the last two reward cycles fn refresh_reward_cycles( &mut self, @@ -5469,11 +5514,12 @@ impl PeerNetwork { }) else { // NOTE: this should never be reached + error!("Unreachable code (but not panicking): no reward cycle info for reward cycle {}", rc); continue; }; if !reward_cycle_info.is_reward_info_known() { // haven't yet processed the anchor block, so don't store - test_debug!("Reward cycle info for cycle {} at sortition {} expects the PoX anchor block, so will not cache", rc, &reward_cycle_sort_id); + debug!("Reward cycle info for cycle {} at sortition {} expects the PoX anchor block, so will not cache", rc, &reward_cycle_sort_id); continue; } @@ -5488,38 +5534,7 @@ impl PeerNetwork { } // free memory - if self.current_reward_sets.len() > 3 { - self.current_reward_sets.retain(|old_rc, _| { - if (*old_rc).saturating_add(1) < prev_rc { - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - } - let Some(old_sortition_id) = self.current_reward_set_ids.get(old_rc) else { - // shouldn't happen - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - }; - let Ok(prepare_phase_sort_id) = sortdb - .get_prepare_phase_start_sortition_id_for_reward_cycle( - &tip_sn.sortition_id, - *old_rc, - ) - else { - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - }; - if prepare_phase_sort_id != *old_sortition_id { - // non-canonical reward cycle info - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - } - true - }); - } + self.free_old_reward_cycles(sortdb, &tip_sn.sortition_id, prev_rc); Ok(()) } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a938c4b7be..929ed681c7 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -42,8 +42,7 @@ use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::VRFProof; -use wsts::curve::ecdsa; -use wsts::curve::point::{Compressed, Point}; +use wsts::curve::point::Point; use wsts::curve::scalar::Scalar; use super::relayer::RelayerThread; @@ -288,16 +287,7 @@ impl BlockMinerThread { }; // NOTE: this is a placeholder until the API can be fixed - let aggregate_public_key = { - let key_bytes = [ - 0x03, 0xd3, 0xe1, 0x5a, 0x36, 0xf3, 0x2a, 0x9e, 0x71, 0x31, 0x7f, 0xcb, 0x4a, 0x20, - 0x1b, 0x0c, 0x08, 0xb3, 0xbc, 0xfb, 0xdc, 0x8a, 0xee, 0x2e, 0xe4, 0xd2, 0x69, 0x23, - 0x00, 0x06, 0xb1, 0xa0, 0xcb, - ]; - let ecdsa_pk = ecdsa::PublicKey::try_from(key_bytes.as_slice()).unwrap(); - Point::try_from(&Compressed::from(ecdsa_pk.to_bytes())).unwrap() - }; - + let aggregate_public_key = Point::new(); let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); let mut coordinator = SignCoordinator::new( &reward_set, From 3692d5bc62bdf1ad3795efea694c71a23de31201 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:28:35 -0400 Subject: [PATCH 0119/1400] fix: don't send more than MAX_PAYLOAD_LEN bytes --- stackslib/src/net/api/gettenure.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/api/gettenure.rs b/stackslib/src/net/api/gettenure.rs index c3eb4493fe..24c3c87d71 100644 --- a/stackslib/src/net/api/gettenure.rs +++ b/stackslib/src/net/api/gettenure.rs @@ -19,7 +19,7 @@ use std::{fs, io}; use regex::{Captures, Regex}; use serde::de::Error as de_Error; -use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::codec::{StacksMessageCodec, MAX_PAYLOAD_LEN}; use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; use stacks_common::types::net::PeerHost; use stacks_common::util::hash::to_hex; @@ -46,7 +46,7 @@ pub struct RPCNakamotoTenureRequestHandler { /// Block to start streaming from. It and its ancestors will be incrementally streamed until one of /// hte following happens: /// * we reach the first block in the tenure - /// * we would exceed MAX_MESSAGE_LEN bytes transmitted if we started sending the next block + /// * we would exceed MAX_PAYLOAD_LEN bytes transmitted if we started sending the next block pub block_id: Option, /// What's the final block ID to stream from? /// Passed as `stop=` query parameter @@ -132,7 +132,7 @@ impl NakamotoTenureStream { self.total_sent = self .total_sent .saturating_add(self.block_stream.total_bytes); - if self.total_sent.saturating_add(parent_size) > MAX_MESSAGE_LEN.into() { + if self.total_sent.saturating_add(parent_size) > MAX_PAYLOAD_LEN.into() { // out of space to send this return Ok(false); } @@ -284,7 +284,7 @@ impl HttpResponse for RPCNakamotoTenureRequestHandler { preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + let bytes = parse_bytes(preamble, body, MAX_PAYLOAD_LEN.into())?; Ok(HttpResponsePayload::Bytes(bytes)) } } From 9478e91c3057e212531f1131baa31af746c2befb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:29:13 -0400 Subject: [PATCH 0120/1400] fix: clear peers after finished downloaders, since it'll be more efficient --- stackslib/src/net/download/nakamoto/tenure_downloader_set.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 357b588e8a..468a0cf6a5 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -431,8 +431,8 @@ impl NakamotoTenureDownloaderSet { self.num_scheduled_downloaders() ); - self.clear_available_peers(); self.clear_finished_downloaders(); + self.clear_available_peers(); self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); while self.inflight() < count { let Some(ch) = schedule.front() else { From 0659e0ed27de7d28a735ec5cec8dbcb2fa891222 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:29:39 -0400 Subject: [PATCH 0121/1400] fix: avoid a potentially infinite loop by using .retain() to select scheduled downloads, and fix a last-block-height calculation to ensure that it's always one more than the highest desired block height --- .../nakamoto/download_state_machine.rs | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 77cf64dba6..71083b02b2 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -269,8 +269,7 @@ impl NakamotoDownloadStateMachine { .pox_constants .reward_cycle_to_block_height(sortdb.first_block_height, tip_rc.saturating_add(1)) .saturating_sub(1) - .min(tip.block_height) - .saturating_add(1); + .min(tip.block_height.saturating_add(1)); test_debug!( "Load tip sortitions between {} and {} (loaded_so_far = {})", @@ -1229,6 +1228,8 @@ impl NakamotoDownloadStateMachine { .any(|(_, available)| available.contains_key(&wt.tenure_id_consensus_hash)); if is_available && !wt.processed { + // a tenure is available but not yet processed, so we can't yet transition to + // fetching unconfirmed tenures (we'd have no way to validate them). return false; } } @@ -1294,14 +1295,16 @@ impl NakamotoDownloadStateMachine { count: usize, downloaders: &mut HashMap, highest_processed_block_id: Option, - ) { - while downloaders.len() < count { - let Some(naddr) = schedule.front() else { - break; - }; + ) -> usize { + let mut added = 0; + schedule.retain(|naddr| { if downloaders.contains_key(naddr) { - continue; + return true; + } + if added >= count { + return true; } + let unconfirmed_tenure_download = NakamotoUnconfirmedTenureDownloader::new( naddr.clone(), highest_processed_block_id.clone(), @@ -1309,8 +1312,10 @@ impl NakamotoDownloadStateMachine { test_debug!("Request unconfirmed tenure state from neighbor {}", &naddr); downloaders.insert(naddr.clone(), unconfirmed_tenure_download); - schedule.pop_front(); - } + added += 1; + false + }); + added } /// Update our unconfirmed tenure download state machines From ba4b2911febcb23f992962f6674ef0995b6c56a3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:30:25 -0400 Subject: [PATCH 0122/1400] fix: only return blocks that we have validated --- .../nakamoto/tenure_downloader_unconfirmed.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index 4c48a5762f..e53ba5c2a2 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -311,7 +311,7 @@ impl NakamotoUnconfirmedTenureDownloader { let Some(Some(unconfirmed_aggregate_public_key)) = agg_pubkeys.get(&tenure_rc).cloned() else { warn!( - "No aggregate public key for confirmed tenure {} (rc {})", + "No aggregate public key for unconfirmed tenure {} (rc {})", &local_tenure_sn.consensus_hash, tenure_rc ); return Err(NetError::InvalidState); @@ -447,6 +447,7 @@ impl NakamotoUnconfirmedTenureDownloader { // If there's a tenure-start block, it must be last. let mut expected_block_id = last_block_id; let mut finished_download = false; + let mut last_block_index = None; for (cnt, block) in tenure_blocks.iter().enumerate() { if &block.header.block_id() != expected_block_id { warn!("Unexpected Nakamoto block -- not part of tenure"; @@ -493,6 +494,7 @@ impl NakamotoUnconfirmedTenureDownloader { } finished_download = true; + last_block_index = Some(cnt); break; } @@ -501,7 +503,9 @@ impl NakamotoUnconfirmedTenureDownloader { if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { if expected_block_id == highest_processed_block_id { // got all the blocks we asked for + debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); finished_download = true; + last_block_index = Some(cnt); break; } } @@ -511,15 +515,22 @@ impl NakamotoUnconfirmedTenureDownloader { if let Some(highest_processed_block_height) = self.highest_processed_block_height.as_ref() { - if &block.header.chain_length < highest_processed_block_height { + if &block.header.chain_length <= highest_processed_block_height { // no need to continue this download debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); finished_download = true; + last_block_index = Some(cnt); break; } } expected_block_id = &block.header.parent_block_id; + last_block_index = Some(cnt); + } + + // blocks after the last_block_index were not processed, so should be dropped + if let Some(last_block_index) = last_block_index { + tenure_blocks.truncate(last_block_index + 1); } if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { From 51961521adbe453983a228cab16fac5c0b34eeb2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:30:45 -0400 Subject: [PATCH 0123/1400] fix: remove unused NakamotoInvState enum --- stackslib/src/net/inv/nakamoto.rs | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index de46d15744..c173de16b6 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -247,8 +247,6 @@ impl InvGenerator { #[derive(Debug, PartialEq, Clone)] pub struct NakamotoTenureInv { - /// What state is the machine in? - pub state: NakamotoInvState, /// Bitmap of which tenures a peer has. /// Maps reward cycle to bitmap. pub tenures_inv: BTreeMap>, @@ -279,7 +277,6 @@ impl NakamotoTenureInv { neighbor_address: NeighborAddress, ) -> Self { Self { - state: NakamotoInvState::GetNakamotoInvBegin, tenures_inv: BTreeMap::new(), last_updated_at: 0, first_block_height, @@ -335,7 +332,8 @@ impl NakamotoTenureInv { /// Add in a newly-discovered inventory. /// NOTE: inventories are supposed to be aligned to the reward cycle - /// Returns true if we learned about at least one new tenure-start block + /// Returns true if the tenure bitvec has changed -- we either learned about a new tenure-start + /// block, or the remote peer "un-learned" it (e.g. due to a reorg). /// Returns false if not. pub fn merge_tenure_inv(&mut self, tenure_inv: BitVec<2100>, reward_cycle: u64) -> bool { // populate the tenures bitmap to we can fit this tenures inv @@ -367,7 +365,6 @@ impl NakamotoTenureInv { && (self.cur_reward_cycle >= cur_rc || !self.online) { test_debug!("Reset inv comms for {}", &self.neighbor_address); - self.state = NakamotoInvState::GetNakamotoInvBegin; self.online = true; self.start_sync_time = now; self.cur_reward_cycle = start_rc; @@ -474,13 +471,6 @@ impl NakamotoTenureInv { } } -#[derive(Debug, PartialEq, Clone, Copy)] -pub enum NakamotoInvState { - GetNakamotoInvBegin, - GetNakamotoInvFinish, - Done, -} - /// Nakamoto inventory state machine pub struct NakamotoInvStateMachine { /// Communications links From 137651db7f973bd87a6903758a2e04284d46e594 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:31:11 -0400 Subject: [PATCH 0124/1400] feat: test the absence of infinite loops when making tenure downloaders, and test that we only accept unconfirmed tenure blocks we've validated --- stackslib/src/net/tests/download/nakamoto.rs | 126 ++++++++++++++++++- 1 file changed, 125 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 31c42c8afb..3ef0469022 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashMap; +use std::collections::{HashMap, VecDeque}; use std::sync::mpsc::sync_channel; use std::thread; @@ -439,6 +439,62 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { burn_height: peer.network.burnchain_tip.block_height, }; + // we can make unconfirmed tenure downloaders + { + let mut empty_schedule = VecDeque::new(); + let mut full_schedule = { + let mut sched = VecDeque::new(); + sched.push_back(naddr.clone()); + sched + }; + let mut empty_downloaders = HashMap::new(); + let mut full_downloaders = { + let mut dl = HashMap::new(); + let utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(tip_block_id)); + dl.insert(naddr.clone(), utd); + dl + }; + assert_eq!( + NakamotoDownloadStateMachine::make_unconfirmed_tenure_downloaders( + &mut empty_schedule, + 10, + &mut empty_downloaders, + None + ), + 0 + ); + assert_eq!( + NakamotoDownloadStateMachine::make_unconfirmed_tenure_downloaders( + &mut empty_schedule, + 10, + &mut full_downloaders, + None + ), + 0 + ); + assert_eq!( + NakamotoDownloadStateMachine::make_unconfirmed_tenure_downloaders( + &mut full_schedule, + 10, + &mut full_downloaders, + None + ), + 0 + ); + assert_eq!(full_schedule.len(), 1); + assert_eq!( + NakamotoDownloadStateMachine::make_unconfirmed_tenure_downloaders( + &mut full_schedule, + 10, + &mut empty_downloaders, + None + ), + 1 + ); + assert_eq!(full_schedule.len(), 0); + assert_eq!(empty_downloaders.len(), 1); + } + // we've processed the tip already, so we transition straight to the Done state { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(tip_block_id)); @@ -796,6 +852,74 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .try_accept_unconfirmed_tenure_blocks(vec![bad_block]) .is_err()); } + + // Does not consume blocks beyond the highest processed block ID + { + let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); + utd.confirmed_aggregate_public_key = + Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.unconfirmed_aggregate_public_key = + Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + + assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); + + let tenure_tip = RPCGetTenureInfo { + consensus_hash: peer.network.stacks_tip.0.clone(), + tenure_start_block_id: peer.network.tenure_start_block_id.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_tenure_start_block_id: StacksBlockId::new( + &peer.network.parent_stacks_tip.0, + &peer.network.parent_stacks_tip.1, + ), + tip_block_id: StacksBlockId::new( + &peer.network.stacks_tip.0, + &peer.network.stacks_tip.1, + ), + tip_height: peer.network.stacks_tip.2, + reward_cycle: tip_rc, + }; + + let sortdb = peer.sortdb.take().unwrap(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + utd.try_accept_tenure_info( + &sortdb, + &sort_tip, + peer.chainstate(), + tenure_tip.clone(), + &agg_pubkeys, + ) + .unwrap(); + + peer.sortdb = Some(sortdb); + + assert!(utd.unconfirmed_tenure_start_block.is_some()); + + utd.highest_processed_block_id = Some(unconfirmed_tenure[1].header.block_id()); + let res = utd + .try_accept_unconfirmed_tenure_blocks( + unconfirmed_tenure.clone().into_iter().rev().collect(), + ) + .unwrap(); + assert_eq!(res.unwrap().as_slice(), &unconfirmed_tenure[1..]); + + assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::Done); + + // we can request the highest-complete tenure + assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); + + let ntd = utd + .make_highest_complete_tenure_downloader( + &highest_confirmed_wanted_tenure, + &unconfirmed_wanted_tenure, + ) + .unwrap(); + assert_eq!( + ntd.state, + NakamotoTenureDownloadState::GetTenureStartBlock( + unconfirmed_wanted_tenure.winning_block_id.clone() + ) + ); + } } #[test] From 812f70fda4fdd6eaaa55b6d46b83dfcfcd5452d7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:42:36 -0400 Subject: [PATCH 0125/1400] fix: compile error --- testnet/stacks-node/src/neon_node.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index b6ac17e51e..a6b3035938 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -4610,7 +4610,12 @@ impl StacksNode { stackerdb_configs.insert(contract.clone(), StackerDBConfig::noop()); } let stackerdb_configs = stackerdbs - .create_or_reconfigure_stackerdbs(&mut chainstate, &sortdb, stackerdb_configs) + .create_or_reconfigure_stackerdbs( + &mut chainstate, + &sortdb, + stackerdb_configs, + config.connection_options.num_neighbors, + ) .unwrap(); let stackerdb_contract_ids: Vec = From b2c65e965dc0157b5423af88a0f57f91795d4d18 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 24 May 2024 15:43:13 -0400 Subject: [PATCH 0126/1400] fix: typo --- stackslib/src/net/stackerdb/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index 3d2b7e87cc..5545aa46cd 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -466,7 +466,7 @@ impl StackerDBConfig { let peer_addr = PeerAddress::from_slice(&addr_bytes).expect("FATAL: not 16 bytes"); if peer_addr.is_in_private_range() { debug!( - "Ignoring private IP address '{}' in hint-replias", + "Ignoring private IP address '{}' in hint-replicas", &peer_addr.to_socketaddr(port as u16) ); continue; From 93bfa6c8c80d54214283f4350bf2be6340682e6b Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 12:22:29 +0300 Subject: [PATCH 0127/1400] Add the unhappy path cases for `StackAggCommitSigCommand_Err` This commit: - adds 3 unhappy path cases for the `stack-aggregation-commit` PoX-4 method, called using a signature. - adds the command run tracking inside the `check` method. - adds the expected `stack-aggregation-commit` PoX-4 errors to the POX_4_ERRORS dictionary. --- .../tests/pox-4/err_Commands.ts | 89 +++++++++++++ ...ox_StackAggregationCommitSigCommand_Err.ts | 121 ++++++++++++++++++ 2 files changed, 210 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 6a6c3b7028..8c1bdf774d 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -11,9 +11,12 @@ import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { Simnet } from "@hirosystems/clarinet-sdk"; import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err"; import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; +import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationCommitSigCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, + ERR_STACKING_NO_SUCH_PRINCIPAL: 4, + ERR_STACKING_THRESHOLD_NOT_MET: 11, ERR_STACKING_ALREADY_DELEGATED: 20, ERR_DELEGATION_ALREADY_REVOKED: 34, }; @@ -318,6 +321,92 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ) ), + // StackAggregationCommitSigCommand_Err_Stacking_Threshold_Not_Met + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitSigCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitSigCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + operator.amountToCommit > 0 + ) { + model.trackCommandRun( + "StackAggregationCommitSigCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, + ), + ), + // StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitSigCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitSigCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + operator.amountToCommit == 0 + ) { + model.trackCommandRun( + "StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitSigCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitSigCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !(operator.lockedAddresses.length > 0) && + !(operator.amountToCommit >= model.stackingMinimum) + ) { + model.trackCommandRun( + "StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts new file mode 100644 index 0000000000..ca53b56d1c --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts @@ -0,0 +1,121 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +type CheckFunc = ( + this: StackAggregationCommitSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationCommitSigCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationCommitAuthCommand_Err` to lock uSTX for stacking. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. + period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase or agg-commit. + topic: Pox4SignatureTopic.AggregateCommit, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: committedAmount, + }); + + // Act + const stackAggregationCommit = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationCommit.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-commit", + "amount committed", + committedAmount.toString(), + "signature", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit auth-id ${this.authId}`; + } +} From 16311d89ec1c4b7cbc9c60a9b0e1f4fdcb3d6aca Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 12:26:54 +0300 Subject: [PATCH 0128/1400] Remove `StackAggregationCommitSigCommand_Err` from statistics The command run tracking was moved inside the command's check function. No need to report the run using the file name anymore. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index 04a596bbb6..edf454eafa 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -113,7 +113,8 @@ it("statefully interacts with PoX-4", async () => { file !== "pox_StackStxAuthCommand_Err.ts" && file !== "pox_StackStxSigCommand_Err.ts" && file !== "pox_RevokeDelegateStxCommand_Err.ts" && - file !== "pox_DelegateStxCommand_Err.ts" + file !== "pox_DelegateStxCommand_Err.ts" && + file !== "pox_StackAggregationCommitSigCommand_Err.ts" ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. // This is the initial state of the model. From a8e7ea3d0febf0f8a31a02fdbca790241f998974 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 13:29:41 +0300 Subject: [PATCH 0129/1400] Add the unhappy path cases for `StackAggCommitAuthCommand_Err` This commit: - adds 3 unhappy path cases for the `stack-aggregation-commit` PoX-4 method, called using an authorization. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 87 ++++++++++++ ...x_StackAggregationCommitAuthCommand_Err.ts | 127 ++++++++++++++++++ 2 files changed, 214 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 8c1bdf774d..8276d02f12 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -12,6 +12,7 @@ import { Simnet } from "@hirosystems/clarinet-sdk"; import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err"; import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationCommitSigCommand_Err"; +import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCommitAuthCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, @@ -407,6 +408,92 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), ), + // StackAggregationCommitAuthCommand_Err_Stacking_Threshold_Not_Met + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitAuthCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitAuthCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + operator.amountToCommit > 0 + ) { + model.trackCommandRun( + "StackAggregationCommitAuthCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, + ), + ), + // StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitAuthCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitAuthCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + operator.amountToCommit === 0 + ) { + model.trackCommandRun( + "StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitAuthCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitAuthCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !(operator.lockedAddresses.length > 0) && + !(operator.amountToCommit >= model.stackingMinimum) + ) { + model.trackCommandRun( + "StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts new file mode 100644 index 0000000000..3580061fae --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts @@ -0,0 +1,127 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackAggregationCommitAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationCommitAuthCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationCommitAuthCommand_Err` to lock uSTX for stacking. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + // Include the authorization and the `stack-aggregation-commit` transactions + // in a single block. This way we ensure both the authorization and the + // stack-aggregation-commit transactions are called during the same reward + // cycle, so the authorization currentRewCycle param is relevant for the + // upcoming stack-aggregation-commit call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + ]); + + // Assert + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-commit", + "amount committed", + committedAmount.toString(), + "authorization", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit auth-id ${this.authId}`; + } +} From b5929989ecb5971c61b25d7b6f3ba9cfe6a96f3c Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 13:31:25 +0300 Subject: [PATCH 0130/1400] Remove `StackAggregationCommitAuthCommand_Err` from statistics The command run tracking was moved inside the command's check function. No need to report the run using the file name anymore. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index edf454eafa..e007cbb2ad 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -114,7 +114,8 @@ it("statefully interacts with PoX-4", async () => { file !== "pox_StackStxSigCommand_Err.ts" && file !== "pox_RevokeDelegateStxCommand_Err.ts" && file !== "pox_DelegateStxCommand_Err.ts" && - file !== "pox_StackAggregationCommitSigCommand_Err.ts" + file !== "pox_StackAggregationCommitSigCommand_Err.ts" && + file !== "pox_StackAggregationCommitAuthCommand_Err.ts" ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. // This is the initial state of the model. From 41c0386b4fc40499e677e5c78d474b3d82b83652 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 15:23:30 +0300 Subject: [PATCH 0131/1400] Use strict equality inside generator --- .../tests/pox-4/err_Commands.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 8276d02f12..3c3d242d33 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -369,7 +369,7 @@ export function ErrCommands( if ( operator.lockedAddresses.length > 0 && !(operator.amountToCommit >= model.stackingMinimum) && - operator.amountToCommit == 0 + operator.amountToCommit === 0 ) { model.trackCommandRun( "StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_1", From 788f986652015d5ccb6b4eac173c6936cb83b908 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 15:44:36 +0300 Subject: [PATCH 0132/1400] Order statistics alphabetically This commit improves the unhappy paths execution visibility after the test suite run is complete. --- .../tests/pox-4/pox_CommandModel.ts | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts index 6d4d582b58..b867994889 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts @@ -46,7 +46,13 @@ export class Stub { reportCommandRuns() { console.log("Command run method execution counts:"); - this.statistics.forEach((count, commandName) => { + const orderedStatistics = Array.from(this.statistics.entries()).sort( + ([keyA], [keyB]) => { + return keyA.localeCompare(keyB); + }, + ); + + orderedStatistics.forEach(([commandName, count]) => { console.log(`${commandName}: ${count}`); }); } From b4c5e64a0312452d679ba00fc391fea5f5093532 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 27 May 2024 22:25:13 +0300 Subject: [PATCH 0133/1400] add output to print sqlite version --- stacks-signer/src/signerdb.rs | 18 ++++++++++++++++++ stackslib/src/chainstate/nakamoto/mod.rs | 21 ++++++++++++--------- stackslib/src/chainstate/nakamoto/tenure.rs | 2 +- 3 files changed, 31 insertions(+), 10 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 8c6b3ba187..04dc1f65bb 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -144,6 +144,17 @@ impl SignerDb { ) } + /// Get the sqlite version from the database + pub fn get_sqlite_version( + &self, + ) -> Result, DBError> { + query_row( + &self.db, + "SELECT sqlite_version()", + NO_PARAMS, + ) + } + /// Insert the given state in the `signer_states` table for the given reward cycle pub fn insert_encrypted_signer_state( &self, @@ -391,4 +402,11 @@ mod tests { .expect("Failed to get signer state") .is_none()); } + + #[test] + fn test_display_sqlite_version() { + let db_path = tmp_db_path(); + let db = SignerDb::new(db_path).expect("Failed to create signer db"); + println!("sqlite version is: {:#?}", db.get_sqlite_version()); + } } diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index bcc03cdeca..d3541fae22 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -207,18 +207,21 @@ lazy_static! { ); CREATE INDEX nakamoto_block_headers_by_consensus_hash ON nakamoto_block_headers(consensus_hash); "#.into(), - format!( - r#"ALTER TABLE payments - ADD COLUMN schedule_type TEXT NOT NULL DEFAULT "{}"; - "#, - HeaderTypeNames::Epoch2.get_name_str()), - r#" - UPDATE db_config SET version = "4"; - "#.into(), + format!( + r#"ALTER TABLE payments + ADD COLUMN schedule_type TEXT NOT NULL DEFAULT "{}"; + "#, + HeaderTypeNames::Epoch2.get_name_str()), + r#" + UPDATE db_config SET version = "4"; + "#.into(), ]; pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_2: Vec = vec![ - NAKAMOTO_TENURES_SCHEMA_2.into() + NAKAMOTO_TENURES_SCHEMA_2.into(), + r#" + UPDATE db_config SET version = "5"; + "#.into(), ]; } diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index fde669760d..2edf9f1e87 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -186,7 +186,7 @@ pub static NAKAMOTO_TENURES_SCHEMA_2: &'static str = r#" tenure_index INTEGER NOT NULL, PRIMARY KEY(burn_view_consensus_hash,tenure_index) - ) STRICT; + ); CREATE INDEX nakamoto_tenures_by_block_id ON nakamoto_tenures(block_id); CREATE INDEX nakamoto_tenures_by_tenure_id ON nakamoto_tenures(tenure_id_consensus_hash); CREATE INDEX nakamoto_tenures_by_block_and_consensus_hashes ON nakamoto_tenures(tenure_id_consensus_hash,block_hash); From 7b423bdfea2a2dbdbf22352f204f6a45fb3a0194 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 27 May 2024 22:42:22 +0300 Subject: [PATCH 0134/1400] update format --- stacks-signer/src/signerdb.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 04dc1f65bb..7b6c40745f 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -144,15 +144,9 @@ impl SignerDb { ) } - /// Get the sqlite version from the database - pub fn get_sqlite_version( - &self, - ) -> Result, DBError> { - query_row( - &self.db, - "SELECT sqlite_version()", - NO_PARAMS, - ) + /// Get the sqlite version from the database + pub fn get_sqlite_version(&self) -> Result, DBError> { + query_row(&self.db, "SELECT sqlite_version()", NO_PARAMS) } /// Insert the given state in the `signer_states` table for the given reward cycle From 23e6bc1f91e236547911aed3384407688b4e9c35 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 22:53:56 +0300 Subject: [PATCH 0135/1400] Add the unhappy path cases for `StackAggCommitIndexedSigCommand_Err` This commit: - adds 3 unhappy path cases for the `stack-aggregation-commit-indexed` PoX-4 method, called using a signature. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 87 ++++++++++++ ...kAggregationCommitIndexedSigCommand_Err.ts | 124 ++++++++++++++++++ 2 files changed, 211 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 3c3d242d33..bfe600e594 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -13,6 +13,7 @@ import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationCommitSigCommand_Err"; import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCommitAuthCommand_Err"; +import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, @@ -494,6 +495,92 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), ), + // StackAggregationCommitIndexedSigCommand_Err_Stacking_Threshold_Not_Met + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedSigCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitIndexedSigCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + operator.amountToCommit > 0 + ) { + model.trackCommandRun( + "StackAggregationCommitIndexedSigCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, + ), + ), + // StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedSigCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitIndexedSigCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + !(operator.amountToCommit > 0) + ) { + model.trackCommandRun( + "StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedSigCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitIndexedSigCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !(operator.lockedAddresses.length > 0) && + !(operator.amountToCommit >= model.stackingMinimum) + ) { + model.trackCommandRun( + "StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts new file mode 100644 index 0000000000..22b5a4f923 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts @@ -0,0 +1,124 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +type CheckFunc = ( + this: StackAggregationCommitIndexedSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationCommitIndexedSigCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationCommitIndexedSigCommand_Err` to lock uSTX + * for stacking. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. + period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase or agg-commit. + topic: Pox4SignatureTopic.AggregateCommit, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: committedAmount, + }); + + // Act + const stackAggregationCommitIndexed = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit-indexed", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationCommitIndexed.result).toBeErr( + Cl.int(this.errorCode), + ); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-commit-indexed", + "amount committed", + committedAmount.toString(), + "authorization", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit-indexed auth-id ${this.authId}`; + } +} From d96e65def38b92dc7d9b735277fdfcbddce077ae Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 28 May 2024 09:21:21 -0400 Subject: [PATCH 0136/1400] Add MinerReason to a spawned thread to trigger a tenure extend Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/burn/db/sortdb.rs | 22 +++ .../stacks-node/src/nakamoto_node/miner.rs | 142 ++++++++++++++---- .../stacks-node/src/nakamoto_node/relayer.rs | 90 +++++++++-- 3 files changed, 215 insertions(+), 39 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e3802d6ec1..08f78ec6cf 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2226,6 +2226,28 @@ impl<'a> SortitionHandleConn<'a> { }) } + /// Get the latest block snapshot on this fork where a sortition occured. + pub fn get_last_snapshot_with_sortition_from_tip(&self) -> Result { + let ancestor_hash = + match self.get_indexed(&self.context.chain_tip, &db_keys::last_sortition())? { + Some(hex_str) => BurnchainHeaderHash::from_hex(&hex_str).unwrap_or_else(|_| { + panic!( + "FATAL: corrupt database: failed to parse {} into a hex string", + &hex_str + ) + }), + None => { + // no prior sortitions, so get the first + return self.get_first_block_snapshot(); + } + }; + + self.get_block_snapshot(&ancestor_hash).map(|snapshot_opt| { + snapshot_opt + .unwrap_or_else(|| panic!("FATAL: corrupt index: no snapshot {}", ancestor_hash)) + }) + } + pub fn get_leader_key_at( &self, key_block_height: u64, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index d6edd79963..d20abe0ea5 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -73,9 +73,13 @@ pub enum MinerDirective { StopTenure, } -struct ParentTenureInfo { - parent_tenure_blocks: u64, - parent_tenure_consensus_hash: ConsensusHash, +#[derive(PartialEq, Debug, Clone)] +/// Tenure info needed to construct a tenure change or tenure extend transaction +pub struct ParentTenureInfo { + /// The number of blocks in the parent tenure + pub parent_tenure_blocks: u64, + /// The consensus hash of the parent tenure + pub parent_tenure_consensus_hash: ConsensusHash, } /// Metadata required for beginning a new tenure @@ -87,6 +91,32 @@ struct ParentStacksBlockInfo { parent_tenure: Option, } +/// The reason the miner thread was spawned +#[derive(PartialEq, Clone, Debug)] +pub enum MinerReason { + /// The miner thread was spawned to begin a new tenure + BlockFound, + /// The miner thread was spawned to extend an existing tenure + Extended { + /// The parent tenure info to extend + parent_tenure_info: ParentTenureInfo, + /// Wether the tenure change transaction was mined + tenure_change_mined: bool, + }, +} + +impl std::fmt::Display for MinerReason { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + MinerReason::BlockFound => write!(f, "BlockFound"), + MinerReason::Extended { parent_tenure_info, tenure_change_mined } => write!( + f, + "Extended: tenure_info = {parent_tenure_info:?}, tenure_change_mined = {tenure_change_mined:?}", + ), + } + } +} + pub struct BlockMinerThread { /// node config struct config: Config, @@ -106,6 +136,8 @@ pub struct BlockMinerThread { parent_tenure_id: StacksBlockId, /// Handle to the node's event dispatcher event_dispatcher: EventDispatcher, + /// The reason the miner thread was spawned + reason: MinerReason, } impl BlockMinerThread { @@ -115,6 +147,7 @@ impl BlockMinerThread { registered_key: RegisteredKey, burn_block: BlockSnapshot, parent_tenure_id: StacksBlockId, + reason: MinerReason, ) -> BlockMinerThread { BlockMinerThread { config: rt.config.clone(), @@ -126,6 +159,7 @@ impl BlockMinerThread { burn_block, event_dispatcher: rt.event_dispatcher.clone(), parent_tenure_id, + reason, } } @@ -146,6 +180,7 @@ impl BlockMinerThread { "had_prior_miner" => prior_miner.is_some(), "parent_tenure_id" => %self.parent_tenure_id, "thread_id" => ?thread::current().id(), + "reason" => %self.reason, ); if let Some(prior_miner) = prior_miner { Self::stop_miner(&self.globals, prior_miner); @@ -207,6 +242,14 @@ impl BlockMinerThread { "consensus_hash" => %new_block.header.consensus_hash, ); self.globals.coord().announce_new_stacks_block(); + if let MinerReason::Extended { + tenure_change_mined, + .. + } = &mut self.reason + { + // We should not issue multiple tenure change transactions for the same tenure + *tenure_change_mined = true; + } } self.globals.counters.bump_naka_mined_blocks(); @@ -500,12 +543,13 @@ impl BlockMinerThread { } fn generate_tenure_change_tx( - &mut self, + &self, nonce: u64, parent_block_id: StacksBlockId, parent_tenure_consensus_hash: ConsensusHash, parent_tenure_blocks: u64, miner_pkh: Hash160, + cause: TenureChangeCause, ) -> Result { let is_mainnet = self.config.is_mainnet(); let chain_id = self.config.burnchain.chain_id; @@ -516,7 +560,7 @@ impl BlockMinerThread { previous_tenure_end: parent_block_id, previous_tenure_blocks: u32::try_from(parent_tenure_blocks) .expect("FATAL: more than u32 blocks in a tenure"), - cause: TenureChangeCause::BlockFound, + cause, pubkey_hash: miner_pkh, }); @@ -541,7 +585,7 @@ impl BlockMinerThread { /// Create a coinbase transaction. fn generate_coinbase_tx( - &mut self, + &self, nonce: u64, epoch_id: StacksEpochId, vrf_proof: VRFProof, @@ -723,28 +767,8 @@ impl BlockMinerThread { } // create our coinbase if this is the first block we've mined this tenure - let tenure_start_info = if let Some(ref par_tenure_info) = parent_block_info.parent_tenure { - let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); - let current_miner_nonce = parent_block_info.coinbase_nonce; - let tenure_change_tx = self.generate_tenure_change_tx( - current_miner_nonce, - parent_block_id, - par_tenure_info.parent_tenure_consensus_hash, - par_tenure_info.parent_tenure_blocks, - self.keychain.get_nakamoto_pkh(), - )?; - let coinbase_tx = - self.generate_coinbase_tx(current_miner_nonce + 1, target_epoch_id, vrf_proof); - NakamotoTenureInfo { - coinbase_tx: Some(coinbase_tx), - tenure_change_tx: Some(tenure_change_tx), - } - } else { - NakamotoTenureInfo { - coinbase_tx: None, - tenure_change_tx: None, - } - }; + let tenure_start_info = + self.make_tenure_start_info(&parent_block_info, vrf_proof, target_epoch_id)?; parent_block_info.stacks_parent_header.microblock_tail = None; @@ -816,6 +840,68 @@ impl BlockMinerThread { Ok(block) } + /// Create the tenure start info for the block we're going to build + fn make_tenure_start_info( + &self, + parent_block_info: &ParentStacksBlockInfo, + vrf_proof: VRFProof, + target_epoch_id: StacksEpochId, + ) -> Result { + let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); + let current_miner_nonce = parent_block_info.coinbase_nonce; + let (coinbase_tx, tenure_change_tx) = match &self.reason { + MinerReason::BlockFound => { + // create our coinbase if this is the first block we've mined this tenure + if let Some(ref par_tenure_info) = parent_block_info.parent_tenure { + let tenure_change_tx = self.generate_tenure_change_tx( + current_miner_nonce, + parent_block_id, + par_tenure_info.parent_tenure_consensus_hash, + par_tenure_info.parent_tenure_blocks, + self.keychain.get_nakamoto_pkh(), + TenureChangeCause::BlockFound, + )?; + let coinbase_tx = self.generate_coinbase_tx( + current_miner_nonce + 1, + target_epoch_id, + vrf_proof, + ); + (Some(coinbase_tx), Some(tenure_change_tx)) + } else { + (None, None) + } + } + MinerReason::Extended { + parent_tenure_info, + tenure_change_mined, + } => { + if !tenure_change_mined { + let tenure_change_tx = self.generate_tenure_change_tx( + current_miner_nonce, + parent_block_id, + parent_tenure_info.parent_tenure_consensus_hash, + parent_tenure_info.parent_tenure_blocks, + self.keychain.get_nakamoto_pkh(), + TenureChangeCause::Extended, + )?; + let coinbase_tx = self.generate_coinbase_tx( + current_miner_nonce + 1, + target_epoch_id, + vrf_proof, + ); + (Some(coinbase_tx), Some(tenure_change_tx)) + } else { + (None, None) + } + } + }; + + Ok(NakamotoTenureInfo { + coinbase_tx, + tenure_change_tx, + }) + } + /// Check if the tenure needs to change -- if so, return a BurnchainTipChanged error fn check_burn_tip_changed(&self, sortdb: &SortitionDB) -> Result<(), NakamotoNodeError> { let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index fc4ca1ae0d..be405dda14 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -51,12 +51,13 @@ use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; +use super::miner::MinerReason; use super::{ BlockCommits, Config, Error as NakamotoNodeError, EventDispatcher, Keychain, BLOCK_PROCESSOR_STACK_SIZE, }; use crate::burnchains::BurnchainController; -use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective}; +use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective, ParentTenureInfo}; use crate::neon_node::{ fault_injection_skip_mining, open_chainstate_with_faults, LeaderKeyRegistrationState, }; @@ -162,6 +163,8 @@ pub struct RelayerThread { /// This is the last snapshot in which the relayer committed, and the parent_tenure_id /// which was committed to last_committed: Option<(BlockSnapshot, StacksBlockId)>, + /// The last commit that the relayer submitted which won the sortition + current_mining_commit_tx: Option, } impl RelayerThread { @@ -219,6 +222,7 @@ impl RelayerThread { is_miner, next_initiative: Instant::now() + Duration::from_millis(next_initiative_delay), last_committed: None, + current_mining_commit_tx: None, } } @@ -310,9 +314,7 @@ impl RelayerThread { .expect("FATAL: unknown consensus hash"); self.globals.set_last_sortition(sn.clone()); - let won_sortition = sn.sortition && self.last_commits.remove(&sn.winning_block_txid); - info!( "Relayer: Process sortition"; "sortition_ch" => %consensus_hash, @@ -325,6 +327,7 @@ impl RelayerThread { if won_sortition { increment_stx_blocks_mined_counter(); + self.current_mining_commit_tx = Some(sn.winning_block_txid); } if sn.sortition { @@ -541,6 +544,7 @@ impl RelayerThread { registered_key: RegisteredKey, last_burn_block: BlockSnapshot, parent_tenure_id: StacksBlockId, + reason: MinerReason, ) -> Result { if fault_injection_skip_mining(&self.config.node.rpc_bind, last_burn_block.block_height) { debug!( @@ -570,10 +574,16 @@ impl RelayerThread { "height" => last_burn_block.block_height, "burn_header_hash" => %burn_header_hash, "parent_tenure_id" => %parent_tenure_id, + "reason" => %reason ); - let miner_thread_state = - BlockMinerThread::new(self, registered_key, last_burn_block, parent_tenure_id); + let miner_thread_state = BlockMinerThread::new( + self, + registered_key, + last_burn_block, + parent_tenure_id, + reason, + ); Ok(miner_thread_state) } @@ -581,6 +591,7 @@ impl RelayerThread { &mut self, parent_tenure_start: StacksBlockId, burn_tip: BlockSnapshot, + reason: MinerReason, ) -> Result<(), NakamotoNodeError> { // when starting a new tenure, block the mining thread if its currently running. // the new mining thread will join it (so that the new mining thread stalls, not the relayer) @@ -593,7 +604,8 @@ impl RelayerThread { warn!("Trying to start new tenure, but no VRF key active"); NakamotoNodeError::NoVRFKeyActive })?; - let new_miner_state = self.create_block_miner(vrf_key, burn_tip, parent_tenure_start)?; + let new_miner_state = + self.create_block_miner(vrf_key, burn_tip, parent_tenure_start, reason)?; let new_miner_handle = std::thread::Builder::new() .name(format!("miner.{parent_tenure_start}")) @@ -648,7 +660,11 @@ impl RelayerThread { MinerDirective::BeginTenure { parent_tenure_start, burnchain_tip, - } => match self.start_new_tenure(parent_tenure_start, burnchain_tip) { + } => match self.start_new_tenure( + parent_tenure_start, + burnchain_tip, + MinerReason::BlockFound, + ) { Ok(()) => { debug!("Relayer: successfully started new tenure."); } @@ -656,13 +672,65 @@ impl RelayerThread { error!("Relayer: Failed to start new tenure: {:?}", e); } }, - MinerDirective::ContinueTenure { new_burn_view: _ } => { - // TODO: in this case, we eventually want to undergo a tenure - // change to switch to the new burn view, but right now, we will - // simply end our current tenure if it exists + MinerDirective::ContinueTenure { new_burn_view } => { match self.stop_tenure() { Ok(()) => { debug!("Relayer: successfully stopped tenure."); + // Check if we should undergo a tenure change to switch to the new burn view + let Ok(block_snapshot) = self + .sortdb + .index_handle_at_tip() + .get_last_snapshot_with_sortition_from_tip() + else { + error!("Relayer: failed to get snapshot for current tip"); + return false; + }; + let Ok(Some(block_header)) = + NakamotoChainState::get_block_header_by_consensus_hash( + self.chainstate.db(), + &block_snapshot.consensus_hash, + ) + else { + error!("Relayer: failed to get block header for the last sortition snapshsot"); + return false; + }; + + let Some(current_mining_commit_tx) = self.current_mining_commit_tx else { + error!("Relayer: no current mining commit txid following a ContinueTenure directive. This implies the miner won a sortition without a commit transaction."); + return false; + }; + if block_snapshot.winning_block_txid == current_mining_commit_tx { + let Ok(Some(last_parent_tenure_header)) = + NakamotoChainState::get_nakamoto_tenure_finish_block_header( + self.chainstate.db(), + &block_header.consensus_hash, + ) + else { + warn!("Failed loading last block of parent tenure"; "consensus_hash" => %block_header.consensus_hash); + return false; + }; + let parent_tenure_info = ParentTenureInfo { + parent_tenure_blocks: 1 + last_parent_tenure_header + .stacks_block_height + - block_header.stacks_block_height, + parent_tenure_consensus_hash: new_burn_view, + }; + match self.start_new_tenure( + block_header.index_block_hash(), + block_snapshot, + MinerReason::Extended { + parent_tenure_info, + tenure_change_mined: false, + }, + ) { + Ok(()) => { + debug!("Relayer: successfully started new tenure."); + } + Err(e) => { + error!("Relayer: Failed to start new tenure: {:?}", e); + } + } + } } Err(e) => { error!("Relayer: Failed to stop tenure: {:?}", e); From 9df9ca604440e98878e8d59bb5eb5991f98ef86d Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 28 May 2024 16:44:15 +0300 Subject: [PATCH 0137/1400] display nakamoto db sqlite version version is "3.33.0" --- stackslib/src/chainstate/stacks/db/mod.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index f10a87dccc..1a6e59f4c0 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1031,6 +1031,14 @@ impl StacksChainState { Ok(config.expect("BUG: no db_config installed")) } + pub fn get_db_version(conn: &DBConn) -> Result { + let option_version = query_row::(conn, "SELECT sqlite_version()", NO_PARAMS)?; + if let Some(version) = option_version { + return Ok(version); + } + Ok("no version".into()) + } + fn apply_schema_migrations<'a>( tx: &DBTx<'a>, mainnet: bool, @@ -1054,6 +1062,9 @@ impl StacksChainState { ); return Err(Error::InvalidChainstateDB); } + + println!("This is the sqlite version: {:#?}", Self::get_db_version(tx)); + if db_config.version != CHAINSTATE_VERSION { while db_config.version != CHAINSTATE_VERSION { From 37809a42f2c3c0b0632c8d12be38c146287512b5 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 28 May 2024 17:11:58 +0300 Subject: [PATCH 0138/1400] remove sqlite version info --- stacks-signer/src/signerdb.rs | 12 ------------ stackslib/src/chainstate/stacks/db/mod.rs | 11 ----------- 2 files changed, 23 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 7b6c40745f..8c6b3ba187 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -144,11 +144,6 @@ impl SignerDb { ) } - /// Get the sqlite version from the database - pub fn get_sqlite_version(&self) -> Result, DBError> { - query_row(&self.db, "SELECT sqlite_version()", NO_PARAMS) - } - /// Insert the given state in the `signer_states` table for the given reward cycle pub fn insert_encrypted_signer_state( &self, @@ -396,11 +391,4 @@ mod tests { .expect("Failed to get signer state") .is_none()); } - - #[test] - fn test_display_sqlite_version() { - let db_path = tmp_db_path(); - let db = SignerDb::new(db_path).expect("Failed to create signer db"); - println!("sqlite version is: {:#?}", db.get_sqlite_version()); - } } diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 1a6e59f4c0..f10a87dccc 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1031,14 +1031,6 @@ impl StacksChainState { Ok(config.expect("BUG: no db_config installed")) } - pub fn get_db_version(conn: &DBConn) -> Result { - let option_version = query_row::(conn, "SELECT sqlite_version()", NO_PARAMS)?; - if let Some(version) = option_version { - return Ok(version); - } - Ok("no version".into()) - } - fn apply_schema_migrations<'a>( tx: &DBTx<'a>, mainnet: bool, @@ -1062,9 +1054,6 @@ impl StacksChainState { ); return Err(Error::InvalidChainstateDB); } - - println!("This is the sqlite version: {:#?}", Self::get_db_version(tx)); - if db_config.version != CHAINSTATE_VERSION { while db_config.version != CHAINSTATE_VERSION { From c24aee1ef314230d05fcfcda95e2be45d1585fd4 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 28 May 2024 17:30:07 +0300 Subject: [PATCH 0139/1400] Update the model to support multiple allowed contract callers This commit: - updates the old way of updating the model after successful `allow-contract-caller` function calls. Before it was updating the allowed contract caller. The current approach pushes the allowed contract caller to a list (only if needed). - updates the way of updating the model after successful disallow-contract-caller function calls. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 6 ++-- .../pox-4/pox_AllowContractCallerCommand.ts | 29 +++++-------------- .../tests/pox-4/pox_CommandModel.ts | 2 +- .../pox_DisallowContractCallerCommand.ts | 11 +++++-- 4 files changed, 20 insertions(+), 28 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index 71d29086ef..bf8b63ffe7 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -125,8 +125,8 @@ it("statefully interacts with PoX-4", async () => { delegatedTo: "", delegatedMaxAmount: 0, // We initialize delegatedUntilBurnHt to 0. It will be updated - // after successful delegate-stx calls. Its value will be either - // the unwrapped until-burn-ht uint passed to the delegate-stx, + // after successful delegate-stx calls. It's value will be either + // the unwrapped until-burn-ht uint passed to the delegate-stx, // or undefined for indefinite delegations. delegatedUntilBurnHt: 0, delegatedPoxAddress: "", @@ -134,7 +134,7 @@ it("statefully interacts with PoX-4", async () => { amountUnlocked: 100_000_000_000_000, unlockHeight: 0, firstLockedRewardCycle: 0, - allowedContractCaller: "", + allowedContractCallers: [], callerAllowedBy: [], committedRewCycleIndexes: [], }])), diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts index dad1a381a5..141676cdae 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts @@ -74,32 +74,17 @@ export class AllowContractCallerCommand implements PoxCommand { // Get the wallets involved from the model and update it with the new state. const wallet = model.stackers.get(this.wallet.stxAddress)!; - const callerAllowedBefore = wallet.allowedContractCaller; - - const callerAllowedBeforeState = model.stackers.get(callerAllowedBefore) || - null; - - if (callerAllowedBeforeState) { - // Remove the allower from the ex-allowed caller's allowance list. - - const walletIndexInsideAllowedByList = callerAllowedBeforeState - .callerAllowedBy.indexOf( - this.wallet.stxAddress, - ); - - expect(walletIndexInsideAllowedByList).toBeGreaterThan(-1); - - callerAllowedBeforeState.callerAllowedBy.splice( - walletIndexInsideAllowedByList, - 1, - ); - } const callerToAllow = model.stackers.get(this.allowanceTo.stxAddress)!; // Update model so that we know this wallet has authorized a contract-caller. + // If the caller is already allowed, there's no need to add it again. + const callerToAllowIndexInAllowedList = wallet.allowedContractCallers + .indexOf(this.allowanceTo.stxAddress); - wallet.allowedContractCaller = this.allowanceTo.stxAddress; - callerToAllow.callerAllowedBy.push(this.wallet.stxAddress); + if (callerToAllowIndexInAllowedList == -1) { + wallet.allowedContractCallers.push(this.allowanceTo.stxAddress); + callerToAllow.callerAllowedBy.push(this.wallet.stxAddress); + } // Log to console for debugging purposes. This is not necessary for the // test to pass but it is useful for debugging and eyeballing the test. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts index cdf211c3ed..ce1d2a28b4 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts @@ -191,7 +191,7 @@ export type Stacker = { amountUnlocked: number; unlockHeight: number; firstLockedRewardCycle: number; - allowedContractCaller: StxAddress; + allowedContractCallers: StxAddress[]; callerAllowedBy: StxAddress[]; committedRewCycleIndexes: number[]; }; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts index 09618db49c..16b830b5fb 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts @@ -42,7 +42,9 @@ export class DisallowContractCallerCommand implements PoxCommand { this.callerToDisallow.stxAddress, )!; return ( - stacker.allowedContractCaller === this.callerToDisallow.stxAddress && + stacker.allowedContractCallers.includes( + this.callerToDisallow.stxAddress, + ) && callerToDisallow.callerAllowedBy.includes( this.stacker.stxAddress, ) === @@ -76,7 +78,12 @@ export class DisallowContractCallerCommand implements PoxCommand { // Update model so that we know that the stacker has revoked stacking // allowance. const stacker = model.stackers.get(this.stacker.stxAddress)!; - stacker.allowedContractCaller = ""; + const callerToDisallowIndex = stacker.allowedContractCallers.indexOf( + this.callerToDisallow.stxAddress, + ); + + expect(callerToDisallowIndex).toBeGreaterThan(-1); + stacker.allowedContractCallers.splice(callerToDisallowIndex, 1); // Remove the operator from the caller to disallow's allowance list. const walletIndexAllowedByList = callerToDisallow.callerAllowedBy.indexOf( From 0e68198516e6c90a5a4bf61b714c104a881aa201 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 28 May 2024 13:49:52 -0400 Subject: [PATCH 0140/1400] chore: indicate in the docs that the reward set must correspond to the sortition ID --- stackslib/src/net/p2p.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index d1168abe94..fbb6c375ed 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -265,6 +265,8 @@ pub struct PeerNetwork { /// determine whether or not the reward cycle info in `current_reward_sets` is still valid -- a /// burnchain fork may invalidate them, so the code must check that the sortition ID for the /// start of the prepare-phase is still canonical. + /// This needs to be in 1-to-1 correspondence with `current_reward_sets` -- the sortition IDs + /// that make up the values need to correspond to the reward sets computed as of the sortition. pub current_reward_set_ids: BTreeMap, // information about the state of the network's anchor blocks From aa711728f708e4c4f76edd7f110bb017c975b2b2 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 28 May 2024 14:35:38 -0400 Subject: [PATCH 0141/1400] Move continue tenure logic to seperate function that passes up errors Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node.rs | 2 + .../stacks-node/src/nakamoto_node/relayer.rs | 131 ++++++++++-------- 2 files changed, 75 insertions(+), 58 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 8a1d80de32..a60370f612 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -103,6 +103,8 @@ pub enum Error { SigningCoordinatorFailure(String), // The thread that we tried to send to has closed ChannelClosed, + /// The block header for the tenure start is missing + MissingTenureStartBlockHeader, } impl StacksNode { diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index be405dda14..e25b0c7aa0 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -647,6 +647,75 @@ impl RelayerThread { Ok(()) } + fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> { + if let Err(e) = self.stop_tenure() { + error!("Relayer: Failed to stop tenure: {:?}", e); + return Ok(()); + } + debug!("Relayer: successfully stopped tenure."); + // Check if we should undergo a tenure change to switch to the new burn view + let block_snapshot = self + .sortdb + .index_handle_at_tip() + .get_last_snapshot_with_sortition_from_tip() + .map_err(|e| { + error!("Relayer: failed to get last sortition snapshot: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + let Some(block_header) = NakamotoChainState::get_block_header_by_consensus_hash( + self.chainstate.db(), + &block_snapshot.consensus_hash, + ) + .map_err(|e| { + error!("Relayer: failed to get block header for the last sortition snapshsot: {e:?}"); + NakamotoNodeError::MissingTenureStartBlockHeader + })? + else { + error!("Relayer: failed to get block header for the last sortition snapshsot"); + return Err(NakamotoNodeError::MissingTenureStartBlockHeader); + }; + + if Some(block_snapshot.winning_block_txid) != self.current_mining_commit_tx { + debug!("Relayer: the miner did not win the last sortition. No tenure to continue."); + return Ok(()); + }; + + let Some(last_parent_tenure_header) = + NakamotoChainState::get_nakamoto_tenure_finish_block_header( + self.chainstate.db(), + &block_header.consensus_hash, + ) + .map_err(|e| { + error!("Relayer: failed to get last block of parent tenure: {e:?}"); + NakamotoNodeError::ParentNotFound + })? + else { + warn!("Failed loading last block of parent tenure"; "consensus_hash" => %block_header.consensus_hash); + return Err(NakamotoNodeError::ParentNotFound); + }; + let parent_tenure_info = ParentTenureInfo { + parent_tenure_blocks: 1 + last_parent_tenure_header.stacks_block_height + - block_header.stacks_block_height, + parent_tenure_consensus_hash: new_burn_view, + }; + match self.start_new_tenure( + block_header.index_block_hash(), + block_snapshot, + MinerReason::Extended { + parent_tenure_info, + tenure_change_mined: false, + }, + ) { + Ok(()) => { + debug!("Relayer: successfully started new tenure."); + } + Err(e) => { + error!("Relayer: Failed to start new tenure: {:?}", e); + } + } + Ok(()) + } + fn handle_sortition( &mut self, consensus_hash: ConsensusHash, @@ -673,67 +742,13 @@ impl RelayerThread { } }, MinerDirective::ContinueTenure { new_burn_view } => { - match self.stop_tenure() { + match self.continue_tenure(new_burn_view) { Ok(()) => { - debug!("Relayer: successfully stopped tenure."); - // Check if we should undergo a tenure change to switch to the new burn view - let Ok(block_snapshot) = self - .sortdb - .index_handle_at_tip() - .get_last_snapshot_with_sortition_from_tip() - else { - error!("Relayer: failed to get snapshot for current tip"); - return false; - }; - let Ok(Some(block_header)) = - NakamotoChainState::get_block_header_by_consensus_hash( - self.chainstate.db(), - &block_snapshot.consensus_hash, - ) - else { - error!("Relayer: failed to get block header for the last sortition snapshsot"); - return false; - }; - - let Some(current_mining_commit_tx) = self.current_mining_commit_tx else { - error!("Relayer: no current mining commit txid following a ContinueTenure directive. This implies the miner won a sortition without a commit transaction."); - return false; - }; - if block_snapshot.winning_block_txid == current_mining_commit_tx { - let Ok(Some(last_parent_tenure_header)) = - NakamotoChainState::get_nakamoto_tenure_finish_block_header( - self.chainstate.db(), - &block_header.consensus_hash, - ) - else { - warn!("Failed loading last block of parent tenure"; "consensus_hash" => %block_header.consensus_hash); - return false; - }; - let parent_tenure_info = ParentTenureInfo { - parent_tenure_blocks: 1 + last_parent_tenure_header - .stacks_block_height - - block_header.stacks_block_height, - parent_tenure_consensus_hash: new_burn_view, - }; - match self.start_new_tenure( - block_header.index_block_hash(), - block_snapshot, - MinerReason::Extended { - parent_tenure_info, - tenure_change_mined: false, - }, - ) { - Ok(()) => { - debug!("Relayer: successfully started new tenure."); - } - Err(e) => { - error!("Relayer: Failed to start new tenure: {:?}", e); - } - } - } + debug!("Relayer: handled continue tenure."); } Err(e) => { - error!("Relayer: Failed to stop tenure: {:?}", e); + error!("Relayer: Failed to continue tenure: {:?}", e); + return false; } } } From 9920ce5e8f44a13a0a993d5ce25bc01a677675ac Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 28 May 2024 22:52:05 -0400 Subject: [PATCH 0142/1400] fix: use sortition handle at correct tip --- stackslib/src/chainstate/burn/db/sortdb.rs | 14 +++++ stackslib/src/chainstate/coordinator/mod.rs | 4 +- .../chainstate/nakamoto/coordinator/mod.rs | 4 +- stackslib/src/chainstate/stacks/boot/mod.rs | 50 ++++++++------- .../chainstate/stacks/boot/signers_tests.rs | 44 +++++++------ stackslib/src/chainstate/stacks/db/blocks.rs | 5 +- .../src/chainstate/stacks/db/unconfirmed.rs | 61 ++++++++++--------- stackslib/src/clarity_vm/database/mod.rs | 15 +++-- stackslib/src/core/mempool.rs | 9 ++- stackslib/src/main.rs | 2 +- stackslib/src/net/api/callreadonly.rs | 2 +- stackslib/src/net/api/getaccount.rs | 2 +- stackslib/src/net/api/getconstantval.rs | 2 +- stackslib/src/net/api/getcontractabi.rs | 2 +- stackslib/src/net/api/getcontractsrc.rs | 2 +- stackslib/src/net/api/getdatavar.rs | 2 +- .../src/net/api/getistraitimplemented.rs | 2 +- stackslib/src/net/api/getmapentry.rs | 2 +- stackslib/src/net/api/getpoxinfo.rs | 26 ++++---- stackslib/src/net/api/postblock_proposal.rs | 2 +- stackslib/src/net/p2p.rs | 2 +- stackslib/src/net/relay.rs | 6 +- stackslib/src/net/stackerdb/config.rs | 2 +- stackslib/src/util_lib/db.rs | 12 ++++ .../stacks-node/src/nakamoto_node/miner.rs | 20 ++++-- testnet/stacks-node/src/neon_node.rs | 17 +++--- testnet/stacks-node/src/node.rs | 3 +- testnet/stacks-node/src/run_loop/helium.rs | 23 ++++--- testnet/stacks-node/src/tests/epoch_21.rs | 4 +- testnet/stacks-node/src/tests/epoch_22.rs | 4 +- testnet/stacks-node/src/tests/epoch_24.rs | 4 +- .../src/tests/neon_integrations.rs | 30 +++++++-- 32 files changed, 239 insertions(+), 140 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index eb49daa50a..3fa528995a 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2653,6 +2653,20 @@ impl SortitionDB { ) } + pub fn index_handle_at_block<'a>( + &'a self, + chainstate: &StacksChainState, + stacks_block_id: &StacksBlockId, + ) -> Result, db_error> { + let (consensus_hash, bhh) = match chainstate.get_block_header_hashes(stacks_block_id) { + Ok(Some(x)) => x, + _ => return Err(db_error::NotFoundError), + }; + let snapshot = SortitionDB::get_block_snapshot_consensus(&self.conn(), &consensus_hash)? + .ok_or(db_error::NotFoundError)?; + Ok(self.index_handle(&snapshot.sortition_id)) + } + pub fn tx_handle_begin<'a>( &'a mut self, chain_tip: &SortitionId, diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 2836ec7b4c..96eae44641 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -3274,7 +3274,7 @@ impl< if let Some(ref mut estimator) = self.cost_estimator { let stacks_epoch = self .sortition_db - .index_handle_at_tip() + .index_conn() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); estimator.notify_block( @@ -3288,7 +3288,7 @@ impl< if let Some(ref mut estimator) = self.fee_estimator { let stacks_epoch = self .sortition_db - .index_handle_at_tip() + .index_conn() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); if let Err(e) = diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index abb89e1839..f399615c80 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -656,7 +656,7 @@ impl< if let Some(ref mut estimator) = self.cost_estimator { let stacks_epoch = self .sortition_db - .index_handle_at_tip() + .index_conn() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); estimator.notify_block( @@ -670,7 +670,7 @@ impl< if let Some(ref mut estimator) = self.fee_estimator { let stacks_epoch = self .sortition_db - .index_handle_at_tip() + .index_conn() .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) .expect("Could not find a stacks epoch."); if let Err(e) = estimator.notify_block(&block_receipt, &stacks_epoch.block_limit) { diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index e6e02eaab5..9c2262d958 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -569,7 +569,7 @@ impl StacksChainState { boot_contract_name: &str, code: &str, ) -> Result { - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(self, stacks_block_id)?; let dbconn = self.state_index.sqlite_conn(); self.clarity_state .eval_read_only( @@ -631,24 +631,28 @@ impl StacksChainState { let cost_track = LimitedCostTracker::new_free(); let sender = PrincipalData::Standard(StandardPrincipalData::transient()); let result = self - .maybe_read_only_clarity_tx(&sortdb.index_handle_at_tip(), tip, |clarity_tx| { - clarity_tx.with_readonly_clarity_env( - mainnet, - chain_id, - ClarityVersion::Clarity1, - sender, - None, - cost_track, - |env| { - env.execute_contract( - &contract_identifier, - function, - &[SymbolicExpression::atom_value(Value::UInt(reward_cycle))], - true, - ) - }, - ) - })? + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(self, tip)?, + tip, + |clarity_tx| { + clarity_tx.with_readonly_clarity_env( + mainnet, + chain_id, + ClarityVersion::Clarity1, + sender, + None, + cost_track, + |env| { + env.execute_contract( + &contract_identifier, + function, + &[SymbolicExpression::atom_value(Value::UInt(reward_cycle))], + true, + ) + }, + ) + }, + )? .ok_or_else(|| Error::NoSuchBlockError)?? .expect_u128() .expect("FATAL: unexpected PoX structure"); @@ -1843,7 +1847,9 @@ pub mod test { let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); chainstate .with_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb + .index_handle_at_block(&chainstate, &stacks_block_id) + .unwrap(), &stacks_block_id, |clarity_tx| StacksChainState::get_account(clarity_tx, addr), ) @@ -1859,7 +1865,9 @@ pub mod test { let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); chainstate .with_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb + .index_handle_at_block(chainstate, &stacks_block_id) + .unwrap(), &stacks_block_id, |clarity_tx| StacksChainState::get_contract(clarity_tx, addr).unwrap(), ) diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index 67fffd878a..ad4a8ae2db 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -483,26 +483,30 @@ pub fn readonly_call_with_sortdb( args: Vec, ) -> Value { chainstate - .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), tip, |connection| { - connection - .with_readonly_clarity_env( - false, - 0x80000000, - ClarityVersion::Clarity2, - PrincipalData::from(boot_code_addr(false)), - None, - LimitedCostTracker::new_free(), - |env| { - env.execute_contract_allow_private( - &boot_code_id(&boot_contract, false), - &function_name, - &symbols_from_values(args), - true, - ) - }, - ) - .unwrap() - }) + .with_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, tip).unwrap(), + tip, + |connection| { + connection + .with_readonly_clarity_env( + false, + 0x80000000, + ClarityVersion::Clarity2, + PrincipalData::from(boot_code_addr(false)), + None, + LimitedCostTracker::new_free(), + |env| { + env.execute_contract_allow_private( + &boot_code_id(&boot_contract, false), + &function_name, + &symbols_from_values(args), + true, + ) + }, + ) + .unwrap() + }, + ) .unwrap() } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 19c7f59969..dfff8c52a1 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -11926,9 +11926,12 @@ pub mod test { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); let tip_hash = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh); + let iconn = sortdb + .index_handle_at_block(peer.chainstate(), &tip_hash) + .unwrap(); let account = peer .chainstate() - .with_read_only_clarity_tx(&sortdb.index_handle_at_tip(), &tip_hash, |conn| { + .with_read_only_clarity_tx(&iconn, &tip_hash, |conn| { StacksChainState::get_account(conn, &addr.to_account_principal()) }) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 0e3ae3ae88..fc928fa196 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -777,8 +777,9 @@ mod test { // build 1-block microblock stream let microblocks = { let sortdb = peer.sortdb.take().unwrap(); - let sort_iconn = sortdb.index_handle_at_tip(); - + let sort_iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); peer.chainstate() .reload_unconfirmed_state(&sort_iconn, canonical_tip.clone()) .unwrap(); @@ -851,22 +852,22 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); + let iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) + .reload_unconfirmed_state(&iconn, canonical_tip.clone()) .unwrap(); let recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx( - &sortdb.index_handle_at_tip(), - |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }, - ) + .with_read_only_unconfirmed_clarity_tx(&iconn, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }) .unwrap() .unwrap(); peer.sortdb = Some(sortdb); @@ -877,19 +878,18 @@ mod test { SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); let sortdb = peer.sortdb.take().unwrap(); + let iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); let confirmed_recv_balance = peer .chainstate() - .with_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), - &canonical_tip, - |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_stx_balance(&recv_addr.into()) - .unwrap() - }) - }, - ) + .with_read_only_clarity_tx(&iconn, &canonical_tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_stx_balance(&recv_addr.into()) + .unwrap() + }) + }) .unwrap(); peer.sortdb = Some(sortdb); @@ -1014,9 +1014,11 @@ mod test { // build microblock stream iteratively, and test balances at each additional microblock let sortdb = peer.sortdb.take().unwrap(); let microblocks = { - let sort_iconn = sortdb.index_handle_at_tip(); + let sort_iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) + .reload_unconfirmed_state(&sort_iconn, canonical_tip.clone()) .unwrap(); let mut microblock_builder = StacksMicroblockBuilder::new( @@ -1399,13 +1401,16 @@ mod test { // process microblock stream to generate unconfirmed state let sortdb = peer.sortdb.take().unwrap(); + let iconn = sortdb + .index_handle_at_block(&peer.chainstate(), &canonical_tip) + .unwrap(); peer.chainstate() - .reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip.clone()) + .reload_unconfirmed_state(&iconn, canonical_tip.clone()) .unwrap(); let db_recv_balance = peer .chainstate() - .with_read_only_unconfirmed_clarity_tx(&sortdb.index_handle_at_tip(), |clarity_tx| { + .with_read_only_unconfirmed_clarity_tx(&iconn, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { clarity_db .get_account_stx_balance(&recv_addr.into()) diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 8282a5fd8d..1aff287eaf 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -440,10 +440,7 @@ impl SortitionDBRef for SortitionHandleConn<'_> { parent_stacks_block_burn_ht: u64, cycle_index: u64, ) -> Result, ChainstateError> { - let readonly_marf = self - .index - .reopen_readonly() - .expect("BUG: failure trying to get a read-only interface into the sortition db."); + let readonly_marf = self.index.reopen_readonly()?; let mut context = self.context.clone(); context.chain_tip = sortition_id.clone(); let mut handle = SortitionHandleConn::new(&readonly_marf, context); @@ -592,12 +589,18 @@ impl BurnStateDB for SortitionHandleTx<'_> { impl BurnStateDB for SortitionHandleConn<'_> { fn get_tip_burn_block_height(&self) -> Option { - let tip = SortitionDB::get_canonical_burn_chain_tip(self.conn()).ok()?; + let tip = match SortitionDB::get_block_snapshot(self.conn(), &self.context.chain_tip) { + Ok(Some(x)) => x, + _ => return None, + }; tip.block_height.try_into().ok() } fn get_tip_sortition_id(&self) -> Option { - let tip = SortitionDB::get_canonical_burn_chain_tip(self.conn()).ok()?; + let tip = match SortitionDB::get_block_snapshot(self.conn(), &self.context.chain_tip) { + Ok(Some(x)) => x, + _ => return None, + }; Some(tip.sortition_id) } diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 078e25586d..dc67539573 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -321,8 +321,15 @@ impl MemPoolAdmitter { tx: &StacksTransaction, tx_size: u64, ) -> Result<(), MemPoolRejection> { + let sortition_id = match SortitionDB::get_sortition_id_by_consensus( + &sortdb.conn(), + &self.cur_consensus_hash, + ) { + Ok(Some(x)) => x, + _ => return Err(MemPoolRejection::DBError(db_error::NotFoundError)), + }; chainstate.will_admit_mempool_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&sortition_id), &self.cur_consensus_hash, &self.cur_block, tx, diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index e2f3110d67..a10353ced3 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -641,7 +641,7 @@ simulating a miner. let result = StacksBlockBuilder::build_anchored_block( &chain_state, - &sort_db.index_handle_at_tip(), + &sort_db.index_handle(&chain_tip.sortition_id), &mut mempool_db, &parent_header, chain_tip.total_burn, diff --git a/stackslib/src/net/api/callreadonly.rs b/stackslib/src/net/api/callreadonly.rs index dc24de1ae4..150ed1ca1e 100644 --- a/stackslib/src/net/api/callreadonly.rs +++ b/stackslib/src/net/api/callreadonly.rs @@ -235,7 +235,7 @@ impl RPCRequestHandler for RPCCallReadOnlyRequestHandler { cost_limit.write_count = 0; chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { let epoch = clarity_tx.get_epoch(); diff --git a/stackslib/src/net/api/getaccount.rs b/stackslib/src/net/api/getaccount.rs index cbd4338ac6..7cbf0a8210 100644 --- a/stackslib/src/net/api/getaccount.rs +++ b/stackslib/src/net/api/getaccount.rs @@ -147,7 +147,7 @@ impl RPCRequestHandler for RPCGetAccountRequestHandler { let account_opt_res = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { diff --git a/stackslib/src/net/api/getconstantval.rs b/stackslib/src/net/api/getconstantval.rs index 4b3068dd5d..b08d1c6835 100644 --- a/stackslib/src/net/api/getconstantval.rs +++ b/stackslib/src/net/api/getconstantval.rs @@ -145,7 +145,7 @@ impl RPCRequestHandler for RPCGetConstantValRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { diff --git a/stackslib/src/net/api/getcontractabi.rs b/stackslib/src/net/api/getcontractabi.rs index 35914de9e9..d98c2c6623 100644 --- a/stackslib/src/net/api/getcontractabi.rs +++ b/stackslib/src/net/api/getcontractabi.rs @@ -133,7 +133,7 @@ impl RPCRequestHandler for RPCGetContractAbiRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { let epoch = clarity_tx.get_epoch(); diff --git a/stackslib/src/net/api/getcontractsrc.rs b/stackslib/src/net/api/getcontractsrc.rs index 1c20bffd1b..139995988e 100644 --- a/stackslib/src/net/api/getcontractsrc.rs +++ b/stackslib/src/net/api/getcontractsrc.rs @@ -141,7 +141,7 @@ impl RPCRequestHandler for RPCGetContractSrcRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|db| { diff --git a/stackslib/src/net/api/getdatavar.rs b/stackslib/src/net/api/getdatavar.rs index 124fb4856f..f3a4acb7d3 100644 --- a/stackslib/src/net/api/getdatavar.rs +++ b/stackslib/src/net/api/getdatavar.rs @@ -155,7 +155,7 @@ impl RPCRequestHandler for RPCGetDataVarRequestHandler { let data_opt = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { diff --git a/stackslib/src/net/api/getistraitimplemented.rs b/stackslib/src/net/api/getistraitimplemented.rs index aac4079074..3b8e07ad1a 100644 --- a/stackslib/src/net/api/getistraitimplemented.rs +++ b/stackslib/src/net/api/getistraitimplemented.rs @@ -161,7 +161,7 @@ impl RPCRequestHandler for RPCGetIsTraitImplementedRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|db| { diff --git a/stackslib/src/net/api/getmapentry.rs b/stackslib/src/net/api/getmapentry.rs index 5d0cd7504f..cb318b5996 100644 --- a/stackslib/src/net/api/getmapentry.rs +++ b/stackslib/src/net/api/getmapentry.rs @@ -184,7 +184,7 @@ impl RPCRequestHandler for RPCGetMapEntryRequestHandler { let data_resp = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { chainstate.maybe_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle_at_block(chainstate, &tip)?, &tip, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { diff --git a/stackslib/src/net/api/getpoxinfo.rs b/stackslib/src/net/api/getpoxinfo.rs index c3de3ab0da..81868c81f8 100644 --- a/stackslib/src/net/api/getpoxinfo.rs +++ b/stackslib/src/net/api/getpoxinfo.rs @@ -190,17 +190,21 @@ impl RPCPoxInfoData { + 1; let data = chainstate - .maybe_read_only_clarity_tx(&sortdb.index_handle_at_tip(), tip, |clarity_tx| { - clarity_tx.with_readonly_clarity_env( - mainnet, - chain_id, - ClarityVersion::Clarity2, - sender, - None, - cost_track, - |env| env.execute_contract(&contract_identifier, function, &[], true), - ) - }) + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(chainstate, tip)?, + tip, + |clarity_tx| { + clarity_tx.with_readonly_clarity_env( + mainnet, + chain_id, + ClarityVersion::Clarity2, + sender, + None, + cost_track, + |env| env.execute_contract(&contract_identifier, function, &[], true), + ) + }, + ) .map_err(|_| NetError::NotFoundError)?; let res = match data { diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 0423d5c57b..43189b6847 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -206,8 +206,8 @@ impl NakamotoBlockProposal { }); } - let burn_dbconn: SortitionHandleConn = sortdb.index_handle_at_tip(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; + let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip); let mut db_handle = sortdb.index_handle(&sort_tip); let expected_burn_opt = NakamotoChainState::get_expected_burns(&mut db_handle, chainstate.db(), &self.block)?; diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index c1318e6647..f853bb795a 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5892,7 +5892,7 @@ impl PeerNetwork { return false; } let stacks_epoch = match sortdb - .index_handle_at_tip() + .index_conn() .get_stacks_epoch(burnchain_tip.block_height as u32) { Some(epoch) => epoch, diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index cb8605ee41..fa11b575d4 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1840,8 +1840,10 @@ impl Relayer { "Reload unconfirmed state off of {}/{}", &canonical_consensus_hash, &canonical_block_hash ); - let processed_unconfirmed_state = - chainstate.reload_unconfirmed_state(&sortdb.index_handle_at_tip(), canonical_tip)?; + let processed_unconfirmed_state = chainstate.reload_unconfirmed_state( + &sortdb.index_handle_at_block(chainstate, &canonical_tip)?, + canonical_tip, + )?; Ok(processed_unconfirmed_state) } diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index de642b98bb..284d3d52b4 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -493,7 +493,7 @@ impl StackerDBConfig { let cur_epoch = SortitionDB::get_stacks_epoch(sortition_db.conn(), burn_tip.block_height)? .expect("FATAL: no epoch defined"); - let dbconn = sortition_db.index_handle_at_tip(); + let dbconn = sortition_db.index_handle_at_block(chainstate, &chain_tip_hash)?; // check the target contract let res = chainstate.with_read_only_clarity_tx(&dbconn, &chain_tip_hash, |clarity_tx| { diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index 940d79bafe..aa947046f4 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -34,8 +34,10 @@ use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; +use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::stacks::index::marf::{MarfConnection, MarfTransaction, MARF}; use crate::chainstate::stacks::index::{Error as MARFError, MARFValue, MarfTrieId}; +use crate::core::{StacksEpoch, StacksEpochId}; pub type DBConn = rusqlite::Connection; pub type DBTx<'a> = rusqlite::Transaction<'a>; @@ -630,6 +632,16 @@ impl<'a, C, T: MarfTrieId> IndexDBConn<'a, C, T> { pub fn conn(&self) -> &DBConn { self.index.sqlite_conn() } + + pub fn get_stacks_epoch_by_epoch_id(&self, epoch_id: &StacksEpochId) -> Option { + SortitionDB::get_stacks_epoch_by_epoch_id(self.conn(), epoch_id) + .expect("BUG: failed to get epoch for epoch id") + } + + pub fn get_stacks_epoch(&self, height: u32) -> Option { + SortitionDB::get_stacks_epoch(self.conn(), height as u64) + .expect("BUG: failed to get epoch for burn block height") + } } impl<'a, C, T: MarfTrieId> Deref for IndexDBConn<'a, C, T> { diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a7e78bc37d..40799bafa9 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -288,7 +288,7 @@ impl BlockMinerThread { let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); - let sortition_handle = sort_db.index_handle_at_tip(); + let sortition_handle = sort_db.index_handle(&tip.sortition_id); let Ok(aggregate_public_key) = NakamotoChainState::get_aggregate_public_key( &mut chain_state, &sort_db, @@ -411,7 +411,9 @@ impl BlockMinerThread { // Get all nonces for the signers from clarity DB to use to validate transactions let account_nonces = chainstate .with_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb + .index_handle_at_block(chainstate, &stacks_block_id) + .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &stacks_block_id, |clarity_tx| { clarity_tx.with_clarity_db_readonly(|clarity_db| { @@ -479,7 +481,8 @@ impl BlockMinerThread { ) .expect("FATAL: could not open sortition DB"); - let mut sortition_handle = sort_db.index_handle_at_tip(); + let mut sortition_handle = + sort_db.index_handle_at_block(&chain_state, &block.block_id())?; let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; NakamotoChainState::accept_block( &chainstate_config, @@ -726,9 +729,10 @@ impl BlockMinerThread { } } + let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); + // create our coinbase if this is the first block we've mined this tenure let tenure_start_info = if let Some(ref par_tenure_info) = parent_block_info.parent_tenure { - let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let current_miner_nonce = parent_block_info.coinbase_nonce; let tenure_change_tx = self.generate_tenure_change_tx( current_miner_nonce, @@ -761,7 +765,9 @@ impl BlockMinerThread { // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, - &burn_db.index_handle_at_tip(), + &burn_db + .index_handle_at_block(&chain_state, &parent_block_id) + .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &mut mem_pool, &parent_block_info.stacks_parent_header, &self.burn_block.consensus_hash, @@ -937,7 +943,9 @@ impl ParentStacksBlockInfo { let principal = miner_address.into(); let account = chain_state .with_read_only_clarity_tx( - &burn_db.index_handle_at_tip(), + &burn_db + .index_handle_at_block(&chain_state, &stacks_tip_header.index_block_hash()) + .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &stacks_tip_header.index_block_hash(), |conn| StacksChainState::get_account(conn, &principal), ) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 7a82b7ce80..e84e69fc85 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -727,7 +727,7 @@ impl MicroblockMinerThread { .unwrap_or(0) ); - let burn_height = + let block_snapshot = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &self.parent_consensus_hash) .map_err(|e| { error!("Failed to find block snapshot for mined block: {}", e); @@ -736,8 +736,8 @@ impl MicroblockMinerThread { .ok_or_else(|| { error!("Failed to find block snapshot for mined block"); ChainstateError::NoSuchBlockError - })? - .block_height; + })?; + let burn_height = block_snapshot.block_height; let ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), burn_height).map_err(|e| { error!("Failed to get AST rules for microblock: {}", e); @@ -753,7 +753,10 @@ impl MicroblockMinerThread { .epoch_id; let mint_result = { - let ic = sortdb.index_handle_at_tip(); + let ic = sortdb.index_handle_at_block( + &chainstate, + &block_snapshot.get_canonical_stacks_block_id(), + )?; let mut microblock_miner = match StacksMicroblockBuilder::resume_unconfirmed( chainstate, &ic, @@ -2352,7 +2355,7 @@ impl BlockMinerThread { } let (anchored_block, _, _) = match StacksBlockBuilder::build_anchored_block( &chain_state, - &burn_db.index_handle_at_tip(), + &burn_db.index_handle(&burn_tip.sortition_id), &mut mem_pool, &parent_block_info.stacks_parent_header, parent_block_info.parent_block_total_burn, @@ -2382,7 +2385,7 @@ impl BlockMinerThread { // try again match StacksBlockBuilder::build_anchored_block( &chain_state, - &burn_db.index_handle_at_tip(), + &burn_db.index_handle(&burn_tip.sortition_id), &mut mem_pool, &parent_block_info.stacks_parent_header, parent_block_info.parent_block_total_burn, @@ -4066,7 +4069,7 @@ impl ParentStacksBlockInfo { let principal = miner_address.into(); let account = chain_state .with_read_only_clarity_tx( - &burn_db.index_handle_at_tip(), + &burn_db.index_handle(&burn_chain_tip.sortition_id), &StacksBlockHeader::make_index_block_hash(mine_tip_ch, mine_tip_bh), |conn| StacksChainState::get_account(conn, &principal), ) diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index ba5b7e204e..4c1d8d39cb 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -3,7 +3,6 @@ use std::net::SocketAddr; use std::thread::JoinHandle; use std::{env, thread, time}; -use clarity::vm::database::BurnStateDB; use rand::RngCore; use stacks::burnchains::bitcoin::BitcoinNetworkType; use stacks::burnchains::db::BurnchainDB; @@ -891,7 +890,7 @@ impl Node { let mut fee_estimator = self.config.make_fee_estimator(); let stacks_epoch = db - .index_handle_at_tip() + .index_conn() .get_stacks_epoch_by_epoch_id(&processed_block.evaluated_epoch) .expect("Could not find a stacks epoch."); if let Some(estimator) = cost_estimator.as_mut() { diff --git a/testnet/stacks-node/src/run_loop/helium.rs b/testnet/stacks-node/src/run_loop/helium.rs index 4c81867369..2db7a3a090 100644 --- a/testnet/stacks-node/src/run_loop/helium.rs +++ b/testnet/stacks-node/src/run_loop/helium.rs @@ -89,11 +89,14 @@ impl RunLoop { let _ = burnchain.sortdb_mut(); // Run the tenure, keep the artifacts - let artifacts_from_1st_tenure = - match first_tenure.run(&burnchain.sortdb_ref().index_handle_at_tip()) { - Some(res) => res, - None => panic!("Error while running 1st tenure"), - }; + let artifacts_from_1st_tenure = match first_tenure.run( + &burnchain + .sortdb_ref() + .index_handle(&burnchain_tip.block_snapshot.sortition_id), + ) { + Some(res) => res, + None => panic!("Error while running 1st tenure"), + }; // Tenures are instantiating their own chainstate, so that nodes can keep a clean chainstate, // while having the option of running multiple tenures concurrently and try different strategies. @@ -136,7 +139,9 @@ impl RunLoop { &burnchain_tip, &chain_tip, &mut self.node.chain_state, - &burnchain.sortdb_ref().index_handle_at_tip(), + &burnchain + .sortdb_ref() + .index_handle(&burnchain_tip.block_snapshot.sortition_id), ); // If the node we're looping on won the sortition, initialize and configure the next tenure @@ -160,7 +165,11 @@ impl RunLoop { &chain_tip, &mut tenure, ); - tenure.run(&burnchain.sortdb_ref().index_handle_at_tip()) + tenure.run( + &burnchain + .sortdb_ref() + .index_handle(&burnchain_tip.block_snapshot.sortition_id), + ) } None => None, }; diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 947eb633ee..ea9fe27569 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -5143,7 +5143,7 @@ fn test_v1_unlock_height_with_current_stackers() { let sortdb = btc_regtest_controller.sortdb_mut(); for height in 211..tip_info.burn_block_height { - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -5423,7 +5423,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { let sortdb = btc_regtest_controller.sortdb_mut(); for height in 211..tip_info.burn_block_height { - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index f3c48adc86..289d09be64 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -400,7 +400,7 @@ fn disable_pox() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -1069,7 +1069,7 @@ fn pox_2_unlock_all() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index d2e2258a1a..3fc3b3d590 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -493,7 +493,7 @@ fn fix_to_pox_contract() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, @@ -1213,7 +1213,7 @@ fn verify_auto_unlock_behavior() { reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); } - let iconn = sortdb.index_handle_at_tip(); + let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate .clarity_eval_read_only( &iconn, diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 236a14d94b..157daf0bde 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -3464,14 +3464,20 @@ fn microblock_fork_poison_integration_test() { chainstate .reload_unconfirmed_state( - &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), tip_hash, ) .unwrap(); let first_microblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), consensus_hash, stacks_block.clone(), vec![unconfirmed_tx], @@ -3722,14 +3728,20 @@ fn microblock_integration_test() { chainstate .reload_unconfirmed_state( - &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), tip_hash, ) .unwrap(); let first_microblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), consensus_hash, stacks_block.clone(), vec![unconfirmed_tx], @@ -9141,7 +9153,10 @@ fn use_latest_tip_integration_test() { // Initialize the unconfirmed state. chainstate .reload_unconfirmed_state( - &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), tip_hash, ) .unwrap(); @@ -9166,7 +9181,10 @@ fn use_latest_tip_integration_test() { let mblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller.sortdb_ref().index_handle_at_tip(), + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), consensus_hash, stacks_block.clone(), vec_tx, From 74b4a92e221d5360072362893f31f721fb4f4504 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 28 May 2024 14:59:18 +0300 Subject: [PATCH 0143/1400] Add the unhappy path cases for `StackAggCommitIndexedAuthCommand_Err` This commit: - adds 3 unhappy path cases for the `stack-aggregation-commit-indexed` PoX-4 method, called using an authorization. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 87 ++++++++++++ ...AggregationCommitIndexedAuthCommand_Err.ts | 133 ++++++++++++++++++ 2 files changed, 220 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index bfe600e594..4bdb6f5da3 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -14,6 +14,7 @@ import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationCommitSigCommand_Err"; import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCommitAuthCommand_Err"; import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; +import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggregationCommitIndexedAuthCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, @@ -581,6 +582,92 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), ), + // StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedAuthCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitIndexedAuthCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + !(operator.amountToCommit > 0) + ) { + model.trackCommandRun( + "StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedAuthCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitIndexedAuthCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !(operator.lockedAddresses.length > 0) && + !(operator.amountToCommit >= model.stackingMinimum) + ) { + model.trackCommandRun( + "StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitIndexedAuthCommand_Err_Stacking_Threshold_Not_Met + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedAuthCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitIndexedAuthCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + operator.amountToCommit > 0 + ) { + model.trackCommandRun( + "StackAggregationCommitIndexedAuthCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts new file mode 100644 index 0000000000..92ebfa0d19 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts @@ -0,0 +1,133 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackAggregationCommitIndexedAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationCommitIndexedAuthCommand_Err + implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationCommitIndexedAuthCommand_Err` to lock uSTX + * for stacking. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + // Act + + // Include the authorization and the `stack-aggregation-commit-indexed` + // transactions in a single block. This way we ensure both the authorization + // and the stack-aggregation-commit-indexed transactions are called during + // the same reward cycle, so the authorization currentRewCycle param is + // relevant for the upcoming stack-aggregation-commit-indexed call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit-indexed", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + ]); + + // Assert + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr( + Cl.int(this.errorCode), + ); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-commit-indexed", + "amount committed", + committedAmount.toString(), + "authorization", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit-indexed auth-id ${this.authId}`; + } +} From 91f99db0e1b1a8a671f1a54689361d2eb5124b6d Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 28 May 2024 15:09:19 +0300 Subject: [PATCH 0144/1400] Remove all files containing `_Err` from command tracking The command run tracking for the unhappy paths was moved inside the commands' check function. No need to report the run using the file name anymore. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index e007cbb2ad..1f15e6a56c 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -110,12 +110,7 @@ it("statefully interacts with PoX-4", async () => { const statistics = fs.readdirSync(path.join(__dirname)).filter((file) => file.startsWith("pox_") && file.endsWith(".ts") && file !== "pox_CommandModel.ts" && file !== "pox_Commands.ts" && - file !== "pox_StackStxAuthCommand_Err.ts" && - file !== "pox_StackStxSigCommand_Err.ts" && - file !== "pox_RevokeDelegateStxCommand_Err.ts" && - file !== "pox_DelegateStxCommand_Err.ts" && - file !== "pox_StackAggregationCommitSigCommand_Err.ts" && - file !== "pox_StackAggregationCommitAuthCommand_Err.ts" + !file.includes("_Err") ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. // This is the initial state of the model. From 48a759aa574f8fd6baab91b650445e0482a3d360 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 28 May 2024 16:00:05 +0300 Subject: [PATCH 0145/1400] Add one unhappy path case for `StackAggIncreaseCommand_Err` This commit: - adds one unhappy path case for the `stack-aggregation-increase` PoX-4 method, called using an authorization. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 44 ++++++ ...pox_StackAggregationIncreaseCommand_Err.ts | 143 ++++++++++++++++++ 2 files changed, 187 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 4bdb6f5da3..26da594c60 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -15,6 +15,7 @@ import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationComm import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCommitAuthCommand_Err"; import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggregationCommitIndexedAuthCommand_Err"; +import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncreaseCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, @@ -668,6 +669,49 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, ), ), + // StackAggregationIncreaseCommand_Err_Stacking_No_Such_Principal + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).chain((r) => { + const operator = stackers.get(r.wallet.stxAddress)!; + const committedRewCycleIndexesOrFallback = + operator.committedRewCycleIndexes.length > 0 + ? operator.committedRewCycleIndexes + : [-1]; + return fc + .record({ + rewardCycleIndex: fc.constantFrom( + ...committedRewCycleIndexesOrFallback, + ), + }) + .map((cycleIndex) => ({ ...r, ...cycleIndex })); + }) + .map( + (r: { wallet: Wallet; rewardCycleIndex: number; authId: number }) => + new StackAggregationIncreaseCommand_Err( + r.wallet, + r.rewardCycleIndex, + r.authId, + function ( + this: StackAggregationIncreaseCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + if ( + operator.lockedAddresses.length > 0 && + this.rewardCycleIndex >= 0 && + !(operator.amountToCommit > 0) + ) { + model.trackCommandRun( + "StackAggregationIncreaseCommand_Err_Stacking_No_Such_Principal", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand_Err.ts new file mode 100644 index 0000000000..26fc49eb60 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand_Err.ts @@ -0,0 +1,143 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl, cvToJSON } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +type CheckFunc = ( + this: StackAggregationIncreaseCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationIncreaseCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly rewardCycleIndex: number; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationIncreaseCommand_Err` to commit partially + * stacked STX to a PoX address which has already received some STX. + * + * @param operator - Represents the `Operator`'s wallet. + * @param rewardCycleIndex - The cycle index to increase the commit for. + * @param authId - Unique `auth-id` for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + rewardCycleIndex: number, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.rewardCycleIndex = rewardCycleIndex; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const existingEntryCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-pox-address-list", + Cl.tuple({ + index: Cl.uint(this.rewardCycleIndex), + "reward-cycle": Cl.uint(currentRewCycle + 1), + }), + ); + + const totalStackedBefore = + cvToJSON(existingEntryCV).value.value["total-ustx"].value; + const maxAmount = committedAmount + Number(totalStackedBefore); + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. + period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase, agg-commit or agg-increase. + topic: Pox4SignatureTopic.AggregateIncrease, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: maxAmount, + }); + + // Act + const stackAggregationIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-increase", + [ + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (reward-cycle-index uint)) + Cl.uint(this.rewardCycleIndex), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationIncrease.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-increase", + "amount committed", + committedAmount.toString(), + "cycle index", + this.rewardCycleIndex.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-increase for index ${this.rewardCycleIndex}`; + } +} From 70990a4d7bee04ea1448b5ea416f3977a3051d2c Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 28 May 2024 20:22:13 +0300 Subject: [PATCH 0146/1400] Format using `deno` according to the other generators --- .../tests/pox-4/err_Commands.ts | 51 +++++++++---------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 26da594c60..4a01ac827a 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -686,32 +686,31 @@ export function ErrCommands( ), }) .map((cycleIndex) => ({ ...r, ...cycleIndex })); - }) - .map( - (r: { wallet: Wallet; rewardCycleIndex: number; authId: number }) => - new StackAggregationIncreaseCommand_Err( - r.wallet, - r.rewardCycleIndex, - r.authId, - function ( - this: StackAggregationIncreaseCommand_Err, - model: Readonly, - ): boolean { - const operator = model.stackers.get(this.operator.stxAddress)!; - if ( - operator.lockedAddresses.length > 0 && - this.rewardCycleIndex >= 0 && - !(operator.amountToCommit > 0) - ) { - model.trackCommandRun( - "StackAggregationIncreaseCommand_Err_Stacking_No_Such_Principal", - ); - return true; - } else return false; - }, - POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, - ), - ), + }).map( + (r: { wallet: Wallet; rewardCycleIndex: number; authId: number }) => + new StackAggregationIncreaseCommand_Err( + r.wallet, + r.rewardCycleIndex, + r.authId, + function ( + this: StackAggregationIncreaseCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + if ( + operator.lockedAddresses.length > 0 && + this.rewardCycleIndex >= 0 && + !(operator.amountToCommit > 0) + ) { + model.trackCommandRun( + "StackAggregationIncreaseCommand_Err_Stacking_No_Such_Principal", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), ]; return cmds; From c66e6794b8022e3f1aefcc663af3d7e1699ed375 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Wed, 29 May 2024 14:18:18 +0300 Subject: [PATCH 0147/1400] Add unhappy path cases for `DelegateStackStxCommand_Err` This commit: - adds 3 unhappy path cases for the `delegate-stack-stx` PoX-4 method. - adds the command run tracking inside the `check` method. - adds the expected `delegate-stack-stx` PoX-4 errors to the `POX_4_ERRORS` dictionary. - exports the `nextCycleFirstBlock` method from pox_commands, as it is used inside err_Commands. --- .../tests/pox-4/err_Commands.ts | 226 ++++++++++++++++++ .../tests/pox-4/pox_Commands.ts | 2 +- .../pox-4/pox_DelegateStackStxCommand_Err.ts | 105 ++++++++ 3 files changed, 332 insertions(+), 1 deletion(-) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 4a01ac827a..74c0170af5 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -16,12 +16,16 @@ import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCom import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggregationCommitIndexedAuthCommand_Err"; import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncreaseCommand_Err"; +import { currentCycleFirstBlock, nextCycleFirstBlock } from "./pox_Commands"; +import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, ERR_STACKING_NO_SUCH_PRINCIPAL: 4, + ERR_STACKING_PERMISSION_DENIED: 9, ERR_STACKING_THRESHOLD_NOT_MET: 11, ERR_STACKING_ALREADY_DELEGATED: 20, + ERR_DELEGATION_TOO_MUCH_LOCKED: 22, ERR_DELEGATION_ALREADY_REVOKED: 34, }; @@ -711,6 +715,228 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), ), + // DelegateStackStxCommand_Err_Delegation_Too_Much_Locked + fc.record({ + operator: fc.constantFrom(...wallets.values()), + startBurnHt: fc.integer({ + min: currentCycleFirstBlock(network), + max: nextCycleFirstBlock(network), + }), + period: fc.integer({ min: 1, max: 12 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + // Determine available stackers based on the operator + const availableStackers = operator.poolMembers.length > 0 + ? operator.poolMembers + : [r.operator.stxAddress]; + + return fc.record({ + stacker: fc.constantFrom(...availableStackers), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })).chain((resultWithStacker) => { + return fc.record({ + unlockBurnHt: fc.constant( + currentCycleFirstBlock(network) + + 1050 * (resultWithStacker.period + 1), + ), + }).map((additionalProps) => ({ + ...resultWithStacker, + ...additionalProps, + })); + }).chain((resultWithUnlockHeight) => { + return fc.record({ + amount: fc.bigInt({ + min: 0n, + max: 100_000_000_000_000n, + }), + }).map((amountProps) => ({ + ...resultWithUnlockHeight, + ...amountProps, + })); + }); + }).map((finalResult) => { + return new DelegateStackStxCommand_Err( + finalResult.operator, + finalResult.stacker, + finalResult.period, + finalResult.amount, + finalResult.unlockBurnHt, + function ( + this: DelegateStackStxCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + if ( + model.stackingMinimum > 0 && + !stackerWallet.isStacking && + stackerWallet.hasDelegated && + !(stackerWallet.delegatedMaxAmount >= Number(this.amountUstx)) && + Number(this.amountUstx) <= stackerWallet.ustxBalance && + Number(this.amountUstx) >= model.stackingMinimum && + operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt + ) { + model.trackCommandRun( + "DelegateStackStxCommand_Err_Delegation_Too_Much_Locked", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_DELEGATION_TOO_MUCH_LOCKED, + ); + }), + // DelegateStackStxCommand_Err_Stacking_Permission_Denied + fc.record({ + operator: fc.constantFrom(...wallets.values()), + startBurnHt: fc.integer({ + min: currentCycleFirstBlock(network), + max: nextCycleFirstBlock(network), + }), + period: fc.integer({ min: 1, max: 12 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + // Determine available stackers based on the operator + const availableStackers = operator.poolMembers.length > 0 + ? operator.poolMembers + : [r.operator.stxAddress]; + + return fc.record({ + stacker: fc.constantFrom(...availableStackers), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })).chain((resultWithStacker) => { + return fc.record({ + unlockBurnHt: fc.constant( + currentCycleFirstBlock(network) + + 1050 * (resultWithStacker.period + 1), + ), + }).map((additionalProps) => ({ + ...resultWithStacker, + ...additionalProps, + })); + }).chain((resultWithUnlockHeight) => { + return fc.record({ + amount: fc.bigInt({ + min: 0n, + max: BigInt( + stackers.get(resultWithUnlockHeight.stacker.stxAddress)! + .delegatedMaxAmount, + ), + }), + }).map((amountProps) => ({ + ...resultWithUnlockHeight, + ...amountProps, + })); + }); + }).map((finalResult) => { + return new DelegateStackStxCommand_Err( + finalResult.operator, + finalResult.stacker, + finalResult.period, + finalResult.amount, + finalResult.unlockBurnHt, + function ( + this: DelegateStackStxCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + if ( + model.stackingMinimum > 0 && + !stackerWallet.isStacking && + stackerWallet.hasDelegated && + stackerWallet.delegatedMaxAmount >= Number(this.amountUstx) && + Number(this.amountUstx) <= stackerWallet.ustxBalance && + Number(this.amountUstx) >= model.stackingMinimum && + !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt + ) { + model.trackCommandRun( + "DelegateStackStxCommand_Err_Stacking_Permission_Denied_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, + ); + }), + // DelegateStackStxCommand_Err_Stacking_Permission_Denied_2 + fc.record({ + operator: fc.constantFrom(...wallets.values()), + startBurnHt: fc.integer({ + min: currentCycleFirstBlock(network), + max: nextCycleFirstBlock(network), + }), + period: fc.integer({ min: 1, max: 12 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + // Determine available stackers based on the operator + const availableStackers = operator.poolMembers.length > 0 + ? operator.poolMembers + : [r.operator.stxAddress]; + + return fc.record({ + stacker: fc.constantFrom(...availableStackers), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })).chain((resultWithStacker) => { + return fc.record({ + unlockBurnHt: fc.constant( + currentCycleFirstBlock(network) + + 1050 * (resultWithStacker.period + 1), + ), + }).map((additionalProps) => ({ + ...resultWithStacker, + ...additionalProps, + })); + }).chain((resultWithUnlockHeight) => { + return fc.record({ + amount: fc.bigInt({ + min: 0n, + max: 100_000_000_000_000n, + }), + }).map((amountProps) => ({ + ...resultWithUnlockHeight, + ...amountProps, + })); + }); + }).map((finalResult) => { + return new DelegateStackStxCommand_Err( + finalResult.operator, + finalResult.stacker, + finalResult.period, + finalResult.amount, + finalResult.unlockBurnHt, + function ( + this: DelegateStackStxCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + if ( + model.stackingMinimum > 0 && + !stackerWallet.isStacking && + !(stackerWallet.hasDelegated) && + !(stackerWallet.delegatedMaxAmount >= Number(this.amountUstx)) && + Number(this.amountUstx) <= stackerWallet.ustxBalance && + Number(this.amountUstx) >= model.stackingMinimum && + !(operatorWallet.poolMembers.includes(this.stacker.stxAddress)) && + !(this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) + ) { + model.trackCommandRun( + "DelegateStackStxCommand_Err_Stacking_Permission_Denied_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, + ); + }), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts index d44ef23b22..f937efa16f 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts @@ -476,7 +476,7 @@ export const currentCycleFirstBlock = (network: Simnet) => ).result, )); -const nextCycleFirstBlock = (network: Simnet) => +export const nextCycleFirstBlock = (network: Simnet) => Number(cvToValue( network.callReadOnlyFn( "ST000000000000000000002AMW42H.pox-4", diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts new file mode 100644 index 0000000000..b4e5a491dd --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts @@ -0,0 +1,105 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl, ClarityValue, cvToValue } from "@stacks/transactions"; + +type CheckFunc = ( + this: DelegateStackStxCommand_Err, + model: Readonly, +) => boolean; + +export class DelegateStackStxCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly period: number; + readonly amountUstx: bigint; + readonly unlockBurnHt: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `DelegateStackStxCommand` to lock uSTX as a Pool Operator + * on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. + * @param stacker - Represents the STacker's wallet. + * @param period - Number of reward cycles to lock uSTX. + * @param amountUstx - The uSTX amount stacked by the Operator on behalf + * of the Stacker. + * @param unlockBurnHt - The burn height at which the uSTX is unlocked. + */ + constructor( + operator: Wallet, + stacker: Wallet, + period: number, + amountUstx: bigint, + unlockBurnHt: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.stacker = stacker; + this.period = period; + this.amountUstx = amountUstx; + this.unlockBurnHt = unlockBurnHt; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + // Act + const delegateStackStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-stx", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (amount-ustx uint) + Cl.uint(this.amountUstx), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight), + // (lock-period uint) + Cl.uint(this.period), + ], + this.operator.stxAddress, + ); + + // Assert + expect(delegateStackStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label} Ӿ ${this.stacker.label}`, + "delegate-stack-stx", + "lock-amount", + this.amountUstx.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} delegate-stack-stx stacker ${this.stacker.label} period ${this.period}`; + } +} From 4804c17908b34b6c9be1dc4d6a50d17967da9d65 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 15 May 2024 21:56:51 -0400 Subject: [PATCH 0148/1400] test: Check that Nakamoto miner produces blocks using `nakamoto_attempt_time_ms` --- docs/mining.md | 2 +- testnet/stacks-node/src/config.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 301 +++++++++++++++++- 3 files changed, 302 insertions(+), 3 deletions(-) diff --git a/docs/mining.md b/docs/mining.md index 891358af03..e113f12d93 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -26,7 +26,7 @@ subsequent_attempt_time_ms = 60000 # Time to spend mining a microblock, in milliseconds. microblock_attempt_time_ms = 30000 # Time to spend mining a Nakamoto block, in milliseconds. -nakamoto_attempt_time_ms = 10000 +nakamoto_attempt_time_ms = 20000 ``` You can verify that your node is operating as a miner by checking its log output diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index ad02341343..c101da090d 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2320,7 +2320,7 @@ impl Default for MinerConfig { first_attempt_time_ms: 10, subsequent_attempt_time_ms: 120_000, microblock_attempt_time_ms: 30_000, - nakamoto_attempt_time_ms: 10_000, + nakamoto_attempt_time_ms: 20_000, probability_pick_no_estimate_tx: 25, block_reward_recipient: None, segwit: false, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 55eb6753bf..c9bf52aa43 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -45,7 +45,10 @@ use stacks::chainstate::stacks::boot::{ }; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; -use stacks::chainstate::stacks::{StacksTransaction, ThresholdSignature, TransactionPayload}; +use stacks::chainstate::stacks::{ + StacksTransaction, ThresholdSignature, TransactionPayload, MAX_BLOCK_LEN, +}; +use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -3895,3 +3898,299 @@ fn check_block_heights() { run_loop_thread.join().unwrap(); } + +/// Test config parameter `nakamoto_attempt_time_ms` +#[test] +#[ignore] +fn test_nakamoto_attempt_time() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let password = "12345".to_string(); + naka_conf.connection_options.block_proposal_token = Some(password.clone()); + // Use fixed timing params for this test + let nakamoto_attempt_time_ms = 20_000; + naka_conf.miner.nakamoto_attempt_time_ms = nakamoto_attempt_time_ms; + let stacker_sk = setup_stacker(&mut naka_conf); + + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 1_000_000_000, + ); + + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100_000, + ); + + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + // We'll need a lot of accounts for one subtest to avoid MAXIMUM_MEMPOOL_TX_CHAINING + struct Account { + nonce: u64, + privk: Secp256k1PrivateKey, + _address: StacksAddress, + } + let num_accounts = 1_000; + let init_account_balance = 1_000_000_000; + let account_keys = add_initial_balances(&mut naka_conf, num_accounts, init_account_balance); + let mut account = account_keys + .into_iter() + .map(|privk| { + let _address = tests::to_addr(&privk); + Account { + nonce: 0, + privk, + _address, + } + }) + .collect::>(); + + // only subscribe to the block proposal events + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::BlockProposal], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + Some(&signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + blind_signer(&naka_conf, &signers, proposals_submitted); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let _block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + + // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + // Mine 3 nakamoto tenures + for _ in 0..3 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + } + + // TODO (hack) instantiate the sortdb in the burnchain + _ = btc_regtest_controller.sortdb_mut(); + + // ----- Setup boilerplate finished, test block proposal API endpoint ----- + + let mut sender_nonce = 0; + let tenure_count = 3; + let inter_blocks_per_tenure = 10; + + // Subtest 1 + // Mine nakamoto tenures with a few transactions + // Blocks should be produced at least every 20 seconds + for _ in 0..tenure_count { + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let mut last_tip = BlockHeaderHash([0x00; 32]); + let mut last_tip_height = 0; + + // mine the interim blocks + for _ in 0..inter_blocks_per_tenure { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let txs_per_block = 3; + let tx_fee = 500; + let amount = 500; + + for _ in 0..txs_per_block { + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, tx_fee, &recipient, amount); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + } + + // Sleep a bit longer than what our max block time should be + thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms + 100)); + + // Miner should have made a new block by now + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + assert!(blocks_processed > blocks_processed_before); + + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, last_tip); + assert_ne!(info.stacks_tip_height, last_tip_height); + + last_tip = info.stacks_tip; + last_tip_height = info.stacks_tip_height; + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + // Subtest 2 + // Confirm that no blocks are mined if there are no transactions + for _ in 0..2 { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let info_before = get_chain_info_result(&naka_conf).unwrap(); + + // Wait long enough for a block to be mined + thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms * 2)); + + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let info = get_chain_info_result(&naka_conf).unwrap(); + + // Assert that no block was mined while waiting + assert_eq!(blocks_processed, blocks_processed_before); + assert_eq!(info.stacks_tip, info_before.stacks_tip); + assert_eq!(info.stacks_tip_height, info_before.stacks_tip_height); + } + + // Subtest 3 + // Add more than `nakamoto_attempt_time_ms` worth of transactions into mempool + // Multiple blocks should be mined + for _ in 0..tenure_count { + let info_before = get_chain_info_result(&naka_conf).unwrap(); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let tx_limit = 10000; + let tx_fee = 500; + let amount = 500; + let mut tx_total_size = 0; + let mut tx_count = 0; + let mut acct_idx = 0; + + // Submit max # of txs from each account to reach tx_limit + 'submit_txs: loop { + let acct = &mut account[acct_idx]; + for _ in 0..MAXIMUM_MEMPOOL_TX_CHAINING { + let transfer_tx = + make_stacks_transfer(&acct.privk, acct.nonce, tx_fee, &recipient, amount); + submit_tx(&http_origin, &transfer_tx); + tx_total_size += transfer_tx.len(); + tx_count += 1; + acct.nonce += 1; + if tx_count >= tx_limit { + break 'submit_txs; + } + } + acct_idx += 1; + } + + // Make sure that these transactions *could* fit into a single block + assert!(tx_total_size < MAX_BLOCK_LEN as usize); + + // Wait long enough for 2 blocks to be made + thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms * 2 + 100)); + + // Check that 2 blocks were made + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let blocks_mined = blocks_processed - blocks_processed_before; + assert!(blocks_mined > 2); + + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, info_before.stacks_tip); + assert_ne!(info.stacks_tip_height, info_before.stacks_tip_height); + } + + // ----- Clean up ----- + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} From b379a0a55e8c3f47b46cf8dadcf67ae8f8196ed7 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 29 May 2024 13:05:19 -0400 Subject: [PATCH 0149/1400] test: Add `nakamoto_attempt_time` to `bitcoin-tests.yml` --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 87fe5a8f09..b0262313c2 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -82,6 +82,7 @@ jobs: - tests::nakamoto_integrations::vote_for_aggregate_key_burn_op - tests::nakamoto_integrations::follower_bootup - tests::nakamoto_integrations::forked_tenure_is_ignored + - tests::nakamoto_integrations::nakamoto_attempt_time - tests::signer::v0::block_proposal_rejection - tests::signer::v1::dkg - tests::signer::v1::sign_request_rejected diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c9bf52aa43..e76387868f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3902,7 +3902,7 @@ fn check_block_heights() { /// Test config parameter `nakamoto_attempt_time_ms` #[test] #[ignore] -fn test_nakamoto_attempt_time() { +fn nakamoto_attempt_time() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } From fa482af24f123efe598d6c6407f15028a28026f4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 29 May 2024 13:30:23 -0400 Subject: [PATCH 0150/1400] fix: `index_handle_at_block` for Nakamoto blocks --- stackslib/src/chainstate/burn/db/sortdb.rs | 14 ++++++---- .../src/chainstate/nakamoto/tests/mod.rs | 5 +++- .../src/tests/neon_integrations.rs | 27 ++++++++++--------- 3 files changed, 28 insertions(+), 18 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 3fa528995a..bba8257cbe 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -67,7 +67,7 @@ use crate::chainstate::burn::{ use crate::chainstate::coordinator::{ Error as CoordinatorError, PoxAnchorBlockStatus, RewardCycleInfo, SortitionDBMigrator, }; -use crate::chainstate::nakamoto::NakamotoBlockHeader; +use crate::chainstate::nakamoto::{NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; use crate::chainstate::stacks::boot::PoxStartCycleInfo; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; @@ -2658,12 +2658,16 @@ impl SortitionDB { chainstate: &StacksChainState, stacks_block_id: &StacksBlockId, ) -> Result, db_error> { - let (consensus_hash, bhh) = match chainstate.get_block_header_hashes(stacks_block_id) { + let header = match NakamotoChainState::get_block_header(chainstate.db(), stacks_block_id) { Ok(Some(x)) => x, - _ => return Err(db_error::NotFoundError), + x => { + debug!("Failed to get block header: {:?}", x); + return Err(db_error::NotFoundError); + } }; - let snapshot = SortitionDB::get_block_snapshot_consensus(&self.conn(), &consensus_hash)? - .ok_or(db_error::NotFoundError)?; + let snapshot = + SortitionDB::get_block_snapshot_consensus(&self.conn(), &header.consensus_hash)? + .ok_or(db_error::NotFoundError)?; Ok(self.index_handle(&snapshot.sortition_id)) } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 3cd7d2dde5..4a1b0ad714 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -126,9 +126,12 @@ pub fn get_account( &tip ); + let snapshot = SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &tip.consensus_hash) + .unwrap() + .unwrap(); chainstate .with_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&snapshot.sortition_id), &tip.index_block_hash(), |clarity_conn| { StacksChainState::get_account(clarity_conn, &addr.to_account_principal()) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 157daf0bde..b81e82b57c 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -3471,13 +3471,14 @@ fn microblock_fork_poison_integration_test() { tip_hash, ) .unwrap(); + let iconn = btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(); let first_microblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller - .sortdb_ref() - .index_handle_at_block(&chainstate, &tip_hash) - .unwrap(), + &iconn, consensus_hash, stacks_block.clone(), vec![unconfirmed_tx], @@ -3735,13 +3736,14 @@ fn microblock_integration_test() { tip_hash, ) .unwrap(); + let iconn = btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(); let first_microblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller - .sortdb_ref() - .index_handle_at_block(&chainstate, &tip_hash) - .unwrap(), + &iconn, consensus_hash, stacks_block.clone(), vec![unconfirmed_tx], @@ -9178,13 +9180,14 @@ fn use_latest_tip_integration_test() { let vec_tx = vec![tx_1, tx_2]; let privk = find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024).unwrap(); + let iconn = btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(); let mblock = make_microblock( &privk, &mut chainstate, - &btc_regtest_controller - .sortdb_ref() - .index_handle_at_block(&chainstate, &tip_hash) - .unwrap(), + &iconn, consensus_hash, stacks_block.clone(), vec_tx, From fdcaea8b91972ad721d9d9e0579ac89fb531ddc0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 29 May 2024 13:11:02 -0500 Subject: [PATCH 0151/1400] first round of addressing PR review --- stacks-common/src/util/macros.rs | 5 + stacks-signer/src/chainstate.rs | 135 +++++++++--------- stacks-signer/src/client/stacks_client.rs | 44 +++++- stacks-signer/src/signerdb.rs | 7 + stackslib/src/chainstate/burn/db/sortdb.rs | 22 +++ stackslib/src/chainstate/stacks/mod.rs | 7 + .../src/net/api/get_tenures_fork_info.rs | 104 ++++---------- stackslib/src/net/api/getsortition.rs | 98 ++----------- stackslib/src/net/api/mod.rs | 92 +++++++++++- .../net/api/tests/get_tenures_fork_info.rs | 15 ++ stackslib/src/net/api/tests/getsortition.rs | 45 +++--- stackslib/src/net/api/tests/mod.rs | 98 ++++++++++++- .../src/tests/nakamoto_integrations.rs | 31 ++-- 13 files changed, 426 insertions(+), 277 deletions(-) diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index 845b3b452b..5009e984cb 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -574,6 +574,11 @@ macro_rules! impl_byte_array_newtype { to_hex(&self.0) } } + impl std::fmt::LowerHex for $thing { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.to_hex()) + } + } impl std::fmt::Display for $thing { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{}", self.to_hex()) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index ee03b6df85..540d714bc8 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::TenureChangePayload; use blockstack_lib::net::api::getsortition::SortitionInfo; @@ -10,7 +25,7 @@ use crate::client::{ClientError, StacksClient}; use crate::signerdb::SignerDb; /// Captures this signer's current view of a sortition's miner. -#[derive(PartialEq, Eq)] +#[derive(PartialEq, Eq, Debug)] pub enum SortitionMinerStatus { /// The signer thinks this sortition's miner is invalid, and hasn't signed any blocks for them. InvalidatedBeforeFirstBlock, @@ -21,7 +36,13 @@ pub enum SortitionMinerStatus { } /// Captures the Stacks sortition related state for -/// a successful sortition +/// a successful sortition. +/// +/// Sortition state in this struct is +/// is indexed using consensus hashes, and fetched from a single "get latest" RPC call +/// to the stacks node. This ensures that the state in this struct is consistent with itself +/// (i.e., it does not span a bitcoin fork) and up to date. +#[derive(Debug)] pub struct SortitionState { /// The miner's pub key hash pub miner_pkh: Hash160, @@ -33,22 +54,20 @@ pub struct SortitionState { pub parent_tenure_id: ConsensusHash, /// this sortition's consensus hash pub consensus_hash: ConsensusHash, - /// did the miner in this sortition do something - /// to become invalidated as a miner? - pub invalidated: SortitionMinerStatus, + /// what is this signer's view of the this sortition's miner? did they misbehave? + pub miner_status: SortitionMinerStatus, } /// The signer's current view of the stacks chain's sortition /// state +#[derive(Debug)] pub struct SortitionsView { /// the prior successful sortition (this corresponds to the "prior" miner slot) pub last_sortition: Option, /// the current successful sortition (this corresponds to the "current" miner slot) - pub cur_sortition: Option, - /// is the view fresh? - pub fresh: bool, - /// the hash at which the sortitions view was last fetched - pub latest_consensus_hash: Option, + pub cur_sortition: SortitionState, + /// the hash at which the sortitions view was fetched + pub latest_consensus_hash: ConsensusHash, } impl TryFrom for SortitionState { @@ -66,7 +85,7 @@ impl TryFrom for SortitionState { parent_tenure_id: value .stacks_parent_ch .ok_or_else(|| ClientError::UnexpectedSortitionInfo)?, - invalidated: SortitionMinerStatus::Valid, + miner_status: SortitionMinerStatus::Valid, }) } } @@ -79,44 +98,27 @@ enum ProposedBy<'a> { impl<'a> ProposedBy<'a> { pub fn state(&self) -> &SortitionState { match self { - ProposedBy::LastSortition(ref x) => x, - ProposedBy::CurrentSortition(ref x) => x, + ProposedBy::LastSortition(x) => x, + ProposedBy::CurrentSortition(x) => x, } } } impl SortitionsView { - /// Initialize an empty sortitions view struct -- it will refresh() before - /// checking any proposals. - pub fn new() -> Self { - Self { - last_sortition: None, - cur_sortition: None, - fresh: false, - latest_consensus_hash: None, - } - } - /// Apply checks from the SortitionsView on the block proposal. - /// pub fn check_proposal( - &mut self, + &self, client: &StacksClient, signer_db: &SignerDb, block: &NakamotoBlock, block_pk: &StacksPublicKey, ) -> Result { - self.refresh_view(client)?; let block_pkh = Hash160::from_data(&block_pk.to_bytes_compressed()); - let Some(proposed_by) = self - .cur_sortition - .as_ref() - .and_then(|cur_sortition| { - if block.header.consensus_hash == cur_sortition.consensus_hash { - Some(ProposedBy::CurrentSortition(cur_sortition)) - } else { - None - } + let Some(proposed_by) = + (if block.header.consensus_hash == self.cur_sortition.consensus_hash { + Some(ProposedBy::CurrentSortition(&self.cur_sortition)) + } else { + None }) .or_else(|| { self.last_sortition.as_ref().and_then(|last_sortition| { @@ -132,7 +134,7 @@ impl SortitionsView { "Miner block proposal has consensus hash that is neither the current or last sortition. Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "current_sortition_consensus_hash" => ?self.cur_sortition.as_ref().map(|x| x.consensus_hash), + "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), ); return Ok(false); @@ -153,7 +155,7 @@ impl SortitionsView { // check that this miner is the most recent sortition match proposed_by { ProposedBy::CurrentSortition(sortition) => { - if sortition.invalidated != SortitionMinerStatus::Valid { + if sortition.miner_status != SortitionMinerStatus::Valid { warn!( "Current miner behaved improperly, this signer views the miner as invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, @@ -163,19 +165,17 @@ impl SortitionsView { } } ProposedBy::LastSortition(_last_sortition) => { - if let Some(cur_sortition) = &self.cur_sortition { - // should only consider blocks from the last sortition if the new sortition was invalidated - // before we signed their first block. - if cur_sortition.invalidated - != SortitionMinerStatus::InvalidatedBeforeFirstBlock - { - warn!( - "Miner block proposal is from last sortition winner, when the new sortition winner is still valid. Considering proposal invalid."; - "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - ); - return Ok(false); - } + // should only consider blocks from the last sortition if the new sortition was invalidated + // before we signed their first block. + if self.cur_sortition.miner_status + != SortitionMinerStatus::InvalidatedBeforeFirstBlock + { + warn!( + "Miner block proposal is from last sortition winner, when the new sortition winner is still valid. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + ); + return Ok(false); } } }; @@ -258,7 +258,7 @@ impl SortitionsView { &sortition_state.parent_tenure_id, &sortition_state.prior_sortition, )?; - if tenures_reorged.len() == 0 { + if tenures_reorged.is_empty() { warn!("Miner is not building off of most recent tenure, but stacks node was unable to return information about the relevant sortitions. Marking miner invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), @@ -281,7 +281,7 @@ impl SortitionsView { } } - return Ok(true); + Ok(true) } fn check_tenure_change_block_confirmation( @@ -314,7 +314,7 @@ impl SortitionsView { return Ok(true); }; if block.header.chain_length > last_known_block.block.header.chain_length { - return Ok(true); + Ok(true) } else { warn!( "Miner block proposal's tenure change transaction does not confirm as many blocks as we expect in the parent tenure"; @@ -323,23 +323,20 @@ impl SortitionsView { "proposed_chain_length" => block.header.chain_length, "expected_at_least" => last_known_block.block.header.chain_length + 1, ); - return Ok(false); + Ok(false) } } /// Has the current tenure lasted long enough to extend the block limit? pub fn tenure_time_passed_block_lim() -> Result { // TODO - return Ok(false); + Ok(false) } - /// If necessary, fetch a new view of the recent sortitions - pub fn refresh_view(&mut self, client: &StacksClient) -> Result<(), ClientError> { - if self.fresh { - return Ok(()); - } + /// Fetch a new view of the recent sortitions + pub fn fetch_view(client: &StacksClient) -> Result { let latest_state = client.get_latest_sortition()?; - let latest_ch = latest_state.consensus_hash.clone(); + let latest_ch = latest_state.consensus_hash; // figure out what cur_sortition will be set to. // if the latest sortition wasn't successful, query the last one that was. @@ -361,15 +358,19 @@ impl SortitionsView { .map(|ch| client.get_sortition(ch)) .transpose()?; - self.cur_sortition = Some(SortitionState::try_from(latest_success)?); - self.last_sortition = last_sortition + let cur_sortition = SortitionState::try_from(latest_success)?; + let last_sortition = last_sortition .map(SortitionState::try_from) .transpose() .ok() .flatten(); - self.fresh = true; - self.latest_consensus_hash = Some(latest_ch); - Ok(()) + let latest_consensus_hash = latest_ch; + + Ok(Self { + cur_sortition, + last_sortition, + latest_consensus_hash, + }) } } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 38a8f78d1e..7e082558f9 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -42,14 +42,14 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use reqwest::header::AUTHORIZATION; use serde_json::json; -use slog::{slog_debug, slog_warn}; +use slog::{slog_debug, slog_info, slog_warn}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; use stacks_common::types::StacksEpochId; -use stacks_common::{debug, warn}; +use stacks_common::{debug, info, warn}; use wsts::curve::point::{Compressed, Point}; use crate::client::{retry_with_exponential_backoff, ClientError}; @@ -369,6 +369,45 @@ impl StacksClient { &self, chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, + ) -> Result, ClientError> { + let mut tenures = self.get_tenure_forking_info_step(chosen_parent, last_sortition)?; + if tenures.is_empty() { + return Ok(tenures); + } + while tenures.last().map(|x| &x.consensus_hash) != Some(chosen_parent) { + let new_start = tenures.last().ok_or_else(|| { + ClientError::InvalidResponse( + "Should have tenure data in forking info response".into(), + ) + })?; + let mut next_results = + self.get_tenure_forking_info_step(chosen_parent, &new_start.consensus_hash)?; + if next_results.is_empty() { + return Err(ClientError::InvalidResponse( + "Could not fetch forking info all the way back to the requested chosen_parent" + .into(), + )); + } + // SAFETY check: next_results isn't empty, because of the above check. otherwise, remove(0) could panic. + next_results.remove(0); + let info_log: Vec<_> = tenures.iter().map(|t| t.consensus_hash).collect(); + info!("Current tenures = {:?}", info_log); + if next_results.is_empty() { + return Err(ClientError::InvalidResponse( + "Could not fetch forking info all the way back to the requested chosen_parent" + .into(), + )); + } + tenures.extend(next_results.into_iter()); + } + + Ok(tenures) + } + + fn get_tenure_forking_info_step( + &self, + chosen_parent: &ConsensusHash, + last_sortition: &ConsensusHash, ) -> Result, ClientError> { let send_request = || { self.stacks_node_client @@ -381,6 +420,7 @@ impl StacksClient { return Err(ClientError::RequestFailure(response.status())); } let tenures = response.json()?; + Ok(tenures) } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 5ef24d1c87..7450476397 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -96,6 +96,11 @@ CREATE TABLE IF NOT EXISTS blocks ( PRIMARY KEY (reward_cycle, signer_signature_hash) )"; +const CREATE_INDEXES: &str = " +CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (signed_over); +CREATE INDEX IF NOT EXISTS blocks_consensus_hash ON blocks (consensus_hash); +"; + const CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, @@ -125,6 +130,8 @@ impl SignerDb { self.db.execute(CREATE_SIGNER_STATE_TABLE, NO_PARAMS)?; } + self.db.execute_batch(CREATE_INDEXES)?; + Ok(()) } diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e3802d6ec1..08f78ec6cf 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2226,6 +2226,28 @@ impl<'a> SortitionHandleConn<'a> { }) } + /// Get the latest block snapshot on this fork where a sortition occured. + pub fn get_last_snapshot_with_sortition_from_tip(&self) -> Result { + let ancestor_hash = + match self.get_indexed(&self.context.chain_tip, &db_keys::last_sortition())? { + Some(hex_str) => BurnchainHeaderHash::from_hex(&hex_str).unwrap_or_else(|_| { + panic!( + "FATAL: corrupt database: failed to parse {} into a hex string", + &hex_str + ) + }), + None => { + // no prior sortitions, so get the first + return self.get_first_block_snapshot(); + } + }; + + self.get_block_snapshot(&ancestor_hash).map(|snapshot_opt| { + snapshot_opt + .unwrap_or_else(|| panic!("FATAL: corrupt index: no snapshot {}", ancestor_hash)) + }) + } + pub fn get_leader_key_at( &self, key_block_height: u64, diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index f9ad4fff3f..fa5572e536 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -99,6 +99,8 @@ pub enum Error { StacksTransactionSkipped(String), PostConditionFailed(String), NoSuchBlockError, + /// The supplied Sortition IDs, consensus hashes, or stacks blocks are not in the same fork. + NotInSameFork, InvalidChainstateDB, BlockTooBigError, TransactionTooBigError, @@ -224,6 +226,9 @@ impl fmt::Display for Error { Error::NoRegisteredSigners(reward_cycle) => { write!(f, "No registered signers for reward cycle {reward_cycle}") } + Error::NotInSameFork => { + write!(f, "The supplied block identifiers are not in the same fork") + } } } } @@ -268,6 +273,7 @@ impl error::Error for Error { Error::InvalidChildOfNakomotoBlock => None, Error::ExpectedTenureChange => None, Error::NoRegisteredSigners(_) => None, + Error::NotInSameFork => None, } } } @@ -312,6 +318,7 @@ impl Error { Error::InvalidChildOfNakomotoBlock => "InvalidChildOfNakomotoBlock", Error::ExpectedTenureChange => "ExpectedTenureChange", Error::NoRegisteredSigners(_) => "NoRegisteredSigners", + Error::NotInSameFork => "NotInSameFork", } } diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs index 13ed91810e..da2b1cd3d9 100644 --- a/stackslib/src/net/api/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -34,6 +34,7 @@ use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoSta use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as ChainError; use crate::net::api::getblock_v3::NakamotoBlockStream; +use crate::net::api::{prefix_hex, prefix_opt_hex}; use crate::net::http::{ parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, @@ -46,7 +47,7 @@ use crate::net::httpcore::{ use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; use crate::util_lib::db::{DBConn, Error as DBError}; -pub static RPC_TENURE_FORKING_INFO_PATH: &str = "/v3/tenures_fork_info"; +pub static RPC_TENURE_FORKING_INFO_PATH: &str = "/v3/tenures/fork_info"; static DEPTH_LIMIT: usize = 10; @@ -81,81 +82,6 @@ pub struct TenureForkingInfo { pub first_block_mined: Option, } -mod prefix_opt_hex { - pub fn serialize( - val: &Option, - s: S, - ) -> Result { - match val { - Some(ref some_val) => { - let val_str = format!("0x{some_val}"); - s.serialize_some(&val_str) - } - None => s.serialize_none(), - } - } - - pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( - d: D, - ) -> Result, D::Error> { - let opt_inst_str: Option = serde::Deserialize::deserialize(d)?; - let Some(inst_str) = opt_inst_str else { - return Ok(None); - }; - let Some(hex_str) = inst_str.get(2..) else { - return Err(serde::de::Error::invalid_length( - inst_str.len(), - &"at least length 2 string", - )); - }; - let val = T::try_from(&hex_str).map_err(serde::de::Error::custom)?; - Ok(Some(val)) - } -} - -mod prefix_hex { - pub fn serialize( - val: &T, - s: S, - ) -> Result { - s.serialize_str(&format!("0x{val}")) - } - - pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( - d: D, - ) -> Result { - let inst_str: String = serde::Deserialize::deserialize(d)?; - let Some(hex_str) = inst_str.get(2..) else { - return Err(serde::de::Error::invalid_length( - inst_str.len(), - &"at least length 2 string", - )); - }; - T::try_from(&hex_str).map_err(serde::de::Error::custom) - } -} - -trait HexDeser: Sized { - fn try_from(hex: &str) -> Result; -} - -macro_rules! impl_hex_deser { - ($thing:ident) => { - impl HexDeser for $thing { - fn try_from(hex: &str) -> Result { - $thing::from_hex(hex) - } - } - }; -} - -impl_hex_deser!(BurnchainHeaderHash); -impl_hex_deser!(StacksBlockId); -impl_hex_deser!(SortitionId); -impl_hex_deser!(ConsensusHash); -impl_hex_deser!(BlockHeaderHash); -impl_hex_deser!(Hash160); - #[derive(Clone, Default)] pub struct GetTenuresForkInfo { pub start_sortition: Option, @@ -289,6 +215,11 @@ impl RPCRequestHandler for GetTenuresForkInfo { .start_sortition .clone() .ok_or_else(|| ChainError::NoSuchBlockError)?; + let recurse_end_snapshot = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &recurse_end)? + .ok_or_else(|| ChainError::NoSuchBlockError)?; + let height_bound = recurse_end_snapshot.block_height; + let mut results = vec![]; let mut cursor = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &start_from)? .ok_or_else(|| ChainError::NoSuchBlockError)?; @@ -299,7 +230,16 @@ impl RPCRequestHandler for GetTenuresForkInfo { let mut depth = 0; while depth < DEPTH_LIMIT && cursor.consensus_hash != recurse_end { depth += 1; - cursor = handle.get_last_snapshot_with_sortition(cursor.block_height)?; + info!("Handling fork info request"; + "cursor.consensus_hash" => %cursor.consensus_hash, + "cursor.block_height" => cursor.block_height, + "recurse_end" => %recurse_end, + "height_bound" => height_bound + ); + if height_bound >= cursor.block_height { + return Err(ChainError::NotInSameFork); + } + cursor = handle.get_last_snapshot_with_sortition(cursor.block_height - 1)?; results.push(TenureForkingInfo::from_snapshot( &cursor, sortdb, chainstate, )?); @@ -310,6 +250,16 @@ impl RPCRequestHandler for GetTenuresForkInfo { let tenures = match result { Ok(tenures) => tenures, + Err(ChainError::NotInSameFork) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpBadRequest::new_json(serde_json::json!( + "Supplied start and end sortitions are not in the same sortition fork" + )), + ) + .try_into_contents() + .map_err(NetError::from); + } Err(ChainError::NoSuchBlockError) => { return StacksHttpResponse::new_error( &preamble, diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index a4fba89fb5..ed084a084d 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -32,6 +32,7 @@ use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoSta use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as ChainError; use crate::net::api::getblock_v3::NakamotoBlockStream; +use crate::net::api::{prefix_hex, prefix_opt_hex}; use crate::net::http::{ parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, @@ -53,6 +54,7 @@ pub enum QuerySpecifier { } pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortition"; +static PATH_REGEX: &str = "^/v3/sortition(/(?P[a-z_]{1,15})/(?P[0-9a-f]{1,64}))?$"; /// Struct for sortition information returned via the GetSortition API call #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] @@ -98,90 +100,16 @@ pub struct SortitionInfo { pub committed_block_hash: Option, } -mod prefix_opt_hex { - pub fn serialize( - val: &Option, - s: S, - ) -> Result { - match val { - Some(ref some_val) => { - let val_str = format!("0x{some_val}"); - s.serialize_some(&val_str) - } - None => s.serialize_none(), - } - } - - pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( - d: D, - ) -> Result, D::Error> { - let opt_inst_str: Option = serde::Deserialize::deserialize(d)?; - let Some(inst_str) = opt_inst_str else { - return Ok(None); - }; - let Some(hex_str) = inst_str.get(2..) else { - return Err(serde::de::Error::invalid_length( - inst_str.len(), - &"at least length 2 string", - )); - }; - let val = T::try_from(&hex_str).map_err(serde::de::Error::custom)?; - Ok(Some(val)) - } -} - -mod prefix_hex { - pub fn serialize( - val: &T, - s: S, - ) -> Result { - s.serialize_str(&format!("0x{val}")) - } - - pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( - d: D, - ) -> Result { - let inst_str: String = serde::Deserialize::deserialize(d)?; - let Some(hex_str) = inst_str.get(2..) else { - return Err(serde::de::Error::invalid_length( - inst_str.len(), - &"at least length 2 string", - )); - }; - T::try_from(&hex_str).map_err(serde::de::Error::custom) - } -} - -trait HexDeser: Sized { - fn try_from(hex: &str) -> Result; -} - -macro_rules! impl_hex_deser { - ($thing:ident) => { - impl HexDeser for $thing { - fn try_from(hex: &str) -> Result { - $thing::from_hex(hex) - } - } - }; -} - -impl_hex_deser!(BurnchainHeaderHash); -impl_hex_deser!(SortitionId); -impl_hex_deser!(ConsensusHash); -impl_hex_deser!(BlockHeaderHash); -impl_hex_deser!(Hash160); - -impl TryFrom<(&String, &String)> for QuerySpecifier { +impl TryFrom<(&str, &str)> for QuerySpecifier { type Error = Error; - fn try_from(value: (&String, &String)) -> Result { + fn try_from(value: (&str, &str)) -> Result { let hex_str = if value.1.starts_with("0x") { &value.1[2..] } else { - value.1.as_str() + value.1 }; - match value.0.as_str() { + match value.0 { "consensus" => Ok(Self::ConsensusHash( ConsensusHash::from_hex(hex_str).map_err(|e| Error::DecodeError(e.to_string()))?, )), @@ -219,7 +147,7 @@ impl HttpRequest for GetSortitionHandler { } fn path_regex(&self) -> Regex { - Regex::new(&format!("^{RPC_SORTITION_INFO_PATH}$")).unwrap() + Regex::new(PATH_REGEX).unwrap() } /// Try to decode this request. @@ -227,7 +155,7 @@ impl HttpRequest for GetSortitionHandler { fn try_parse_request( &mut self, preamble: &HttpRequestPreamble, - _captures: &Captures, + captures: &Captures, query: Option<&str>, _body: &[u8], ) -> Result { @@ -238,14 +166,10 @@ impl HttpRequest for GetSortitionHandler { } let req_contents = HttpRequestContents::new().query_string(query); - if req_contents.get_query_args().len() > 1 { - return Err(Error::DecodeError( - "May only supply up to one query argument".into(), - )); - } self.query = QuerySpecifier::Latest; - for (key, value) in req_contents.get_query_args().iter() { - self.query = QuerySpecifier::try_from((key, value))?; + eprintln!("{captures:?}"); + if let (Some(key), Some(value)) = (captures.name("key"), captures.name("value")) { + self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; } Ok(req_contents) diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 58425b4955..34fa1ec4c3 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -16,7 +16,11 @@ use clarity::vm::costs::ExecutionCost; use stacks_common::codec::read_next; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, +}; +use stacks_common::util::hash::Hash160; +use stacks_common::util::HexError; use crate::burnchains::Txid; use crate::chainstate::stacks::{StacksMicroblock, StacksTransaction}; @@ -111,8 +115,10 @@ impl StacksHttp { getstackerdbmetadata::RPCGetStackerDBMetadataRequestHandler::new(), ); self.register_rpc_endpoint(getstackers::GetStackersRequestHandler::default()); + self.register_rpc_endpoint(getsortition::GetSortitionHandler::new()); self.register_rpc_endpoint(gettenure::RPCNakamotoTenureRequestHandler::new()); self.register_rpc_endpoint(gettenureinfo::RPCNakamotoTenureInfoRequestHandler::new()); + self.register_rpc_endpoint(get_tenures_fork_info::GetTenuresForkInfo::default()); self.register_rpc_endpoint( gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(), ); @@ -128,9 +134,6 @@ impl StacksHttp { self.register_rpc_endpoint(postmicroblock::RPCPostMicroblockRequestHandler::new()); self.register_rpc_endpoint(poststackerdbchunk::RPCPostStackerDBChunkRequestHandler::new()); self.register_rpc_endpoint(posttransaction::RPCPostTransactionRequestHandler::new()); - self.register_rpc_endpoint(getstackers::GetStackersRequestHandler::default()); - self.register_rpc_endpoint(getsortition::GetSortitionHandler::new()); - self.register_rpc_endpoint(get_tenures_fork_info::GetTenuresForkInfo::default()); } } @@ -143,3 +146,84 @@ impl From for Error { } } } + +/// This module serde encodes and decodes optional byte fields in RPC +/// responses as Some(String) where the String is a `0x` prefixed +/// hex string. +pub mod prefix_opt_hex { + pub fn serialize( + val: &Option, + s: S, + ) -> Result { + match val { + Some(ref some_val) => { + let val_str = format!("0x{some_val:x}"); + s.serialize_some(&val_str) + } + None => s.serialize_none(), + } + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result, D::Error> { + let opt_inst_str: Option = serde::Deserialize::deserialize(d)?; + let Some(inst_str) = opt_inst_str else { + return Ok(None); + }; + let Some(hex_str) = inst_str.get(2..) else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + let val = T::try_from(&hex_str).map_err(serde::de::Error::custom)?; + Ok(Some(val)) + } +} + +/// This module serde encodes and decodes byte fields in RPC +/// responses as a String where the String is a `0x` prefixed +/// hex string. +pub mod prefix_hex { + pub fn serialize( + val: &T, + s: S, + ) -> Result { + s.serialize_str(&format!("0x{val:x}")) + } + + pub fn deserialize<'de, D: serde::Deserializer<'de>, T: super::HexDeser>( + d: D, + ) -> Result { + let inst_str: String = serde::Deserialize::deserialize(d)?; + let Some(hex_str) = inst_str.get(2..) else { + return Err(serde::de::Error::invalid_length( + inst_str.len(), + &"at least length 2 string", + )); + }; + T::try_from(&hex_str).map_err(serde::de::Error::custom) + } +} + +pub trait HexDeser: Sized { + fn try_from(hex: &str) -> Result; +} + +macro_rules! impl_hex_deser { + ($thing:ident) => { + impl HexDeser for $thing { + fn try_from(hex: &str) -> Result { + $thing::from_hex(hex) + } + } + }; +} + +impl_hex_deser!(BurnchainHeaderHash); +impl_hex_deser!(StacksBlockId); +impl_hex_deser!(SortitionId); +impl_hex_deser!(ConsensusHash); +impl_hex_deser!(BlockHeaderHash); +impl_hex_deser!(Hash160); diff --git a/stackslib/src/net/api/tests/get_tenures_fork_info.rs b/stackslib/src/net/api/tests/get_tenures_fork_info.rs index 6c9e552759..88e3d875ff 100644 --- a/stackslib/src/net/api/tests/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/tests/get_tenures_fork_info.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use std::collections::BTreeMap; use std::fmt::Display; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs index 40cfaf53cf..d48bc54a3a 100644 --- a/stackslib/src/net/api/tests/getsortition.rs +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use std::collections::BTreeMap; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; @@ -32,13 +47,13 @@ fn test_parse_request() { let tests = vec![ (make_preamble(""), Ok(QuerySpecifier::Latest)), ( - make_preamble("?consensus=deadbeef00deadbeef01deadbeef02deadbeef03"), + make_preamble("/consensus/deadbeef00deadbeef01deadbeef02deadbeef03"), Ok(QuerySpecifier::ConsensusHash( ConsensusHash::from_hex("deadbeef00deadbeef01deadbeef02deadbeef03").unwrap(), )), ), ( - make_preamble("?burn=00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff"), + make_preamble("/burn/00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff"), Ok(QuerySpecifier::BurnchainHeaderHash( BurnchainHeaderHash::from_hex( "00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff", @@ -47,41 +62,39 @@ fn test_parse_request() { )), ), ( - make_preamble("?burn_height=100"), + make_preamble("/burn_height/100"), Ok(QuerySpecifier::BlockHeight(100)), ), ( - make_preamble("?burn_height=a1be"), - Err(HttpError::DecodeError( - "invalid digit found in string".into(), - )), + make_preamble("/burn_height/a1be"), + Err(HttpError::DecodeError("invalid digit found in string".into()).into()), ), ( - make_preamble("?burn=a1be0000"), - Err(HttpError::DecodeError("bad length 8 for hex string".into())), + make_preamble("/burn/a1be0000"), + Err(HttpError::DecodeError("bad length 8 for hex string".into()).into()), ), ( - make_preamble("?consensus=a1be0000"), - Err(HttpError::DecodeError("bad length 8 for hex string".into())), + make_preamble("/consensus/a1be0000"), + Err(HttpError::DecodeError("bad length 8 for hex string".into()).into()), ), ( - make_preamble("?burn_height=20&consensus=deadbeef00deadbeef01deadbeef02deadbeef03"), - Err(HttpError::DecodeError( - "May only supply up to one query argument".into(), - )), + make_preamble("/burn_height/20/consensus/deadbeef00deadbeef01deadbeef02deadbeef03"), + Err(NetError::NotFoundError), ), ]; for (inp, expected_result) in tests.into_iter() { handler.restart(); let parsed_request = http.handle_try_parse_request(&mut handler, &inp, &[]); + eprintln!("{}", &inp.path_and_query_str); + eprintln!("{parsed_request:?}"); match expected_result { Ok(query) => { assert!(parsed_request.is_ok()); assert_eq!(&handler.query, &query); } Err(e) => { - assert_eq!(NetError::Http(e), parsed_request.unwrap_err()); + assert_eq!(e, parsed_request.unwrap_err()); } } } diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 591a12131c..de26412fcd 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -22,10 +22,11 @@ use libstackerdb::SlotMetadata; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, + StacksPrivateKey, StacksPublicKey, }; use stacks_common::util::get_epoch_time_secs; -use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; +use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::pipe::Pipe; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; @@ -40,6 +41,7 @@ use crate::chainstate::stacks::{ TransactionAuth, TransactionPayload, TransactionPostConditionMode, TransactionVersion, }; use crate::core::MemPoolDB; +use crate::net::api::{prefix_hex, prefix_opt_hex}; use crate::net::db::PeerDB; use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::relay::Relayer; @@ -1075,3 +1077,95 @@ pub fn test_rpc(test_name: &str, requests: Vec) -> Vec = + prefix_opt_hex::deserialize(&mut deserializer).unwrap(); + + assert_eq!(out, inp); + if test.is_some() { + assert_eq!( + hex_str, + format!("\"0x{}\"", to_hex(&inp.as_ref().unwrap().0)) + ); + } else { + assert_eq!(hex_str, "null"); + } + } +} + +#[test] +fn prefixed_hex_bad_desers() { + let inp = "\"1\""; + let mut opt_deserializer = serde_json::Deserializer::from_str(inp); + assert_eq!( + prefix_opt_hex::deserialize::<_, BurnchainHeaderHash>(&mut opt_deserializer) + .unwrap_err() + .to_string(), + "invalid length 1, expected at least length 2 string".to_string(), + ); + let inp = "\"0x\""; + let mut opt_deserializer = serde_json::Deserializer::from_str(inp); + assert_eq!( + prefix_opt_hex::deserialize::<_, BurnchainHeaderHash>(&mut opt_deserializer) + .unwrap_err() + .to_string(), + "bad length 0 for hex string".to_string(), + ); + let inp = "\"0x00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff00\""; + let mut opt_deserializer = serde_json::Deserializer::from_str(inp); + assert_eq!( + prefix_opt_hex::deserialize::<_, BurnchainHeaderHash>(&mut opt_deserializer) + .unwrap_err() + .to_string(), + "bad length 66 for hex string".to_string(), + ); +} + +#[test] +fn prefixed_hex_serialization() { + let tests_32b = [ + [0u8; 32], + [1; 32], + [15; 32], + [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, + ], + ]; + + for test in tests_32b.iter() { + let inp = BurnchainHeaderHash(test.clone()); + let mut out_buff = Vec::new(); + let mut serializer = serde_json::Serializer::new(&mut out_buff); + prefix_hex::serialize(&inp, &mut serializer).unwrap(); + let hex_str = String::from_utf8(out_buff).unwrap(); + eprintln!("{hex_str}"); + + let mut deserializer = serde_json::Deserializer::from_str(&hex_str); + let out: BurnchainHeaderHash = prefix_hex::deserialize(&mut deserializer).unwrap(); + + assert_eq!(out, inp); + assert_eq!(hex_str, format!("\"0x{}\"", to_hex(&inp.0))); + } +} diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index caeee26fae..d09c81bcba 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4013,8 +4013,6 @@ fn signer_chainstate() { let burnchain = naka_conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); - let mut sortitions_view = SortitionsView::new(); - // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { @@ -4051,15 +4049,6 @@ fn signer_chainstate() { false, ); - // there hasn't been a successful nakamoto sortition yet, so expect an error - assert!( - matches!( - sortitions_view.refresh_view(&signer_client).unwrap_err(), - ClientError::UnexpectedSortitionInfo - ), - "Sortitions view should fail to refresh if there are no successful nakamoto sortitions yet", - ); - // first block wakes up the run loop, wait until a key registration has been submitted. next_block_and(&mut btc_regtest_controller, 60, || { let vrf_count = vrfs_submitted.load(Ordering::SeqCst); @@ -4084,7 +4073,7 @@ fn signer_chainstate() { None; // hold the first and last blocks of the first tenure. we'll use this to submit reorging proposals let mut first_tenure_blocks: Option> = None; - for i in 0..5 { + for i in 0..15 { next_block_and_mine_commit( &mut btc_regtest_controller, 60, @@ -4093,8 +4082,7 @@ fn signer_chainstate() { ) .unwrap(); - sortitions_view.fresh = false; - sortitions_view.refresh_view(&signer_client).unwrap(); + let sortitions_view = SortitionsView::fetch_view(&signer_client).unwrap(); // check the prior tenure's proposals again, confirming that the sortitions_view // will reject them. @@ -4188,7 +4176,7 @@ fn signer_chainstate() { ); // force the view to refresh and check again - sortitions_view.fresh = false; + let sortitions_view = SortitionsView::fetch_view(&signer_client).unwrap(); let valid = sortitions_view .check_proposal( &signer_client, @@ -4246,6 +4234,8 @@ fn signer_chainstate() { txs: vec![], }; + let mut sortitions_view = SortitionsView::fetch_view(&signer_client).unwrap(); + assert!( !sortitions_view .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) @@ -4362,12 +4352,7 @@ fn signer_chainstate() { // Case: the block contains a tenure change, but the parent tenure is a reorg let reorg_to_block = first_tenure_blocks.as_ref().unwrap().last().unwrap(); // make the sortition_view *think* that our block commit pointed at this old tenure - sortitions_view - .cur_sortition - .as_mut() - .map(|sortition_state| { - sortition_state.parent_tenure_id = reorg_to_block.header.consensus_hash.clone() - }); + sortitions_view.cur_sortition.parent_tenure_id = reorg_to_block.header.consensus_hash.clone(); let mut sibling_block_header = NakamotoBlockHeader { version: 1, chain_length: reorg_to_block.header.chain_length + 1, @@ -4422,7 +4407,9 @@ fn signer_chainstate() { "A sibling of a previously approved block must be rejected." ); - sortitions_view.fresh = false; + // view is stale, if we ever expand this test, sortitions_view should + // be fetched again, so drop it here. + drop(sortitions_view); coord_channel .lock() From 1740bc4cff0061ada2396ebdf9214000200c3d65 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 29 May 2024 13:27:54 -0500 Subject: [PATCH 0152/1400] more PR reviews --- stackslib/src/net/api/get_tenures_fork_info.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs index da2b1cd3d9..055103dce4 100644 --- a/stackslib/src/net/api/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -77,7 +77,7 @@ pub struct TenureForkingInfo { /// block or miner was chosen). pub was_sortition: bool, /// If the sortition occurred, and a block was mined during the tenure, this is the - /// tenure's block. + /// tenure's first block. #[serde(with = "prefix_opt_hex")] pub first_block_mined: Option, } From 17fddaf35f4cdc95ce1ee8145aa04cb29acf1eeb Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Wed, 29 May 2024 21:47:33 +0300 Subject: [PATCH 0153/1400] Add unhappy path cases for `StackIncreaseSigCommand_Err` This commit: - adds 3 unhappy path cases for the `stack-increase` PoX-4 method, called using a signature. - adds the command run tracking inside the `check` method. - adds the expected `stack-increase` PoX-4 errors to the `POX_4_ERRORS` dictionary. --- .../tests/pox-4/err_Commands.ts | 106 +++++++++++++ .../pox-4/pox_StackIncreaseSigCommand_Err.ts | 143 ++++++++++++++++++ 2 files changed, 249 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 74c0170af5..113d52ef46 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -18,14 +18,18 @@ import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggrega import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncreaseCommand_Err"; import { currentCycleFirstBlock, nextCycleFirstBlock } from "./pox_Commands"; import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; +import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; const POX_4_ERRORS = { + ERR_STACKING_INSUFFICIENT_FUNDS: 1, ERR_STACKING_ALREADY_STACKED: 3, ERR_STACKING_NO_SUCH_PRINCIPAL: 4, ERR_STACKING_PERMISSION_DENIED: 9, ERR_STACKING_THRESHOLD_NOT_MET: 11, + ERR_STACKING_INVALID_AMOUNT: 18, ERR_STACKING_ALREADY_DELEGATED: 20, ERR_DELEGATION_TOO_MUCH_LOCKED: 22, + ERR_STACKING_IS_DELEGATED: 30, ERR_DELEGATION_ALREADY_REVOKED: 34, }; @@ -937,6 +941,108 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, ); }), + // StackIncreaseSigCommand_Err_Stacking_Is_Delegated + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseSigCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function ( + this: StackIncreaseSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + this.increaseBy <= stacker.amountUnlocked && + this.increaseBy >= 1 + ) { + model.trackCommandRun( + "StackIncreaseSigCommand_Err_Stacking_Is_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackIncreaseSigCommand_Err_Stacking_Insufficient_Funds + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(100_000_000_000_000), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseSigCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function ( + this: StackIncreaseSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + !(this.increaseBy <= stacker.amountUnlocked) && + this.increaseBy >= 1 + ) { + model.trackCommandRun( + "StackIncreaseSigCommand_Err_Stacking_Insufficient_Funds", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS, + ), + ), + // StackIncreaseSigCommand_Err_Stacking_Invalid_Amount + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(0), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseSigCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function ( + this: StackIncreaseSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + this.increaseBy <= stacker.amountUnlocked && + !(this.increaseBy >= 1) + ) { + model.trackCommandRun( + "StackIncreaseSigCommand_Err_Stacking_Invalid_Amount", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts new file mode 100644 index 0000000000..4a122784b3 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts @@ -0,0 +1,143 @@ +import { Pox4SignatureTopic } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { + Cl, + ClarityType, + ClarityValue, + cvToJSON, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { assert, expect } from "vitest"; + +type CheckFunc = ( + this: StackIncreaseSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackIncreaseSigCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly increaseBy: number; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackIncreaseSigCommand_Err` to lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param increaseBy - Represents the locked amount to be increased by. + * @param authId - Unique auth-id for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + increaseBy: number, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.increaseBy = increaseBy; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const maxAmount = stacker.amountLocked + this.increaseBy; + + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + const { result: rewardCycleNextBlockCV } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(rewardCycleNextBlockCV, ClarityType.UInt)); + + const rewardCycleNextBlock = cvToValue(rewardCycleNextBlockCV); + + // Get the lock period from the stacking state. This will be used for correctly + // issuing the authorization. + const stackingStateCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "stacking-state", + Cl.tuple({ stacker: Cl.principal(this.wallet.stxAddress) }), + ); + const period = cvToJSON(stackingStateCV).value.value["lock-period"].value; + + const signerSig = this.wallet.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.wallet.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For `stack-stx` and `stack-extend`, this refers to the reward cycle + // where the transaction is confirmed. For `stack-aggregation-commit`, + // this refers to the reward cycle argument in that function. + rewardCycle: rewardCycleNextBlock, + // For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + // this refers to `extend-count`. For `stack-aggregation-commit`, this is + // `u1`. + period: period, + // A string representing the function where this authorization is valid. + // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: Pox4SignatureTopic.StackIncrease, + // The PoX address that can be used with this signer key. + poxAddress: this.wallet.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: maxAmount, + }); + + const stackIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-increase", + [ + // (increase-by uint) + Cl.uint(this.increaseBy), + // (signer-sig (optional (buff 65))) + Cl.some(Cl.bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + expect(stackIncrease.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-increase-sig", + "increase-by", + this.increaseBy.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-increase sig increase-by ${this.increaseBy}`; + } +} From 4d7cf4dc829bb89efe5b949a9542faf1c9962ac5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 29 May 2024 16:37:29 -0500 Subject: [PATCH 0154/1400] add unit tests for stacks-signer::chainstate --- stacks-signer/src/chainstate.rs | 2 +- stacks-signer/src/client/stacks_client.rs | 6 +- stacks-signer/src/lib.rs | 4 + stacks-signer/src/tests/chainstate.rs | 225 ++++++++++++++++++ stacks-signer/src/tests/mod.rs | 1 + .../src/net/api/get_tenures_fork_info.rs | 6 - 6 files changed, 233 insertions(+), 11 deletions(-) create mode 100644 stacks-signer/src/tests/chainstate.rs create mode 100644 stacks-signer/src/tests/mod.rs diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 540d714bc8..a79251f73f 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -223,7 +223,7 @@ impl SortitionsView { let changed_burn_view = tenure_extend.burn_view_consensus_hash != proposed_by.state().consensus_hash; let enough_time_passed = Self::tenure_time_passed_block_lim()?; - if !changed_burn_view || !enough_time_passed { + if !changed_burn_view && !enough_time_passed { warn!( "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 7e082558f9..8a7ade028c 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -42,14 +42,14 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use reqwest::header::AUTHORIZATION; use serde_json::json; -use slog::{slog_debug, slog_info, slog_warn}; +use slog::{slog_debug, slog_warn}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; use stacks_common::types::StacksEpochId; -use stacks_common::{debug, info, warn}; +use stacks_common::{debug, warn}; use wsts::curve::point::{Compressed, Point}; use crate::client::{retry_with_exponential_backoff, ClientError}; @@ -390,8 +390,6 @@ impl StacksClient { } // SAFETY check: next_results isn't empty, because of the above check. otherwise, remove(0) could panic. next_results.remove(0); - let info_log: Vec<_> = tenures.iter().map(|t| t.consensus_hash).collect(); - info!("Current tenures = {:?}", info_log); if next_results.is_empty() { return Err(ClientError::InvalidResponse( "Could not fetch forking info all the way back to the requested chosen_parent" diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index a6856bb732..0ff622896c 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -39,6 +39,10 @@ pub mod signerdb; pub mod v0; /// The v1 implementation of the singer. This includes WSTS support pub mod v1; + +#[cfg(test)] +mod tests; + use std::fmt::{Debug, Display}; use std::sync::mpsc::{channel, Receiver, Sender}; diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs new file mode 100644 index 0000000000..d12d941ecc --- /dev/null +++ b/stacks-signer/src/tests/chainstate.rs @@ -0,0 +1,225 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::fs; +use std::net::{Ipv4Addr, SocketAddrV4}; + +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use blockstack_lib::chainstate::stacks::{ + SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangeCause, + TenureChangePayload, TransactionAnchorMode, TransactionAuth, TransactionPayload, + TransactionPostConditionMode, TransactionPublicKeyEncoding, TransactionSpendingCondition, + TransactionVersion, +}; +use stacks_common::bitvec::BitVec; +use stacks_common::types::chainstate::{ + ConsensusHash, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, +}; +use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::MessageSignature; + +use crate::chainstate::{SortitionMinerStatus, SortitionState, SortitionsView}; +use crate::client::StacksClient; +use crate::signerdb::SignerDb; + +fn setup_test_environment( + fn_name: &str, +) -> ( + StacksClient, + SignerDb, + StacksPublicKey, + SortitionsView, + NakamotoBlock, +) { + let block_sk = StacksPrivateKey::from_seed(&[0, 1]); + let block_pk = StacksPublicKey::from_private(&block_sk); + let block_pkh = Hash160::from_node_public_key(&block_pk); + + let cur_sortition = SortitionState { + miner_pkh: block_pkh, + miner_pubkey: None, + prior_sortition: ConsensusHash([0; 20]), + parent_tenure_id: ConsensusHash([0; 20]), + consensus_hash: ConsensusHash([1; 20]), + miner_status: SortitionMinerStatus::Valid, + }; + + let last_sortition = Some(SortitionState { + miner_pkh: block_pkh, + miner_pubkey: None, + prior_sortition: ConsensusHash([128; 20]), + parent_tenure_id: ConsensusHash([128; 20]), + consensus_hash: ConsensusHash([0; 20]), + miner_status: SortitionMinerStatus::Valid, + }); + + let view = SortitionsView { + latest_consensus_hash: cur_sortition.consensus_hash, + cur_sortition, + last_sortition, + }; + + let stacks_client = StacksClient::new( + StacksPrivateKey::new(), + SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 10000).into(), + "FOO".into(), + false, + ); + + let signer_db_dir = "/tmp/stacks-node-tests/signer-units/"; + let signer_db_path = format!("{signer_db_dir}/{fn_name}.{}.sqlite", get_epoch_time_secs()); + fs::create_dir_all(signer_db_dir).unwrap(); + let signer_db = SignerDb::new(signer_db_path).unwrap(); + + let block = NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 10, + burn_spent: 10, + consensus_hash: ConsensusHash([15; 20]), + parent_block_id: StacksBlockId([0; 32]), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: TrieHash([0; 32]), + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + signer_bitvec: BitVec::zeros(1).unwrap(), + }, + txs: vec![], + }; + + (stacks_client, signer_db, block_pk, view, block) +} + +#[test] +fn check_proposal_units() { + let (stacks_client, signer_db, block_pk, mut view, block) = + setup_test_environment("check_proposal_units"); + + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk,) + .unwrap()); + + view.last_sortition = None; + + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk,) + .unwrap()); +} + +#[test] +fn check_proposal_miner_pkh_mismatch() { + let (stacks_client, signer_db, _block_pk, view, mut block) = + setup_test_environment("miner_pkh_mismatch"); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + let different_block_pk = StacksPublicKey::from_private(&StacksPrivateKey::from_seed(&[2, 3])); + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &different_block_pk) + .unwrap()); + + block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &different_block_pk) + .unwrap()); +} + +#[test] +fn check_proposal_invalid_status() { + let (stacks_client, signer_db, block_pk, mut view, mut block) = + setup_test_environment("invalid_status"); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + assert!(view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); + view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); + + block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); + + view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; + block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; + // this block passes the signer state checks, even though it doesn't have a tenure change tx. + // this is because the signer state does not perform the tenure change logic checks: it needs + // the stacks-node to do that (because the stacks-node actually knows whether or not their + // parent blocks have been seen before, while the signer state checks are only reasoning about + // stacks blocks seen by the signer, which may be a subset) + assert!(view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); +} + +fn make_tenure_change_payload() -> TenureChangePayload { + TenureChangePayload { + tenure_consensus_hash: ConsensusHash([0; 20]), + prev_tenure_consensus_hash: ConsensusHash([0; 20]), + burn_view_consensus_hash: ConsensusHash([0; 20]), + previous_tenure_end: StacksBlockId([0; 32]), + previous_tenure_blocks: 1, + cause: TenureChangeCause::Extended, + pubkey_hash: Hash160([0; 20]), + } +} + +fn make_tenure_change_tx(payload: TenureChangePayload) -> StacksTransaction { + StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 1, + auth: TransactionAuth::Standard(TransactionSpendingCondition::Singlesig( + SinglesigSpendingCondition { + hash_mode: SinglesigHashMode::P2PKH, + signer: Hash160([0; 20]), + nonce: 0, + tx_fee: 0, + key_encoding: TransactionPublicKeyEncoding::Compressed, + signature: MessageSignature([0; 65]), + }, + )), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TenureChange(payload), + } +} + +#[test] +fn check_proposal_tenure_extend_invalid_conditions() { + let (stacks_client, signer_db, block_pk, view, mut block) = + setup_test_environment("tenure_extend"); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + let mut extend_payload = make_tenure_change_payload(); + extend_payload.burn_view_consensus_hash = view.cur_sortition.consensus_hash; + extend_payload.tenure_consensus_hash = block.header.consensus_hash; + extend_payload.prev_tenure_consensus_hash = block.header.consensus_hash; + let tx = make_tenure_change_tx(extend_payload); + block.txs = vec![tx]; + assert!(!view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); + + let mut extend_payload = make_tenure_change_payload(); + extend_payload.burn_view_consensus_hash = ConsensusHash([64; 20]); + extend_payload.tenure_consensus_hash = block.header.consensus_hash; + extend_payload.prev_tenure_consensus_hash = block.header.consensus_hash; + let tx = make_tenure_change_tx(extend_payload); + block.txs = vec![tx]; + assert!(view + .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .unwrap()); +} diff --git a/stacks-signer/src/tests/mod.rs b/stacks-signer/src/tests/mod.rs new file mode 100644 index 0000000000..a92c85da71 --- /dev/null +++ b/stacks-signer/src/tests/mod.rs @@ -0,0 +1 @@ +mod chainstate; diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs index 055103dce4..4abc8ab6e1 100644 --- a/stackslib/src/net/api/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -230,12 +230,6 @@ impl RPCRequestHandler for GetTenuresForkInfo { let mut depth = 0; while depth < DEPTH_LIMIT && cursor.consensus_hash != recurse_end { depth += 1; - info!("Handling fork info request"; - "cursor.consensus_hash" => %cursor.consensus_hash, - "cursor.block_height" => cursor.block_height, - "recurse_end" => %recurse_end, - "height_bound" => height_bound - ); if height_bound >= cursor.block_height { return Err(ChainError::NotInSameFork); } From 2a7135f0ee07cb2756422a0a149da41909001dae Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 30 May 2024 13:21:34 +0300 Subject: [PATCH 0155/1400] Add unhappy path cases for `StackIncreaseAuthCommand_Err` This commit: - adds 3 unhappy path cases for the `stack-increase` PoX-4 method, called using an authorization. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 103 ++++++++++++++ .../pox-4/pox_StackIncreaseAuthCommand_Err.ts | 133 ++++++++++++++++++ 2 files changed, 236 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 113d52ef46..e51283f3d1 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -19,6 +19,7 @@ import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncre import { currentCycleFirstBlock, nextCycleFirstBlock } from "./pox_Commands"; import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; +import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_INSUFFICIENT_FUNDS: 1, @@ -1043,6 +1044,108 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, ), ), + // StackIncreaseAuthCommand_Err_Stacking_Is_Delegated + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseAuthCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function ( + this: StackIncreaseAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + this.increaseBy <= stacker.amountUnlocked && + this.increaseBy >= 1 + ) { + model.trackCommandRun( + "StackIncreaseAuthCommand_Err_Stacking_Is_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackIncreaseAuthCommand_Err_Stacking_Insufficient_Funds + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(100_000_000_000_000), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseAuthCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function ( + this: StackIncreaseAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + !(this.increaseBy <= stacker.amountUnlocked) && + this.increaseBy >= 1 + ) { + model.trackCommandRun( + "StackIncreaseAuthCommand_Err_Stacking_Insufficient_Funds", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS, + ), + ), + // StackIncreaseAuthCommand_Err_Stacking_Invalid_Amount + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(0), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseAuthCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function ( + this: StackIncreaseAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + this.increaseBy <= stacker.amountUnlocked && + !(this.increaseBy >= 1) + ) { + model.trackCommandRun( + "StackIncreaseAuthCommand_Err_Stacking_Invalid_Amount", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts new file mode 100644 index 0000000000..a74aa3c211 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts @@ -0,0 +1,133 @@ +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { currentCycle } from "./pox_Commands"; +import { Cl, cvToJSON } from "@stacks/transactions"; +import { expect } from "vitest"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackIncreaseAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackIncreaseAuthCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly increaseBy: number; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackIncreaseAuthCommand` to increase lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param increaseBy - Represents the locked amount to be increased by. + * @param authId - Unique auth-id for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + increaseBy: number, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.increaseBy = increaseBy; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + // Get the lock period from the stacking state. This will be used for correctly + // issuing the authorization. + const stackingStateCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "stacking-state", + Cl.tuple({ stacker: Cl.principal(this.wallet.stxAddress) }), + ); + const period = cvToJSON(stackingStateCV).value.value["lock-period"].value; + + const maxAmount = stacker.amountLocked + this.increaseBy; + + // Act + + // Include the authorization and the `stack-increase` transactions in a single + // block. This way we ensure both the authorization and the stack-increase + // transactions are called during the same reward cycle and avoid the clarity + // error `ERR_INVALID_REWARD_CYCLE`. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(period), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii(Pox4SignatureTopic.StackIncrease), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-increase", + [ + // (increase-by uint) + Cl.uint(this.increaseBy), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + ]); + + // Assert + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-increase-auth", + "increase-by", + this.increaseBy.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-increase auth increase-by ${this.increaseBy}`; + } +} From 4f4b0de732fe93b794eaa68a1a91d25a2d4fa39e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 30 May 2024 09:25:46 -0500 Subject: [PATCH 0156/1400] more assertions in test for get_forking_info --- .../src/tests/nakamoto_integrations.rs | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index d09c81bcba..c2e5566a20 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -60,6 +60,7 @@ use stacks::core::{ }; use stacks::libstackerdb::SlotMetadata; use stacks::net::api::callreadonly::CallReadOnlyRequestBody; +use stacks::net::api::get_tenures_fork_info::TenureForkingInfo; use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, @@ -81,7 +82,6 @@ use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; -use stacks_signer::client::ClientError; use stacks_signer::signerdb::{BlockInfo, SignerDb}; use wsts::net::Message; @@ -4407,6 +4407,26 @@ fn signer_chainstate() { "A sibling of a previously approved block must be rejected." ); + let start_sortition = &reorg_to_block.header.consensus_hash; + let stop_sortition = &sortitions_view.cur_sortition.prior_sortition; + // check that the get_tenure_forking_info response is sane + let fork_info = signer_client + .get_tenure_forking_info(start_sortition, stop_sortition) + .unwrap(); + + // it should start and stop with the given inputs (reversed!) + assert_eq!(fork_info.first().unwrap().consensus_hash, *stop_sortition); + assert_eq!(fork_info.last().unwrap().consensus_hash, *start_sortition); + + // every step of the return should be linked to the parent + let mut prior: Option<&TenureForkingInfo> = None; + for step in fork_info.iter().rev() { + if let Some(ref prior) = prior { + assert_eq!(prior.sortition_id, step.parent_sortition_id); + } + prior = Some(step); + } + // view is stale, if we ever expand this test, sortitions_view should // be fetched again, so drop it here. drop(sortitions_view); From a83cefb7ee09046b2d8e5a8bc5aaaf19939c31ce Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:53:58 -0400 Subject: [PATCH 0157/1400] chore: remove dead code, and add a way to get the last sortition in a prepare phase --- stackslib/src/chainstate/burn/db/sortdb.rs | 196 ++++++--------------- 1 file changed, 49 insertions(+), 147 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 8f416b4c39..4e8dd50f1e 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -43,8 +43,6 @@ use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::*; use stacks_common::util::{get_epoch_time_secs, log}; -use wsts::common::Signature as WSTSSignature; -use wsts::curve::point::{Compressed, Point}; use crate::burnchains::affirmation::{AffirmationMap, AffirmationMapEntry}; use crate::burnchains::bitcoin::BitcoinNetworkType; @@ -1860,80 +1858,6 @@ impl<'a> SortitionHandleConn<'a> { SortitionHandleConn::open_reader(connection, &sn.sortition_id) } - /// Does the sortition db expect to receive blocks - /// signed by this signer set? - /// - /// This only works if `consensus_hash` is within two reward cycles (4200 blocks) of the - /// sortition pointed to by this handle's sortiton tip. If it isn't, then this - /// method returns Ok(false). This is to prevent a DDoS vector whereby compromised stale - /// Signer keys can be used to blast out lots of Nakamoto blocks that will be accepted - /// but never processed. So, `consensus_hash` can be in the same reward cycle as - /// `self.context.chain_tip`, or the previous, but no earlier. - pub fn expects_signer_signature( - &self, - consensus_hash: &ConsensusHash, - signer_signature: &WSTSSignature, - message: &[u8], - aggregate_public_key: &Point, - ) -> Result { - let sn = SortitionDB::get_block_snapshot(self, &self.context.chain_tip)? - .ok_or(db_error::NotFoundError) - .map_err(|e| { - warn!("No sortition for tip: {:?}", &self.context.chain_tip); - e - })?; - - let ch_sn = SortitionDB::get_block_snapshot_consensus(self, consensus_hash)? - .ok_or(db_error::NotFoundError) - .map_err(|e| { - warn!("No sortition for consensus hash: {:?}", consensus_hash); - e - })?; - - if ch_sn.block_height - + u64::from(self.context.pox_constants.reward_cycle_length) - + u64::from(self.context.pox_constants.prepare_length) - < sn.block_height - { - // too far in the past - debug!("Block with consensus hash {} is too far in the past", consensus_hash; - "consensus_hash" => %consensus_hash, - "block_height" => ch_sn.block_height, - "tip_block_height" => sn.block_height - ); - return Ok(false); - } - - // this given consensus hash must be an ancestor of our chain tip - let ch_at = self - .get_consensus_at(ch_sn.block_height)? - .ok_or(db_error::NotFoundError) - .map_err(|e| { - warn!("No ancestor consensus hash"; - "tip" => %self.context.chain_tip, - "consensus_hash" => %consensus_hash, - "consensus_hash height" => %ch_sn.block_height - ); - e - })?; - - if ch_at != ch_sn.consensus_hash { - // not an ancestor - warn!("Consensus hash is not an ancestor of the sortition tip"; - "tip" => %self.context.chain_tip, - "consensus_hash" => %consensus_hash - ); - return Err(db_error::NotFoundError); - } - - // is this consensus hash in this fork? - if SortitionDB::get_burnchain_header_hash_by_consensus(self, consensus_hash)?.is_none() { - return Ok(false); - } - - Ok(signer_signature.verify(aggregate_public_key, message)) - } - pub fn get_reward_set_size_at(&self, sortition_id: &SortitionId) -> Result { self.get_indexed(sortition_id, &db_keys::pox_reward_set_size()) .map(|x| { @@ -1984,32 +1908,6 @@ impl<'a> SortitionHandleConn<'a> { Ok(anchor_block_txid) } - /// Get the last processed reward cycle. - /// Since we always process a RewardSetInfo at the start of a reward cycle (anchor block or - /// no), this is simply the same as asking which reward cycle this SortitionHandleConn's - /// sortition tip is in. - pub fn get_last_processed_reward_cycle(&self) -> Result { - let sn = SortitionDB::get_block_snapshot(self, &self.context.chain_tip)? - .ok_or(db_error::NotFoundError)?; - let rc = self - .context - .pox_constants - .block_height_to_reward_cycle(self.context.first_block_height, sn.block_height) - .expect("FATAL: sortition from before system start"); - let rc_start_block = self - .context - .pox_constants - .reward_cycle_to_block_height(self.context.first_block_height, rc); - let last_rc = if sn.block_height >= rc_start_block { - rc - } else { - // NOTE: the reward cycle is "processed" at reward cycle index 1, not index 0 - rc.saturating_sub(1) - }; - - Ok(last_rc) - } - pub fn get_reward_cycle_unlocks( &mut self, cycle: u64, @@ -3535,19 +3433,39 @@ impl SortitionDB { } /// Store a pre-processed reward set. - /// `sortition_id` is the first sortition ID of the prepare phase + /// `sortition_id` is the first sortition ID of the prepare phase. + /// No-op if the reward set is empty. pub fn store_preprocessed_reward_set( sort_tx: &mut DBTx, sortition_id: &SortitionId, rc_info: &RewardCycleInfo, ) -> Result<(), db_error> { + if rc_info.known_selected_anchor_block().is_none() { + return Ok(()); + } let sql = "REPLACE INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; let rc_json = serde_json::to_string(rc_info).map_err(db_error::SerializationError)?; - let args: &[&dyn ToSql] = &[sortition_id, &rc_json]; + let args = rusqlite::params![sortition_id, &rc_json]; sort_tx.execute(sql, args)?; Ok(()) } + /// Wrapper around SortitionDBConn::get_prepare_phase_end_sortition_id_for_reward_cycle(). + /// See that method for details. + pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( + &self, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result { + self.index_conn() + .get_prepare_phase_end_sortition_id_for_reward_cycle( + &self.pox_constants, + self.first_block_height, + tip, + reward_cycle_id, + ) + } + /// Wrapper around SortitionDBConn::get_prepare_phase_start_sortition_id_for_reward_cycle(). /// See that method for details. pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( @@ -3876,6 +3794,33 @@ impl<'a> SortitionDBConn<'a> { .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) } + /// Get the prepare phase end sortition ID of a reward cycle. This is the last prepare + /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned + /// sortition will be in the preceding reward cycle) + pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( + &self, + pox_constants: &PoxConstants, + first_block_height: u64, + tip: &SortitionId, + reward_cycle_id: u64, + ) -> Result { + let prepare_phase_end = pox_constants + .reward_cycle_to_block_height(first_block_height, reward_cycle_id) + .saturating_sub(1); + + let last_sortition = + get_ancestor_sort_id(self, prepare_phase_end, tip)?.ok_or_else(|| { + error!( + "Could not find prepare phase end ancestor while fetching reward set"; + "tip_sortition_id" => %tip, + "reward_cycle_id" => reward_cycle_id, + "prepare_phase_end_height" => prepare_phase_end + ); + db_error::NotFoundError + })?; + Ok(last_sortition) + } + /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned /// sortition will be in the preceding reward cycle) @@ -6101,16 +6046,6 @@ impl<'a> SortitionHandleTx<'a> { keys.push(db_keys::pox_affirmation_map().to_string()); values.push(cur_affirmation_map.encode()); - if cfg!(test) { - // last reward cycle. - // NOTE: We keep this only for testing, since this is what the original (but - // unmigratable code) did, and we need to verify that the compatibility fix to - // SortitionDB::get_last_processed_reward_cycle() is semantically compatible - // with querying this key. - keys.push(db_keys::last_reward_cycle_key().to_string()); - values.push(db_keys::last_reward_cycle_to_string(_reward_cycle)); - } - pox_payout_addrs } else { // if this snapshot consumed some reward set entries AND @@ -6193,15 +6128,6 @@ impl<'a> SortitionHandleTx<'a> { keys.push(db_keys::pox_last_selected_anchor_txid().to_string()); values.push("".to_string()); - if cfg!(test) { - // NOTE: We keep this only for testing, since this is what the original (but - // unmigratable code) did, and we need to verify that the compatibility fix to - // SortitionDB::get_last_processed_reward_cycle() is semantically compatible - // with querying this key. - keys.push(db_keys::last_reward_cycle_key().to_string()); - values.push(db_keys::last_reward_cycle_to_string(0)); - } - // no payouts vec![] }; @@ -6543,30 +6469,6 @@ pub mod tests { use crate::core::{StacksEpochExtension, *}; use crate::util_lib::db::Error as db_error; - impl<'a> SortitionHandleConn<'a> { - /// At one point in the development lifecycle, this code depended on a MARF key/value - /// pair to map the sortition tip to the last-processed reward cycle number. This data would - /// not have been present in epoch 2.4 chainstate and earlier, but would have been present in - /// epoch 2.5 and later, since at the time it was expected that all nodes would perform a - /// genesis sync when booting into epoch 2.5. However, that requirement changed at the last - /// minute, so this code was reworked to avoid the need for the MARF key. But to ensure that - /// this method is semantically consistent with the old code (which the Nakamoto chains - /// coordinator depends on), this code will test that the new reward cycle calculation matches - /// the old reward cycle calculation. - #[cfg(test)] - pub fn legacy_get_last_processed_reward_cycle(&self) -> Result { - // verify that this is semantically compatible with the older behavior, which shipped - // for epoch 2.5 but needed to be removed at the last minute in order to support a - // migration path from 2.4 chainstate to 2.5/3.0 chainstate. - let encoded_rc = self - .get_indexed(&self.context.chain_tip, &db_keys::last_reward_cycle_key())? - .expect("FATAL: no last-processed reward cycle"); - - let expected_rc = db_keys::last_reward_cycle_from_string(&encoded_rc); - Ok(expected_rc) - } - } - impl<'a> SortitionHandleTx<'a> { /// Update the canonical Stacks tip (testing only) pub fn test_update_canonical_stacks_tip( From 6ab82f41f24c74004f40048efe290217b7689374 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:54:21 -0400 Subject: [PATCH 0158/1400] chore: update docs on when nakamoto needs the preprocessed reward set --- stackslib/src/chainstate/coordinator/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index f34e21d1bd..973dd83b53 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -830,7 +830,8 @@ pub fn get_reward_cycle_info( }; // cache the reward cycle info as of the first sortition in the prepare phase, so that - // the Nakamoto epoch can go find it later + // the first Nakamoto epoch can go find it later. Subsequent Nakamoto epochs will use the + // reward set stored to the Nakamoto chain state. let ic = sort_db.index_handle(sortition_tip); let prev_reward_cycle = burnchain .block_height_to_reward_cycle(burn_height) From e69ef4a8102fbfc0225acc2183ef610ede47f261 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:54:47 -0400 Subject: [PATCH 0159/1400] feat: add `load_nakamoto_reward_set()` function, which will load a Nakamoto-epoch reward set from the Nakamoto chain state, except for the first-ever Nakamoto reward set which is necessarily a preprocessed reward set. Also, remove all calls to load a preprocessed sortition DB reward set from the Nakamoto coordinator --- .../chainstate/nakamoto/coordinator/mod.rs | 262 +++++++++++++----- 1 file changed, 189 insertions(+), 73 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index f399615c80..df4966da49 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -42,7 +42,7 @@ use crate::chainstate::coordinator::{ }; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::boot::{RewardSet, SIGNERS_NAME}; -use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use crate::chainstate::stacks::Error as ChainstateError; use crate::cost_estimates::{CostEstimator, FeeEstimator}; @@ -210,14 +210,9 @@ fn find_prepare_phase_sortitions( /// If this method returns None, the caller should try again when there are more Stacks blocks. In /// Nakamoto, every reward cycle _must_ have a PoX anchor block; otherwise, the chain halts. /// -/// N.B. this method assumes that the prepare phase is comprised _solely_ of Nakamoto tenures. It -/// will not work if any of the prepare-phase tenures are from epoch 2.x. -/// /// Returns Ok(Some(reward-cycle-info)) if we found the first sortition in the prepare phase. /// Returns Ok(None) if we're still waiting for the PoX anchor block sortition /// Returns Err(Error::NotInPreparePhase) if `burn_height` is not in the prepare phase -/// Returns Err(Error::RewardCycleAlreadyProcessed) if the reward set for this reward cycle has -/// already been processed. pub fn get_nakamoto_reward_cycle_info( burn_height: u64, sortition_tip: &SortitionId, @@ -243,7 +238,6 @@ pub fn get_nakamoto_reward_cycle_info( let reward_cycle = burnchain .next_reward_cycle(burn_height) .expect("FATAL: no reward cycle for burn height"); - let reward_start_height = burnchain.reward_cycle_to_block_height(reward_cycle); debug!("Processing reward set for Nakamoto reward cycle"; "burn_height" => burn_height, @@ -251,26 +245,144 @@ pub fn get_nakamoto_reward_cycle_info( "reward_cycle_length" => burnchain.pox_constants.reward_cycle_length, "prepare_phase_length" => burnchain.pox_constants.prepare_length); + let Some((rc_info, anchor_block_header)) = load_nakamoto_reward_set( + burn_height, + sortition_tip, + burnchain, + chain_state, + sort_db, + provider, + )? + else { + return Ok(None); + }; + + let block_id = match anchor_block_header.anchored_header { + StacksBlockHeaderTypes::Epoch2(..) => anchor_block_header.index_block_hash(), + StacksBlockHeaderTypes::Nakamoto(ref header) => header.block_id(), + }; + + info!( + "Anchor block selected"; + "cycle" => reward_cycle, + "block_id" => %block_id, + "consensus_hash" => %anchor_block_header.consensus_hash, + "burn_height" => anchor_block_header.burn_header_height, + "anchor_chain_tip" => %anchor_block_header.index_block_hash(), + "anchor_chain_tip_height" => %anchor_block_header.burn_header_height, + ); + + return Ok(Some(rc_info)); +} + +/// Helper to get the Nakamoto reward set for a given reward cycle. +/// In all but the first Nakamoto reward cycle, this will load up the stored reward set from the +/// Nakamoto chain state. In the first Nakamoto reward cycle, where the reward set is computed +/// from epoch2 state, the reward set will be loaded from the sortition DB (which is the only place +/// it will be stored). +/// +/// Returns Ok(Some((reward set info, PoX anchor block header))) on success +/// Returns Ok(None) if the reward set is not yet known, but could be known by the time a +/// subsequent call is made. +pub fn load_nakamoto_reward_set( + burn_height: u64, + sortition_tip: &SortitionId, + burnchain: &Burnchain, + chain_state: &mut StacksChainState, + sort_db: &SortitionDB, + provider: &U, +) -> Result, Error> { + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? + .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", burn_height)); + + // calculating the reward set for the _next_ reward cycle + let reward_cycle = burnchain + .next_reward_cycle(burn_height) + .expect("FATAL: no reward cycle for burn height"); + + let reward_start_height = burnchain.reward_cycle_to_block_height(reward_cycle); + // Find the first Stacks block in this reward cycle's preceding prepare phase. // This block will have invoked `.signers.stackerdb-set-signer-slots()` with the reward set. // Note that we may not have processed it yet. But, if we do find it, then it's // unique (and since Nakamoto Stacks blocks are processed in order, the anchor block // cannot change later). - let prepare_phase_sortitions = - find_prepare_phase_sortitions(sort_db, burnchain, sortition_tip)?; - - // did we already calculate the reward cycle info? If so, then return it. - let first_sortition_id = if let Some(first_sn) = prepare_phase_sortitions.first() { - if let Some(persisted_reward_cycle_info) = - SortitionDB::get_preprocessed_reward_set(sort_db.conn(), &first_sn.sortition_id)? + let first_epoch30_reward_cycle = burnchain + .next_reward_cycle(epoch_at_height.start_height) + .expect("FATAL: no reward cycle for epoch 3.0 start height"); + + if epoch_at_height.epoch_id < StacksEpochId::Epoch30 + || (epoch_at_height.epoch_id == StacksEpochId::Epoch30 + && reward_cycle == first_epoch30_reward_cycle) + { + // in epoch 2.5, and in the first reward cycle of epoch 3.0, the reward set can *only* be found in the sortition DB. + // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward + // cycle info in the nakamoto chain state. + if let Ok(persisted_reward_cycle_info) = + sort_db.get_preprocessed_reward_set_of(sortition_tip) { - return Ok(Some(persisted_reward_cycle_info)); + if persisted_reward_cycle_info + .known_selected_anchor_block() + .is_none() + { + debug!("No reward set known yet for prepare phase"; + "sortition_tip" => %sortition_tip); + return Ok(None); + } + + // find the corresponding Stacks anchor block header + let Some((anchor_block_hash, _)) = persisted_reward_cycle_info.selected_anchor_block() + else { + // should be unreachable + error!("No anchor block known for persisted reward set"; + "sortition_tip" => %sortition_tip); + return Ok(None); + }; + + let ic = sort_db.index_conn(); + let Some(anchor_block_snapshot) = + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + sortition_tip, + anchor_block_hash, + )? + else { + // should be unreachable + error!("No ancestor block snapshot for anchor block"; + "anchor_block_hash" => %anchor_block_hash, + "sortition_tip" => %sortition_tip); + + return Ok(None); + }; + + let Some(anchor_block_header) = + StacksChainState::get_stacks_block_header_info_by_consensus_hash( + chain_state.db(), + &anchor_block_snapshot.consensus_hash, + )? + else { + // should be unreachable + error!("No block header for anchor block"; + "consensus_hash" => %anchor_block_snapshot.consensus_hash, + "anchor_block_hash" => %anchor_block_hash); + return Ok(None); + }; + + debug!("Loaded reward set calculated in epoch 2.5 for reward cycle {} (which is in epoch {})", reward_cycle, epoch_at_height.epoch_id); + return Ok(Some((persisted_reward_cycle_info, anchor_block_header))); } - first_sn.sortition_id.clone() - } else { - // can't do anything + + // no reward set known yet. It's possible that it simply hasn't been processed yet. + debug!("No pre-processed PoX reward set known for pre-Nakamoto cycle {reward_cycle}"); return Ok(None); - }; + } + + // find the reward cycle's prepare-phase sortitions (in the preceding reward cycle) + let sort_end = sort_db.get_prepare_phase_end_sortition_id_for_reward_cycle( + sortition_tip, + reward_cycle.saturating_sub(1), + )?; + let prepare_phase_sortitions = find_prepare_phase_sortitions(sort_db, burnchain, &sort_end)?; // iterate over the prepare_phase_sortitions, finding the first such sortition // with a processed stacks block @@ -317,7 +429,7 @@ pub fn get_nakamoto_reward_cycle_info( .expect("FATAL: no snapshot for winning PoX anchor block"); // make sure the `anchor_block` field is the same as whatever goes into the block-commit, - // or PoX ancestry queries won't work + // or PoX ancestry queries won't work. let (block_id, stacks_block_hash) = match anchor_block_header.anchored_header { StacksBlockHeaderTypes::Epoch2(ref header) => ( StacksBlockId::new(&anchor_block_header.consensus_hash, &header.block_hash()), @@ -330,19 +442,18 @@ pub fn get_nakamoto_reward_cycle_info( let txid = anchor_block_sn.winning_block_txid; - info!( - "Anchor block selected"; - "cycle" => reward_cycle, - "block_id" => %block_id, - "consensus_hash" => %anchor_block_header.consensus_hash, - "burn_height" => anchor_block_header.burn_header_height, - "anchor_chain_tip" => %anchor_block_header.index_block_hash(), - "anchor_chain_tip_height" => %anchor_block_header.burn_header_height, - "first_prepare_sortition_id" => %first_sortition_id - ); + test_debug!("Stacks anchor block found"; + "block_id" => %block_id, + "block_hash" => %stacks_block_hash, + "consensus_hash" => %anchor_block_sn.consensus_hash, + "txid" => %txid, + "reward_start_height" => %reward_start_height, + "burnchain_height" => %anchor_block_sn.block_height); let reward_set = provider.get_reward_set_nakamoto( - reward_start_height, + // NOTE: the .saturating_sub(2) is needed here because reward_start_height is at reward + // index 1, while we need the highest height in the last cycle. + reward_start_height.saturating_sub(2), chain_state, burnchain, sort_db, @@ -358,13 +469,7 @@ pub fn get_nakamoto_reward_cycle_info( reward_cycle, anchor_status, }; - - // persist this - let mut tx = sort_db.tx_begin()?; - SortitionDB::store_preprocessed_reward_set(&mut tx, &first_sortition_id, &rc_info)?; - tx.commit()?; - - return Ok(Some(rc_info)); + Ok(Some((rc_info, anchor_block_header))) } /// Get the next PoX recipients in the Nakamoto epoch. @@ -375,35 +480,21 @@ pub fn get_nakamoto_reward_cycle_info( pub fn get_nakamoto_next_recipients( sortition_tip: &BlockSnapshot, sort_db: &mut SortitionDB, + chain_state: &mut StacksChainState, burnchain: &Burnchain, ) -> Result, Error> { let reward_cycle_info = if burnchain.is_reward_cycle_start(sortition_tip.block_height + 1) { - // load up new reward cycle info so we can start using *that* - let prepare_phase_sortitions = - find_prepare_phase_sortitions(sort_db, burnchain, &sortition_tip.parent_sortition_id)?; - - // NOTE: this must panic because Nakamoto's first reward cycle has stackers - let first_sn = prepare_phase_sortitions - .first() - .expect("FATAL: unreachable: no prepare-phase sortitions at start of reward cycle"); - - debug!("Get pre-processed reward set"; - "sortition_id" => %first_sn.sortition_id); - - // NOTE: don't panic here. The only caller of this method is a stacks-node miner, - // and they *may* have invoked this before they've processed the prepare phase. - // That's recoverable by simply waiting to mine until they've processed those - // blocks. - let reward_set = - SortitionDB::get_preprocessed_reward_set(sort_db.conn(), &first_sn.sortition_id)? - .ok_or_else(|| { - warn!( - "No preprocessed reward set found"; - "reward_cycle_start" => sortition_tip.block_height + 1, - "first_prepare_sortition_id" => %first_sn.sortition_id - ); - Error::PoXNotProcessedYet - })?; + let Some((reward_set, _)) = load_nakamoto_reward_set( + sortition_tip.block_height, + &sortition_tip.sortition_id, + burnchain, + chain_state, + sort_db, + &OnChainRewardSetProvider::new(), + )? + else { + return Ok(None); + }; Some(reward_set) } else { None @@ -465,9 +556,18 @@ impl< .expect("FATAL: epoch3 block height has no reward cycle"); // only proceed if we have processed the _anchor block_ for this reward cycle - let handle_conn = self.sortition_db.index_handle(&canonical_sortition_tip); - let last_processed_rc = handle_conn.get_last_processed_reward_cycle()?; - Ok(last_processed_rc >= first_epoch3_reward_cycle) + let Some((rc_info, _)) = load_nakamoto_reward_set( + canonical_sn.block_height, + &canonical_sn.sortition_id, + &self.burnchain, + &mut self.chain_state_db, + &self.sortition_db, + &OnChainRewardSetProvider::new(), + )? + else { + return Ok(false); + }; + Ok(rc_info.reward_cycle >= first_epoch3_reward_cycle) } /// This is the main loop body for the coordinator in epoch 3. @@ -707,8 +807,24 @@ impl< }); let last_processed_reward_cycle = { - let ic = self.sortition_db.index_handle(&canonical_sortition_tip); - ic.get_last_processed_reward_cycle()? + let canonical_sn = SortitionDB::get_block_snapshot( + &self.sortition_db.conn(), + &canonical_sortition_tip, + )? + .ok_or(DBError::NotFoundError)?; + let Some((rc_info, _)) = load_nakamoto_reward_set( + canonical_sn.block_height, + &canonical_sn.sortition_id, + &self.burnchain, + &mut self.chain_state_db, + &self.sortition_db, + &OnChainRewardSetProvider::new(), + )? + else { + // no anchor block yet, so try processing another block + continue; + }; + rc_info.reward_cycle }; if last_processed_reward_cycle > current_reward_cycle { @@ -863,10 +979,10 @@ impl< let reward_cycle_info = self.get_nakamoto_reward_cycle_info(header.block_height)?; if let Some(rc_info) = reward_cycle_info { // in nakamoto, if we have any reward cycle info at all, it will be known. - assert!( - rc_info.known_selected_anchor_block().is_some(), - "FATAL: unknown PoX anchor block in Nakamoto" - ); + if rc_info.known_selected_anchor_block().is_none() { + warn!("Unknown PoX anchor block in Nakamoto (at height {}). Refusing to process more burnchain blocks until that changes.", header.block_height); + return Ok(false); + } } } From 35368887a3a81ff8d449a629e261c5f4eba1e4cf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:55:50 -0400 Subject: [PATCH 0160/1400] chore: API sync --- stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index b7c0bb5ba9..a95d968f3e 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -400,6 +400,7 @@ fn replay_reward_cycle( info!("Process Nakamoto block {} ({:?}", &block_id, &block.header); let accepted = Relayer::process_new_nakamoto_block( + &peer.config.burnchain, &sortdb, &mut sort_handle, &mut node.chainstate, From 201acbe2629a3c8a6719631d35d22606e68ccc0e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:56:23 -0400 Subject: [PATCH 0161/1400] chore: fmt --- stackslib/src/chainstate/nakamoto/test_signers.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 4a2aa4f29c..13d7f2ff1e 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -50,7 +50,6 @@ use crate::chainstate::burn::*; use crate::chainstate::coordinator::{ ChainsCoordinator, Error as CoordinatorError, OnChainRewardSetProvider, }; -use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::PoxAddress; From 1d8bea0e639af485b5d3593415d664707e58f63f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:56:38 -0400 Subject: [PATCH 0162/1400] feat: test new reward-set load path by using it directly in the Nakamoto test infrastructure, instead of loading it from a preprocessed reward set in the sortition DB --- .../src/chainstate/nakamoto/tests/node.rs | 33 ++++++++++++++----- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index fc425d0580..1054f584b6 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -46,7 +46,9 @@ use crate::chainstate::burn::*; use crate::chainstate::coordinator::{ ChainsCoordinator, Error as CoordinatorError, OnChainRewardSetProvider, }; -use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; +use crate::chainstate::nakamoto::coordinator::{ + get_nakamoto_next_recipients, load_nakamoto_reward_set, +}; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; @@ -579,12 +581,20 @@ impl TestStacksNode { .unwrap(); // Get the reward set - let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); - let reward_set = sortdb - .get_preprocessed_reward_set_of(&sort_tip) - .expect("Failed to get reward cycle info") - .known_selected_anchor_block_owned() - .expect("Expected a reward set"); + let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let reward_set = load_nakamoto_reward_set( + sort_tip_sn.block_height, + &sort_tip_sn.sortition_id, + &miner.burnchain, + chainstate, + sortdb, + &OnChainRewardSetProvider::new(), + ) + .expect("Failed to load reward set") + .expect("Expected a reward set") + .0 + .known_selected_anchor_block_owned() + .expect("Unknown reward set"); test_debug!( "Signing Nakamoto block {} in tenure {} with key in cycle {}", @@ -609,6 +619,7 @@ impl TestStacksNode { let mut sort_handle = sortdb.index_handle(&sort_tip); info!("Processing the new nakamoto block"); let accepted = match Relayer::process_new_nakamoto_block( + &miner.burnchain, sortdb, &mut sort_handle, chainstate, @@ -912,7 +923,12 @@ impl<'a> TestPeer<'a> { } // patch in reward set info - match get_nakamoto_next_recipients(&tip, &mut sortdb, &self.config.burnchain) { + match get_nakamoto_next_recipients( + &tip, + &mut sortdb, + &mut stacks_node.chainstate, + &self.config.burnchain, + ) { Ok(recipients) => { block_commit_op.commit_outs = match recipients { Some(info) => { @@ -1130,6 +1146,7 @@ impl<'a> TestPeer<'a> { let block_id = block.block_id(); debug!("Process Nakamoto block {} ({:?}", &block_id, &block.header); let accepted = Relayer::process_new_nakamoto_block( + &self.network.burnchain, &sortdb, &mut sort_handle, &mut node.chainstate, From 9be7365ae16045cadaffe2cfea2c21762d0da6ba Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:57:13 -0400 Subject: [PATCH 0163/1400] chore: API sync -- we now use a `CurrentRewardSet` struct in the PeerNetwork to reference the reward cycle and its reward set --- .../net/download/nakamoto/download_state_machine.rs | 4 ++-- .../src/net/download/nakamoto/tenure_downloader.rs | 2 +- .../src/net/download/nakamoto/tenure_downloader_set.rs | 8 ++++---- .../download/nakamoto/tenure_downloader_unconfirmed.rs | 10 +++++----- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index c95dc6d5f3..ae5c8e055c 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -63,7 +63,7 @@ use crate::net::inv::epoch2x::InvState; use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::rpc::NeighborRPC; use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; @@ -1155,7 +1155,7 @@ impl NakamotoDownloadStateMachine { fn update_tenure_downloaders( &mut self, count: usize, - current_reward_sets: &BTreeMap, + current_reward_sets: &BTreeMap, ) { self.tenure_downloads.make_tenure_downloaders( &mut self.tenure_download_schedule, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index a3586602e6..9f261929b5 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -57,7 +57,7 @@ use crate::net::inv::epoch2x::InvState; use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::rpc::NeighborRPC; use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 0100eb0ecd..f075028589 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -62,7 +62,7 @@ use crate::net::inv::epoch2x::InvState; use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::rpc::NeighborRPC; use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; @@ -419,7 +419,7 @@ impl NakamotoTenureDownloaderSet { available: &mut HashMap>, tenure_block_ids: &HashMap, count: usize, - current_reward_cycles: &BTreeMap, + current_reward_cycles: &BTreeMap, ) { test_debug!("schedule: {:?}", schedule); test_debug!("available: {:?}", &available); @@ -482,7 +482,7 @@ impl NakamotoTenureDownloaderSet { }; let Some(Some(start_reward_set)) = current_reward_cycles .get(&tenure_info.start_reward_cycle) - .map(|cycle_info| cycle_info.known_selected_anchor_block()) + .map(|cycle_info| cycle_info.reward_set()) else { test_debug!( "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", @@ -494,7 +494,7 @@ impl NakamotoTenureDownloaderSet { }; let Some(Some(end_reward_set)) = current_reward_cycles .get(&tenure_info.end_reward_cycle) - .map(|cycle_info| cycle_info.known_selected_anchor_block()) + .map(|cycle_info| cycle_info.reward_set()) else { test_debug!( "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index 4d4d4dee47..97ccb2c389 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -62,7 +62,7 @@ use crate::net::inv::epoch2x::InvState; use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::rpc::NeighborRPC; use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; @@ -186,7 +186,7 @@ impl NakamotoUnconfirmedTenureDownloader { local_sort_tip: &BlockSnapshot, chainstate: &StacksChainState, remote_tenure_tip: RPCGetTenureInfo, - current_reward_sets: &BTreeMap, + current_reward_sets: &BTreeMap, ) -> Result<(), NetError> { if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { return Err(NetError::InvalidState); @@ -301,7 +301,7 @@ impl NakamotoUnconfirmedTenureDownloader { // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions let Some(Some(confirmed_reward_set)) = current_reward_sets .get(&parent_tenure_rc) - .map(|cycle_info| cycle_info.known_selected_anchor_block()) + .map(|cycle_info| cycle_info.reward_set()) else { warn!( "No signer public keys for confirmed tenure {} (rc {})", @@ -312,7 +312,7 @@ impl NakamotoUnconfirmedTenureDownloader { let Some(Some(unconfirmed_reward_set)) = current_reward_sets .get(&tenure_rc) - .map(|cycle_info| cycle_info.known_selected_anchor_block()) + .map(|cycle_info| cycle_info.reward_set()) else { warn!( "No signer public keys for unconfirmed tenure {} (rc {})", @@ -717,7 +717,7 @@ impl NakamotoUnconfirmedTenureDownloader { sortdb: &SortitionDB, local_sort_tip: &BlockSnapshot, chainstate: &StacksChainState, - current_reward_sets: &BTreeMap, + current_reward_sets: &BTreeMap, ) -> Result>, NetError> { match &self.state { NakamotoUnconfirmedDownloadState::GetTenureInfo => { From 99bce259cd8221afd2c75214c0a5f8d07280e709 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:57:38 -0400 Subject: [PATCH 0164/1400] chore: remove dead code --- stackslib/src/net/mod.rs | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 1cead0306a..b64a537e81 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2756,6 +2756,7 @@ pub mod test { let receipts_res = self.relayer.process_network_result( self.network.get_local_peer(), &mut net_result, + &self.network.burnchain, &mut sortdb, &mut stacks_node.chainstate, &mut mempool, @@ -3884,29 +3885,12 @@ pub mod test { } /// Verify that the sortition DB migration into Nakamoto worked correctly. - /// For now, it's sufficient to check that the `get_last_processed_reward_cycle()` calculation - /// works the same across both the original and migration-compatible implementations. pub fn check_nakamoto_migration(&mut self) { let mut sortdb = self.sortdb.take().unwrap(); let mut node = self.stacks_node.take().unwrap(); let chainstate = &mut node.chainstate; let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - for height in 0..=tip.block_height { - let sns = - SortitionDB::get_all_snapshots_by_burn_height(sortdb.conn(), height).unwrap(); - for sn in sns { - let ih = sortdb.index_handle(&sn.sortition_id); - let highest_processed_rc = ih.get_last_processed_reward_cycle().unwrap(); - let expected_highest_processed_rc = - ih.legacy_get_last_processed_reward_cycle().unwrap(); - assert_eq!( - highest_processed_rc, expected_highest_processed_rc, - "BUG: at burn height {} the highest-processed reward cycles diverge", - height - ); - } - } let epochs = SortitionDB::get_stacks_epochs(sortdb.conn()).unwrap(); let epoch_3_idx = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap(); From e5d2d4840779d091999c3743eb3241f9b9d5245e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:57:51 -0400 Subject: [PATCH 0165/1400] feat: use the new reward-set loader path in the PeerNetwork to load the Nakamoto reward set from the Nakamoto chainstate instead of the sortition DB. Update the caching logic as well to key each cached reward set by anchor block ID --- stackslib/src/net/p2p.rs | 183 ++++++++++++++++++++------------------- 1 file changed, 93 insertions(+), 90 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index fbb6c375ed..a34c212e69 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -40,13 +40,14 @@ use {mio, url}; use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; use crate::burnchains::{Address, Burnchain, BurnchainView, PublicKey}; -use crate::chainstate::burn::db::sortdb::{BlockHeaderCache, SortitionDB}; +use crate::chainstate::burn::db::sortdb::{get_ancestor_sort_id, BlockHeaderCache, SortitionDB}; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::{ static_get_canonical_affirmation_map, static_get_heaviest_affirmation_map, - static_get_stacks_tip_affirmation_map, RewardCycleInfo, + static_get_stacks_tip_affirmation_map, OnChainRewardSetProvider, RewardCycleInfo, }; -use crate::chainstate::stacks::boot::MINERS_NAME; +use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; +use crate::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{StacksBlockHeader, MAX_BLOCK_LEN, MAX_TRANSACTION_LEN}; use crate::core::StacksEpoch; @@ -232,6 +233,24 @@ impl ConnectingPeer { } } +#[derive(Clone, Debug, PartialEq)] +pub struct CurrentRewardSet { + pub reward_cycle: u64, + pub reward_cycle_info: RewardCycleInfo, + pub anchor_block_consensus_hash: ConsensusHash, + pub anchor_block_hash: BlockHeaderHash, +} + +impl CurrentRewardSet { + pub fn reward_set(&self) -> Option<&RewardSet> { + self.reward_cycle_info.known_selected_anchor_block() + } + + pub fn anchor_block_id(&self) -> StacksBlockId { + StacksBlockId::new(&self.anchor_block_consensus_hash, &self.anchor_block_hash) + } +} + pub struct PeerNetwork { // constants pub peer_version: u32, @@ -258,16 +277,9 @@ pub struct PeerNetwork { /// In epoch 2.x, this is the same as the tip block ID /// In nakamoto, this is the block ID of the first block in the current tenure pub tenure_start_block_id: StacksBlockId, - /// The reward sets of the current and past reward cycle. + /// The reward sets of the past three reward cycles. /// Needed to validate blocks, which are signed by a threshold of stackers - pub current_reward_sets: BTreeMap, - /// The sortition IDs that began the prepare-phases for given reward cycles. This is used to - /// determine whether or not the reward cycle info in `current_reward_sets` is still valid -- a - /// burnchain fork may invalidate them, so the code must check that the sortition ID for the - /// start of the prepare-phase is still canonical. - /// This needs to be in 1-to-1 correspondence with `current_reward_sets` -- the sortition IDs - /// that make up the values need to correspond to the reward sets computed as of the sortition. - pub current_reward_set_ids: BTreeMap, + pub current_reward_sets: BTreeMap, // information about the state of the network's anchor blocks pub heaviest_affirmation_map: AffirmationMap, @@ -479,7 +491,6 @@ impl PeerNetwork { parent_stacks_tip: (ConsensusHash([0x00; 20]), BlockHeaderHash([0x00; 32]), 0), tenure_start_block_id: StacksBlockId([0x00; 32]), current_reward_sets: BTreeMap::new(), - current_reward_set_ids: BTreeMap::new(), peerdb: peerdb, atlasdb: atlasdb, @@ -5434,38 +5445,10 @@ impl PeerNetwork { } /// Clear out old reward cycles - fn free_old_reward_cycles( - &mut self, - sortdb: &SortitionDB, - tip_sortition_id: &SortitionId, - prev_rc: u64, - ) { + fn free_old_reward_cycles(&mut self, rc: u64) { if self.current_reward_sets.len() > 3 { self.current_reward_sets.retain(|old_rc, _| { - if (*old_rc).saturating_add(1) < prev_rc { - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - } - let Some(old_sortition_id) = self.current_reward_set_ids.get(old_rc) else { - // shouldn't happen - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - }; - let Ok(prepare_phase_sort_id) = sortdb - .get_prepare_phase_start_sortition_id_for_reward_cycle( - &tip_sortition_id, - *old_rc, - ) - else { - self.current_reward_set_ids.remove(old_rc); - test_debug!("Drop reward cycle info for cycle {}", old_rc); - return false; - }; - if prepare_phase_sort_id != *old_sortition_id { - // non-canonical reward cycle info - self.current_reward_set_ids.remove(old_rc); + if (*old_rc).saturating_add(2) < rc { test_debug!("Drop reward cycle info for cycle {}", old_rc); return false; } @@ -5474,10 +5457,11 @@ impl PeerNetwork { } } - /// Refresh our view of the last two reward cycles + /// Refresh our view of the last three reward cycles fn refresh_reward_cycles( &mut self, sortdb: &SortitionDB, + chainstate: &mut StacksChainState, tip_sn: &BlockSnapshot, ) -> Result<(), net_error> { let cur_rc = self @@ -5486,57 +5470,58 @@ impl PeerNetwork { .expect("FATAL: sortition from before system start"); let prev_rc = cur_rc.saturating_sub(1); + let prev_prev_rc = prev_rc.saturating_sub(1); + let ih = sortdb.index_handle(&tip_sn.sortition_id); - // keyed by both rc and sortition ID in case there's a bitcoin fork -- we'd want the - // canonical reward set to be loaded - let cur_rc_sortition_id = sortdb - .get_prepare_phase_start_sortition_id_for_reward_cycle(&tip_sn.sortition_id, cur_rc)?; - let prev_rc_sortition_id = sortdb - .get_prepare_phase_start_sortition_id_for_reward_cycle(&tip_sn.sortition_id, prev_rc)?; - - for (rc, sortition_id) in [ - (prev_rc, prev_rc_sortition_id), - (cur_rc, cur_rc_sortition_id), - ] - .into_iter() - { - if let Some(sort_id) = self.current_reward_set_ids.get(&rc) { - if sort_id == &sortition_id { - continue; - } - } - let Ok((reward_cycle_info, reward_cycle_sort_id)) = sortdb - .get_preprocessed_reward_set_for_reward_cycle(&tip_sn.sortition_id, rc) - .map_err(|e| { - warn!( - "Failed to load reward set for cycle {} ({}): {:?}", - rc, &sortition_id, &e - ); - e - }) + for rc in [cur_rc, prev_rc, prev_prev_rc] { + let rc_start_height = self.burnchain.reward_cycle_to_block_height(rc) + 1; + let Some(ancestor_sort_id) = + get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? else { - // NOTE: this should never be reached - error!("Unreachable code (but not panicking): no reward cycle info for reward cycle {}", rc); + // reward cycle is too far back for there to be an ancestor continue; }; - if !reward_cycle_info.is_reward_info_known() { - // haven't yet processed the anchor block, so don't store - debug!("Reward cycle info for cycle {} at sortition {} expects the PoX anchor block, so will not cache", rc, &reward_cycle_sort_id); - continue; + let ancestor_ih = sortdb.index_handle(&ancestor_sort_id); + let anchor_hash_opt = ancestor_ih.get_last_anchor_block_hash()?; + + if let Some(cached_rc_info) = self.current_reward_sets.get(&rc) { + if let Some(anchor_hash) = anchor_hash_opt.as_ref() { + if cached_rc_info.anchor_block_hash == *anchor_hash { + // cached reward set data is still valid + continue; + } + } } - test_debug!( - "Reward cycle info for cycle {} at sortition {} is {:?}", + let Some((reward_set_info, anchor_block_header)) = load_nakamoto_reward_set( rc, - &reward_cycle_sort_id, - &reward_cycle_info - ); - self.current_reward_sets.insert(rc, reward_cycle_info); - self.current_reward_set_ids.insert(rc, reward_cycle_sort_id); - } + &tip_sn.sortition_id, + &self.burnchain, + chainstate, + sortdb, + &OnChainRewardSetProvider::new(), + ) + .map_err(|e| { + warn!( + "Failed to load reward cycle info for cycle {}: {:?}", + rc, &e + ); + e + }) + .unwrap_or(None) else { + continue; + }; + + let rc_info = CurrentRewardSet { + reward_cycle: rc, + reward_cycle_info: reward_set_info, + anchor_block_consensus_hash: anchor_block_header.consensus_hash, + anchor_block_hash: anchor_block_header.anchored_header.block_hash(), + }; - // free memory - self.free_old_reward_cycles(sortdb, &tip_sn.sortition_id, prev_rc); + self.current_reward_sets.insert(rc, rc_info); + } + self.free_old_reward_cycles(cur_rc); Ok(()) } @@ -5560,7 +5545,9 @@ impl PeerNetwork { SortitionDB::get_canonical_stacks_chain_tip_hash_and_height(sortdb.conn())?; let burnchain_tip_changed = canonical_sn.block_height != self.chain_view.burn_block_height - || self.num_state_machine_passes == 0; + || self.num_state_machine_passes == 0 + || canonical_sn.sortition_id != self.burnchain_tip.sortition_id; + let stacks_tip_changed = self.stacks_tip != stacks_tip; let new_stacks_tip_block_id = StacksBlockId::new(&stacks_tip.0, &stacks_tip.1); let need_stackerdb_refresh = canonical_sn.canonical_stacks_tip_consensus_hash @@ -5568,8 +5555,8 @@ impl PeerNetwork { || burnchain_tip_changed || stacks_tip_changed; - if stacks_tip_changed || burnchain_tip_changed { - self.refresh_reward_cycles(sortdb, &canonical_sn)?; + if burnchain_tip_changed || stacks_tip_changed { + self.refresh_reward_cycles(sortdb, chainstate, &canonical_sn)?; } let mut ret: HashMap> = HashMap::new(); @@ -6789,11 +6776,13 @@ mod test { while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { if let Ok(mut result) = peer_1.step_with_ibd(false) { let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); peer_1 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -6807,11 +6796,13 @@ mod test { if let Ok(mut result) = peer_2.step_with_ibd(false) { let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); peer_2 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -6976,11 +6967,13 @@ mod test { while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { if let Ok(mut result) = peer_1.step_with_ibd(false) { let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); peer_1 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -6994,11 +6987,13 @@ mod test { if let Ok(mut result) = peer_2.step_with_ibd(false) { let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); peer_2 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -7180,11 +7175,13 @@ mod test { while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs / 2 { if let Ok(mut result) = peer_1.step_with_ibd(false) { let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); peer_1 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -7198,11 +7195,13 @@ mod test { if let Ok(mut result) = peer_2.step_with_ibd(false) { let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); peer_2 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -7364,11 +7363,13 @@ mod test { while peer_1_mempool_txs < num_txs || peer_2.network.mempool_sync_txs < (num_txs as u64) { if let Ok(mut result) = peer_1.step_with_ibd(false) { let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); peer_1 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, @@ -7382,11 +7383,13 @@ mod test { if let Ok(mut result) = peer_2.step_with_ibd(false) { let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); peer_2 .with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, From 19c592e48cfd4839fa93917be23520964fe94468 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:58:36 -0400 Subject: [PATCH 0166/1400] feat: use new Nakamoto reward set loading code to fetch the reward set when validating a block to be relayed --- stackslib/src/net/relay.rs | 38 +++++++++++++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 4db684ca35..a073398f42 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -38,7 +38,10 @@ use crate::chainstate::burn::db::sortdb::{ }; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::comm::CoordinatorChannels; -use crate::chainstate::coordinator::BlockEventDispatcher; +use crate::chainstate::coordinator::{ + BlockEventDispatcher, Error as CoordinatorError, OnChainRewardSetProvider, +}; +use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use crate::chainstate::stacks::db::{StacksChainState, StacksEpochReceipt, StacksHeaderInfo}; @@ -655,6 +658,7 @@ impl Relayer { /// downloaded by us, or pushed via p2p. /// Return Ok(true) if we stored it, Ok(false) if we didn't pub fn process_new_nakamoto_block( + burnchain: &Burnchain, sortdb: &SortitionDB, sort_handle: &mut SortitionHandleConn, chainstate: &mut StacksChainState, @@ -725,15 +729,35 @@ impl Relayer { let config = chainstate.config(); let tip = block_sn.sortition_id; - let reward_info = match sortdb.get_preprocessed_reward_set_of(&tip) { - Ok(x) => x, - Err(db_error::NotFoundError) => { + let reward_info = match load_nakamoto_reward_set( + block_sn.block_height, + &tip, + burnchain, + chainstate, + sortdb, + &OnChainRewardSetProvider::new(), + ) { + Ok(Some((reward_info, ..))) => reward_info, + Ok(None) => { error!("No RewardCycleInfo found for tip {}", tip); return Err(chainstate_error::PoxNoRewardCycle); } - Err(e) => { + Err(CoordinatorError::DBError(db_error::NotFoundError)) => { + error!("No RewardCycleInfo found for tip {}", tip); + return Err(chainstate_error::PoxNoRewardCycle); + } + Err(CoordinatorError::ChainstateError(e)) => { + error!("No RewardCycleInfo loaded for tip {}: {:?}", tip, &e); + return Err(e); + } + Err(CoordinatorError::DBError(e)) => { + error!("No RewardCycleInfo loaded for tip {}: {:?}", tip, &e); return Err(chainstate_error::DBError(e)); } + Err(e) => { + error!("Failed to load RewardCycleInfo for tip {}: {:?}", tip, &e); + return Err(chainstate_error::PoxNoRewardCycle); + } }; let reward_cycle = reward_info.reward_cycle; @@ -769,6 +793,7 @@ impl Relayer { /// Process nakamoto blocks. /// Log errors but do not return them. pub fn process_nakamoto_blocks( + burnchain: &Burnchain, sortdb: &SortitionDB, chainstate: &mut StacksChainState, blocks: impl Iterator, @@ -779,6 +804,7 @@ impl Relayer { for block in blocks { let block_id = block.block_id(); if let Err(e) = Self::process_new_nakamoto_block( + burnchain, sortdb, &mut sort_handle, chainstate, @@ -2028,6 +2054,7 @@ impl Relayer { &mut self, _local_peer: &LocalPeer, network_result: &mut NetworkResult, + burnchain: &Burnchain, sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, mempool: &mut MemPoolDB, @@ -2121,6 +2148,7 @@ impl Relayer { let nakamoto_blocks = std::mem::replace(&mut network_result.nakamoto_blocks, HashMap::new()); if let Err(e) = Relayer::process_nakamoto_blocks( + burnchain, sortdb, chainstate, nakamoto_blocks.into_values(), From 411c3c218cf2d50e4a460ca632bd4dac50490839 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:58:56 -0400 Subject: [PATCH 0167/1400] chore: API sync --- stackslib/src/net/tests/download/epoch2x.rs | 2 ++ stackslib/src/net/tests/download/nakamoto.rs | 10 ++++++++++ stackslib/src/net/tests/mod.rs | 1 + 3 files changed, 13 insertions(+) diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index 5e9ea0daf2..1f7a266596 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -329,10 +329,12 @@ where let mut result = peer.step_dns(&mut dns_clients[i]).unwrap(); let lp = peer.network.local_peer.clone(); + let burnchain = peer.network.burnchain.clone(); peer.with_db_state(|sortdb, chainstate, relayer, mempool| { relayer.process_network_result( &lp, &mut result, + &burnchain, sortdb, chainstate, mempool, diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 47dabd176e..62d2fdc0bb 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -448,6 +448,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -456,6 +457,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -523,6 +525,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -531,6 +534,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -624,6 +628,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -632,6 +637,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -724,6 +730,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -732,6 +739,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -803,6 +811,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -811,6 +820,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 5e2cb3e6cc..9e225d8f0d 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -230,6 +230,7 @@ impl NakamotoBootPlan { for block in blocks { let block_id = block.block_id(); let accepted = Relayer::process_new_nakamoto_block( + &peer.network.burnchain, &sortdb, &mut sort_handle, &mut node.chainstate, From 41339d1574bd85886d95fdb07bfa573542c733fe Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:59:09 -0400 Subject: [PATCH 0168/1400] fix: load the Nakamoto reward set from Nakamoto chainstate --- .../stacks-node/src/nakamoto_node/miner.rs | 86 +++++++++++++++++-- 1 file changed, 80 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ce0c417704..69b04c3a53 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -25,6 +25,8 @@ use libsigner::v1::messages::{MessageSlotID, SignerMessage}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::coordinator::OnChainRewardSetProvider; +use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; @@ -277,6 +279,13 @@ impl BlockMinerThread { }) })?; + let mut chain_state = + neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open chainstate DB. Cannot mine! {e:?}" + )) + })?; + let reward_cycle = self .burnchain .pox_constants @@ -290,8 +299,20 @@ impl BlockMinerThread { ) })?; - let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { - Ok(x) => x, + let reward_info = match load_nakamoto_reward_set( + tip.block_height, + &tip.sortition_id, + &self.burnchain, + &mut chain_state, + &sort_db, + &OnChainRewardSetProvider::new(), + ) { + Ok(Some((reward_info, _))) => reward_info, + Ok(None) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "No reward set stored yet. Cannot mine!".into(), + )); + } Err(e) => { return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" @@ -373,8 +394,27 @@ impl BlockMinerThread { ) .expect("FATAL: building on a burn block that is before the first burn block"); - let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { - Ok(x) => x, + let mut chain_state = + neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open chainstate DB. Cannot mine! {e:?}" + )) + })?; + + let reward_info = match load_nakamoto_reward_set( + tip.block_height, + &tip.sortition_id, + &self.burnchain, + &mut chain_state, + &sort_db, + &OnChainRewardSetProvider::new(), + ) { + Ok(Some((reward_info, _))) => reward_info, + Ok(None) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "No reward set stored yet. Cannot mine!".into(), + )); + } Err(e) => { return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" @@ -839,8 +879,42 @@ impl BlockMinerThread { let signer_transactions = self.get_signer_transactions(&mut chain_state, &burn_db, &stackerdbs)?; - let signer_bitvec_len = - &burn_db.get_preprocessed_reward_set_size(&self.burn_block.sortition_id); + let tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) + .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::DBError(e)))?; + + let reward_info = match load_nakamoto_reward_set( + tip.block_height, + &tip.sortition_id, + &self.burnchain, + &mut chain_state, + &burn_db, + &OnChainRewardSetProvider::new(), + ) { + Ok(Some((reward_info, _))) => reward_info, + Ok(None) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "No reward set stored yet. Cannot mine!".into(), + )); + } + Err(e) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" + ))); + } + }; + + let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Current reward cycle did not select a reward set. Cannot mine!".into(), + )); + }; + let signer_bitvec_len = reward_set + .signers + .as_ref() + .map(|x| x.len()) + .unwrap_or(0) + .try_into() + .ok(); // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( From 8508840eeb963a2334f4b19a126c8687c98747ed Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:59:38 -0400 Subject: [PATCH 0169/1400] chore: API sync --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index fc4ca1ae0d..b7cc1bc4f1 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -258,6 +258,7 @@ impl RelayerThread { .process_network_result( &self.local_peer, &mut net_result, + &self.burnchain, &mut self.sortdb, &mut self.chainstate, &mut self.mempool, @@ -416,11 +417,16 @@ impl RelayerThread { .unwrap_or_else(|| VRFProof::empty()); // let's figure out the recipient set! - let recipients = get_nakamoto_next_recipients(&sort_tip, &mut self.sortdb, &self.burnchain) - .map_err(|e| { - error!("Relayer: Failure fetching recipient set: {:?}", e); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; + let recipients = get_nakamoto_next_recipients( + &sort_tip, + &mut self.sortdb, + &mut self.chainstate, + &self.burnchain, + ) + .map_err(|e| { + error!("Relayer: Failure fetching recipient set: {:?}", e); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; let block_header = NakamotoChainState::get_block_header_by_consensus_hash(self.chainstate.db(), target_ch) From 32a1cd733d86bfa9e97981c21c2bca8fd1141012 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 10:59:49 -0400 Subject: [PATCH 0170/1400] chore: API sync --- testnet/stacks-node/src/neon_node.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index b6ac17e51e..48086c190b 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2727,6 +2727,7 @@ impl RelayerThread { .process_network_result( &relayer_thread.local_peer, &mut net_result, + &relayer_thread.burnchain, sortdb, chainstate, mempool, From b1a68b780a029868be35bdba2e0508a5c251fb95 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 11:00:01 -0400 Subject: [PATCH 0171/1400] chore: update Nakamoto integration test infrastructure to fetch the Nakamoto reward set from Nakamoto chainstate --- .../src/tests/nakamoto_integrations.rs | 28 +++++++++++++++---- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3c7e422e8d..2da7444c37 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -37,6 +37,8 @@ use stacks::chainstate::burn::operations::{ BlockstackOperationType, PreStxOp, StackStxOp, VoteForAggregateKeyOp, }; use stacks::chainstate::coordinator::comm::CoordinatorChannels; +use stacks::chainstate::coordinator::OnChainRewardSetProvider; +use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use stacks::chainstate::nakamoto::miner::NakamotoBlockBuilder; use stacks::chainstate::nakamoto::test_signers::TestSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; @@ -362,13 +364,29 @@ pub fn read_and_sign_block_proposal( ) -> Result { let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let reward_set = sortdb - .get_preprocessed_reward_set_of(&tip.sortition_id) - .expect("Failed to get reward cycle info") - .known_selected_anchor_block_owned() - .expect("Expected a reward set"); + let reward_set = load_nakamoto_reward_set( + tip.block_height, + &tip.sortition_id, + &burnchain, + &mut chainstate, + &sortdb, + &OnChainRewardSetProvider::new(), + ) + .expect("Failed to query reward set") + .expect("No reward set calculated") + .0 + .known_selected_anchor_block_owned() + .expect("Expected a reward set"); let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?; let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); From cee39a162674b80ff6a5691b2447ff3161d73e9b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 30 May 2024 11:33:15 -0400 Subject: [PATCH 0172/1400] fix: handle unconfirmed blocks correctly --- stackslib/src/chainstate/burn/db/sortdb.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index bba8257cbe..c698a1b7a0 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2658,7 +2658,16 @@ impl SortitionDB { chainstate: &StacksChainState, stacks_block_id: &StacksBlockId, ) -> Result, db_error> { - let header = match NakamotoChainState::get_block_header(chainstate.db(), stacks_block_id) { + let lookup_block_id = if let Some(ref unconfirmed_state) = chainstate.unconfirmed_state { + if &unconfirmed_state.unconfirmed_chain_tip == stacks_block_id { + &unconfirmed_state.confirmed_chain_tip + } else { + stacks_block_id + } + } else { + stacks_block_id + }; + let header = match NakamotoChainState::get_block_header(chainstate.db(), lookup_block_id) { Ok(Some(x)) => x, x => { debug!("Failed to get block header: {:?}", x); From 9a4f7752042bf40d42a3e50c2eee33f77830f817 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 30 May 2024 18:37:44 +0300 Subject: [PATCH 0173/1400] Add unhappy path cases for `StackExtendSigCommand_Err` This commit: - adds 5 unhappy path cases for the `stack-increase` PoX-4 method, called using a signature. - adds the command run tracking inside the `check` method. - adds the expected `stack-extend` PoX-4 errors to the `POX_4_ERRORS` dictionary. --- .../tests/pox-4/err_Commands.ts | 271 +++++++++++++++++- .../pox-4/pox_StackExtendSigCommand_Err.ts | 121 ++++++++ 2 files changed, 391 insertions(+), 1 deletion(-) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index e51283f3d1..27264b04fb 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -16,13 +16,21 @@ import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCom import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggregationCommitIndexedAuthCommand_Err"; import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncreaseCommand_Err"; -import { currentCycleFirstBlock, nextCycleFirstBlock } from "./pox_Commands"; +import { + currentCycle, + currentCycleFirstBlock, + FIRST_BURNCHAIN_BLOCK_HEIGHT, + nextCycleFirstBlock, + REWARD_CYCLE_LENGTH, +} from "./pox_Commands"; import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err"; +import { StackExtendSigCommand_Err } from "./pox_StackExtendSigCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_INSUFFICIENT_FUNDS: 1, + ERR_STACKING_INVALID_LOCK_PERIOD: 2, ERR_STACKING_ALREADY_STACKED: 3, ERR_STACKING_NO_SUCH_PRINCIPAL: 4, ERR_STACKING_PERMISSION_DENIED: 9, @@ -30,6 +38,7 @@ const POX_4_ERRORS = { ERR_STACKING_INVALID_AMOUNT: 18, ERR_STACKING_ALREADY_DELEGATED: 20, ERR_DELEGATION_TOO_MUCH_LOCKED: 22, + ERR_STACK_EXTEND_NOT_LOCKED: 26, ERR_STACKING_IS_DELEGATED: 30, ERR_DELEGATION_ALREADY_REVOKED: 34, }; @@ -1146,6 +1155,266 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, ), ), + // StackExtendSigCommand_Err_Stacking_Is_Delegated_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Is_Delegated_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackExtendSigCommand_Err_Stacking_Is_Delegated_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + !(stacker.poolMembers.length === 0) && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Is_Delegated_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackExtendSigCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Already_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, + ), + ), + // StackExtendSigCommand_Err_Stacking_Invalid_Lock_Period + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer(), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + !(totalPeriod <= 12) + ) { + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Invalid_Lock_Period", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD, + ), + ), + // StackExtendSigCommand_Err_Stack_Extend_Not_Locked + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + !stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + !(stacker.amountLocked > 0) && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendSigCommand_Err_Stack_Extend_Not_Locked", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts new file mode 100644 index 0000000000..185f2796d1 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts @@ -0,0 +1,121 @@ +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { currentCycle } from "./pox_Commands"; +import { Cl } from "@stacks/transactions"; +import { expect } from "vitest"; + +type CheckFunc = ( + this: StackExtendSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackExtendSigCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly extendCount: number; + readonly authId: number; + readonly currentCycle: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackExtendSigCommand` to lock uSTX for stacking. + * + * This command calls `stack-extend` using a `signature`. + * + * @param wallet - Represents the Stacker's wallet. + * @param extendCount - Represents the cycles to extend the stack with. + * @param authId - Unique auth-id for the authorization. + * @param currentCycle - Represents the current PoX reward cycle. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + extendCount: number, + authId: number, + currentCycle: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.extendCount = extendCount; + this.authId = authId; + this.currentCycle = currentCycle; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const signerSig = this.wallet.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.wallet.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For `stack-stx` and `stack-extend`, this refers to the reward cycle + // where the transaction is confirmed. For `stack-aggregation-commit`, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle, + // For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + // this refers to `extend-count`. For `stack-aggregation-commit`, this is + // `u1`. + period: this.extendCount, + // A string representing the function where this authorization is valid. + // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: Pox4SignatureTopic.StackExtend, + // The PoX address that can be used with this signer key. + poxAddress: this.wallet.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: stacker.amountLocked, + }); + + const stackExtend = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-extend", + [ + // (extend-count uint) + Cl.uint(this.extendCount), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.wallet.btcAddress), + // (signer-sig (optional (buff 65))) + Cl.some(Cl.bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + expect(stackExtend.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-extend-sig", + "extend-count", + this.extendCount.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-extend sig extend-count ${this.extendCount}`; + } +} From eaf9274f31a19bb3e33eeb5ac165535ca6f7c717 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 30 May 2024 18:52:05 +0300 Subject: [PATCH 0174/1400] Add unhappy path cases for `StackExtendAuthCommand_Err` This commit: - adds 5 unhappy path cases for the `stack-extend` PoX-4 method, called using an authorization. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 261 ++++++++++++++++++ .../pox-4/pox_StackExtendAuthCommand_Err.ts | 123 +++++++++ 2 files changed, 384 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 27264b04fb..83a9566605 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -27,6 +27,7 @@ import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err"; import { StackExtendSigCommand_Err } from "./pox_StackExtendSigCommand_Err"; +import { StackExtendAuthCommand_Err } from "./pox_StackExtendAuthCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_INSUFFICIENT_FUNDS: 1, @@ -1415,6 +1416,266 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, ), ), + // StackExtendAuthCommand_Err_Stacking_Is_Delegated_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Is_Delegated_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackExtendAuthCommand_Err_Stacking_Is_Delegated_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + !(stacker.poolMembers.length === 0) && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Is_Delegated_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackExtendAuthCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Already_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, + ), + ), + // StackExtendAuthCommand_Err_Stacking_Invalid_Lock_Period + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer(), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + !(totalPeriod <= 12) + ) { + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Invalid_Lock_Period", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD, + ), + ), + // StackExtendAuthCommand_Err_Stack_Extend_Not_Locked + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + !stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + !(stacker.amountLocked > 0) && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stack_Extend_Not_Locked", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts new file mode 100644 index 0000000000..e3deed040c --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts @@ -0,0 +1,123 @@ +import { poxAddressToTuple } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { currentCycle } from "./pox_Commands"; +import { Cl } from "@stacks/transactions"; +import { expect } from "vitest"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackExtendAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackExtendAuthCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly extendCount: number; + readonly authId: number; + readonly currentCycle: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackExtendAuthCommand` to lock uSTX for stacking. + * + * This command calls `stack-extend` using an `authorization`. + * + * @param wallet - Represents the Stacker's wallet. + * @param extendCount - Represents the cycles to extend the stack with. + * @param authId - Unique auth-id for the authorization. + * @param currentCycle - Represents the current PoX reward cycle. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + extendCount: number, + authId: number, + currentCycle: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.extendCount = extendCount; + this.authId = authId; + this.currentCycle = currentCycle; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.extendCount), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-extend"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-extend", + [ + // (extend-count uint) + Cl.uint(this.extendCount), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.wallet.btcAddress), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + ]); + + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-extend-auth", + "extend-count", + this.extendCount.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-extend auth extend-count ${this.extendCount}`; + } +} From a84819ab1812791b86f1fa0ae4b6563fe26b20eb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 30 May 2024 13:11:46 -0400 Subject: [PATCH 0175/1400] WIP: add a test for continue tenure Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/relayer.rs | 37 +-- .../src/tests/nakamoto_integrations.rs | 249 ++++++++++++++++++ 2 files changed, 270 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index e25b0c7aa0..2bb83c8db2 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -662,7 +662,16 @@ impl RelayerThread { error!("Relayer: failed to get last sortition snapshot: {e:?}"); NakamotoNodeError::SnapshotNotFoundForChainTip })?; - let Some(block_header) = NakamotoChainState::get_block_header_by_consensus_hash( + + if Some(block_snapshot.winning_block_txid) != self.current_mining_commit_tx { + debug!("Relayer: the miner did not win the last sortition. No tenure to continue."; + "current_mining_commit_tx" => %self.current_mining_commit_tx.unwrap_or(Txid([0u8; 32])), + "block_snapshot_winning_block_txid" => %block_snapshot.winning_block_txid + ); + return Ok(()); + }; + + let block_header = NakamotoChainState::get_block_header_by_consensus_hash( self.chainstate.db(), &block_snapshot.consensus_hash, ) @@ -670,17 +679,12 @@ impl RelayerThread { error!("Relayer: failed to get block header for the last sortition snapshsot: {e:?}"); NakamotoNodeError::MissingTenureStartBlockHeader })? - else { - error!("Relayer: failed to get block header for the last sortition snapshsot"); - return Err(NakamotoNodeError::MissingTenureStartBlockHeader); - }; - - if Some(block_snapshot.winning_block_txid) != self.current_mining_commit_tx { - debug!("Relayer: the miner did not win the last sortition. No tenure to continue."); - return Ok(()); - }; + .ok_or_else(|| { + error!("Relayer: failed to find block header for the last sortition snapshsot"); + NakamotoNodeError::MissingTenureStartBlockHeader + })?; - let Some(last_parent_tenure_header) = + let last_parent_tenure_header = NakamotoChainState::get_nakamoto_tenure_finish_block_header( self.chainstate.db(), &block_header.consensus_hash, @@ -689,10 +693,11 @@ impl RelayerThread { error!("Relayer: failed to get last block of parent tenure: {e:?}"); NakamotoNodeError::ParentNotFound })? - else { - warn!("Failed loading last block of parent tenure"; "consensus_hash" => %block_header.consensus_hash); - return Err(NakamotoNodeError::ParentNotFound); - }; + .ok_or_else(|| { + error!("Relayer: failed to find block header for parent tenure"); + NakamotoNodeError::ParentNotFound + })?; + let parent_tenure_info = ParentTenureInfo { parent_tenure_blocks: 1 + last_parent_tenure_header.stacks_block_height - block_header.stacks_block_height, @@ -744,7 +749,7 @@ impl RelayerThread { MinerDirective::ContinueTenure { new_burn_view } => { match self.continue_tenure(new_burn_view) { Ok(()) => { - debug!("Relayer: handled continue tenure."); + debug!("Relayer: successfully handled continue tenure."); } Err(e) => { error!("Relayer: Failed to continue tenure: {:?}", e); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 55eb6753bf..d1d9f5ebcb 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3895,3 +3895,252 @@ fn check_block_heights() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// This test makes three assertions: +/// * 30 blocks are mined after 3.0 starts. This is enough to mine across 2 reward cycles +/// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 +/// * The final chain tip is a nakamoto block +fn continue_tenure_extend() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 100; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + send_amt * 2 + send_fee, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + Some(&signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + // query for prometheus metrics + #[cfg(feature = "monitoring_prom")] + { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); + assert!(res.contains(&expected_result)); + } + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + // Mine a regular nakamoto tenures + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + + next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + TEST_SKIP_COMMIT_OP.lock().unwrap().replace(false); + // Submit a TX + let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + let mut mempool = naka_conf + .connect_mempool_db() + .expect("Database failure opening mempool"); + + mempool + .submit_raw( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + transfer_tx.clone(), + &ExecutionCost::max_value(), + &StacksEpochId::Epoch30, + ) + .unwrap(); + // Mine 15 more nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + // assert that the transfer tx was observed + let transfer_tx_included = test_observer::get_blocks() + .into_iter() + .find(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + .is_some() + }) + .is_some(); + + assert!( + transfer_tx_included, + "Nakamoto node failed to include the transfer tx" + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); + + // make sure prometheus returns an updated height + #[cfg(feature = "monitoring_prom")] + { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); + assert!(res.contains(&expected_result)); + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} From 6e5e35706275e1b30633bba0b5a44791509f1b39 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 30 May 2024 14:09:46 -0400 Subject: [PATCH 0176/1400] fix: update `supports_epoch` for version `5` This should have been included in #4812 but was overlooked. The fix solves the issue with the various Nakamoto integration tests. --- stackslib/src/chainstate/stacks/db/mod.rs | 40 +++++++++++++++++++---- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index f10a87dccc..865758ed01 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -294,16 +294,32 @@ impl DBConfig { || self.version == "2" || self.version == "3" || self.version == "4" + || self.version == "5" } StacksEpochId::Epoch2_05 => { - self.version == "2" || self.version == "3" || self.version == "4" + self.version == "2" + || self.version == "3" + || self.version == "4" + || self.version == "5" + } + StacksEpochId::Epoch21 => { + self.version == "3" || self.version == "4" || self.version == "5" + } + StacksEpochId::Epoch22 => { + self.version == "3" || self.version == "4" || self.version == "5" + } + StacksEpochId::Epoch23 => { + self.version == "3" || self.version == "4" || self.version == "5" + } + StacksEpochId::Epoch24 => { + self.version == "3" || self.version == "4" || self.version == "5" + } + StacksEpochId::Epoch25 => { + self.version == "3" || self.version == "4" || self.version == "5" + } + StacksEpochId::Epoch30 => { + self.version == "3" || self.version == "4" || self.version == "5" } - StacksEpochId::Epoch21 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch22 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch23 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch24 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch25 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch30 => self.version == "3" || self.version == "4", } } } @@ -2933,4 +2949,14 @@ pub mod test { MAINNET_2_0_GENESIS_ROOT_HASH ); } + + #[test] + fn latest_db_version_supports_latest_epoch() { + let db = DBConfig { + version: CHAINSTATE_VERSION.to_string(), + mainnet: true, + chain_id: CHAIN_ID_MAINNET, + }; + assert!(db.supports_epoch(StacksEpochId::latest())); + } } From e9685305e43e6b6d41d1e32e8d4f79e3613f3f97 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 30 May 2024 11:25:52 -0700 Subject: [PATCH 0177/1400] feat: additional checks for invalid signatures, duplicates, etc --- .../src/nakamoto_node/sign_coordinator.rs | 31 +++++++++++++++++-- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 0db0ee9e04..149eb84cbf 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -30,6 +30,8 @@ use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::stackerdb::StackerDBs; +use stacks::types::PublicKey; +use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; use stacks_common::bitvec::BitVec; @@ -732,9 +734,32 @@ impl SignCoordinator { "Signer entry not found".into(), )); }; - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); + let Ok(signer_pubkey) = + StacksPublicKey::from_slice(&signer_entry.signing_key) + else { + return Err(NakamotoNodeError::SignerSignatureError( + "Failed to parse signer public key".into(), + )); + }; + let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) + else { + warn!("Got invalid signature from a signer. Ignoring."); + continue; + }; + if !valid_sig { + warn!( + "Processed signature but didn't validate over the expected block. Ignoring"; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "slot_id" => slot_id, + ); + continue; + } + if !gathered_signatures.contains_key(&slot_id) { + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } debug!("SignCoordinator: Total weight signed: {total_weight_signed}"); gathered_signatures.insert(slot_id, signature); } From 634c8663b56e5037feb957f60d361425a77d3921 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 30 May 2024 14:30:39 -0400 Subject: [PATCH 0178/1400] test: add simple unit test for `SortitionDB` Ensures that the latest DB version supports the latest epoch. --- stackslib/src/chainstate/burn/db/sortdb.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e3802d6ec1..dc1e65f28d 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -10830,4 +10830,12 @@ pub mod tests { let db_epochs = SortitionDB::get_stacks_epochs(sortdb.conn()).unwrap(); assert_eq!(db_epochs, STACKS_EPOCHS_MAINNET.to_vec()); } + + #[test] + fn latest_db_version_supports_latest_epoch() { + assert!(SortitionDB::is_db_version_supported_in_epoch( + StacksEpochId::latest(), + SORTITION_DB_VERSION + )); + } } From 803cf7ba86c9aea01828ea23a3607b46c482cb8e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 30 May 2024 15:55:44 -0400 Subject: [PATCH 0179/1400] chore: improve tip used for index handles in `net` --- stackslib/src/net/mod.rs | 9 +++++-- stackslib/src/net/relay.rs | 52 ++++++++++++++++++++++---------------- 2 files changed, 37 insertions(+), 24 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index d270e396f9..ed456e30f4 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3532,7 +3532,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), block_txs, ) .unwrap(); @@ -3743,7 +3743,12 @@ pub mod test { |mut builder, ref mut miner, ref sortdb| { let (mut miner_chainstate, _) = StacksChainState::open(false, network_id, &chainstate_path, None).unwrap(); - let sort_iconn = sortdb.index_handle_at_tip(); + let sort_iconn = sortdb + .index_handle_at_block( + &miner_chainstate, + &builder.chain_tip.index_block_hash(), + ) + .unwrap(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index fa11b575d4..eefb2cd3dd 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -4047,21 +4047,20 @@ pub mod test { let chain_tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); + let iconn = sortdb + .index_handle_at_block(&stacks_node.chainstate, &chain_tip) + .unwrap(); let cur_nonce = stacks_node .chainstate - .with_read_only_clarity_tx( - &sortdb.index_handle_at_tip(), - &chain_tip, - |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_nonce( - &spending_account.origin_address().unwrap().into(), - ) - .unwrap() - }) - }, - ) + .with_read_only_clarity_tx(&iconn, &chain_tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_nonce( + &spending_account.origin_address().unwrap().into(), + ) + .unwrap() + }) + }) .unwrap(); test_debug!( @@ -5425,7 +5424,7 @@ pub mod test { let block = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), vec![coinbase_tx.clone()], ) .unwrap() @@ -5492,7 +5491,7 @@ pub mod test { StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), vec![coinbase_tx.clone(), bad_tx.clone()], ) { @@ -5514,7 +5513,7 @@ pub mod test { let bad_block = StacksBlockBuilder::make_anchored_block_from_txs( block_builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), vec![coinbase_tx.clone()], ) .unwrap(); @@ -5531,7 +5530,9 @@ pub mod test { let merkle_tree = MerkleTree::::new(&txid_vecs); bad_block.header.tx_merkle_root = merkle_tree.root(); - let sort_ic = sortdb.index_handle_at_tip(); + let sort_ic = sortdb + .index_handle_at_block(chainstate, &parent_index_hash) + .unwrap(); chainstate .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) .unwrap(); @@ -5816,7 +5817,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), vec![coinbase_tx], ) .unwrap(); @@ -5994,7 +5995,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), vec![coinbase_tx, versioned_contract], ) .unwrap(); @@ -6181,7 +6182,7 @@ pub mod test { let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( builder, chainstate, - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&tip.sortition_id), vec![coinbase_tx], ) .unwrap(); @@ -6220,8 +6221,12 @@ pub mod test { // tenure 28 let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); + let snapshot = + SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &consensus_hash) + .unwrap() + .unwrap(); match node.chainstate.will_admit_mempool_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&snapshot.sortition_id), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, @@ -6270,8 +6275,11 @@ pub mod test { // tenure 28 let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); + let snapshot = SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &consensus_hash) + .unwrap() + .unwrap(); match node.chainstate.will_admit_mempool_tx( - &sortdb.index_handle_at_tip(), + &sortdb.index_handle(&snapshot.sortition_id), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, From 01b18ebbea2a9c0dc16b01640f75d6c72199c5fd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 30 May 2024 16:39:43 -0400 Subject: [PATCH 0180/1400] fix: tip selection in net tests --- stackslib/src/net/mod.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index ed456e30f4..7d05bd3e12 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3743,12 +3743,7 @@ pub mod test { |mut builder, ref mut miner, ref sortdb| { let (mut miner_chainstate, _) = StacksChainState::open(false, network_id, &chainstate_path, None).unwrap(); - let sort_iconn = sortdb - .index_handle_at_block( - &miner_chainstate, - &builder.chain_tip.index_block_hash(), - ) - .unwrap(); + let sort_iconn = sortdb.index_handle_at_tip(); let mut miner_epoch_info = builder .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) From af000af2f52bbbac698778f8d319dfc9b0aa308d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 30 May 2024 15:45:55 -0500 Subject: [PATCH 0181/1400] use network.burnchain_tip for QuerySpecifier::Latest --- stackslib/src/net/api/getsortition.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index ed084a084d..1e2551eb83 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -194,10 +194,10 @@ impl RPCRequestHandler for GetSortitionHandler { node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { let result = - node.with_node_state(|_network, sortdb, _chainstate, _mempool, _rpc_args| { + node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { let query_result = match self.query { QuerySpecifier::Latest => { - SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).map(Some) + Ok(Some(network.burnchain_tip.clone())) }, QuerySpecifier::ConsensusHash(ref consensus_hash) => { SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) From 49787aec53eadc465e5efcc3c905e2971117a5a8 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 30 May 2024 19:27:13 -0400 Subject: [PATCH 0182/1400] fix: correct tip usage for sortition handle --- testnet/stacks-node/src/nakamoto_node/miner.rs | 3 +-- testnet/stacks-node/src/run_loop/helium.rs | 4 +++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 40799bafa9..3ebb12fd9f 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -481,8 +481,7 @@ impl BlockMinerThread { ) .expect("FATAL: could not open sortition DB"); - let mut sortition_handle = - sort_db.index_handle_at_block(&chain_state, &block.block_id())?; + let mut sortition_handle = sort_db.index_handle_at_tip(); let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; NakamotoChainState::accept_block( &chainstate_config, diff --git a/testnet/stacks-node/src/run_loop/helium.rs b/testnet/stacks-node/src/run_loop/helium.rs index 2db7a3a090..2922ce584a 100644 --- a/testnet/stacks-node/src/run_loop/helium.rs +++ b/testnet/stacks-node/src/run_loop/helium.rs @@ -223,7 +223,9 @@ impl RunLoop { &burnchain_tip, &chain_tip, &mut self.node.chain_state, - &burnchain.sortdb_ref().index_handle_at_tip(), + &burnchain + .sortdb_ref() + .index_handle(&burnchain_tip.block_snapshot.sortition_id), ); } }; From f963b354ed002e7c8ce8f34ef0dd6e74a333e6e1 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 30 May 2024 16:36:13 -0700 Subject: [PATCH 0183/1400] crc: helper method for RewardSet total weight --- stackslib/src/chainstate/nakamoto/mod.rs | 7 +++--- stackslib/src/chainstate/stacks/boot/mod.rs | 17 ++++++++++++++ .../src/nakamoto_node/sign_coordinator.rs | 22 ++++++++++--------- 3 files changed, 32 insertions(+), 14 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 33f6ff2109..d92b373bdd 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -516,10 +516,9 @@ impl NakamotoBlockHeader { // `last_index` is used to prevent out-of-order signatures let mut last_index = None; - let total_weight = signers.iter().map(|s| s.weight).fold(0, |w, acc| { - acc.checked_add(w) - .expect("FATAL: Total signer weight > u32::MAX") - }); + let total_weight = reward_set + .total_signing_weight() + .map_err(|_| ChainstateError::NoRegisteredSigners(0))?; // HashMap of let signers_by_pk: HashMap<_, _> = signers diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 01ca39be4a..e42f1a0dfa 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -277,6 +277,23 @@ impl RewardSet { pub fn metadata_deserialize(from: &str) -> Result { serde_json::from_str(from).map_err(|e| e.to_string()) } + + /// Return the total `weight` of all signers in the reward set. + /// If there are no reward set signers, a ChainstateError is returned. + pub fn total_signing_weight(&self) -> Result { + let Some(ref reward_set_signers) = self.signers else { + return Err(format!( + "Unable to calculate total weight - No signers in reward set" + )); + }; + Ok(reward_set_signers + .iter() + .map(|s| s.weight) + .fold(0, |s, acc| { + acc.checked_add(s) + .expect("FATAL: Total signer weight > u32::MAX") + })) + } } impl RewardSetData { diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 149eb84cbf..078a73590a 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -254,15 +254,10 @@ impl SignCoordinator { ..Default::default() }; - let total_weight = - reward_set_signers - .iter() - .cloned() - .map(|s| s.weight) - .fold(0, |w, acc| { - acc.checked_add(w) - .expect("FATAL: Total signer weight > u32::MAX") - }); + let total_weight = reward_set.total_signing_weight().map_err(|e| { + warn!("Failed to calculate total weight for the reward set: {e:?}"); + ChainstateError::NoRegisteredSigners(0) + })?; let threshold = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)?; @@ -760,7 +755,14 @@ impl SignCoordinator { .checked_add(signer_entry.weight) .expect("FATAL: total weight signed exceeds u32::MAX"); } - debug!("SignCoordinator: Total weight signed: {total_weight_signed}"); + debug!("Signature Added to block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + // "signer_weight" => signer_entry.weight // commented due to max size of `debug!` + "total_weight_signed" => total_weight_signed, + ); gathered_signatures.insert(slot_id, signature); } SignerMessageV0::BlockResponse(BlockResponse::Rejected(_)) => { From 4c35571c4cabac9889bf7c9b358d31e0d484380e Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 30 May 2024 16:44:07 -0700 Subject: [PATCH 0184/1400] fix: add missing comma to debug metadata --- testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 078a73590a..a0be82f06e 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -760,7 +760,7 @@ impl SignCoordinator { "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, "signature" => %signature, - // "signer_weight" => signer_entry.weight // commented due to max size of `debug!` + "signer_weight" => signer_entry.weight, "total_weight_signed" => total_weight_signed, ); gathered_signatures.insert(slot_id, signature); From e6893196861c615536a42607fd0cf6e81726135d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 21:59:41 -0400 Subject: [PATCH 0185/1400] chore: replace next_reward_cycle() with pox_reward_cycle() --- stackslib/src/burnchains/burnchain.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 0247a54512..52fef1a4f7 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -551,16 +551,18 @@ impl Burnchain { .reward_cycle_to_block_height(self.first_block_height, reward_cycle) } - pub fn next_reward_cycle(&self, block_height: u64) -> Option { + /// Compute the reward cycle ID of the PoX reward set which is active as of this burn_height. + /// The reward set is calculated at reward cycle index 1, so if this block height is at or after + /// reward cycle index 1, then this behaves like `block_height_to_reward_cycle()`. However, + /// if it's reward cycle index is 0, then it belongs to the previous reward cycle. + pub fn pox_reward_cycle(&self, block_height: u64) -> Option { let cycle = self.block_height_to_reward_cycle(block_height)?; let effective_height = block_height.checked_sub(self.first_block_height)?; - let next_bump = if effective_height % u64::from(self.pox_constants.reward_cycle_length) == 0 - { - 0 + if effective_height % u64::from(self.pox_constants.reward_cycle_length) == 0 { + Some(cycle.saturating_sub(1)) } else { - 1 - }; - Some(cycle + next_bump) + Some(cycle) + } } pub fn block_height_to_reward_cycle(&self, block_height: u64) -> Option { From 243dcd785d27b7636c68f3611313c46394ddc0c9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 22:00:10 -0400 Subject: [PATCH 0186/1400] fix: off-by-one error returned the wrong reward cycle --- stackslib/src/chainstate/burn/db/sortdb.rs | 23 ++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 4e8dd50f1e..3ee746971f 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3212,11 +3212,18 @@ impl SortitionDB { ) -> Result<(), db_error> { let pox_constants = self.pox_constants.clone(); for rc in 0..=(canonical_tip.block_height / u64::from(pox_constants.reward_cycle_length)) { - if pox_constants.reward_cycle_to_block_height(self.first_block_height, rc) - > canonical_tip.block_height - { + let rc_start = pox_constants.reward_cycle_to_block_height(self.first_block_height, rc); + if rc_start > canonical_tip.block_height { break; } + let epoch_at_height = SortitionDB::get_stacks_epoch(self.conn(), rc_start)? + .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", rc_start)) + .epoch_id; + + if epoch_at_height >= StacksEpochId::Epoch30 { + break; + } + info!("Regenerating reward set for cycle {}", &rc); migrator.regenerate_reward_cycle_info(self, rc)?; } @@ -3434,13 +3441,13 @@ impl SortitionDB { /// Store a pre-processed reward set. /// `sortition_id` is the first sortition ID of the prepare phase. - /// No-op if the reward set is empty. + /// No-op if the reward set has a selected-and-unknown anchor block. pub fn store_preprocessed_reward_set( sort_tx: &mut DBTx, sortition_id: &SortitionId, rc_info: &RewardCycleInfo, ) -> Result<(), db_error> { - if rc_info.known_selected_anchor_block().is_none() { + if !rc_info.is_reward_info_known() { return Ok(()); } let sql = "REPLACE INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; @@ -3777,12 +3784,8 @@ impl<'a> SortitionDBConn<'a> { db_error::NotFoundError })?; - // NOTE: the .saturating_sub(1) is necessary because the reward set is calculated in epoch - // 2.5 and lower at reward cycle index 1, not 0. This correction ensures that the last - // block is checked against the signers who were active just before the new reward set is - // calculated. let reward_cycle_id = pox_constants - .block_height_to_reward_cycle(first_block_height, tip_sn.block_height.saturating_sub(1)) + .block_height_to_reward_cycle(first_block_height, tip_sn.block_height) .expect("FATAL: stored snapshot with block height < first_block_height"); self.get_preprocessed_reward_set_for_reward_cycle( From 84b6f2ecb532a0dc8666eb56737264c9630f94e2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 22:00:26 -0400 Subject: [PATCH 0187/1400] chore: fmt --- stackslib/src/chainstate/coordinator/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 973dd83b53..b3e170987e 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -752,6 +752,7 @@ pub fn get_reward_cycle_info( ) -> Result, Error> { let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", burn_height)); + if !burnchain.is_reward_cycle_start(burn_height) { return Ok(None); } @@ -3531,6 +3532,7 @@ impl SortitionDBMigrator { .pox_constants .reward_cycle_to_block_height(sort_db.first_block_height, reward_cycle) .saturating_sub(1); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn())?; let ancestor_sn = { From 480c59ac4f01b0e49c81c9b94fba75da71614289 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 22:00:45 -0400 Subject: [PATCH 0188/1400] chore: fix off-by-one errors by basing the reward set retrieval logic on the *given* block height's reward cycle --- .../chainstate/nakamoto/coordinator/mod.rs | 87 ++++++++----------- 1 file changed, 37 insertions(+), 50 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index df4966da49..f703a23486 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -28,7 +28,7 @@ use stacks_common::types::{StacksEpoch, StacksEpochId}; use crate::burnchains::db::{BurnchainBlockData, BurnchainDB, BurnchainHeaderReader}; use crate::burnchains::{Burnchain, BurnchainBlockHeader}; -use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::db::sortdb::{get_ancestor_sort_id, SortitionDB}; use crate::chainstate::burn::operations::leader_block_commit::RewardSetInfo; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::comm::{ @@ -200,9 +200,13 @@ fn find_prepare_phase_sortitions( Ok(sns) } -/// Try to get the reward cycle information for a Nakamoto reward cycle. +/// Try to get the reward cycle information for a Nakamoto reward cycle, identified by the +/// burn_height. The reward cycle info returned will be from the reward cycle that is active as of +/// `burn_height`. `sortition_tip` can be any sortition ID that's at a higher height than +/// `burn_height`. +/// /// In Nakamoto, the PoX anchor block for reward cycle _R_ is the _first_ Stacks block mined in the -/// _last_ tenure of _R - 1_'s reward phase phase (i.e. which takes place toward the end of reward cycle). +/// _last_ tenure of _R - 1_'s reward phase (i.e. which takes place toward the end of reward cycle). /// The reason it must be this way is because its hash will be in the block-commit for the first /// prepare-phase tenure of cycle _R_ (which is required for the PoX ancestry query in the /// block-commit validation logic). @@ -230,13 +234,9 @@ pub fn get_nakamoto_reward_cycle_info( "FATAL: called a nakamoto function outside of epoch 3" ); - if !burnchain.is_in_prepare_phase(burn_height) { - return Err(Error::NotInPreparePhase); - } - // calculating the reward set for the _next_ reward cycle let reward_cycle = burnchain - .next_reward_cycle(burn_height) + .pox_reward_cycle(burn_height) .expect("FATAL: no reward cycle for burn height"); debug!("Processing reward set for Nakamoto reward cycle"; @@ -275,7 +275,8 @@ pub fn get_nakamoto_reward_cycle_info( return Ok(Some(rc_info)); } -/// Helper to get the Nakamoto reward set for a given reward cycle. +/// Helper to get the Nakamoto reward set for a given reward cycle, identified by `burn_height`. +/// /// In all but the first Nakamoto reward cycle, this will load up the stored reward set from the /// Nakamoto chain state. In the first Nakamoto reward cycle, where the reward set is computed /// from epoch2 state, the reward set will be loaded from the sortition DB (which is the only place @@ -295,12 +296,20 @@ pub fn load_nakamoto_reward_set( let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", burn_height)); - // calculating the reward set for the _next_ reward cycle let reward_cycle = burnchain - .next_reward_cycle(burn_height) + .pox_reward_cycle(burn_height) .expect("FATAL: no reward cycle for burn height"); - let reward_start_height = burnchain.reward_cycle_to_block_height(reward_cycle); + let prepare_end_height = burnchain + .reward_cycle_to_block_height(reward_cycle) + .saturating_sub(1); + let Some(prepare_end_sortition_id) = + get_ancestor_sort_id(&sort_db.index_conn(), prepare_end_height, sortition_tip)? + else { + // reward cycle is too far in the future + warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, prepare_end_height, sortition_tip); + return Ok(None); + }; // Find the first Stacks block in this reward cycle's preceding prepare phase. // This block will have invoked `.signers.stackerdb-set-signer-slots()` with the reward set. @@ -308,7 +317,7 @@ pub fn load_nakamoto_reward_set( // unique (and since Nakamoto Stacks blocks are processed in order, the anchor block // cannot change later). let first_epoch30_reward_cycle = burnchain - .next_reward_cycle(epoch_at_height.start_height) + .pox_reward_cycle(epoch_at_height.start_height) .expect("FATAL: no reward cycle for epoch 3.0 start height"); if epoch_at_height.epoch_id < StacksEpochId::Epoch30 @@ -319,14 +328,15 @@ pub fn load_nakamoto_reward_set( // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward // cycle info in the nakamoto chain state. if let Ok(persisted_reward_cycle_info) = - sort_db.get_preprocessed_reward_set_of(sortition_tip) + sort_db.get_preprocessed_reward_set_of(&prepare_end_sortition_id) { if persisted_reward_cycle_info .known_selected_anchor_block() .is_none() { debug!("No reward set known yet for prepare phase"; - "sortition_tip" => %sortition_tip); + "sortition_tip" => %sortition_tip, + "prepare_end_sortition_id" => %prepare_end_sortition_id); return Ok(None); } @@ -335,7 +345,8 @@ pub fn load_nakamoto_reward_set( else { // should be unreachable error!("No anchor block known for persisted reward set"; - "sortition_tip" => %sortition_tip); + "sortition_tip" => %sortition_tip, + "prepare_end_sortition_id" => %prepare_end_sortition_id); return Ok(None); }; @@ -343,14 +354,15 @@ pub fn load_nakamoto_reward_set( let Some(anchor_block_snapshot) = SortitionDB::get_block_snapshot_for_winning_stacks_block( &ic, - sortition_tip, + &prepare_end_sortition_id, anchor_block_hash, )? else { // should be unreachable error!("No ancestor block snapshot for anchor block"; "anchor_block_hash" => %anchor_block_hash, - "sortition_tip" => %sortition_tip); + "sortition_tip" => %sortition_tip, + "prepare_end_sortition_id" => %prepare_end_sortition_id); return Ok(None); }; @@ -378,11 +390,8 @@ pub fn load_nakamoto_reward_set( } // find the reward cycle's prepare-phase sortitions (in the preceding reward cycle) - let sort_end = sort_db.get_prepare_phase_end_sortition_id_for_reward_cycle( - sortition_tip, - reward_cycle.saturating_sub(1), - )?; - let prepare_phase_sortitions = find_prepare_phase_sortitions(sort_db, burnchain, &sort_end)?; + let prepare_phase_sortitions = + find_prepare_phase_sortitions(sort_db, burnchain, &prepare_end_sortition_id)?; // iterate over the prepare_phase_sortitions, finding the first such sortition // with a processed stacks block @@ -447,13 +456,11 @@ pub fn load_nakamoto_reward_set( "block_hash" => %stacks_block_hash, "consensus_hash" => %anchor_block_sn.consensus_hash, "txid" => %txid, - "reward_start_height" => %reward_start_height, + "prepare_end_height" => %prepare_end_height, "burnchain_height" => %anchor_block_sn.block_height); let reward_set = provider.get_reward_set_nakamoto( - // NOTE: the .saturating_sub(2) is needed here because reward_start_height is at reward - // index 1, while we need the highest height in the last cycle. - reward_start_height.saturating_sub(2), + prepare_end_height.saturating_sub(1), chain_state, burnchain, sort_db, @@ -972,42 +979,22 @@ impl< } }; - if self.burnchain.is_in_prepare_phase(header.block_height) { - // try to eagerly load up the reward cycle information, so we can persist it and - // make it available to signers. If we're at the _end_ of the prepare phase, then - // we have no choice but to block. - let reward_cycle_info = self.get_nakamoto_reward_cycle_info(header.block_height)?; - if let Some(rc_info) = reward_cycle_info { - // in nakamoto, if we have any reward cycle info at all, it will be known. - if rc_info.known_selected_anchor_block().is_none() { - warn!("Unknown PoX anchor block in Nakamoto (at height {}). Refusing to process more burnchain blocks until that changes.", header.block_height); - return Ok(false); - } - } - } - let reward_cycle_info = if self.burnchain.is_reward_cycle_start(header.block_height) { // we're at the end of the prepare phase, so we'd better have obtained the reward // cycle info of we must block. - // N.B. it's `- 2` because `is_reward_cycle_start` implies that `block_height % reward_cycle_length == 1`, - // but this call needs `block_height % reward_cycle_length == reward_cycle_length - 1` -- i.e. `block_height` - // must be the last block height in the last reward cycle. - let end_cycle_block_height = header.block_height.saturating_sub(2); - let reward_cycle_info = - self.get_nakamoto_reward_cycle_info(end_cycle_block_height)?; + let reward_cycle_info = self.get_nakamoto_reward_cycle_info(header.block_height)?; if let Some(rc_info) = reward_cycle_info.as_ref() { // in nakamoto, if we have any reward cycle info at all, it will be known. // otherwise, we may have to process some more Stacks blocks if rc_info.known_selected_anchor_block().is_none() { - warn!("Unknown PoX anchor block in Nakamoto (at height {}). Refusing to process more burnchain blocks until that changes.", end_cycle_block_height); + warn!("Unknown PoX anchor block in Nakamoto (at height {}). Refusing to process more burnchain blocks until that changes.", header.block_height); return Ok(false); } } else { // have to block -- we don't have the reward cycle information debug!("Do not yet have PoX anchor block for next reward cycle -- no anchor block found"; "next_reward_cycle" => self.burnchain.block_height_to_reward_cycle(header.block_height), - "reward_cycle_end" => end_cycle_block_height - ); + "block_height" => header.block_height); return Ok(false); } reward_cycle_info From 75d200eb0268d71160ff30a91241c6473588ac43 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 22:01:20 -0400 Subject: [PATCH 0189/1400] chore: log last tenure ID --- stackslib/src/chainstate/nakamoto/tests/node.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 1054f584b6..5b08a398fa 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -386,11 +386,12 @@ impl TestStacksNode { .unwrap(); test_debug!( - "Work in {} {} for Nakamoto parent: {},{}", + "Work in {} {} for Nakamoto parent: {},{}. Last tenure ID is {}", burn_block.block_height, burn_block.parent_snapshot.burn_header_hash, parent_sortition.total_burn, last_parent.header.chain_length + 1, + &parent_tenure_id, ); (parent_tenure_id, parent_sortition) @@ -420,11 +421,12 @@ impl TestStacksNode { let parent_tenure_id = parent_chain_tip.index_block_hash(); test_debug!( - "Work in {} {} for Stacks 2.x parent: {},{}", + "Work in {} {} for Stacks 2.x parent: {},{}. Last tenure ID is {}", burn_block.block_height, burn_block.parent_snapshot.burn_header_hash, parent_stacks_block_snapshot.total_burn, parent_chain_tip.anchored_header.height(), + &parent_tenure_id, ); (parent_tenure_id, parent_stacks_block_snapshot) From 04501144655ff0fc9691613899653f2eeef172de Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 30 May 2024 22:02:10 -0400 Subject: [PATCH 0190/1400] fix: a reward set can be identified by either the block hash (epoch 2) or the block ID (nakamoto) --- stackslib/src/net/p2p.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index a34c212e69..1d195323f5 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5474,7 +5474,7 @@ impl PeerNetwork { let ih = sortdb.index_handle(&tip_sn.sortition_id); for rc in [cur_rc, prev_rc, prev_prev_rc] { - let rc_start_height = self.burnchain.reward_cycle_to_block_height(rc) + 1; + let rc_start_height = self.burnchain.reward_cycle_to_block_height(rc); let Some(ancestor_sort_id) = get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? else { @@ -5486,7 +5486,12 @@ impl PeerNetwork { if let Some(cached_rc_info) = self.current_reward_sets.get(&rc) { if let Some(anchor_hash) = anchor_hash_opt.as_ref() { - if cached_rc_info.anchor_block_hash == *anchor_hash { + // careful -- the sortition DB stores a StacksBlockId's value (the tenure-start + // StacksBlockId) as a BlockHeaderHash, since that's what it was designed to + // deal with in the pre-Nakamoto days + if cached_rc_info.anchor_block_id() == StacksBlockId(anchor_hash.0.clone()) + || cached_rc_info.anchor_block_hash == *anchor_hash + { // cached reward set data is still valid continue; } @@ -5494,7 +5499,7 @@ impl PeerNetwork { } let Some((reward_set_info, anchor_block_header)) = load_nakamoto_reward_set( - rc, + rc_start_height, &tip_sn.sortition_id, &self.burnchain, chainstate, @@ -5519,6 +5524,11 @@ impl PeerNetwork { anchor_block_hash: anchor_block_header.anchored_header.block_hash(), }; + test_debug!( + "Store cached reward set for reward cycle {} anchor block {}", + rc, + &rc_info.anchor_block_hash + ); self.current_reward_sets.insert(rc, rc_info); } self.free_old_reward_cycles(cur_rc); From be39b1a97c4cec510eb04c902f76209ef332f234 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 31 May 2024 15:58:27 +0300 Subject: [PATCH 0191/1400] Add unhappy path cases for `DelegateStackExtendCommand_Err` This commit: - adds 4 unhappy path cases for the `delegate-stack-extend` PoX-4 method. - adds the command run tracking inside the `check` method. - adds the expected `delegate-stack-extend` PoX-4 error to the `POX_4_ERRORS` dictionary. --- .../tests/pox-4/err_Commands.ts | 302 ++++++++++++++++++ .../pox_DelegateStackExtendCommand_Err.ts | 96 ++++++ 2 files changed, 398 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 83a9566605..1ad4815cf7 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -28,6 +28,7 @@ import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err"; import { StackExtendSigCommand_Err } from "./pox_StackExtendSigCommand_Err"; import { StackExtendAuthCommand_Err } from "./pox_StackExtendAuthCommand_Err"; +import { DelegateStackExtendCommand_Err } from "./pox_DelegateStackExtendCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_INSUFFICIENT_FUNDS: 1, @@ -41,6 +42,7 @@ const POX_4_ERRORS = { ERR_DELEGATION_TOO_MUCH_LOCKED: 22, ERR_STACK_EXTEND_NOT_LOCKED: 26, ERR_STACKING_IS_DELEGATED: 30, + ERR_STACKING_NOT_DELEGATED: 31, ERR_DELEGATION_ALREADY_REVOKED: 34, }; @@ -1676,6 +1678,306 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, ), ), + // DelegateStackExtendCommand_Err_Stacking_Invalid_Lock_Period + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.constant(100000000000000), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + currentCycle: fc.constant(currentCycle(network)), + }).map((additionalProps) => ({ + ...r, + stacker: wallets.get(additionalProps.stacker)!, + currentCycle: additionalProps.currentCycle, + })); + }).map( + (final) => + new DelegateStackExtendCommand_Err( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + function ( + this: DelegateStackExtendCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + const firstRewardCycle = + this.currentCycle > stackerWallet.firstLockedRewardCycle + ? this.currentCycle + : stackerWallet.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stackerWallet.amountLocked; + + if ( + stackerWallet.amountLocked > 0 && + stackerWallet.hasDelegated === true && + stackerWallet.isStacking === true && + stackerWallet.delegatedTo === this.operator.stxAddress && + !(stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && + stackerWallet.delegatedMaxAmount >= stackedAmount && + operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + operatorWallet.lockedAddresses.includes( + this.stacker.stxAddress, + ) && + !(totalPeriod <= 12) + ) { + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stacking_Invalid_Lock_Period", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD, + ), + ), + // DelegateStackExtendCommand_Err_Stacking_Not_Delegated + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.integer({ min: 1, max: 11 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc + .record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + currentCycle: fc.constant(currentCycle(network)), + }) + .map((additionalProps) => ({ + ...r, + stacker: wallets.get(additionalProps.stacker)!, + currentCycle: additionalProps.currentCycle, + })); + }).map( + (final) => + new DelegateStackExtendCommand_Err( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + function ( + this: DelegateStackExtendCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + const firstRewardCycle = + this.currentCycle > stackerWallet.firstLockedRewardCycle + ? this.currentCycle + : stackerWallet.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stackerWallet.amountLocked; + + if ( + stackerWallet.amountLocked > 0 && + !(stackerWallet.hasDelegated === true) && + stackerWallet.isStacking === true && + stackerWallet.isStackingSolo === true && + !(stackerWallet.delegatedTo === this.operator.stxAddress) && + !(stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && + !(stackerWallet.delegatedMaxAmount >= stackedAmount) && + !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + !operatorWallet.lockedAddresses.includes( + this.stacker.stxAddress, + ) && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stacking_Not_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NOT_DELEGATED, + ), + ), + // DelegateStackExtendCommand_Err_Stack_Extend_Not_Locked + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.integer({ min: 1, max: 11 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + currentCycle: fc.constant(currentCycle(network)), + }).map((additionalProps) => ({ + ...r, + stacker: wallets.get(additionalProps.stacker)!, + currentCycle: additionalProps.currentCycle, + })); + }).map( + (final) => + new DelegateStackExtendCommand_Err( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + function ( + this: DelegateStackExtendCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + const firstRewardCycle = + this.currentCycle > stackerWallet.firstLockedRewardCycle + ? this.currentCycle + : stackerWallet.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stackerWallet.amountLocked; + if ( + !(stackerWallet.amountLocked > 0) && + stackerWallet.hasDelegated === true && + !(stackerWallet.isStacking === true) && + !(stackerWallet.delegatedTo === this.operator.stxAddress) && + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight && + stackerWallet.delegatedMaxAmount >= stackedAmount && + !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + !operatorWallet.lockedAddresses.includes( + this.stacker.stxAddress, + ) && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stack_Extend_Not_Locked", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, + ), + ), + // DelegateStackExtendCommand_Err_Stacking_Permission_Denied + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.integer({ min: 1, max: 11 }), + stacker: fc.constantFrom(...wallets.values()), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (final) => + new DelegateStackExtendCommand_Err( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + function ( + this: DelegateStackExtendCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + const firstRewardCycle = + this.currentCycle > stackerWallet.firstLockedRewardCycle + ? this.currentCycle + : stackerWallet.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stackerWallet.amountLocked; + + if ( + stackerWallet.amountLocked > 0 && + !(stackerWallet.hasDelegated === true) && + stackerWallet.isStacking === true && + !(stackerWallet.delegatedTo === this.operator.stxAddress) && + !(stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && + !(stackerWallet.delegatedMaxAmount >= stackedAmount) && + !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + operatorWallet.lockedAddresses.includes( + this.stacker.stxAddress, + ) && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stacking_Permission_Denied", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts new file mode 100644 index 0000000000..830fb7d182 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts @@ -0,0 +1,96 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +type CheckFunc = ( + this: DelegateStackExtendCommand_Err, + model: Readonly, +) => boolean; + +export class DelegateStackExtendCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly extendCount: number; + readonly currentCycle: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `DelegateStackExtendCommand_Err` to extend the unlock + * height as a Pool Operator on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. + * @param stacker - Represents the STacker's wallet. + * @param extendCount - Represents the cycles to be expended. + * @param currentCycle - Represents the current PoX reward cycle. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + stacker: Wallet, + extendCount: number, + currentCycle: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.stacker = stacker; + this.extendCount = extendCount; + this.currentCycle = currentCycle; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + + // Act + const delegateStackExtend = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-extend", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (extend-count uint) + Cl.uint(this.extendCount), + ], + this.operator.stxAddress, + ); + + expect(delegateStackExtend.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label} Ӿ ${this.stacker.label}`, + "delegate-stack-extend", + "extend count", + this.extendCount.toString(), + "new unlock height", + stackerWallet.unlockHeight.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} Ӿ ${this.stacker.label} delegate-stack-extend extend count ${this.extendCount}`; + } +} From 64e2ece5af054d8539a1031679f115afea42ce1e Mon Sep 17 00:00:00 2001 From: Nikos Baxevanis Date: Wed, 22 May 2024 00:08:29 +0200 Subject: [PATCH 0192/1400] feat(pox-4-tests): add check function delegate to PoxCommand-derived types - Added import for StackStxSigCommand_Err and StackStxAuthCommand_Err - Added StackStxAuthCommand_Err with a custom check function delegate to PoxCommands - Added StackStxSigCommand_Err with a custom check function delegate to PoxCommands This allows the check function to be parameterized, reducing the need for copy-pasting classes. Note: This is a very work in progress. --- .../tests/pox-4/pox_Commands.ts | 62 +++++++ .../pox-4/pox_StackStxAuthCommand_Err.ts | 172 ++++++++++++++++++ .../tests/pox-4/pox_StackStxSigCommand_Err.ts | 170 +++++++++++++++++ 3 files changed, 404 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts index bafbe38a43..628e1d2d7a 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts @@ -3,7 +3,9 @@ import { Real, Stacker, Stub, StxAddress, Wallet } from "./pox_CommandModel"; import { GetStackingMinimumCommand } from "./pox_GetStackingMinimumCommand"; import { GetStxAccountCommand } from "./pox_GetStxAccountCommand"; import { StackStxSigCommand } from "./pox_StackStxSigCommand"; +import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; import { StackStxAuthCommand } from "./pox_StackStxAuthCommand"; +import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { DelegateStxCommand } from "./pox_DelegateStxCommand"; import { DelegateStackStxCommand } from "./pox_DelegateStackStxCommand"; import { Simnet } from "@hirosystems/clarinet-sdk"; @@ -83,6 +85,36 @@ export function PoxCommands( r.margin, ) ), + // StackStxAuthCommand_Err + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this: StackStxAuthCommand_Err, model: Readonly): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + console.log("I in StackStxAuthCommand_Err stacker", stacker); + return ( + model.stackingMinimum > 0 && !stacker.isStacking && + !stacker.hasDelegated + ); + }, + 123, + ) + ), // StackExtendAuthCommand fc .record({ @@ -105,6 +137,36 @@ export function PoxCommands( r.currentCycle, ), ), + // StackStxSigCommand_Err + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this: StackStxSigCommand_Err, model: Readonly): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + console.log("I in StackStxSigCommand_Err stacker", stacker); + return ( + model.stackingMinimum > 0 && !stacker.isStacking && + !stacker.hasDelegated + ); + }, + 123, + ) + ), // StackExtendSigCommand fc .record({ diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts new file mode 100644 index 0000000000..ad310fef9a --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts @@ -0,0 +1,172 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { assert, expect } from "vitest"; +import { + Cl, + ClarityType, + ClarityValue, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; + +type CheckFunc = ( + this: StackStxAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackStxAuthCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly authId: number; + readonly period: number; + readonly margin: number; + + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackStxAuthCommand_Err` to lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param authId - Unique auth-id for the authorization. + * @param period - Number of reward cycles to lock uSTX. + * @param margin - Multiplier for minimum required uSTX to stack so that each + * Stacker locks a different amount of uSTX across test runs. + * @param checkFunc - A function to check constraints for running this command. + */ + constructor( + wallet: Wallet, + authId: number, + period: number, + margin: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.authId = authId; + this.period = period; + this.margin = margin; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const currentRewCycle = currentCycle(real.network); + + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. For our tests, we will use the minimum amount of uSTX to be stacked + // in the given reward cycle multiplied by the margin, which is a randomly + // generated number passed to the constructor of this class. + const maxAmount = model.stackingMinimum * this.margin; + + const { result: setAuthorization } = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.period), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-stx"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + expect(setAuthorization).toBeOk(Cl.bool(true)); + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + // The amount of uSTX to be locked in the reward cycle. For this test, we + // will use the maximum amount of uSTX that can be used (per tx) with this + // signer key. + const amountUstx = maxAmount; + + // Act + const stackStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-stx", + [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight), + // (lock-period uint) + Cl.uint(this.period), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + const { result: rewardCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight)], + this.wallet.stxAddress, + ); + assert(isClarityType(rewardCycle, ClarityType.UInt)); + + const { result: unlockBurnHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(Number(rewardCycle.value) + this.period + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); + + // Assert + expect(stackStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-stx-auth", + "lock-amount", + amountUstx.toString(), + "period", + this.period.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-stx auth auth-id ${this.authId} and period ${this.period}`; + } +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts new file mode 100644 index 0000000000..4c5f7ce149 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts @@ -0,0 +1,170 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { assert, expect } from "vitest"; +import { + Cl, + ClarityType, + ClarityValue, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; + +type CheckFunc = ( + this: StackStxSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackStxSigCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly authId: number; + readonly period: number; + readonly margin: number; + + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackStxSigCommand` to lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param authId - Unique auth-id for the authorization. + * @param period - Number of reward cycles to lock uSTX. + * @param margin - Multiplier for minimum required uSTX to stack so that each + * Stacker locks a different amount of uSTX across test runs. + * @param checkFunc - A function to check constraints for running this command. + */ + constructor( + wallet: Wallet, + authId: number, + period: number, + margin: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.authId = authId; + this.period = period; + this.margin = margin; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + model.trackCommandRun(this.constructor.name); + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + const currentRewCycle = currentCycle(real.network); + + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. For our tests, we will use the minimum amount of uSTX to be stacked + // in the given reward cycle multiplied by the margin, which is a randomly + // generated number passed to the constructor of this class. + const maxAmount = model.stackingMinimum * this.margin; + + const signerSig = this.wallet.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.wallet.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For `stack-stx` and `stack-extend`, this refers to the reward cycle + // where the transaction is confirmed. For `stack-aggregation-commit`, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle, + // For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + // this refers to `extend-count`. For `stack-aggregation-commit`, this is + // `u1`. + period: this.period, + // A string representing the function where this authorization is valid. + // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: Pox4SignatureTopic.StackStx, + // The PoX address that can be used with this signer key. + poxAddress: this.wallet.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: maxAmount, + }); + + // The amount of uSTX to be locked in the reward cycle. For this test, we + // will use the maximum amount of uSTX that can be used (per tx) with this + // signer key. + const amountUstx = maxAmount; + + // Act + const stackStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-stx", + [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight), + // (lock-period uint) + Cl.uint(this.period), + // (signer-sig (optional (buff 65))) + Cl.some(Cl.bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + const { result: rewardCycle } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight)], + this.wallet.stxAddress, + ); + assert(isClarityType(rewardCycle, ClarityType.UInt)); + + const { result: unlockBurnHeight } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-to-burn-height", + [Cl.uint(Number(rewardCycle.value) + this.period + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); + + // Assert + expect(stackStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-stx-sig", + "lock-amount", + amountUstx.toString(), + "period", + this.period.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-stx sig auth-id ${this.authId} and period ${this.period}`; + } +} From 7f4b53673c22fb7d9f908dfa1ec3b39a2fd33c7c Mon Sep 17 00:00:00 2001 From: Nikos Baxevanis Date: Wed, 22 May 2024 08:27:17 +0200 Subject: [PATCH 0193/1400] feat(pox-4-tests): add check function delegate to PoxCommand-derived types - Separate success paths from failure paths to keep pox_Commands.ts focused on success cases only. This prevents the file from growing with out-of-scope cases. Note: This is a work in progress. --- .../tests/pox-4/err_Commands.ts | 74 +++++++++++++++++++ .../tests/pox-4/pox-4.stateful-prop.test.ts | 8 +- .../tests/pox-4/pox_Commands.ts | 70 +----------------- .../pox-4/pox_StackStxAuthCommand_Err.ts | 2 +- .../tests/pox-4/pox_StackStxSigCommand_Err.ts | 2 +- 5 files changed, 86 insertions(+), 70 deletions(-) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts new file mode 100644 index 0000000000..2e4259f740 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -0,0 +1,74 @@ +import fc from "fast-check"; +import { PoxCommand, Stacker, Stub, StxAddress, Wallet } from "./pox_CommandModel"; +import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; +import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; +import { Simnet } from "@hirosystems/clarinet-sdk"; + +export function ErrCommands( + wallets: Map, + stackers: Map, + network: Simnet, +): fc.Arbitrary[] { + const cmds = [ + // StackStxAuthCommand_Err + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this: StackStxAuthCommand_Err, model: Readonly): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + return ( + model.stackingMinimum > 0 && !stacker.isStacking && + !stacker.hasDelegated + ); + }, + 123, + ) + ), + // StackStxSigCommand_Err + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function (this: StackStxSigCommand_Err, model: Readonly): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + return ( + model.stackingMinimum > 0 && !stacker.isStacking && + !stacker.hasDelegated + ); + }, + 123, + ) + ), + ]; + + return cmds; +} diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index bf8b63ffe7..a2be09f593 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -16,6 +16,7 @@ import { StackingClient } from "@stacks/stacking"; import fc from "fast-check"; import { PoxCommands } from "./pox_Commands.ts"; +import { ErrCommands } from "./err_Commands.ts"; import fs from "fs"; import path from "path"; @@ -143,9 +144,14 @@ it("statefully interacts with PoX-4", async () => { simnet.setEpoch("3.0"); + const successPath = PoxCommands(model.wallets, model.stackers, sut.network); + const failurePath = ErrCommands(model.wallets, model.stackers, sut.network); + fc.assert( fc.property( - PoxCommands(model.wallets, model.stackers, sut.network), + // More on size: https://github.com/dubzzz/fast-check/discussions/2978 + // More on cmds: https://github.com/dubzzz/fast-check/discussions/3026 + fc.commands(successPath.concat(failurePath), { size: "xsmall" }), (cmds) => { const initialState = () => ({ model: model, real: sut }); fc.modelRun(initialState, cmds); diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts index 628e1d2d7a..0a1fd6f3ea 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts @@ -1,11 +1,9 @@ import fc from "fast-check"; -import { Real, Stacker, Stub, StxAddress, Wallet } from "./pox_CommandModel"; +import { PoxCommand, Stacker, StxAddress, Wallet } from "./pox_CommandModel"; import { GetStackingMinimumCommand } from "./pox_GetStackingMinimumCommand"; import { GetStxAccountCommand } from "./pox_GetStxAccountCommand"; import { StackStxSigCommand } from "./pox_StackStxSigCommand"; -import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; import { StackStxAuthCommand } from "./pox_StackStxAuthCommand"; -import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { DelegateStxCommand } from "./pox_DelegateStxCommand"; import { DelegateStackStxCommand } from "./pox_DelegateStackStxCommand"; import { Simnet } from "@hirosystems/clarinet-sdk"; @@ -29,7 +27,7 @@ export function PoxCommands( wallets: Map, stackers: Map, network: Simnet, -): fc.Arbitrary>> { +): fc.Arbitrary[] { const cmds = [ // GetStackingMinimumCommand fc.record({ @@ -85,36 +83,6 @@ export function PoxCommands( r.margin, ) ), - // StackStxAuthCommand_Err - fc.record({ - wallet: fc.constantFrom(...wallets.values()), - authId: fc.nat(), - period: fc.integer({ min: 1, max: 12 }), - margin: fc.integer({ min: 1, max: 9 }), - }).map(( - r: { - wallet: Wallet; - authId: number; - period: number; - margin: number; - }, - ) => - new StackStxAuthCommand_Err( - r.wallet, - r.authId, - r.period, - r.margin, - function (this: StackStxAuthCommand_Err, model: Readonly): boolean { - const stacker = model.stackers.get(this.wallet.stxAddress)!; - console.log("I in StackStxAuthCommand_Err stacker", stacker); - return ( - model.stackingMinimum > 0 && !stacker.isStacking && - !stacker.hasDelegated - ); - }, - 123, - ) - ), // StackExtendAuthCommand fc .record({ @@ -137,36 +105,6 @@ export function PoxCommands( r.currentCycle, ), ), - // StackStxSigCommand_Err - fc.record({ - wallet: fc.constantFrom(...wallets.values()), - authId: fc.nat(), - period: fc.integer({ min: 1, max: 12 }), - margin: fc.integer({ min: 1, max: 9 }), - }).map(( - r: { - wallet: Wallet; - authId: number; - period: number; - margin: number; - }, - ) => - new StackStxSigCommand_Err( - r.wallet, - r.authId, - r.period, - r.margin, - function (this: StackStxSigCommand_Err, model: Readonly): boolean { - const stacker = model.stackers.get(this.wallet.stxAddress)!; - console.log("I in StackStxSigCommand_Err stacker", stacker); - return ( - model.stackingMinimum > 0 && !stacker.isStacking && - !stacker.hasDelegated - ); - }, - 123, - ) - ), // StackExtendSigCommand fc .record({ @@ -514,9 +452,7 @@ export function PoxCommands( ), ]; - // More on size: https://github.com/dubzzz/fast-check/discussions/2978 - // More on cmds: https://github.com/dubzzz/fast-check/discussions/3026 - return fc.commands(cmds, { size: "xsmall" }); + return cmds; } export const REWARD_CYCLE_LENGTH = 1050; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts index ad310fef9a..e1d0a2e113 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts @@ -26,7 +26,6 @@ export class StackStxAuthCommand_Err implements PoxCommand { readonly authId: number; readonly period: number; readonly margin: number; - readonly checkFunc: CheckFunc; readonly errorCode: number; @@ -39,6 +38,7 @@ export class StackStxAuthCommand_Err implements PoxCommand { * @param margin - Multiplier for minimum required uSTX to stack so that each * Stacker locks a different amount of uSTX across test runs. * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. */ constructor( wallet: Wallet, diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts index 4c5f7ce149..db6af5c5ba 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts @@ -26,7 +26,6 @@ export class StackStxSigCommand_Err implements PoxCommand { readonly authId: number; readonly period: number; readonly margin: number; - readonly checkFunc: CheckFunc; readonly errorCode: number; @@ -39,6 +38,7 @@ export class StackStxSigCommand_Err implements PoxCommand { * @param margin - Multiplier for minimum required uSTX to stack so that each * Stacker locks a different amount of uSTX across test runs. * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. */ constructor( wallet: Wallet, From 124abd4e2d6474c06e0561a556aba53aed85e65f Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Wed, 22 May 2024 18:32:24 +0300 Subject: [PATCH 0194/1400] Remove command tracking from the command's `run` method The command run tracking will be added to the command's `check` method. --- .../tests/pox-4/pox_StackStxAuthCommand_Err.ts | 1 - .../tests/pox-4/pox_StackStxSigCommand_Err.ts | 1 - 2 files changed, 2 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts index e1d0a2e113..35212e0320 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts @@ -59,7 +59,6 @@ export class StackStxAuthCommand_Err implements PoxCommand { check = (model: Readonly): boolean => this.checkFunc.call(this, model); run(model: Stub, real: Real): void { - model.trackCommandRun(this.constructor.name); const currentRewCycle = currentCycle(real.network); // The maximum amount of uSTX that can be used (per tx) with this signer diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts index db6af5c5ba..58092109f0 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts @@ -59,7 +59,6 @@ export class StackStxSigCommand_Err implements PoxCommand { check = (model: Readonly): boolean => this.checkFunc.call(this, model); run(model: Stub, real: Real): void { - model.trackCommandRun(this.constructor.name); const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); const burnBlockHeight = Number( cvToValue(burnBlockHeightCV as ClarityValue), From dfb72304508f5510e9203fda275a1e3b5d7ce88e Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Wed, 22 May 2024 18:36:52 +0300 Subject: [PATCH 0195/1400] Pass the incremented burn height when calling `stack-stx` If not passed incremented, the call will result in an `ERR_INVALID_START_BURN_HEIGHT` when being sent at the limit between 2 cycles. --- .../tests/pox-4/pox_StackStxAuthCommand_Err.ts | 2 +- .../tests/pox-4/pox_StackStxSigCommand_Err.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts index 35212e0320..6889e89917 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts @@ -112,7 +112,7 @@ export class StackStxAuthCommand_Err implements PoxCommand { // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) poxAddressToTuple(this.wallet.btcAddress), // (start-burn-ht uint) - Cl.uint(burnBlockHeight), + Cl.uint(burnBlockHeight + 1), // (lock-period uint) Cl.uint(this.period), // (signer-sig (optional (buff 65))) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts index 58092109f0..f9c2cdc8d4 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts @@ -110,7 +110,7 @@ export class StackStxSigCommand_Err implements PoxCommand { // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) poxAddressToTuple(this.wallet.btcAddress), // (start-burn-ht uint) - Cl.uint(burnBlockHeight), + Cl.uint(burnBlockHeight + 1), // (lock-period uint) Cl.uint(this.period), // (signer-sig (optional (buff 65))) From 1de982d87ab1b82f6280b926df3bbaec57c38de8 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Wed, 22 May 2024 19:01:19 +0300 Subject: [PATCH 0196/1400] Add the unhappy path cases for `StackStxXCommand_Err` This commit: - adds 6 unhappy path cases for the `stack-stx` PoX-4 method, 3 for each signing method (authorization or signature) - adds a dictionary that contains the PoX-4 error names and the error codes - adds the command run tracking inside the `check` method, resulting in displaying all the paths hit and the number of times. --- .../tests/pox-4/err_Commands.ts | 207 ++++++++++++++++-- 1 file changed, 194 insertions(+), 13 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 2e4259f740..08a911e68a 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -1,16 +1,27 @@ import fc from "fast-check"; -import { PoxCommand, Stacker, Stub, StxAddress, Wallet } from "./pox_CommandModel"; +import { + PoxCommand, + Stacker, + Stub, + StxAddress, + Wallet, +} from "./pox_CommandModel"; import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { Simnet } from "@hirosystems/clarinet-sdk"; +const POX_4_ERRORS = { + ERR_STACKING_ALREADY_STACKED: 3, + ERR_STACKING_ALREADY_DELEGATED: 20, +}; + export function ErrCommands( wallets: Map, stackers: Map, network: Simnet, ): fc.Arbitrary[] { const cmds = [ - // StackStxAuthCommand_Err + // StackStxAuthCommand_Err_Stacking_Already_Stacked_1 fc.record({ wallet: fc.constantFrom(...wallets.values()), authId: fc.nat(), @@ -29,17 +40,102 @@ export function ErrCommands( r.authId, r.period, r.margin, - function (this: StackStxAuthCommand_Err, model: Readonly): boolean { + function ( + this: StackStxAuthCommand_Err, + model: Readonly, + ): boolean { const stacker = model.stackers.get(this.wallet.stxAddress)!; - return ( - model.stackingMinimum > 0 && !stacker.isStacking && + if ( + model.stackingMinimum > 0 && + stacker.isStacking && !stacker.hasDelegated - ); + ) { + model.trackCommandRun( + "StackStxAuthCommand_Err_Stacking_Already_Stacked_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, + ) + ), + // StackStxAuthCommand_Err_Stacking_Already_Stacked_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function ( + this: StackStxAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.hasDelegated + ) { + model.trackCommandRun( + "StackStxAuthCommand_Err_Stacking_Already_Stacked_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, + ) + ), + // StackStxAuthCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxAuthCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function ( + this: StackStxAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + !stacker.isStacking && + stacker.hasDelegated + ) { + model.trackCommandRun( + "StackStxAuthCommand_Err_Stacking_Already_Delegated", + ); + return true; + } else return false; }, - 123, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ) ), - // StackStxSigCommand_Err + // StackStxSigCommand_Err_Stacking_Already_Stacked_1 fc.record({ wallet: fc.constantFrom(...wallets.values()), authId: fc.nat(), @@ -58,14 +154,99 @@ export function ErrCommands( r.authId, r.period, r.margin, - function (this: StackStxSigCommand_Err, model: Readonly): boolean { + function ( + this: StackStxSigCommand_Err, + model: Readonly, + ): boolean { const stacker = model.stackers.get(this.wallet.stxAddress)!; - return ( - model.stackingMinimum > 0 && !stacker.isStacking && + if ( + model.stackingMinimum > 0 && + stacker.isStacking && !stacker.hasDelegated - ); + ) { + model.trackCommandRun( + "StackStxSigCommand_Err_Stacking_Already_Stacked_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, + ) + ), + // StackStxSigCommand_Err_Stacking_Already_Stacked_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function ( + this: StackStxSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.hasDelegated + ) { + model.trackCommandRun( + "StackStxSigCommand_Err_Stacking_Already_Stacked_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, + ) + ), + // StackStxSigCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + period: fc.integer({ min: 1, max: 12 }), + margin: fc.integer({ min: 1, max: 9 }), + }).map(( + r: { + wallet: Wallet; + authId: number; + period: number; + margin: number; + }, + ) => + new StackStxSigCommand_Err( + r.wallet, + r.authId, + r.period, + r.margin, + function ( + this: StackStxSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + !stacker.isStacking && + stacker.hasDelegated + ) { + model.trackCommandRun( + "StackStxSigCommand_Err_Stacking_Already_Delegated", + ); + return true; + } else return false; }, - 123, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ) ), ]; From be6966c3603d573a4148363806f441f0e3220167 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Wed, 22 May 2024 19:02:27 +0300 Subject: [PATCH 0197/1400] Remove `StackStxXCommand.ts` from statistics They needed to be excluded as we have removed the command run tracking from the run method. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index a2be09f593..ceadd9ec4e 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -109,7 +109,9 @@ it("statefully interacts with PoX-4", async () => { // commands are run at least once. const statistics = fs.readdirSync(path.join(__dirname)).filter((file) => file.startsWith("pox_") && file.endsWith(".ts") && - file !== "pox_CommandModel.ts" && file !== "pox_Commands.ts" + file !== "pox_CommandModel.ts" && file !== "pox_Commands.ts" && + file !== "pox_StackStxAuthCommand_Err.ts" && + file !== "pox_StackStxSigCommand_Err.ts" ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. // This is the initial state of the model. @@ -159,7 +161,7 @@ it("statefully interacts with PoX-4", async () => { ), { // Defines the number of test iterations to run; default is 100. - numRuns: 1000, + numRuns: 20000, // Adjusts the level of detail in test reports. Default is 0 (minimal). // At level 2, reports include extensive details, helpful for deep // debugging. This includes not just the failing case and its seed, but From 544382cc0d351aa594c8155a25897cff86a2a6c1 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 24 May 2024 14:08:26 +0300 Subject: [PATCH 0198/1400] =?UTF-8?q?Add=20unhappy=20path=20for=C2=A0`revo?= =?UTF-8?q?ke-delegate-stx`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The added unhappy path tries to call revoke-delegate-stx with an address that is not delegating. --- .../tests/pox-4/err_Commands.ts | 30 +++++++++ .../pox-4/pox_RevokeDelegateStxCommand_Err.ts | 66 +++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 08a911e68a..a7e05dc5c7 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -9,10 +9,12 @@ import { import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { Simnet } from "@hirosystems/clarinet-sdk"; +import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, ERR_STACKING_ALREADY_DELEGATED: 20, + ERR_DELEGATION_ALREADY_REVOKED: 34, }; export function ErrCommands( @@ -249,6 +251,34 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ) ), + // RevokeDelegateStxCommand_Err_Delegation_Already_Revoked + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + }).map(( + r: { + wallet: Wallet; + }, + ) => + new RevokeDelegateStxCommand_Err( + r.wallet, + function ( + this: RevokeDelegateStxCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + !stacker.hasDelegated + ) { + model.trackCommandRun( + "RevokeDelegateStxCommand_Err_Delegation_Already_Revoked", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_DELEGATION_ALREADY_REVOKED, + ) + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts new file mode 100644 index 0000000000..60b3439e8e --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts @@ -0,0 +1,66 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +type CheckFunc = ( + this: RevokeDelegateStxCommand_Err, + model: Readonly, +) => boolean; + +export class RevokeDelegateStxCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `RevokeDelegateStxCommand_Err` to revoke delegate uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor(wallet: Wallet, checkFunc: CheckFunc, errorCode: number) { + this.wallet = wallet; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + // Act + const revokeDelegateStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "revoke-delegate-stx", + [], + this.wallet.stxAddress, + ); + + // Assert + expect(revokeDelegateStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "revoke-delegate-stx", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.stxAddress} revoke-delegate-stx`; + } +} From 3227bc41c31107a34f06570721ec059e7198688d Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 24 May 2024 14:16:45 +0300 Subject: [PATCH 0199/1400] Remove `RevokeDelegateStxCommand_Err` from statistics The command run tracking was moved inside the command's check function. No need to report the run using the file name anymore. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index ceadd9ec4e..2f811b0db1 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -111,7 +111,8 @@ it("statefully interacts with PoX-4", async () => { file.startsWith("pox_") && file.endsWith(".ts") && file !== "pox_CommandModel.ts" && file !== "pox_Commands.ts" && file !== "pox_StackStxAuthCommand_Err.ts" && - file !== "pox_StackStxSigCommand_Err.ts" + file !== "pox_StackStxSigCommand_Err.ts" && + file !== "pox_RevokeDelegateStxCommand_Err.ts" ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. // This is the initial state of the model. From e7610e3fa1b14bd52760f17a6f219e32517963a4 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 24 May 2024 14:21:58 +0300 Subject: [PATCH 0200/1400] =?UTF-8?q?Add=20unhappy=20path=20for=C2=A0`dele?= =?UTF-8?q?gate-stx`?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The added unhappy path tries to call delegate-stx with an address that is already delegating. --- .../tests/pox-4/err_Commands.ts | 39 +++++++ .../tests/pox-4/pox_DelegateStxCommand_Err.ts | 104 ++++++++++++++++++ 2 files changed, 143 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index a7e05dc5c7..6a6c3b7028 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -10,6 +10,7 @@ import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { Simnet } from "@hirosystems/clarinet-sdk"; import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err"; +import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, @@ -279,6 +280,44 @@ export function ErrCommands( POX_4_ERRORS.ERR_DELEGATION_ALREADY_REVOKED, ) ), + // DelegateStxCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + delegateTo: fc.constantFrom(...wallets.values()), + untilBurnHt: fc.integer({ min: 1 }), + amount: fc.bigInt({ min: 0n, max: 100_000_000_000_000n }), + }) + .map(( + r: { + wallet: Wallet; + delegateTo: Wallet; + untilBurnHt: number; + amount: bigint; + }, + ) => + new DelegateStxCommand_Err( + r.wallet, + r.delegateTo, + r.untilBurnHt, + r.amount, + function ( + this: DelegateStxCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.hasDelegated + ) { + model.trackCommandRun( + "DelegateStxCommand_Err_Stacking_Already_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, + ) + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand_Err.ts new file mode 100644 index 0000000000..138d99265f --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand_Err.ts @@ -0,0 +1,104 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +type CheckFunc = ( + this: DelegateStxCommand_Err, + model: Readonly, +) => boolean; + +export class DelegateStxCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly delegateTo: Wallet; + readonly untilBurnHt: number; + readonly amount: bigint; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `DelegateStxCommand_Err` to delegate uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param delegateTo - Represents the Delegatee's STX address. + * @param untilBurnHt - The burn block height until the delegation is valid. + * @param amount - The maximum amount the `Stacker` delegates the `Delegatee` + * to stack on his behalf. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + delegateTo: Wallet, + untilBurnHt: number, + amount: bigint, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.delegateTo = delegateTo; + this.untilBurnHt = untilBurnHt; + this.amount = amount; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + // The amount of uSTX delegated by the Stacker to the Delegatee. + // Even if there are no constraints about the delegated amount, + // it will be checked in the future, when calling delegate-stack-stx. + const amountUstx = Number(this.amount); + + // Act + const delegateStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stx", + [ + // (amount-ustx uint) + Cl.uint(amountUstx), + // (delegate-to principal) + Cl.principal(this.delegateTo.stxAddress), + // (until-burn-ht (optional uint)) + Cl.some(Cl.uint(this.untilBurnHt)), + // (pox-addr (optional { version: (buff 1), hashbytes: (buff 32) })) + Cl.some(poxAddressToTuple(this.delegateTo.btcAddress)), + ], + this.wallet.stxAddress, + ); + + // Assert + expect(delegateStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "delegate-stx", + "amount", + amountUstx.toString(), + "delegated to", + this.delegateTo.label, + "until", + this.untilBurnHt.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} delegate-stx to ${this.delegateTo.label} until burn ht ${this.untilBurnHt}`; + } +} From 232d38830439598596e909839bd668e9d5fb9f1c Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 24 May 2024 14:24:20 +0300 Subject: [PATCH 0201/1400] Remove `DelegateStxCommand_Err` from statistics The command run tracking was moved inside the command's check function. No need to report the run using the file name anymore. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index 2f811b0db1..25adfde9d9 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -112,7 +112,8 @@ it("statefully interacts with PoX-4", async () => { file !== "pox_CommandModel.ts" && file !== "pox_Commands.ts" && file !== "pox_StackStxAuthCommand_Err.ts" && file !== "pox_StackStxSigCommand_Err.ts" && - file !== "pox_RevokeDelegateStxCommand_Err.ts" + file !== "pox_RevokeDelegateStxCommand_Err.ts" && + file !== "pox_DelegateStxCommand_Err.ts" ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. // This is the initial state of the model. From bdcee6bd8eadcb6a824b71bb2aa0d9aa86b2b995 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 24 May 2024 16:26:27 +0300 Subject: [PATCH 0202/1400] Use simnet `mineBlock` inside `StackStxAuthCommand_Err` This commit: - includes the authorization and the function call in the same block. It is needed because otherwise, it can result in issuing the authorization for the wrong reward cycle. - updates the passed start-burn-ht param, different from the StackStxSigCommand. If not doing it like this, the test fails when the command is called at the limit between 2 reward cycles. - removes unnecessary operations: retrieving the reward cycle, retrieving the unlockBurnHeight. --- .../pox-4/pox_StackStxAuthCommand_Err.ts | 102 +++++++----------- 1 file changed, 39 insertions(+), 63 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts index 6889e89917..37f32a5458 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand_Err.ts @@ -6,15 +6,10 @@ import { Wallet, } from "./pox_CommandModel.ts"; import { poxAddressToTuple } from "@stacks/stacking"; -import { assert, expect } from "vitest"; -import { - Cl, - ClarityType, - ClarityValue, - cvToValue, - isClarityType, -} from "@stacks/transactions"; +import { expect } from "vitest"; +import { Cl, ClarityValue, cvToValue } from "@stacks/transactions"; import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; type CheckFunc = ( this: StackStxAuthCommand_Err, @@ -66,53 +61,50 @@ export class StackStxAuthCommand_Err implements PoxCommand { // in the given reward cycle multiplied by the margin, which is a randomly // generated number passed to the constructor of this class. const maxAmount = model.stackingMinimum * this.margin; + const amountUstx = maxAmount; - const { result: setAuthorization } = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "set-signer-key-authorization", - [ - // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) - poxAddressToTuple(this.wallet.btcAddress), - // (period uint) - Cl.uint(this.period), - // (reward-cycle uint) - Cl.uint(currentRewCycle), - // (topic (string-ascii 14)) - Cl.stringAscii("stack-stx"), - // (signer-key (buff 33)) - Cl.bufferFromHex(this.wallet.signerPubKey), - // (allowed bool) - Cl.bool(true), - // (max-amount uint) - Cl.uint(maxAmount), - // (auth-id uint) - Cl.uint(this.authId), - ], - this.wallet.stxAddress, - ); - - expect(setAuthorization).toBeOk(Cl.bool(true)); const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); const burnBlockHeight = Number( cvToValue(burnBlockHeightCV as ClarityValue), ); - // The amount of uSTX to be locked in the reward cycle. For this test, we - // will use the maximum amount of uSTX that can be used (per tx) with this - // signer key. - const amountUstx = maxAmount; - // Act - const stackStx = real.network.callPublicFn( - "ST000000000000000000002AMW42H.pox-4", - "stack-stx", - [ + + // Include the authorization and the `stack-stx` transactions in a single + // block. This way we ensure both the authorization and the stack-stx + // transactions are called during the same reward cycle, so the authorization + // currentRewCycle param is relevant for the upcoming stack-stx call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.period), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-stx"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn("ST000000000000000000002AMW42H.pox-4", "stack-stx", [ // (amount-ustx uint) Cl.uint(amountUstx), // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) poxAddressToTuple(this.wallet.btcAddress), // (start-burn-ht uint) - Cl.uint(burnBlockHeight + 1), + Cl.uint(burnBlockHeight), // (lock-period uint) Cl.uint(this.period), // (signer-sig (optional (buff 65))) @@ -123,28 +115,12 @@ export class StackStxAuthCommand_Err implements PoxCommand { Cl.uint(maxAmount), // (auth-id uint) Cl.uint(this.authId), - ], - this.wallet.stxAddress, - ); - - const { result: rewardCycle } = real.network.callReadOnlyFn( - "ST000000000000000000002AMW42H.pox-4", - "burn-height-to-reward-cycle", - [Cl.uint(burnBlockHeight)], - this.wallet.stxAddress, - ); - assert(isClarityType(rewardCycle, ClarityType.UInt)); - - const { result: unlockBurnHeight } = real.network.callReadOnlyFn( - "ST000000000000000000002AMW42H.pox-4", - "reward-cycle-to-burn-height", - [Cl.uint(Number(rewardCycle.value) + this.period + 1)], - this.wallet.stxAddress, - ); - assert(isClarityType(unlockBurnHeight, ClarityType.UInt)); + ], this.wallet.stxAddress), + ]); // Assert - expect(stackStx.result).toBeErr(Cl.int(this.errorCode)); + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr(Cl.int(this.errorCode)); // Log to console for debugging purposes. This is not necessary for the // test to pass but it is useful for debugging and eyeballing the test. From 190d7a5f88df8d8f2111ef3d584661e82dd756fa Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 12:22:29 +0300 Subject: [PATCH 0203/1400] Add the unhappy path cases for `StackAggCommitSigCommand_Err` This commit: - adds 3 unhappy path cases for the `stack-aggregation-commit` PoX-4 method, called using a signature. - adds the command run tracking inside the `check` method. - adds the expected `stack-aggregation-commit` PoX-4 errors to the POX_4_ERRORS dictionary. --- .../tests/pox-4/err_Commands.ts | 89 +++++++++++++ ...ox_StackAggregationCommitSigCommand_Err.ts | 121 ++++++++++++++++++ 2 files changed, 210 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 6a6c3b7028..8c1bdf774d 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -11,9 +11,12 @@ import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { Simnet } from "@hirosystems/clarinet-sdk"; import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err"; import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; +import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationCommitSigCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, + ERR_STACKING_NO_SUCH_PRINCIPAL: 4, + ERR_STACKING_THRESHOLD_NOT_MET: 11, ERR_STACKING_ALREADY_DELEGATED: 20, ERR_DELEGATION_ALREADY_REVOKED: 34, }; @@ -318,6 +321,92 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ) ), + // StackAggregationCommitSigCommand_Err_Stacking_Threshold_Not_Met + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitSigCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitSigCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + operator.amountToCommit > 0 + ) { + model.trackCommandRun( + "StackAggregationCommitSigCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, + ), + ), + // StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitSigCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitSigCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + operator.amountToCommit == 0 + ) { + model.trackCommandRun( + "StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitSigCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitSigCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !(operator.lockedAddresses.length > 0) && + !(operator.amountToCommit >= model.stackingMinimum) + ) { + model.trackCommandRun( + "StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts new file mode 100644 index 0000000000..ca53b56d1c --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts @@ -0,0 +1,121 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +type CheckFunc = ( + this: StackAggregationCommitSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationCommitSigCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationCommitAuthCommand_Err` to lock uSTX for stacking. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. + period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase or agg-commit. + topic: Pox4SignatureTopic.AggregateCommit, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: committedAmount, + }); + + // Act + const stackAggregationCommit = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationCommit.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-commit", + "amount committed", + committedAmount.toString(), + "signature", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit auth-id ${this.authId}`; + } +} From b0f8464e153ca8a8d188ff060ad85f12785d22d8 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 12:26:54 +0300 Subject: [PATCH 0204/1400] Remove `StackAggregationCommitSigCommand_Err` from statistics The command run tracking was moved inside the command's check function. No need to report the run using the file name anymore. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index 25adfde9d9..7a89fafaf6 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -113,7 +113,8 @@ it("statefully interacts with PoX-4", async () => { file !== "pox_StackStxAuthCommand_Err.ts" && file !== "pox_StackStxSigCommand_Err.ts" && file !== "pox_RevokeDelegateStxCommand_Err.ts" && - file !== "pox_DelegateStxCommand_Err.ts" + file !== "pox_DelegateStxCommand_Err.ts" && + file !== "pox_StackAggregationCommitSigCommand_Err.ts" ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. // This is the initial state of the model. From 367e89a613f9e556dfed50f4c4a196e09d66fbb5 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 13:29:41 +0300 Subject: [PATCH 0205/1400] Add the unhappy path cases for `StackAggCommitAuthCommand_Err` This commit: - adds 3 unhappy path cases for the `stack-aggregation-commit` PoX-4 method, called using an authorization. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 87 ++++++++++++ ...x_StackAggregationCommitAuthCommand_Err.ts | 127 ++++++++++++++++++ 2 files changed, 214 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 8c1bdf774d..8276d02f12 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -12,6 +12,7 @@ import { Simnet } from "@hirosystems/clarinet-sdk"; import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err"; import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationCommitSigCommand_Err"; +import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCommitAuthCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, @@ -407,6 +408,92 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), ), + // StackAggregationCommitAuthCommand_Err_Stacking_Threshold_Not_Met + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitAuthCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitAuthCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + operator.amountToCommit > 0 + ) { + model.trackCommandRun( + "StackAggregationCommitAuthCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, + ), + ), + // StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitAuthCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitAuthCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + operator.amountToCommit === 0 + ) { + model.trackCommandRun( + "StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitAuthCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitAuthCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !(operator.lockedAddresses.length > 0) && + !(operator.amountToCommit >= model.stackingMinimum) + ) { + model.trackCommandRun( + "StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts new file mode 100644 index 0000000000..3580061fae --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts @@ -0,0 +1,127 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackAggregationCommitAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationCommitAuthCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationCommitAuthCommand_Err` to lock uSTX for stacking. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + // Include the authorization and the `stack-aggregation-commit` transactions + // in a single block. This way we ensure both the authorization and the + // stack-aggregation-commit transactions are called during the same reward + // cycle, so the authorization currentRewCycle param is relevant for the + // upcoming stack-aggregation-commit call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + ]); + + // Assert + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-commit", + "amount committed", + committedAmount.toString(), + "authorization", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit auth-id ${this.authId}`; + } +} From abdf8bd39dee744d1adfbacc3808af3906f5c973 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 13:31:25 +0300 Subject: [PATCH 0206/1400] Remove `StackAggregationCommitAuthCommand_Err` from statistics The command run tracking was moved inside the command's check function. No need to report the run using the file name anymore. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index 7a89fafaf6..a77156a87a 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -114,7 +114,8 @@ it("statefully interacts with PoX-4", async () => { file !== "pox_StackStxSigCommand_Err.ts" && file !== "pox_RevokeDelegateStxCommand_Err.ts" && file !== "pox_DelegateStxCommand_Err.ts" && - file !== "pox_StackAggregationCommitSigCommand_Err.ts" + file !== "pox_StackAggregationCommitSigCommand_Err.ts" && + file !== "pox_StackAggregationCommitAuthCommand_Err.ts" ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. // This is the initial state of the model. From 3ab6e61e2c36223b45f4bccb3b3a7bd7aa793202 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 15:23:30 +0300 Subject: [PATCH 0207/1400] Use strict equality inside generator --- .../tests/pox-4/err_Commands.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 8276d02f12..3c3d242d33 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -369,7 +369,7 @@ export function ErrCommands( if ( operator.lockedAddresses.length > 0 && !(operator.amountToCommit >= model.stackingMinimum) && - operator.amountToCommit == 0 + operator.amountToCommit === 0 ) { model.trackCommandRun( "StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_1", From 91a7c43b2b7b4cbff198757fef9fc45aa2afa82a Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 15:44:36 +0300 Subject: [PATCH 0208/1400] Order statistics alphabetically This commit improves the unhappy paths execution visibility after the test suite run is complete. --- .../tests/pox-4/pox_CommandModel.ts | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts index ce1d2a28b4..3d4b7415f9 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts @@ -46,7 +46,13 @@ export class Stub { reportCommandRuns() { console.log("Command run method execution counts:"); - this.statistics.forEach((count, commandName) => { + const orderedStatistics = Array.from(this.statistics.entries()).sort( + ([keyA], [keyB]) => { + return keyA.localeCompare(keyB); + }, + ); + + orderedStatistics.forEach(([commandName, count]) => { console.log(`${commandName}: ${count}`); }); } From ab686c4186db8169cabe0c43948646805e52adab Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 27 May 2024 22:53:56 +0300 Subject: [PATCH 0209/1400] Add the unhappy path cases for `StackAggCommitIndexedSigCommand_Err` This commit: - adds 3 unhappy path cases for the `stack-aggregation-commit-indexed` PoX-4 method, called using a signature. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 87 ++++++++++++ ...kAggregationCommitIndexedSigCommand_Err.ts | 124 ++++++++++++++++++ 2 files changed, 211 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 3c3d242d33..bfe600e594 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -13,6 +13,7 @@ import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationCommitSigCommand_Err"; import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCommitAuthCommand_Err"; +import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, @@ -494,6 +495,92 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), ), + // StackAggregationCommitIndexedSigCommand_Err_Stacking_Threshold_Not_Met + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedSigCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitIndexedSigCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + operator.amountToCommit > 0 + ) { + model.trackCommandRun( + "StackAggregationCommitIndexedSigCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, + ), + ), + // StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedSigCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitIndexedSigCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + !(operator.amountToCommit > 0) + ) { + model.trackCommandRun( + "StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedSigCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitIndexedSigCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !(operator.lockedAddresses.length > 0) && + !(operator.amountToCommit >= model.stackingMinimum) + ) { + model.trackCommandRun( + "StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts new file mode 100644 index 0000000000..22b5a4f923 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts @@ -0,0 +1,124 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +type CheckFunc = ( + this: StackAggregationCommitIndexedSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationCommitIndexedSigCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationCommitIndexedSigCommand_Err` to lock uSTX + * for stacking. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. + period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase or agg-commit. + topic: Pox4SignatureTopic.AggregateCommit, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: committedAmount, + }); + + // Act + const stackAggregationCommitIndexed = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit-indexed", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationCommitIndexed.result).toBeErr( + Cl.int(this.errorCode), + ); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-commit-indexed", + "amount committed", + committedAmount.toString(), + "authorization", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit-indexed auth-id ${this.authId}`; + } +} From 50475b75bc25e209447f70caedac5cb5d22d9c92 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 28 May 2024 14:59:18 +0300 Subject: [PATCH 0210/1400] Add the unhappy path cases for `StackAggCommitIndexedAuthCommand_Err` This commit: - adds 3 unhappy path cases for the `stack-aggregation-commit-indexed` PoX-4 method, called using an authorization. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 87 ++++++++++++ ...AggregationCommitIndexedAuthCommand_Err.ts | 133 ++++++++++++++++++ 2 files changed, 220 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index bfe600e594..4bdb6f5da3 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -14,6 +14,7 @@ import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationCommitSigCommand_Err"; import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCommitAuthCommand_Err"; import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; +import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggregationCommitIndexedAuthCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, @@ -581,6 +582,92 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), ), + // StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedAuthCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitIndexedAuthCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + !(operator.amountToCommit > 0) + ) { + model.trackCommandRun( + "StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedAuthCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitIndexedAuthCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + !(operator.lockedAddresses.length > 0) && + !(operator.amountToCommit >= model.stackingMinimum) + ) { + model.trackCommandRun( + "StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), + // StackAggregationCommitIndexedAuthCommand_Err_Stacking_Threshold_Not_Met + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).map( + (r: { wallet: Wallet; authId: number }) => + new StackAggregationCommitIndexedAuthCommand_Err( + r.wallet, + r.authId, + function ( + this: StackAggregationCommitIndexedAuthCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + + if ( + operator.lockedAddresses.length > 0 && + !(operator.amountToCommit >= model.stackingMinimum) && + operator.amountToCommit > 0 + ) { + model.trackCommandRun( + "StackAggregationCommitIndexedAuthCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts new file mode 100644 index 0000000000..92ebfa0d19 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts @@ -0,0 +1,133 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; +import { currentCycle } from "./pox_Commands.ts"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackAggregationCommitIndexedAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationCommitIndexedAuthCommand_Err + implements PoxCommand { + readonly operator: Wallet; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationCommitIndexedAuthCommand_Err` to lock uSTX + * for stacking. + * + * @param operator - Represents the `Operator`'s wallet. + * @param authId - Unique `auth-id` for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + // Act + + // Include the authorization and the `stack-aggregation-commit-indexed` + // transactions in a single block. This way we ensure both the authorization + // and the stack-aggregation-commit-indexed transactions are called during + // the same reward cycle, so the authorization currentRewCycle param is + // relevant for the upcoming stack-aggregation-commit-indexed call. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (period uint) + Cl.uint(1), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (topic (string-ascii 14)) + Cl.stringAscii("agg-commit"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-commit-indexed", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(committedAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ), + ]); + + // Assert + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr( + Cl.int(this.errorCode), + ); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-commit-indexed", + "amount committed", + committedAmount.toString(), + "authorization", + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-commit-indexed auth-id ${this.authId}`; + } +} From 7173ad680e044a79059da07edb8ef414b7cf2ddc Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 28 May 2024 15:09:19 +0300 Subject: [PATCH 0211/1400] Remove all files containing `_Err` from command tracking The command run tracking for the unhappy paths was moved inside the commands' check function. No need to report the run using the file name anymore. --- .../tests/pox-4/pox-4.stateful-prop.test.ts | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts index a77156a87a..31a9239a44 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox-4.stateful-prop.test.ts @@ -110,12 +110,7 @@ it("statefully interacts with PoX-4", async () => { const statistics = fs.readdirSync(path.join(__dirname)).filter((file) => file.startsWith("pox_") && file.endsWith(".ts") && file !== "pox_CommandModel.ts" && file !== "pox_Commands.ts" && - file !== "pox_StackStxAuthCommand_Err.ts" && - file !== "pox_StackStxSigCommand_Err.ts" && - file !== "pox_RevokeDelegateStxCommand_Err.ts" && - file !== "pox_DelegateStxCommand_Err.ts" && - file !== "pox_StackAggregationCommitSigCommand_Err.ts" && - file !== "pox_StackAggregationCommitAuthCommand_Err.ts" + !file.includes("_Err") ).map((file) => file.slice(4, -3)); // Remove "pox_" prefix and ".ts" suffix. // This is the initial state of the model. From c6ff82d17ce4b132c1f96c01fa7e1ea601925a6d Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 28 May 2024 16:00:05 +0300 Subject: [PATCH 0212/1400] Add one unhappy path case for `StackAggIncreaseCommand_Err` This commit: - adds one unhappy path case for the `stack-aggregation-increase` PoX-4 method, called using an authorization. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 44 ++++++ ...pox_StackAggregationIncreaseCommand_Err.ts | 143 ++++++++++++++++++ 2 files changed, 187 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 4bdb6f5da3..26da594c60 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -15,6 +15,7 @@ import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationComm import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCommitAuthCommand_Err"; import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggregationCommitIndexedAuthCommand_Err"; +import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncreaseCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, @@ -668,6 +669,49 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, ), ), + // StackAggregationIncreaseCommand_Err_Stacking_No_Such_Principal + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + }).chain((r) => { + const operator = stackers.get(r.wallet.stxAddress)!; + const committedRewCycleIndexesOrFallback = + operator.committedRewCycleIndexes.length > 0 + ? operator.committedRewCycleIndexes + : [-1]; + return fc + .record({ + rewardCycleIndex: fc.constantFrom( + ...committedRewCycleIndexesOrFallback, + ), + }) + .map((cycleIndex) => ({ ...r, ...cycleIndex })); + }) + .map( + (r: { wallet: Wallet; rewardCycleIndex: number; authId: number }) => + new StackAggregationIncreaseCommand_Err( + r.wallet, + r.rewardCycleIndex, + r.authId, + function ( + this: StackAggregationIncreaseCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + if ( + operator.lockedAddresses.length > 0 && + this.rewardCycleIndex >= 0 && + !(operator.amountToCommit > 0) + ) { + model.trackCommandRun( + "StackAggregationIncreaseCommand_Err_Stacking_No_Such_Principal", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand_Err.ts new file mode 100644 index 0000000000..26fc49eb60 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand_Err.ts @@ -0,0 +1,143 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl, cvToJSON } from "@stacks/transactions"; +import { bufferFromHex } from "@stacks/transactions/dist/cl"; +import { currentCycle } from "./pox_Commands.ts"; + +type CheckFunc = ( + this: StackAggregationIncreaseCommand_Err, + model: Readonly, +) => boolean; + +export class StackAggregationIncreaseCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly rewardCycleIndex: number; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackAggregationIncreaseCommand_Err` to commit partially + * stacked STX to a PoX address which has already received some STX. + * + * @param operator - Represents the `Operator`'s wallet. + * @param rewardCycleIndex - The cycle index to increase the commit for. + * @param authId - Unique `auth-id` for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + rewardCycleIndex: number, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.rewardCycleIndex = rewardCycleIndex; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const committedAmount = operatorWallet.amountToCommit; + + const existingEntryCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "reward-cycle-pox-address-list", + Cl.tuple({ + index: Cl.uint(this.rewardCycleIndex), + "reward-cycle": Cl.uint(currentRewCycle + 1), + }), + ); + + const totalStackedBefore = + cvToJSON(existingEntryCV).value.value["total-ustx"].value; + const maxAmount = committedAmount + Number(totalStackedBefore); + + const signerSig = this.operator.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.operator.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For stack-stx and stack-extend, this refers to the reward cycle + // where the transaction is confirmed. For stack-aggregation-commit, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle + 1, + // For stack-stx, this refers to lock-period. For stack-extend, + // this refers to extend-count. For stack-aggregation-commit, this is + // u1. + period: 1, + // A string representing the function where this authorization is valid. + // Either stack-stx, stack-extend, stack-increase, agg-commit or agg-increase. + topic: Pox4SignatureTopic.AggregateIncrease, + // The PoX address that can be used with this signer key. + poxAddress: this.operator.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: maxAmount, + }); + + // Act + const stackAggregationIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-aggregation-increase", + [ + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (reward-cycle uint) + Cl.uint(currentRewCycle + 1), + // (reward-cycle-index uint)) + Cl.uint(this.rewardCycleIndex), + // (signer-sig (optional (buff 65))) + Cl.some(bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.operator.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.operator.stxAddress, + ); + + // Assert + expect(stackAggregationIncrease.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label}`, + "stack-agg-increase", + "amount committed", + committedAmount.toString(), + "cycle index", + this.rewardCycleIndex.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} stack-aggregation-increase for index ${this.rewardCycleIndex}`; + } +} From a267a044d6d8c027be624e27fb77a772c03da3c9 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 28 May 2024 20:22:13 +0300 Subject: [PATCH 0213/1400] Format using `deno` according to the other generators --- .../tests/pox-4/err_Commands.ts | 51 +++++++++---------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 26da594c60..4a01ac827a 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -686,32 +686,31 @@ export function ErrCommands( ), }) .map((cycleIndex) => ({ ...r, ...cycleIndex })); - }) - .map( - (r: { wallet: Wallet; rewardCycleIndex: number; authId: number }) => - new StackAggregationIncreaseCommand_Err( - r.wallet, - r.rewardCycleIndex, - r.authId, - function ( - this: StackAggregationIncreaseCommand_Err, - model: Readonly, - ): boolean { - const operator = model.stackers.get(this.operator.stxAddress)!; - if ( - operator.lockedAddresses.length > 0 && - this.rewardCycleIndex >= 0 && - !(operator.amountToCommit > 0) - ) { - model.trackCommandRun( - "StackAggregationIncreaseCommand_Err_Stacking_No_Such_Principal", - ); - return true; - } else return false; - }, - POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, - ), - ), + }).map( + (r: { wallet: Wallet; rewardCycleIndex: number; authId: number }) => + new StackAggregationIncreaseCommand_Err( + r.wallet, + r.rewardCycleIndex, + r.authId, + function ( + this: StackAggregationIncreaseCommand_Err, + model: Readonly, + ): boolean { + const operator = model.stackers.get(this.operator.stxAddress)!; + if ( + operator.lockedAddresses.length > 0 && + this.rewardCycleIndex >= 0 && + !(operator.amountToCommit > 0) + ) { + model.trackCommandRun( + "StackAggregationIncreaseCommand_Err_Stacking_No_Such_Principal", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, + ), + ), ]; return cmds; From 1bd9c78d95a5485a2d47046b3f8e85192b4ec234 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Wed, 29 May 2024 14:18:18 +0300 Subject: [PATCH 0214/1400] Add unhappy path cases for `DelegateStackStxCommand_Err` This commit: - adds 3 unhappy path cases for the `delegate-stack-stx` PoX-4 method. - adds the command run tracking inside the `check` method. - adds the expected `delegate-stack-stx` PoX-4 errors to the `POX_4_ERRORS` dictionary. - exports the `nextCycleFirstBlock` method from pox_commands, as it is used inside err_Commands. --- .../tests/pox-4/err_Commands.ts | 226 ++++++++++++++++++ .../tests/pox-4/pox_Commands.ts | 2 +- .../pox-4/pox_DelegateStackStxCommand_Err.ts | 105 ++++++++ 3 files changed, 332 insertions(+), 1 deletion(-) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 4a01ac827a..74c0170af5 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -16,12 +16,16 @@ import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCom import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggregationCommitIndexedAuthCommand_Err"; import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncreaseCommand_Err"; +import { currentCycleFirstBlock, nextCycleFirstBlock } from "./pox_Commands"; +import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_ALREADY_STACKED: 3, ERR_STACKING_NO_SUCH_PRINCIPAL: 4, + ERR_STACKING_PERMISSION_DENIED: 9, ERR_STACKING_THRESHOLD_NOT_MET: 11, ERR_STACKING_ALREADY_DELEGATED: 20, + ERR_DELEGATION_TOO_MUCH_LOCKED: 22, ERR_DELEGATION_ALREADY_REVOKED: 34, }; @@ -711,6 +715,228 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), ), + // DelegateStackStxCommand_Err_Delegation_Too_Much_Locked + fc.record({ + operator: fc.constantFrom(...wallets.values()), + startBurnHt: fc.integer({ + min: currentCycleFirstBlock(network), + max: nextCycleFirstBlock(network), + }), + period: fc.integer({ min: 1, max: 12 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + // Determine available stackers based on the operator + const availableStackers = operator.poolMembers.length > 0 + ? operator.poolMembers + : [r.operator.stxAddress]; + + return fc.record({ + stacker: fc.constantFrom(...availableStackers), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })).chain((resultWithStacker) => { + return fc.record({ + unlockBurnHt: fc.constant( + currentCycleFirstBlock(network) + + 1050 * (resultWithStacker.period + 1), + ), + }).map((additionalProps) => ({ + ...resultWithStacker, + ...additionalProps, + })); + }).chain((resultWithUnlockHeight) => { + return fc.record({ + amount: fc.bigInt({ + min: 0n, + max: 100_000_000_000_000n, + }), + }).map((amountProps) => ({ + ...resultWithUnlockHeight, + ...amountProps, + })); + }); + }).map((finalResult) => { + return new DelegateStackStxCommand_Err( + finalResult.operator, + finalResult.stacker, + finalResult.period, + finalResult.amount, + finalResult.unlockBurnHt, + function ( + this: DelegateStackStxCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + if ( + model.stackingMinimum > 0 && + !stackerWallet.isStacking && + stackerWallet.hasDelegated && + !(stackerWallet.delegatedMaxAmount >= Number(this.amountUstx)) && + Number(this.amountUstx) <= stackerWallet.ustxBalance && + Number(this.amountUstx) >= model.stackingMinimum && + operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt + ) { + model.trackCommandRun( + "DelegateStackStxCommand_Err_Delegation_Too_Much_Locked", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_DELEGATION_TOO_MUCH_LOCKED, + ); + }), + // DelegateStackStxCommand_Err_Stacking_Permission_Denied + fc.record({ + operator: fc.constantFrom(...wallets.values()), + startBurnHt: fc.integer({ + min: currentCycleFirstBlock(network), + max: nextCycleFirstBlock(network), + }), + period: fc.integer({ min: 1, max: 12 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + // Determine available stackers based on the operator + const availableStackers = operator.poolMembers.length > 0 + ? operator.poolMembers + : [r.operator.stxAddress]; + + return fc.record({ + stacker: fc.constantFrom(...availableStackers), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })).chain((resultWithStacker) => { + return fc.record({ + unlockBurnHt: fc.constant( + currentCycleFirstBlock(network) + + 1050 * (resultWithStacker.period + 1), + ), + }).map((additionalProps) => ({ + ...resultWithStacker, + ...additionalProps, + })); + }).chain((resultWithUnlockHeight) => { + return fc.record({ + amount: fc.bigInt({ + min: 0n, + max: BigInt( + stackers.get(resultWithUnlockHeight.stacker.stxAddress)! + .delegatedMaxAmount, + ), + }), + }).map((amountProps) => ({ + ...resultWithUnlockHeight, + ...amountProps, + })); + }); + }).map((finalResult) => { + return new DelegateStackStxCommand_Err( + finalResult.operator, + finalResult.stacker, + finalResult.period, + finalResult.amount, + finalResult.unlockBurnHt, + function ( + this: DelegateStackStxCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + if ( + model.stackingMinimum > 0 && + !stackerWallet.isStacking && + stackerWallet.hasDelegated && + stackerWallet.delegatedMaxAmount >= Number(this.amountUstx) && + Number(this.amountUstx) <= stackerWallet.ustxBalance && + Number(this.amountUstx) >= model.stackingMinimum && + !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt + ) { + model.trackCommandRun( + "DelegateStackStxCommand_Err_Stacking_Permission_Denied_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, + ); + }), + // DelegateStackStxCommand_Err_Stacking_Permission_Denied_2 + fc.record({ + operator: fc.constantFrom(...wallets.values()), + startBurnHt: fc.integer({ + min: currentCycleFirstBlock(network), + max: nextCycleFirstBlock(network), + }), + period: fc.integer({ min: 1, max: 12 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + // Determine available stackers based on the operator + const availableStackers = operator.poolMembers.length > 0 + ? operator.poolMembers + : [r.operator.stxAddress]; + + return fc.record({ + stacker: fc.constantFrom(...availableStackers), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })).chain((resultWithStacker) => { + return fc.record({ + unlockBurnHt: fc.constant( + currentCycleFirstBlock(network) + + 1050 * (resultWithStacker.period + 1), + ), + }).map((additionalProps) => ({ + ...resultWithStacker, + ...additionalProps, + })); + }).chain((resultWithUnlockHeight) => { + return fc.record({ + amount: fc.bigInt({ + min: 0n, + max: 100_000_000_000_000n, + }), + }).map((amountProps) => ({ + ...resultWithUnlockHeight, + ...amountProps, + })); + }); + }).map((finalResult) => { + return new DelegateStackStxCommand_Err( + finalResult.operator, + finalResult.stacker, + finalResult.period, + finalResult.amount, + finalResult.unlockBurnHt, + function ( + this: DelegateStackStxCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get(this.operator.stxAddress)!; + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + if ( + model.stackingMinimum > 0 && + !stackerWallet.isStacking && + !(stackerWallet.hasDelegated) && + !(stackerWallet.delegatedMaxAmount >= Number(this.amountUstx)) && + Number(this.amountUstx) <= stackerWallet.ustxBalance && + Number(this.amountUstx) >= model.stackingMinimum && + !(operatorWallet.poolMembers.includes(this.stacker.stxAddress)) && + !(this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) + ) { + model.trackCommandRun( + "DelegateStackStxCommand_Err_Stacking_Permission_Denied_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, + ); + }), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts index 0a1fd6f3ea..a42cb6278e 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_Commands.ts @@ -479,7 +479,7 @@ export const currentCycleFirstBlock = (network: Simnet) => ).result, )); -const nextCycleFirstBlock = (network: Simnet) => +export const nextCycleFirstBlock = (network: Simnet) => Number(cvToValue( network.callReadOnlyFn( "ST000000000000000000002AMW42H.pox-4", diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts new file mode 100644 index 0000000000..b4e5a491dd --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts @@ -0,0 +1,105 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl, ClarityValue, cvToValue } from "@stacks/transactions"; + +type CheckFunc = ( + this: DelegateStackStxCommand_Err, + model: Readonly, +) => boolean; + +export class DelegateStackStxCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly period: number; + readonly amountUstx: bigint; + readonly unlockBurnHt: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `DelegateStackStxCommand` to lock uSTX as a Pool Operator + * on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. + * @param stacker - Represents the STacker's wallet. + * @param period - Number of reward cycles to lock uSTX. + * @param amountUstx - The uSTX amount stacked by the Operator on behalf + * of the Stacker. + * @param unlockBurnHt - The burn height at which the uSTX is unlocked. + */ + constructor( + operator: Wallet, + stacker: Wallet, + period: number, + amountUstx: bigint, + unlockBurnHt: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.stacker = stacker; + this.period = period; + this.amountUstx = amountUstx; + this.unlockBurnHt = unlockBurnHt; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + // Act + const delegateStackStx = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-stx", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (amount-ustx uint) + Cl.uint(this.amountUstx), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (start-burn-ht uint) + Cl.uint(burnBlockHeight), + // (lock-period uint) + Cl.uint(this.period), + ], + this.operator.stxAddress, + ); + + // Assert + expect(delegateStackStx.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✓ ${this.operator.label} Ӿ ${this.stacker.label}`, + "delegate-stack-stx", + "lock-amount", + this.amountUstx.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} delegate-stack-stx stacker ${this.stacker.label} period ${this.period}`; + } +} From b66b19cd3884a258647e56ee147f46e09955f8a5 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Wed, 29 May 2024 21:47:33 +0300 Subject: [PATCH 0215/1400] Add unhappy path cases for `StackIncreaseSigCommand_Err` This commit: - adds 3 unhappy path cases for the `stack-increase` PoX-4 method, called using a signature. - adds the command run tracking inside the `check` method. - adds the expected `stack-increase` PoX-4 errors to the `POX_4_ERRORS` dictionary. --- .../tests/pox-4/err_Commands.ts | 106 +++++++++++++ .../pox-4/pox_StackIncreaseSigCommand_Err.ts | 143 ++++++++++++++++++ 2 files changed, 249 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 74c0170af5..113d52ef46 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -18,14 +18,18 @@ import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggrega import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncreaseCommand_Err"; import { currentCycleFirstBlock, nextCycleFirstBlock } from "./pox_Commands"; import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; +import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; const POX_4_ERRORS = { + ERR_STACKING_INSUFFICIENT_FUNDS: 1, ERR_STACKING_ALREADY_STACKED: 3, ERR_STACKING_NO_SUCH_PRINCIPAL: 4, ERR_STACKING_PERMISSION_DENIED: 9, ERR_STACKING_THRESHOLD_NOT_MET: 11, + ERR_STACKING_INVALID_AMOUNT: 18, ERR_STACKING_ALREADY_DELEGATED: 20, ERR_DELEGATION_TOO_MUCH_LOCKED: 22, + ERR_STACKING_IS_DELEGATED: 30, ERR_DELEGATION_ALREADY_REVOKED: 34, }; @@ -937,6 +941,108 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, ); }), + // StackIncreaseSigCommand_Err_Stacking_Is_Delegated + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseSigCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function ( + this: StackIncreaseSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + this.increaseBy <= stacker.amountUnlocked && + this.increaseBy >= 1 + ) { + model.trackCommandRun( + "StackIncreaseSigCommand_Err_Stacking_Is_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackIncreaseSigCommand_Err_Stacking_Insufficient_Funds + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(100_000_000_000_000), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseSigCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function ( + this: StackIncreaseSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + !(this.increaseBy <= stacker.amountUnlocked) && + this.increaseBy >= 1 + ) { + model.trackCommandRun( + "StackIncreaseSigCommand_Err_Stacking_Insufficient_Funds", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS, + ), + ), + // StackIncreaseSigCommand_Err_Stacking_Invalid_Amount + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(0), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseSigCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function ( + this: StackIncreaseSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + this.increaseBy <= stacker.amountUnlocked && + !(this.increaseBy >= 1) + ) { + model.trackCommandRun( + "StackIncreaseSigCommand_Err_Stacking_Invalid_Amount", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts new file mode 100644 index 0000000000..4a122784b3 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts @@ -0,0 +1,143 @@ +import { Pox4SignatureTopic } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { + Cl, + ClarityType, + ClarityValue, + cvToJSON, + cvToValue, + isClarityType, +} from "@stacks/transactions"; +import { assert, expect } from "vitest"; + +type CheckFunc = ( + this: StackIncreaseSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackIncreaseSigCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly increaseBy: number; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackIncreaseSigCommand_Err` to lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param increaseBy - Represents the locked amount to be increased by. + * @param authId - Unique auth-id for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + increaseBy: number, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.increaseBy = increaseBy; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const maxAmount = stacker.amountLocked + this.increaseBy; + + const burnBlockHeightCV = real.network.runSnippet("burn-block-height"); + const burnBlockHeight = Number( + cvToValue(burnBlockHeightCV as ClarityValue), + ); + + const { result: rewardCycleNextBlockCV } = real.network.callReadOnlyFn( + "ST000000000000000000002AMW42H.pox-4", + "burn-height-to-reward-cycle", + [Cl.uint(burnBlockHeight + 1)], + this.wallet.stxAddress, + ); + assert(isClarityType(rewardCycleNextBlockCV, ClarityType.UInt)); + + const rewardCycleNextBlock = cvToValue(rewardCycleNextBlockCV); + + // Get the lock period from the stacking state. This will be used for correctly + // issuing the authorization. + const stackingStateCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "stacking-state", + Cl.tuple({ stacker: Cl.principal(this.wallet.stxAddress) }), + ); + const period = cvToJSON(stackingStateCV).value.value["lock-period"].value; + + const signerSig = this.wallet.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.wallet.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For `stack-stx` and `stack-extend`, this refers to the reward cycle + // where the transaction is confirmed. For `stack-aggregation-commit`, + // this refers to the reward cycle argument in that function. + rewardCycle: rewardCycleNextBlock, + // For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + // this refers to `extend-count`. For `stack-aggregation-commit`, this is + // `u1`. + period: period, + // A string representing the function where this authorization is valid. + // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: Pox4SignatureTopic.StackIncrease, + // The PoX address that can be used with this signer key. + poxAddress: this.wallet.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: maxAmount, + }); + + const stackIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-increase", + [ + // (increase-by uint) + Cl.uint(this.increaseBy), + // (signer-sig (optional (buff 65))) + Cl.some(Cl.bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + expect(stackIncrease.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-increase-sig", + "increase-by", + this.increaseBy.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-increase sig increase-by ${this.increaseBy}`; + } +} From 50bd1b5cc15b6ff1a8489c258eb79d114238166c Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 30 May 2024 13:21:34 +0300 Subject: [PATCH 0216/1400] Add unhappy path cases for `StackIncreaseAuthCommand_Err` This commit: - adds 3 unhappy path cases for the `stack-increase` PoX-4 method, called using an authorization. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 103 ++++++++++++++ .../pox-4/pox_StackIncreaseAuthCommand_Err.ts | 133 ++++++++++++++++++ 2 files changed, 236 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 113d52ef46..e51283f3d1 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -19,6 +19,7 @@ import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncre import { currentCycleFirstBlock, nextCycleFirstBlock } from "./pox_Commands"; import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; +import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_INSUFFICIENT_FUNDS: 1, @@ -1043,6 +1044,108 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, ), ), + // StackIncreaseAuthCommand_Err_Stacking_Is_Delegated + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseAuthCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function ( + this: StackIncreaseAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + this.increaseBy <= stacker.amountUnlocked && + this.increaseBy >= 1 + ) { + model.trackCommandRun( + "StackIncreaseAuthCommand_Err_Stacking_Is_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackIncreaseAuthCommand_Err_Stacking_Insufficient_Funds + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(100_000_000_000_000), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseAuthCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function ( + this: StackIncreaseAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + !(this.increaseBy <= stacker.amountUnlocked) && + this.increaseBy >= 1 + ) { + model.trackCommandRun( + "StackIncreaseAuthCommand_Err_Stacking_Insufficient_Funds", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS, + ), + ), + // StackIncreaseAuthCommand_Err_Stacking_Invalid_Amount + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(0), + authId: fc.nat(), + }).map( + (r) => + new StackIncreaseAuthCommand_Err( + r.operator, + r.increaseBy, + r.authId, + function ( + this: StackIncreaseAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + this.increaseBy <= stacker.amountUnlocked && + !(this.increaseBy >= 1) + ) { + model.trackCommandRun( + "StackIncreaseAuthCommand_Err_Stacking_Invalid_Amount", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts new file mode 100644 index 0000000000..a74aa3c211 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts @@ -0,0 +1,133 @@ +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { currentCycle } from "./pox_Commands"; +import { Cl, cvToJSON } from "@stacks/transactions"; +import { expect } from "vitest"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackIncreaseAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackIncreaseAuthCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly increaseBy: number; + readonly authId: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackIncreaseAuthCommand` to increase lock uSTX for stacking. + * + * @param wallet - Represents the Stacker's wallet. + * @param increaseBy - Represents the locked amount to be increased by. + * @param authId - Unique auth-id for the authorization. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + increaseBy: number, + authId: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.increaseBy = increaseBy; + this.authId = authId; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + // Get the lock period from the stacking state. This will be used for correctly + // issuing the authorization. + const stackingStateCV = real.network.getMapEntry( + "ST000000000000000000002AMW42H.pox-4", + "stacking-state", + Cl.tuple({ stacker: Cl.principal(this.wallet.stxAddress) }), + ); + const period = cvToJSON(stackingStateCV).value.value["lock-period"].value; + + const maxAmount = stacker.amountLocked + this.increaseBy; + + // Act + + // Include the authorization and the `stack-increase` transactions in a single + // block. This way we ensure both the authorization and the stack-increase + // transactions are called during the same reward cycle and avoid the clarity + // error `ERR_INVALID_REWARD_CYCLE`. + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(period), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii(Pox4SignatureTopic.StackIncrease), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-increase", + [ + // (increase-by uint) + Cl.uint(this.increaseBy), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(maxAmount), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + ]); + + // Assert + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-increase-auth", + "increase-by", + this.increaseBy.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-increase auth increase-by ${this.increaseBy}`; + } +} From 4c2f3b6c384a2e8a28859194537e574686a8d1ca Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 30 May 2024 18:37:44 +0300 Subject: [PATCH 0217/1400] Add unhappy path cases for `StackExtendSigCommand_Err` This commit: - adds 5 unhappy path cases for the `stack-increase` PoX-4 method, called using a signature. - adds the command run tracking inside the `check` method. - adds the expected `stack-extend` PoX-4 errors to the `POX_4_ERRORS` dictionary. --- .../tests/pox-4/err_Commands.ts | 271 +++++++++++++++++- .../pox-4/pox_StackExtendSigCommand_Err.ts | 121 ++++++++ 2 files changed, 391 insertions(+), 1 deletion(-) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index e51283f3d1..27264b04fb 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -16,13 +16,21 @@ import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCom import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggregationCommitIndexedAuthCommand_Err"; import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncreaseCommand_Err"; -import { currentCycleFirstBlock, nextCycleFirstBlock } from "./pox_Commands"; +import { + currentCycle, + currentCycleFirstBlock, + FIRST_BURNCHAIN_BLOCK_HEIGHT, + nextCycleFirstBlock, + REWARD_CYCLE_LENGTH, +} from "./pox_Commands"; import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err"; +import { StackExtendSigCommand_Err } from "./pox_StackExtendSigCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_INSUFFICIENT_FUNDS: 1, + ERR_STACKING_INVALID_LOCK_PERIOD: 2, ERR_STACKING_ALREADY_STACKED: 3, ERR_STACKING_NO_SUCH_PRINCIPAL: 4, ERR_STACKING_PERMISSION_DENIED: 9, @@ -30,6 +38,7 @@ const POX_4_ERRORS = { ERR_STACKING_INVALID_AMOUNT: 18, ERR_STACKING_ALREADY_DELEGATED: 20, ERR_DELEGATION_TOO_MUCH_LOCKED: 22, + ERR_STACK_EXTEND_NOT_LOCKED: 26, ERR_STACKING_IS_DELEGATED: 30, ERR_DELEGATION_ALREADY_REVOKED: 34, }; @@ -1146,6 +1155,266 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, ), ), + // StackExtendSigCommand_Err_Stacking_Is_Delegated_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Is_Delegated_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackExtendSigCommand_Err_Stacking_Is_Delegated_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + !(stacker.poolMembers.length === 0) && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Is_Delegated_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackExtendSigCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Already_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, + ), + ), + // StackExtendSigCommand_Err_Stacking_Invalid_Lock_Period + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer(), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + !(totalPeriod <= 12) + ) { + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Invalid_Lock_Period", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD, + ), + ), + // StackExtendSigCommand_Err_Stack_Extend_Not_Locked + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendSigCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendSigCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + !stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + !(stacker.amountLocked > 0) && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendSigCommand_Err_Stack_Extend_Not_Locked", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts new file mode 100644 index 0000000000..185f2796d1 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts @@ -0,0 +1,121 @@ +import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { currentCycle } from "./pox_Commands"; +import { Cl } from "@stacks/transactions"; +import { expect } from "vitest"; + +type CheckFunc = ( + this: StackExtendSigCommand_Err, + model: Readonly, +) => boolean; + +export class StackExtendSigCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly extendCount: number; + readonly authId: number; + readonly currentCycle: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackExtendSigCommand` to lock uSTX for stacking. + * + * This command calls `stack-extend` using a `signature`. + * + * @param wallet - Represents the Stacker's wallet. + * @param extendCount - Represents the cycles to extend the stack with. + * @param authId - Unique auth-id for the authorization. + * @param currentCycle - Represents the current PoX reward cycle. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + extendCount: number, + authId: number, + currentCycle: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.extendCount = extendCount; + this.authId = authId; + this.currentCycle = currentCycle; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const signerSig = this.wallet.stackingClient.signPoxSignature({ + // The signer key being authorized. + signerPrivateKey: this.wallet.signerPrvKey, + // The reward cycle for which the authorization is valid. + // For `stack-stx` and `stack-extend`, this refers to the reward cycle + // where the transaction is confirmed. For `stack-aggregation-commit`, + // this refers to the reward cycle argument in that function. + rewardCycle: currentRewCycle, + // For `stack-stx`, this refers to `lock-period`. For `stack-extend`, + // this refers to `extend-count`. For `stack-aggregation-commit`, this is + // `u1`. + period: this.extendCount, + // A string representing the function where this authorization is valid. + // Either `stack-stx`, `stack-extend`, `stack-increase` or `agg-commit`. + topic: Pox4SignatureTopic.StackExtend, + // The PoX address that can be used with this signer key. + poxAddress: this.wallet.btcAddress, + // The unique auth-id for this authorization. + authId: this.authId, + // The maximum amount of uSTX that can be used (per tx) with this signer + // key. + maxAmount: stacker.amountLocked, + }); + + const stackExtend = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-extend", + [ + // (extend-count uint) + Cl.uint(this.extendCount), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.wallet.btcAddress), + // (signer-sig (optional (buff 65))) + Cl.some(Cl.bufferFromHex(signerSig)), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ); + + expect(stackExtend.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-extend-sig", + "extend-count", + this.extendCount.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-extend sig extend-count ${this.extendCount}`; + } +} From add9d5592ea1c47e38cfd5121bac4d7d74127787 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 30 May 2024 18:52:05 +0300 Subject: [PATCH 0218/1400] Add unhappy path cases for `StackExtendAuthCommand_Err` This commit: - adds 5 unhappy path cases for the `stack-extend` PoX-4 method, called using an authorization. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 261 ++++++++++++++++++ .../pox-4/pox_StackExtendAuthCommand_Err.ts | 123 +++++++++ 2 files changed, 384 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 27264b04fb..83a9566605 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -27,6 +27,7 @@ import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err"; import { StackExtendSigCommand_Err } from "./pox_StackExtendSigCommand_Err"; +import { StackExtendAuthCommand_Err } from "./pox_StackExtendAuthCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_INSUFFICIENT_FUNDS: 1, @@ -1415,6 +1416,266 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, ), ), + // StackExtendAuthCommand_Err_Stacking_Is_Delegated_1 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Is_Delegated_1", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackExtendAuthCommand_Err_Stacking_Is_Delegated_2 + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + !(stacker.poolMembers.length === 0) && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Is_Delegated_2", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, + ), + ), + // StackExtendAuthCommand_Err_Stacking_Already_Delegated + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Already_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, + ), + ), + // StackExtendAuthCommand_Err_Stacking_Invalid_Lock_Period + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer(), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + stacker.isStacking && + stacker.isStackingSolo && + !stacker.hasDelegated && + stacker.amountLocked > 0 && + stacker.poolMembers.length === 0 && + !(totalPeriod <= 12) + ) { + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Invalid_Lock_Period", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD, + ), + ), + // StackExtendAuthCommand_Err_Stack_Extend_Not_Locked + fc.record({ + wallet: fc.constantFrom(...wallets.values()), + authId: fc.nat(), + extendCount: fc.integer({ min: 1, max: 12 }), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (r: { + wallet: Wallet; + extendCount: number; + authId: number; + currentCycle: number; + }) => + new StackExtendAuthCommand_Err( + r.wallet, + r.extendCount, + r.authId, + r.currentCycle, + function ( + this: StackExtendAuthCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const firstRewardCycle = + stacker.firstLockedRewardCycle < this.currentCycle + ? this.currentCycle + : stacker.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + if ( + model.stackingMinimum > 0 && + !stacker.isStacking && + !stacker.isStackingSolo && + !stacker.hasDelegated && + !(stacker.amountLocked > 0) && + stacker.poolMembers.length === 0 && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stack_Extend_Not_Locked", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts new file mode 100644 index 0000000000..e3deed040c --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts @@ -0,0 +1,123 @@ +import { poxAddressToTuple } from "@stacks/stacking"; +import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { currentCycle } from "./pox_Commands"; +import { Cl } from "@stacks/transactions"; +import { expect } from "vitest"; +import { tx } from "@hirosystems/clarinet-sdk"; + +type CheckFunc = ( + this: StackExtendAuthCommand_Err, + model: Readonly, +) => boolean; + +export class StackExtendAuthCommand_Err implements PoxCommand { + readonly wallet: Wallet; + readonly extendCount: number; + readonly authId: number; + readonly currentCycle: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `StackExtendAuthCommand` to lock uSTX for stacking. + * + * This command calls `stack-extend` using an `authorization`. + * + * @param wallet - Represents the Stacker's wallet. + * @param extendCount - Represents the cycles to extend the stack with. + * @param authId - Unique auth-id for the authorization. + * @param currentCycle - Represents the current PoX reward cycle. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + wallet: Wallet, + extendCount: number, + authId: number, + currentCycle: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.wallet = wallet; + this.extendCount = extendCount; + this.authId = authId; + this.currentCycle = currentCycle; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const currentRewCycle = currentCycle(real.network); + const stacker = model.stackers.get(this.wallet.stxAddress)!; + + const block = real.network.mineBlock([ + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "set-signer-key-authorization", + [ + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + poxAddressToTuple(this.wallet.btcAddress), + // (period uint) + Cl.uint(this.extendCount), + // (reward-cycle uint) + Cl.uint(currentRewCycle), + // (topic (string-ascii 14)) + Cl.stringAscii("stack-extend"), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (allowed bool) + Cl.bool(true), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + tx.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "stack-extend", + [ + // (extend-count uint) + Cl.uint(this.extendCount), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.wallet.btcAddress), + // (signer-sig (optional (buff 65))) + Cl.none(), + // (signer-key (buff 33)) + Cl.bufferFromHex(this.wallet.signerPubKey), + // (max-amount uint) + Cl.uint(stacker.amountLocked), + // (auth-id uint) + Cl.uint(this.authId), + ], + this.wallet.stxAddress, + ), + ]); + + expect(block[0].result).toBeOk(Cl.bool(true)); + expect(block[1].result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.wallet.label}`, + "stack-extend-auth", + "extend-count", + this.extendCount.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.wallet.label} stack-extend auth extend-count ${this.extendCount}`; + } +} From 3f3da1639b3b3c916ad1e4ce79292c021b4c1f5c Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 31 May 2024 15:58:27 +0300 Subject: [PATCH 0219/1400] Add unhappy path cases for `DelegateStackExtendCommand_Err` This commit: - adds 4 unhappy path cases for the `delegate-stack-extend` PoX-4 method. - adds the command run tracking inside the `check` method. - adds the expected `delegate-stack-extend` PoX-4 error to the `POX_4_ERRORS` dictionary. --- .../tests/pox-4/err_Commands.ts | 302 ++++++++++++++++++ .../pox_DelegateStackExtendCommand_Err.ts | 96 ++++++ 2 files changed, 398 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 83a9566605..1ad4815cf7 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -28,6 +28,7 @@ import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err"; import { StackExtendSigCommand_Err } from "./pox_StackExtendSigCommand_Err"; import { StackExtendAuthCommand_Err } from "./pox_StackExtendAuthCommand_Err"; +import { DelegateStackExtendCommand_Err } from "./pox_DelegateStackExtendCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_INSUFFICIENT_FUNDS: 1, @@ -41,6 +42,7 @@ const POX_4_ERRORS = { ERR_DELEGATION_TOO_MUCH_LOCKED: 22, ERR_STACK_EXTEND_NOT_LOCKED: 26, ERR_STACKING_IS_DELEGATED: 30, + ERR_STACKING_NOT_DELEGATED: 31, ERR_DELEGATION_ALREADY_REVOKED: 34, }; @@ -1676,6 +1678,306 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, ), ), + // DelegateStackExtendCommand_Err_Stacking_Invalid_Lock_Period + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.constant(100000000000000), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + currentCycle: fc.constant(currentCycle(network)), + }).map((additionalProps) => ({ + ...r, + stacker: wallets.get(additionalProps.stacker)!, + currentCycle: additionalProps.currentCycle, + })); + }).map( + (final) => + new DelegateStackExtendCommand_Err( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + function ( + this: DelegateStackExtendCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + const firstRewardCycle = + this.currentCycle > stackerWallet.firstLockedRewardCycle + ? this.currentCycle + : stackerWallet.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stackerWallet.amountLocked; + + if ( + stackerWallet.amountLocked > 0 && + stackerWallet.hasDelegated === true && + stackerWallet.isStacking === true && + stackerWallet.delegatedTo === this.operator.stxAddress && + !(stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && + stackerWallet.delegatedMaxAmount >= stackedAmount && + operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + operatorWallet.lockedAddresses.includes( + this.stacker.stxAddress, + ) && + !(totalPeriod <= 12) + ) { + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stacking_Invalid_Lock_Period", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD, + ), + ), + // DelegateStackExtendCommand_Err_Stacking_Not_Delegated + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.integer({ min: 1, max: 11 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc + .record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + currentCycle: fc.constant(currentCycle(network)), + }) + .map((additionalProps) => ({ + ...r, + stacker: wallets.get(additionalProps.stacker)!, + currentCycle: additionalProps.currentCycle, + })); + }).map( + (final) => + new DelegateStackExtendCommand_Err( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + function ( + this: DelegateStackExtendCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + const firstRewardCycle = + this.currentCycle > stackerWallet.firstLockedRewardCycle + ? this.currentCycle + : stackerWallet.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stackerWallet.amountLocked; + + if ( + stackerWallet.amountLocked > 0 && + !(stackerWallet.hasDelegated === true) && + stackerWallet.isStacking === true && + stackerWallet.isStackingSolo === true && + !(stackerWallet.delegatedTo === this.operator.stxAddress) && + !(stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && + !(stackerWallet.delegatedMaxAmount >= stackedAmount) && + !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + !operatorWallet.lockedAddresses.includes( + this.stacker.stxAddress, + ) && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stacking_Not_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NOT_DELEGATED, + ), + ), + // DelegateStackExtendCommand_Err_Stack_Extend_Not_Locked + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.integer({ min: 1, max: 11 }), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + currentCycle: fc.constant(currentCycle(network)), + }).map((additionalProps) => ({ + ...r, + stacker: wallets.get(additionalProps.stacker)!, + currentCycle: additionalProps.currentCycle, + })); + }).map( + (final) => + new DelegateStackExtendCommand_Err( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + function ( + this: DelegateStackExtendCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + const firstRewardCycle = + this.currentCycle > stackerWallet.firstLockedRewardCycle + ? this.currentCycle + : stackerWallet.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stackerWallet.amountLocked; + if ( + !(stackerWallet.amountLocked > 0) && + stackerWallet.hasDelegated === true && + !(stackerWallet.isStacking === true) && + !(stackerWallet.delegatedTo === this.operator.stxAddress) && + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight && + stackerWallet.delegatedMaxAmount >= stackedAmount && + !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + !operatorWallet.lockedAddresses.includes( + this.stacker.stxAddress, + ) && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stack_Extend_Not_Locked", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, + ), + ), + // DelegateStackExtendCommand_Err_Stacking_Permission_Denied + fc.record({ + operator: fc.constantFrom(...wallets.values()), + extendCount: fc.integer({ min: 1, max: 11 }), + stacker: fc.constantFrom(...wallets.values()), + currentCycle: fc.constant(currentCycle(network)), + }).map( + (final) => + new DelegateStackExtendCommand_Err( + final.operator, + final.stacker, + final.extendCount, + final.currentCycle, + function ( + this: DelegateStackExtendCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + const firstRewardCycle = + this.currentCycle > stackerWallet.firstLockedRewardCycle + ? this.currentCycle + : stackerWallet.firstLockedRewardCycle; + const firstExtendCycle = Math.floor( + (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + REWARD_CYCLE_LENGTH, + ); + const lastExtendCycle = firstExtendCycle + this.extendCount - 1; + const totalPeriod = lastExtendCycle - firstRewardCycle + 1; + const newUnlockHeight = + REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + + FIRST_BURNCHAIN_BLOCK_HEIGHT; + const stackedAmount = stackerWallet.amountLocked; + + if ( + stackerWallet.amountLocked > 0 && + !(stackerWallet.hasDelegated === true) && + stackerWallet.isStacking === true && + !(stackerWallet.delegatedTo === this.operator.stxAddress) && + !(stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && + !(stackerWallet.delegatedMaxAmount >= stackedAmount) && + !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + operatorWallet.lockedAddresses.includes( + this.stacker.stxAddress, + ) && + totalPeriod <= 12 + ) { + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stacking_Permission_Denied", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts new file mode 100644 index 0000000000..830fb7d182 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts @@ -0,0 +1,96 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +type CheckFunc = ( + this: DelegateStackExtendCommand_Err, + model: Readonly, +) => boolean; + +export class DelegateStackExtendCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly extendCount: number; + readonly currentCycle: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a `DelegateStackExtendCommand_Err` to extend the unlock + * height as a Pool Operator on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. + * @param stacker - Represents the STacker's wallet. + * @param extendCount - Represents the cycles to be expended. + * @param currentCycle - Represents the current PoX reward cycle. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + stacker: Wallet, + extendCount: number, + currentCycle: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.stacker = stacker; + this.extendCount = extendCount; + this.currentCycle = currentCycle; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + + // Act + const delegateStackExtend = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-extend", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (extend-count uint) + Cl.uint(this.extendCount), + ], + this.operator.stxAddress, + ); + + expect(delegateStackExtend.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label} Ӿ ${this.stacker.label}`, + "delegate-stack-extend", + "extend count", + this.extendCount.toString(), + "new unlock height", + stackerWallet.unlockHeight.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} Ӿ ${this.stacker.label} delegate-stack-extend extend count ${this.extendCount}`; + } +} From 115a3855c4e9ef9fe8c77448b93ba27c4e0152e9 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 31 May 2024 16:09:19 +0300 Subject: [PATCH 0220/1400] Update `err_Commands` to include the `delegatedUntilBurnHt` none branch This commit adds the undefined check in the `err_Commands` comparisons that involve `delegatedUntilBurnHt`. --- .../tests/pox-4/err_Commands.ts | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 1ad4815cf7..9287df41a2 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -794,7 +794,8 @@ export function ErrCommands( Number(this.amountUstx) <= stackerWallet.ustxBalance && Number(this.amountUstx) >= model.stackingMinimum && operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt + (stackerWallet.delegatedUntilBurnHt === undefined || + this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) ) { model.trackCommandRun( "DelegateStackStxCommand_Err_Delegation_Too_Much_Locked", @@ -870,7 +871,8 @@ export function ErrCommands( Number(this.amountUstx) <= stackerWallet.ustxBalance && Number(this.amountUstx) >= model.stackingMinimum && !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt + (stackerWallet.delegatedUntilBurnHt === undefined || + this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) ) { model.trackCommandRun( "DelegateStackStxCommand_Err_Stacking_Permission_Denied_1", @@ -943,7 +945,8 @@ export function ErrCommands( Number(this.amountUstx) <= stackerWallet.ustxBalance && Number(this.amountUstx) >= model.stackingMinimum && !(operatorWallet.poolMembers.includes(this.stacker.stxAddress)) && - !(this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) + !(stackerWallet.delegatedUntilBurnHt === undefined || + this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) ) { model.trackCommandRun( "DelegateStackStxCommand_Err_Stacking_Permission_Denied_2", @@ -1740,7 +1743,8 @@ export function ErrCommands( stackerWallet.hasDelegated === true && stackerWallet.isStacking === true && stackerWallet.delegatedTo === this.operator.stxAddress && - !(stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && + !(stackerWallet.delegatedUntilBurnHt === undefined || + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && stackerWallet.delegatedMaxAmount >= stackedAmount && operatorWallet.poolMembers.includes(this.stacker.stxAddress) && operatorWallet.lockedAddresses.includes( @@ -1822,7 +1826,8 @@ export function ErrCommands( stackerWallet.isStacking === true && stackerWallet.isStackingSolo === true && !(stackerWallet.delegatedTo === this.operator.stxAddress) && - !(stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && + !(stackerWallet.delegatedUntilBurnHt === undefined || + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && !(stackerWallet.delegatedMaxAmount >= stackedAmount) && !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && !operatorWallet.lockedAddresses.includes( @@ -1900,7 +1905,8 @@ export function ErrCommands( stackerWallet.hasDelegated === true && !(stackerWallet.isStacking === true) && !(stackerWallet.delegatedTo === this.operator.stxAddress) && - stackerWallet.delegatedUntilBurnHt >= newUnlockHeight && + (stackerWallet.delegatedUntilBurnHt === undefined || + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && stackerWallet.delegatedMaxAmount >= stackedAmount && !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && !operatorWallet.lockedAddresses.includes( @@ -1961,7 +1967,8 @@ export function ErrCommands( !(stackerWallet.hasDelegated === true) && stackerWallet.isStacking === true && !(stackerWallet.delegatedTo === this.operator.stxAddress) && - !(stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && + !(stackerWallet.delegatedUntilBurnHt === undefined || + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && !(stackerWallet.delegatedMaxAmount >= stackedAmount) && !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && operatorWallet.lockedAddresses.includes( From 6eb8eec5d561eaa41c7389971a798980fe74b45b Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 31 May 2024 16:30:50 +0300 Subject: [PATCH 0221/1400] Add unhappy path cases for `DelegateStackIncreaseCommand_Err` This commit: - adds 4 unhappy path cases for the `delegate-stack-increase` PoX-4 method. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 258 ++++++++++++++++++ .../pox_DelegateStackIncreaseCommand_Err.ts | 95 +++++++ 2 files changed, 353 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 9287df41a2..d3833003eb 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -29,6 +29,7 @@ import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err import { StackExtendSigCommand_Err } from "./pox_StackExtendSigCommand_Err"; import { StackExtendAuthCommand_Err } from "./pox_StackExtendAuthCommand_Err"; import { DelegateStackExtendCommand_Err } from "./pox_DelegateStackExtendCommand_Err"; +import { DelegateStackIncreaseCommand_Err } from "./pox_DelegateStackIncreaseCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_INSUFFICIENT_FUNDS: 1, @@ -1985,6 +1986,263 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, ), ), + // DelegateStackIncreaseCommand_Err_Stacking_Insufficient_Funds + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(Number.MAX_SAFE_INTEGER), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })); + }).map( + (final) => + new DelegateStackIncreaseCommand_Err( + final.operator, + final.stacker, + final.increaseBy, + function ( + this: DelegateStackIncreaseCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + if ( + stackerWallet.amountLocked > 0 && + stackerWallet.hasDelegated === true && + stackerWallet.isStacking === true && + this.increaseBy > 0 && + operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + !(stackerWallet.amountUnlocked >= this.increaseBy) && + !( + stackerWallet.delegatedMaxAmount >= + this.increaseBy + stackerWallet.amountLocked + ) && + operatorWallet.lockedAddresses.indexOf( + this.stacker.stxAddress, + ) > -1 + ) { + model.trackCommandRun( + "DelegateStackIncreaseCommand_Err_Stacking_Insufficient_Funds", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS, + ), + ), + // DelegateStackIncreaseCommand_Err_Stacking_Invalid_Amount + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.constant(0), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })); + }).map( + (final) => + new DelegateStackIncreaseCommand_Err( + final.operator, + final.stacker, + final.increaseBy, + function ( + this: DelegateStackIncreaseCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + if ( + stackerWallet.amountLocked > 0 && + stackerWallet.hasDelegated === true && + stackerWallet.isStacking === true && + !(this.increaseBy > 0) && + operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + stackerWallet.amountUnlocked >= this.increaseBy && + stackerWallet.delegatedMaxAmount >= + this.increaseBy + stackerWallet.amountLocked && + operatorWallet.lockedAddresses.indexOf( + this.stacker.stxAddress, + ) > -1 + ) { + model.trackCommandRun( + "DelegateStackIncreaseCommand_Err_Stacking_Invalid_Amount", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, + ), + ), + // DelegateStackIncreaseCommand_Err_Stacking_Not_Delegated + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })); + }).map( + (final) => + new DelegateStackIncreaseCommand_Err( + final.operator, + final.stacker, + final.increaseBy, + function ( + this: DelegateStackIncreaseCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + if ( + stackerWallet.amountLocked > 0 && + !(stackerWallet.hasDelegated === true) && + stackerWallet.isStacking === true && + stackerWallet.isStackingSolo === true && + this.increaseBy > 0 && + !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + stackerWallet.amountUnlocked >= this.increaseBy && + !( + stackerWallet.delegatedMaxAmount >= + this.increaseBy + stackerWallet.amountLocked + ) && + !( + operatorWallet.lockedAddresses.indexOf( + this.stacker.stxAddress, + ) > -1 + ) + ) { + model.trackCommandRun( + "DelegateStackIncreaseCommand_Err_Stacking_Not_Delegated", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_NOT_DELEGATED, + ), + ), + // DelegateStackIncreaseCommand_Err_Stacking_Permission_Denied + fc.record({ + operator: fc.constantFrom(...wallets.values()), + increaseBy: fc.nat(), + }).chain((r) => { + const operator = stackers.get(r.operator.stxAddress)!; + const delegatorsList = operator.poolMembers; + + const availableStackers = delegatorsList.filter((delegator) => { + const delegatorWallet = stackers.get(delegator)!; + return delegatorWallet.unlockHeight > nextCycleFirstBlock(network); + }); + + const availableStackersOrFallback = availableStackers.length === 0 + ? [r.operator.stxAddress] + : availableStackers; + + return fc.record({ + stacker: fc.constantFrom(...availableStackersOrFallback), + }).map((stacker) => ({ + ...r, + stacker: wallets.get(stacker.stacker)!, + })); + }).map( + (final) => + new DelegateStackIncreaseCommand_Err( + final.operator, + final.stacker, + final.increaseBy, + function ( + this: DelegateStackIncreaseCommand_Err, + model: Readonly, + ): boolean { + const operatorWallet = model.stackers.get( + this.operator.stxAddress, + )!; + const stackerWallet = model.stackers.get( + this.stacker.stxAddress, + )!; + + if ( + stackerWallet.amountLocked > 0 && + !(stackerWallet.hasDelegated === true) && + stackerWallet.isStacking === true && + this.increaseBy > 0 && + !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + stackerWallet.amountUnlocked >= this.increaseBy && + !( + stackerWallet.delegatedMaxAmount >= + this.increaseBy + stackerWallet.amountLocked + ) && + operatorWallet.lockedAddresses.indexOf( + this.stacker.stxAddress, + ) > -1 + ) { + model.trackCommandRun( + "DelegateStackIncreaseCommand_Err_Stacking_Permission_Denied", + ); + return true; + } else return false; + }, + POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand_Err.ts new file mode 100644 index 0000000000..d54853dcb6 --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand_Err.ts @@ -0,0 +1,95 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { poxAddressToTuple } from "@stacks/stacking"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +type CheckFunc = ( + this: DelegateStackIncreaseCommand_Err, + model: Readonly, +) => boolean; + +export class DelegateStackIncreaseCommand_Err implements PoxCommand { + readonly operator: Wallet; + readonly stacker: Wallet; + readonly increaseBy: number; + readonly checkFunc: CheckFunc; + readonly errorCode: number; + + /** + * Constructs a DelegateStackIncreaseCommand_Err to increase the uSTX amount + * previously locked on behalf of a Stacker. + * + * @param operator - Represents the Pool Operator's wallet. + * @param stacker - Represents the Stacker's wallet. + * @param increaseBy - Represents the locked amount to be increased by. + * @param checkFunc - A function to check constraints for running this command. + * @param errorCode - The expected error code when running this command. + */ + constructor( + operator: Wallet, + stacker: Wallet, + increaseBy: number, + checkFunc: CheckFunc, + errorCode: number, + ) { + this.operator = operator; + this.stacker = stacker; + this.increaseBy = increaseBy; + this.checkFunc = checkFunc; + this.errorCode = errorCode; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); + + run(model: Stub, real: Real): void { + const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; + const prevLocked = stackerWallet.amountLocked; + // Act + const delegateStackIncrease = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "delegate-stack-increase", + [ + // (stacker principal) + Cl.principal(this.stacker.stxAddress), + // (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + poxAddressToTuple(this.operator.btcAddress), + // (increase-by uint) + Cl.uint(this.increaseBy), + ], + this.operator.stxAddress, + ); + + // Assert + expect(delegateStackIncrease.result).toBeErr(Cl.int(this.errorCode)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.operator.label} Ӿ ${this.stacker.label}`, + "delegate-stack-increase", + "increased by", + this.increaseBy.toString(), + "previously locked", + prevLocked.toString(), + "total locked", + stackerWallet.amountLocked.toString(), + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.operator.label} delegate-stack-increase by ${this.increaseBy}`; + } +} From 3492365f606088d4e150e13aa5b00b39596684a7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 May 2024 12:08:00 -0500 Subject: [PATCH 0222/1400] feat: use burnview consensus hash to initialize the sortdb handles for nakamoto blocks --- stackslib/src/chainstate/burn/db/sortdb.rs | 34 ++++++++++++-- stackslib/src/chainstate/nakamoto/mod.rs | 37 +++++++++++++++ stackslib/src/chainstate/nakamoto/tenure.rs | 47 +++++++++++++++++++ .../stacks-node/src/nakamoto_node/miner.rs | 2 +- 4 files changed, 115 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 3e5c27ba84..1c6fe26606 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -70,7 +70,7 @@ use crate::chainstate::coordinator::{ use crate::chainstate::nakamoto::{NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; use crate::chainstate::stacks::boot::PoxStartCycleInfo; -use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::index::marf::{MARFOpenOpts, MarfConnection, MARF}; use crate::chainstate::stacks::index::storage::TrieFileStorage; use crate::chainstate::stacks::index::{ @@ -2674,9 +2674,24 @@ impl SortitionDB { return Err(db_error::NotFoundError); } }; - let snapshot = - SortitionDB::get_block_snapshot_consensus(&self.conn(), &header.consensus_hash)? - .ok_or(db_error::NotFoundError)?; + // if its a nakamoto block, we want to use the burnchain view of the block + let burn_view = match &header.anchored_header { + StacksBlockHeaderTypes::Epoch2(_) => header.consensus_hash, + StacksBlockHeaderTypes::Nakamoto(_) => { + NakamotoChainState::get_tenure_for_block(chainstate.db(), &header) + .map_err(|e| { + warn!( + "Failed to get tenure for block header: {:?}", e; + "block_id" => %stacks_block_id, + ); + db_error::NotFoundError + })? + .burn_view_consensus_hash + } + }; + + let snapshot = SortitionDB::get_block_snapshot_consensus(&self.conn(), &burn_view)? + .ok_or(db_error::NotFoundError)?; Ok(self.index_handle(&snapshot.sortition_id)) } @@ -4604,6 +4619,17 @@ impl SortitionDB { self.index_handle(&sortition_id) } + /// Open an index handle at the given consensus hash + /// Returns a db_error::NotFoundError if `ch` cannot be found + pub fn index_handle_at_ch<'a>( + &'a self, + ch: &ConsensusHash, + ) -> Result, db_error> { + let sortition_id = Self::get_sortition_id_by_consensus(self.conn(), ch)? + .ok_or_else(|| db_error::NotFoundError)?; + Ok(self.index_handle(&sortition_id)) + } + /// Open a tx handle at the burn chain tip pub fn tx_begin_at_tip<'a>(&'a mut self) -> SortitionHandleTx<'a> { let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap(); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d3541fae22..3c8946280c 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1344,6 +1344,43 @@ impl NakamotoChainState { return Err(ChainstateError::InvalidStacksBlock(msg.into())); } + // set the sortition handle's pointer to the block's burnchain view. + // this is either: + // (1) set by the tenure change tx if one exists + // (2) the same as parent block id + + let burnchain_view = if let Some(tenure_change) = next_ready_block.get_tenure_tx_payload() { + tenure_change.burn_view_consensus_hash + } else { + let Some(current_tenure) = Self::get_highest_nakamoto_tenure_change_by_tenure_id( + &chainstate_tx.tx, + &next_ready_block.header.consensus_hash, + )? + else { + warn!( + "Cannot process Nakamoto block: failed to find active tenure"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + return Ok(None); + }; + current_tenure.burn_view_consensus_hash + }; + let Some(burnchain_view_sortid) = + SortitionDB::get_sortition_id_by_consensus(sort_tx.tx(), &burnchain_view)? + else { + warn!( + "Cannot process Nakamoto block: failed to find Sortition ID associated with burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "burn_view_consensus_hash" => %burnchain_view, + ); + return Ok(None); + }; + + sort_tx.context.chain_tip = burnchain_view_sortid; + // find commit and sortition burns if this is a tenure-start block let Ok(new_tenure) = next_ready_block.is_wellformed_tenure_start_block() else { return Err(ChainstateError::InvalidStacksBlock( diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 2edf9f1e87..f68e0ee90a 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -595,6 +595,53 @@ impl NakamotoChainState { Ok(tenure_opt) } + /// Get the tenure change that was active for a given block header + /// If a tenure change occurred during this block, it will be returned + pub fn get_tenure_for_block( + headers_conn: &Connection, + block_header: &StacksHeaderInfo, + ) -> Result { + let sql = "SELECT * FROM nakamoto_tenures WHERE block_id = ? LIMIT 1"; + let tenure_opt: Option = + query_row(headers_conn, sql, &[block_header.index_block_hash()])?; + if let Some(tenure) = tenure_opt { + return Ok(tenure); + } + // there wasn't a tenure change at that block, so we need to figure out the active tenure + // use the "tenure height" to query for `num_blocks_confirmed` + let block_height = block_header.stacks_block_height; + let tenure_start_height = Self::get_nakamoto_tenure_start_block_header( + headers_conn, + &block_header.consensus_hash, + )? + .ok_or_else(|| ChainstateError::NoSuchBlockError)? + .stacks_block_height; + let blocks_confirmed = u64_to_sql(block_height.saturating_sub(tenure_start_height))?; + // querying by blocks confirmed doesn't work if cause is blockfound, + // so don't try and instead failback to directly querying it + let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ? + AND num_blocks_confirmed <= ? + AND cause <> ? + ORDER BY num_blocks_confirmed DESC LIMIT 1"; + if let Some(tenure) = query_row( + headers_conn, + sql, + params![ + &block_header.consensus_hash, + blocks_confirmed, + TenureChangeCause::BlockFound.as_u8() + ], + )? { + return Ok(tenure); + } + // failback to the BlockFound tenure change + Self::get_highest_nakamoto_tenure_change_by_tenure_id( + headers_conn, + &block_header.consensus_hash, + )? + .ok_or_else(|| ChainstateError::NoSuchBlockError) + } + /// Get the highest non-empty processed tenure on the canonical sortition history. pub fn get_highest_nakamoto_tenure( headers_conn: &Connection, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3ebb12fd9f..c34d75966e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -481,7 +481,7 @@ impl BlockMinerThread { ) .expect("FATAL: could not open sortition DB"); - let mut sortition_handle = sort_db.index_handle_at_tip(); + let mut sortition_handle = sort_db.index_handle_at_ch(&block.header.consensus_hash)?; let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; NakamotoChainState::accept_block( &chainstate_config, From 50bcac2c1cdbc221024fb3f9792b757135b23ab6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 May 2024 12:49:16 -0500 Subject: [PATCH 0223/1400] naka miner should use its consensus hash view as the sortition handle --- testnet/stacks-node/src/nakamoto_node/miner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index c34d75966e..d256039b2e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -765,7 +765,7 @@ impl BlockMinerThread { let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, &burn_db - .index_handle_at_block(&chain_state, &parent_block_id) + .index_handle_at_ch(&self.burn_block.consensus_hash) .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &mut mem_pool, &parent_block_info.stacks_parent_header, From da87ec804093beec35deba5cf099c57f8ba4ec90 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 31 May 2024 22:28:09 +0300 Subject: [PATCH 0224/1400] Order imports inside `err_Commands` --- .../tests/pox-4/err_Commands.ts | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index d3833003eb..4154de6c92 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -1,4 +1,5 @@ import fc from "fast-check"; +import { Simnet } from "@hirosystems/clarinet-sdk"; import { PoxCommand, Stacker, @@ -6,16 +7,6 @@ import { StxAddress, Wallet, } from "./pox_CommandModel"; -import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; -import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; -import { Simnet } from "@hirosystems/clarinet-sdk"; -import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err"; -import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; -import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationCommitSigCommand_Err"; -import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCommitAuthCommand_Err"; -import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; -import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggregationCommitIndexedAuthCommand_Err"; -import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncreaseCommand_Err"; import { currentCycle, currentCycleFirstBlock, @@ -23,13 +14,22 @@ import { nextCycleFirstBlock, REWARD_CYCLE_LENGTH, } from "./pox_Commands"; -import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; -import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; -import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err"; -import { StackExtendSigCommand_Err } from "./pox_StackExtendSigCommand_Err"; -import { StackExtendAuthCommand_Err } from "./pox_StackExtendAuthCommand_Err"; import { DelegateStackExtendCommand_Err } from "./pox_DelegateStackExtendCommand_Err"; import { DelegateStackIncreaseCommand_Err } from "./pox_DelegateStackIncreaseCommand_Err"; +import { DelegateStackStxCommand_Err } from "./pox_DelegateStackStxCommand_Err"; +import { DelegateStxCommand_Err } from "./pox_DelegateStxCommand_Err"; +import { RevokeDelegateStxCommand_Err } from "./pox_RevokeDelegateStxCommand_Err"; +import { StackAggregationCommitAuthCommand_Err } from "./pox_StackAggregationCommitAuthCommand_Err"; +import { StackAggregationCommitIndexedAuthCommand_Err } from "./pox_StackAggregationCommitIndexedAuthCommand_Err"; +import { StackAggregationCommitIndexedSigCommand_Err } from "./pox_StackAggregationCommitIndexedSigCommand_Err"; +import { StackAggregationCommitSigCommand_Err } from "./pox_StackAggregationCommitSigCommand_Err"; +import { StackAggregationIncreaseCommand_Err } from "./pox_StackAggregationIncreaseCommand_Err"; +import { StackExtendAuthCommand_Err } from "./pox_StackExtendAuthCommand_Err"; +import { StackExtendSigCommand_Err } from "./pox_StackExtendSigCommand_Err"; +import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err"; +import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; +import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; +import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_INSUFFICIENT_FUNDS: 1, From 9365892102d9c2c6285621399ef168cbc49271c3 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 31 May 2024 22:47:05 +0300 Subject: [PATCH 0225/1400] Add unhappy path case for `DisallowContractCallerCommand_Err` This commit: - adds one unhappy path case for the `disallow-contract-caller` PoX-4 method. - adds the command run tracking inside the `check` method. --- .../tests/pox-4/err_Commands.ts | 34 +++++++++ .../pox_DisallowContractCallerCommand_Err.ts | 73 +++++++++++++++++++ 2 files changed, 107 insertions(+) create mode 100644 contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand_Err.ts diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 4154de6c92..c24c028f8d 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -30,6 +30,7 @@ import { StackIncreaseAuthCommand_Err } from "./pox_StackIncreaseAuthCommand_Err import { StackIncreaseSigCommand_Err } from "./pox_StackIncreaseSigCommand_Err"; import { StackStxAuthCommand_Err } from "./pox_StackStxAuthCommand_Err"; import { StackStxSigCommand_Err } from "./pox_StackStxSigCommand_Err"; +import { DisallowContractCallerCommand_Err } from "./pox_DisallowContractCallerCommand_Err"; const POX_4_ERRORS = { ERR_STACKING_INSUFFICIENT_FUNDS: 1, @@ -2243,6 +2244,39 @@ export function ErrCommands( POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, ), ), + // DisallowContractCallerCommand_Err + fc.record({ + stacker: fc.constantFrom(...wallets.values()), + callerToDisallow: fc.constantFrom(...wallets.values()), + }).map( + (r: { stacker: Wallet; callerToDisallow: Wallet }) => + new DisallowContractCallerCommand_Err( + r.stacker, + r.callerToDisallow, + function ( + this: DisallowContractCallerCommand_Err, + model: Readonly, + ): boolean { + const stacker = model.stackers.get(this.stacker.stxAddress)!; + const callerToDisallow = model.stackers.get( + this.callerToDisallow.stxAddress, + )!; + if ( + !stacker.allowedContractCallers.includes( + this.callerToDisallow.stxAddress, + ) && + !callerToDisallow.callerAllowedBy.includes( + this.stacker.stxAddress, + ) === true + ) { + model.trackCommandRun( + "DisallowContractCallerCommand_Err", + ); + return true; + } else return false; + }, + ), + ), ]; return cmds; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand_Err.ts new file mode 100644 index 0000000000..028457b41e --- /dev/null +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand_Err.ts @@ -0,0 +1,73 @@ +import { + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel.ts"; +import { expect } from "vitest"; +import { Cl } from "@stacks/transactions"; + +type CheckFunc = ( + this: DisallowContractCallerCommand_Err, + model: Readonly, +) => boolean; + +export class DisallowContractCallerCommand_Err implements PoxCommand { + readonly stacker: Wallet; + readonly callerToDisallow: Wallet; + readonly checkFunc: CheckFunc; + + /** + * Constructs a `DisallowContractCallerComand` to revoke authorization + * for calling stacking methods. + * + * @param stacker - Represents the `Stacker`'s wallet. + * @param callerToDisallow - The `contract-caller` to be revoked. + * @param checkFunc - A function to check constraints for running this command. + */ + constructor(stacker: Wallet, callerToDisallow: Wallet, checkFunc: CheckFunc) { + this.stacker = stacker; + this.callerToDisallow = callerToDisallow; + this.checkFunc = checkFunc; + } + + check = (model: Readonly): boolean => this.checkFunc.call(this, model); // Constraints for running this command include: + // - The Caller to be disallowed must have been previously allowed + // by the Operator. + + run(model: Stub, real: Real): void { + // Act + const disallowContractCaller = real.network.callPublicFn( + "ST000000000000000000002AMW42H.pox-4", + "disallow-contract-caller", + [ + // (caller principal) + Cl.principal(this.callerToDisallow.stxAddress), + ], + this.stacker.stxAddress, + ); + + // Assert + expect(disallowContractCaller.result).toBeOk(Cl.bool(false)); + + // Log to console for debugging purposes. This is not necessary for the + // test to pass but it is useful for debugging and eyeballing the test. + logCommand( + `₿ ${model.burnBlockHeight}`, + `✗ ${this.stacker.label}`, + "disallow-contract-caller", + this.callerToDisallow.label, + ); + + // Refresh the model's state if the network gets to the next reward cycle. + model.refreshStateForNextRewardCycle(real); + } + + toString() { + // fast-check will call toString() in case of errors, e.g. property failed. + // It will then make a minimal counterexample, a process called 'shrinking' + // https://github.com/dubzzz/fast-check/issues/2864#issuecomment-1098002642 + return `${this.stacker.label} disallow-contract-caller ${this.callerToDisallow.label}`; + } +} From b74377cc105dff99b88c35cb94b563bedd344171 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 May 2024 15:05:30 -0500 Subject: [PATCH 0226/1400] when setting the sort_tx context in naka block processing, always unset it afterwards --- stackslib/src/chainstate/nakamoto/mod.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 3c8946280c..4fec156e6f 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1379,8 +1379,6 @@ impl NakamotoChainState { return Ok(None); }; - sort_tx.context.chain_tip = burnchain_view_sortid; - // find commit and sortition burns if this is a tenure-start block let Ok(new_tenure) = next_ready_block.is_wellformed_tenure_start_block() else { return Err(ChainstateError::InvalidStacksBlock( @@ -1418,7 +1416,12 @@ impl NakamotoChainState { // though it will always be None), which gets the borrow-checker to believe that it's safe // to access `stacks_chain_state` again. In the `Ok(..)` case, it's instead sufficient so // simply commit the block before beginning the second transaction to mark it processed. - let (ok_opt, err_opt) = match NakamotoChainState::append_block( + + // set the sortition tx's tip to the burnchain view -- we must unset this after appending the block, + // so we wrap this call in a closure to make sure that the unsetting is infallible + let prior_sort_tip = + std::mem::replace(&mut sort_tx.context.chain_tip, burnchain_view_sortid); + let (ok_opt, err_opt) = (|clarity_instance| match NakamotoChainState::append_block( &mut chainstate_tx, clarity_instance, sort_tx, @@ -1437,7 +1440,9 @@ impl NakamotoChainState { ) { Ok(next_chain_tip_info) => (Some(next_chain_tip_info), None), Err(e) => (None, Some(e)), - }; + })(clarity_instance); + + sort_tx.context.chain_tip = prior_sort_tip; if let Some(e) = err_opt { // force rollback From 54aa3b91598c4e05df325a0d830794efb0009a81 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 31 May 2024 18:11:09 -0400 Subject: [PATCH 0227/1400] chore: address PR feedback --- stacks-common/src/types/mod.rs | 18 +++++++++++ .../burn/operations/leader_block_commit.rs | 2 +- .../chainstate/nakamoto/coordinator/mod.rs | 32 ++++++++----------- stackslib/src/chainstate/nakamoto/mod.rs | 6 ++++ .../src/chainstate/nakamoto/signer_set.rs | 7 ++++ .../src/chainstate/nakamoto/tests/node.rs | 2 +- stackslib/src/net/p2p.rs | 2 +- stackslib/src/net/relay.rs | 2 +- .../stacks-node/src/nakamoto_node/miner.rs | 12 +++++-- .../src/nakamoto_node/sign_coordinator.rs | 6 +++- .../src/tests/nakamoto_integrations.rs | 4 ++- 11 files changed, 65 insertions(+), 28 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index e6e5cf5f79..901d72b40a 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -151,6 +151,24 @@ impl StacksEpochId { StacksEpochId::Epoch30 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, } } + + /// Does this epoch use the nakamoto reward set, or the epoch2 reward set? + /// We use the epoch2 reward set in all pre-3.0 epochs. + /// We also use the epoch2 reward set in the first 3.0 reward cycle. + /// After that, we use the nakamoto reward set. + pub fn uses_nakamoto_reward_set(&self, cur_reward_cycle: u64, first_epoch30_reward_cycle: u64) -> bool { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => false, + StacksEpochId::Epoch30 => cur_reward_cycle > first_epoch30_reward_cycle + } + } } impl std::fmt::Display for StacksEpochId { diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 087a3e3b42..539181f9af 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -662,7 +662,7 @@ impl LeaderBlockCommitOp { check_recipients.sort(); let mut commit_outs = self.commit_outs.clone(); commit_outs.sort(); - for (expected_commit, found_commit) in + for (found_commit, expected_commit) in commit_outs.iter().zip(check_recipients) { if expected_commit.to_burnchain_repr() diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index f703a23486..64950f9059 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -234,7 +234,7 @@ pub fn get_nakamoto_reward_cycle_info( "FATAL: called a nakamoto function outside of epoch 3" ); - // calculating the reward set for the _next_ reward cycle + // calculating the reward set for the current reward cycle let reward_cycle = burnchain .pox_reward_cycle(burn_height) .expect("FATAL: no reward cycle for burn height"); @@ -246,7 +246,7 @@ pub fn get_nakamoto_reward_cycle_info( "prepare_phase_length" => burnchain.pox_constants.prepare_length); let Some((rc_info, anchor_block_header)) = load_nakamoto_reward_set( - burn_height, + reward_cycle, sortition_tip, burnchain, chain_state, @@ -275,7 +275,7 @@ pub fn get_nakamoto_reward_cycle_info( return Ok(Some(rc_info)); } -/// Helper to get the Nakamoto reward set for a given reward cycle, identified by `burn_height`. +/// Helper to get the Nakamoto reward set for a given reward cycle, identified by `reward_cycle`. /// /// In all but the first Nakamoto reward cycle, this will load up the stored reward set from the /// Nakamoto chain state. In the first Nakamoto reward cycle, where the reward set is computed @@ -286,23 +286,20 @@ pub fn get_nakamoto_reward_cycle_info( /// Returns Ok(None) if the reward set is not yet known, but could be known by the time a /// subsequent call is made. pub fn load_nakamoto_reward_set( - burn_height: u64, + reward_cycle: u64, sortition_tip: &SortitionId, burnchain: &Burnchain, chain_state: &mut StacksChainState, sort_db: &SortitionDB, provider: &U, ) -> Result, Error> { - let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? - .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", burn_height)); - - let reward_cycle = burnchain - .pox_reward_cycle(burn_height) - .expect("FATAL: no reward cycle for burn height"); - let prepare_end_height = burnchain .reward_cycle_to_block_height(reward_cycle) .saturating_sub(1); + + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), prepare_end_height)? + .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", prepare_end_height)); + let Some(prepare_end_sortition_id) = get_ancestor_sort_id(&sort_db.index_conn(), prepare_end_height, sortition_tip)? else { @@ -320,10 +317,7 @@ pub fn load_nakamoto_reward_set( .pox_reward_cycle(epoch_at_height.start_height) .expect("FATAL: no reward cycle for epoch 3.0 start height"); - if epoch_at_height.epoch_id < StacksEpochId::Epoch30 - || (epoch_at_height.epoch_id == StacksEpochId::Epoch30 - && reward_cycle == first_epoch30_reward_cycle) - { + if !epoch_at_height.epoch_id.uses_nakamoto_reward_set(reward_cycle, first_epoch30_reward_cycle) { // in epoch 2.5, and in the first reward cycle of epoch 3.0, the reward set can *only* be found in the sortition DB. // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward // cycle info in the nakamoto chain state. @@ -490,9 +484,9 @@ pub fn get_nakamoto_next_recipients( chain_state: &mut StacksChainState, burnchain: &Burnchain, ) -> Result, Error> { - let reward_cycle_info = if burnchain.is_reward_cycle_start(sortition_tip.block_height + 1) { + let reward_cycle_info = if burnchain.is_reward_cycle_start(sortition_tip.block_height.saturating_add(1)) { let Some((reward_set, _)) = load_nakamoto_reward_set( - sortition_tip.block_height, + burnchain.pox_reward_cycle(sortition_tip.block_height.saturating_add(1)).expect("Sortition block height has no reward cycle"), &sortition_tip.sortition_id, burnchain, chain_state, @@ -564,7 +558,7 @@ impl< // only proceed if we have processed the _anchor block_ for this reward cycle let Some((rc_info, _)) = load_nakamoto_reward_set( - canonical_sn.block_height, + self.burnchain.pox_reward_cycle(canonical_sn.block_height).expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, &mut self.chain_state_db, @@ -820,7 +814,7 @@ impl< )? .ok_or(DBError::NotFoundError)?; let Some((rc_info, _)) = load_nakamoto_reward_set( - canonical_sn.block_height, + self.burnchain.pox_reward_cycle(canonical_sn.block_height).expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, &mut self.chain_state_db, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 33f6ff2109..23a8895ee2 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2704,6 +2704,12 @@ impl NakamotoChainState { &mut clarity_tx, vote_for_agg_key_ops.clone(), )); + + if signer_set_calc.is_some() { + debug!("Setup block: computed reward set for the next reward cycle"; + "anchor_block_height" => coinbase_height, + "burn_header_height" => burn_header_height); + } } else { signer_set_calc = None; } diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index e776ca41db..f09f41529d 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -217,6 +217,8 @@ impl NakamotoSigners { Ok(slots) } + /// Compute the reward set for the next reward cycle, store it, and write it to the .signers + /// contract. `reward_cycle` is the _current_ reward cycle. pub fn handle_signer_stackerdb_update( clarity: &mut ClarityTransactionConnection, pox_constants: &PoxConstants, @@ -351,6 +353,11 @@ impl NakamotoSigners { Ok(SignerCalculation { events, reward_set }) } + /// If this block is mined in the prepare phase, based on its tenure's `burn_tip_height`. If + /// so, and if we haven't done so yet, then compute the PoX reward set, store it, and update + /// the .signers contract. The stored PoX reward set is the reward set for the next reward + /// cycle, and will be used by the Nakamoto chains coordinator to validate its block-commits + /// and block signatures. pub fn check_and_handle_prepare_phase_start( clarity_tx: &mut ClarityTx, first_block_height: u64, diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 5b08a398fa..c29abcc0fb 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -585,7 +585,7 @@ impl TestStacksNode { // Get the reward set let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let reward_set = load_nakamoto_reward_set( - sort_tip_sn.block_height, + miner.burnchain.pox_reward_cycle(sort_tip_sn.block_height).expect("FATAL: no reward cycle for sortition"), &sort_tip_sn.sortition_id, &miner.burnchain, chainstate, diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 1d195323f5..0b85f177cf 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5499,7 +5499,7 @@ impl PeerNetwork { } let Some((reward_set_info, anchor_block_header)) = load_nakamoto_reward_set( - rc_start_height, + rc, &tip_sn.sortition_id, &self.burnchain, chainstate, diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index a073398f42..d04ba3d88c 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -730,7 +730,7 @@ impl Relayer { let tip = block_sn.sortition_id; let reward_info = match load_nakamoto_reward_set( - block_sn.block_height, + burnchain.pox_reward_cycle(block_sn.block_height).expect("FATAL: block snapshot has no reward cycle"), &tip, burnchain, chainstate, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 69b04c3a53..3308cefd57 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -300,7 +300,9 @@ impl BlockMinerThread { })?; let reward_info = match load_nakamoto_reward_set( - tip.block_height, + self.burnchain + .pox_reward_cycle(tip.block_height.saturating_add(1)) + .expect("FATAL: no reward cycle for sortition"), &tip.sortition_id, &self.burnchain, &mut chain_state, @@ -402,7 +404,9 @@ impl BlockMinerThread { })?; let reward_info = match load_nakamoto_reward_set( - tip.block_height, + self.burnchain + .pox_reward_cycle(tip.block_height.saturating_add(1)) + .expect("FATAL: no reward cycle for sortition"), &tip.sortition_id, &self.burnchain, &mut chain_state, @@ -883,7 +887,9 @@ impl BlockMinerThread { .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::DBError(e)))?; let reward_info = match load_nakamoto_reward_set( - tip.block_height, + self.burnchain + .pox_reward_cycle(tip.block_height.saturating_add(1)) + .expect("FATAL: no reward cycle defined for sortition tip"), &tip.sortition_id, &self.burnchain, &mut chain_state, diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index c0f42e7820..d973114623 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -202,7 +202,11 @@ impl SignCoordinator { ) -> Result { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { - error!("Could not initialize WSTS coordinator for reward set without signer"); + error!("Could not initialize signing coordinator for reward set without signer"); + debug!( + "reward_cycle: {}, reward set: {:?}", + reward_cycle, &reward_set + ); return Err(ChainstateError::NoRegisteredSigners(0)); }; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 2da7444c37..bafd66f6d4 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -375,7 +375,9 @@ pub fn read_and_sign_block_proposal( let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let reward_set = load_nakamoto_reward_set( - tip.block_height, + burnchain + .pox_reward_cycle(tip.block_height.saturating_add(1)) + .unwrap(), &tip.sortition_id, &burnchain, &mut chainstate, From d28e44f5cdab906a99b3c6cc6540884c1f60775c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 31 May 2024 23:40:22 -0400 Subject: [PATCH 0228/1400] chore: cargo fmt --- stacks-common/src/types/mod.rs | 8 ++- .../chainstate/nakamoto/coordinator/mod.rs | 55 ++++++++++++------- .../src/chainstate/nakamoto/tests/node.rs | 5 +- stackslib/src/net/relay.rs | 4 +- 4 files changed, 48 insertions(+), 24 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 901d72b40a..1e0a056c21 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -156,7 +156,11 @@ impl StacksEpochId { /// We use the epoch2 reward set in all pre-3.0 epochs. /// We also use the epoch2 reward set in the first 3.0 reward cycle. /// After that, we use the nakamoto reward set. - pub fn uses_nakamoto_reward_set(&self, cur_reward_cycle: u64, first_epoch30_reward_cycle: u64) -> bool { + pub fn uses_nakamoto_reward_set( + &self, + cur_reward_cycle: u64, + first_epoch30_reward_cycle: u64, + ) -> bool { match self { StacksEpochId::Epoch10 | StacksEpochId::Epoch20 @@ -166,7 +170,7 @@ impl StacksEpochId { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => false, - StacksEpochId::Epoch30 => cur_reward_cycle > first_epoch30_reward_cycle + StacksEpochId::Epoch30 => cur_reward_cycle > first_epoch30_reward_cycle, } } } diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 64950f9059..c8d1adb826 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -296,9 +296,14 @@ pub fn load_nakamoto_reward_set( let prepare_end_height = burnchain .reward_cycle_to_block_height(reward_cycle) .saturating_sub(1); - + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), prepare_end_height)? - .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", prepare_end_height)); + .unwrap_or_else(|| { + panic!( + "FATAL: no epoch defined for burn height {}", + prepare_end_height + ) + }); let Some(prepare_end_sortition_id) = get_ancestor_sort_id(&sort_db.index_conn(), prepare_end_height, sortition_tip)? @@ -317,7 +322,10 @@ pub fn load_nakamoto_reward_set( .pox_reward_cycle(epoch_at_height.start_height) .expect("FATAL: no reward cycle for epoch 3.0 start height"); - if !epoch_at_height.epoch_id.uses_nakamoto_reward_set(reward_cycle, first_epoch30_reward_cycle) { + if !epoch_at_height + .epoch_id + .uses_nakamoto_reward_set(reward_cycle, first_epoch30_reward_cycle) + { // in epoch 2.5, and in the first reward cycle of epoch 3.0, the reward set can *only* be found in the sortition DB. // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward // cycle info in the nakamoto chain state. @@ -484,22 +492,25 @@ pub fn get_nakamoto_next_recipients( chain_state: &mut StacksChainState, burnchain: &Burnchain, ) -> Result, Error> { - let reward_cycle_info = if burnchain.is_reward_cycle_start(sortition_tip.block_height.saturating_add(1)) { - let Some((reward_set, _)) = load_nakamoto_reward_set( - burnchain.pox_reward_cycle(sortition_tip.block_height.saturating_add(1)).expect("Sortition block height has no reward cycle"), - &sortition_tip.sortition_id, - burnchain, - chain_state, - sort_db, - &OnChainRewardSetProvider::new(), - )? - else { - return Ok(None); + let reward_cycle_info = + if burnchain.is_reward_cycle_start(sortition_tip.block_height.saturating_add(1)) { + let Some((reward_set, _)) = load_nakamoto_reward_set( + burnchain + .pox_reward_cycle(sortition_tip.block_height.saturating_add(1)) + .expect("Sortition block height has no reward cycle"), + &sortition_tip.sortition_id, + burnchain, + chain_state, + sort_db, + &OnChainRewardSetProvider::new(), + )? + else { + return Ok(None); + }; + Some(reward_set) + } else { + None }; - Some(reward_set) - } else { - None - }; sort_db .get_next_block_recipients(burnchain, sortition_tip, reward_cycle_info.as_ref()) .map_err(Error::from) @@ -558,7 +569,9 @@ impl< // only proceed if we have processed the _anchor block_ for this reward cycle let Some((rc_info, _)) = load_nakamoto_reward_set( - self.burnchain.pox_reward_cycle(canonical_sn.block_height).expect("FATAL: snapshot has no reward cycle"), + self.burnchain + .pox_reward_cycle(canonical_sn.block_height) + .expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, &mut self.chain_state_db, @@ -814,7 +827,9 @@ impl< )? .ok_or(DBError::NotFoundError)?; let Some((rc_info, _)) = load_nakamoto_reward_set( - self.burnchain.pox_reward_cycle(canonical_sn.block_height).expect("FATAL: snapshot has no reward cycle"), + self.burnchain + .pox_reward_cycle(canonical_sn.block_height) + .expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, &mut self.chain_state_db, diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index c29abcc0fb..201cadb9ac 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -585,7 +585,10 @@ impl TestStacksNode { // Get the reward set let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let reward_set = load_nakamoto_reward_set( - miner.burnchain.pox_reward_cycle(sort_tip_sn.block_height).expect("FATAL: no reward cycle for sortition"), + miner + .burnchain + .pox_reward_cycle(sort_tip_sn.block_height) + .expect("FATAL: no reward cycle for sortition"), &sort_tip_sn.sortition_id, &miner.burnchain, chainstate, diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index d04ba3d88c..022d1edf14 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -730,7 +730,9 @@ impl Relayer { let tip = block_sn.sortition_id; let reward_info = match load_nakamoto_reward_set( - burnchain.pox_reward_cycle(block_sn.block_height).expect("FATAL: block snapshot has no reward cycle"), + burnchain + .pox_reward_cycle(block_sn.block_height) + .expect("FATAL: block snapshot has no reward cycle"), &tip, burnchain, chainstate, From f03bedf9f24c5a5ecd06b4196302b439faf95eb9 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 3 Jun 2024 13:11:19 +0300 Subject: [PATCH 0229/1400] Add tree logging for test-runs statistics --- .../tests/pox-4/pox_CommandModel.ts | 38 ++++++++++++++++++- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts index 3d4b7415f9..653a1acbff 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts @@ -52,9 +52,43 @@ export class Stub { }, ); - orderedStatistics.forEach(([commandName, count]) => { - console.log(`${commandName}: ${count}`); + this.logAsTree(orderedStatistics); + } + + private logAsTree(statistics: [string, number][]) { + const tree: { [key: string]: any } = {}; + + statistics.forEach(([commandName, count]) => { + const split = commandName.split("_"); + let root: string = split[0], + rest: string = "base"; + + if (split.length > 1) { + rest = split.slice(1).join("_"); + } + if (!tree[root]) { + tree[root] = {}; + } + tree[root][rest] = count; }); + + const printTree = (node: any, indent: string = "") => { + const keys = Object.keys(node); + keys.forEach((key, index) => { + const isLast = index === keys.length - 1; + const boxChar = isLast ? "└─ " : "├─ "; + if (key !== "base") { + if (typeof node[key] === "object") { + console.log(`${indent}${boxChar}${key}: ${node[key]["base"]}`); + printTree(node[key], indent + (isLast ? " " : "│ ")); + } else { + console.log(`${indent}${boxChar}${key}: ${node[key]}`); + } + } + }); + }; + + printTree(tree); } refreshStateForNextRewardCycle(real: Real) { From bdd2f186cd9e7b9d0597d42b63b90692523def00 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 3 Jun 2024 17:38:01 +0300 Subject: [PATCH 0230/1400] removed mutants-dependency --- clarity/Cargo.toml | 2 +- libsigner/Cargo.toml | 1 - pox-locking/Cargo.toml | 2 ++ stackslib/Cargo.toml | 1 - testnet/stacks-node/Cargo.toml | 1 - 5 files changed, 3 insertions(+), 4 deletions(-) diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 8420934af1..e10a36806e 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -31,7 +31,6 @@ stacks_common = { package = "stacks-common", path = "../stacks-common", optional rstest = "0.17.0" rstest_reuse = "0.5.0" hashbrown = { workspace = true } -mutants = "0.0.3" [dependencies.serde_json] version = "1.0" @@ -48,6 +47,7 @@ features = ["std"] [dev-dependencies] assert-json-diff = "1.0.0" +mutants = "0.0.3" # a nightly rustc regression (35dbef235 2021-03-02) prevents criterion from compiling # but it isn't necessary for tests: only benchmarks. therefore, commenting out for now. # criterion = "0.3" diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index 2f86d48cf8..7da9801674 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -33,7 +33,6 @@ stackslib = { path = "../stackslib"} thiserror = "1.0" tiny_http = "0.12" wsts = { workspace = true } -mutants = "0.0.3" [dev-dependencies] mutants = "0.0.3" diff --git a/pox-locking/Cargo.toml b/pox-locking/Cargo.toml index 199a58d18e..4fbc9885dc 100644 --- a/pox-locking/Cargo.toml +++ b/pox-locking/Cargo.toml @@ -22,6 +22,8 @@ path = "src/lib.rs" clarity = { package = "clarity", path = "../clarity" } stacks_common = { package = "stacks-common", path = "../stacks-common" } slog = { version = "2.5.2", features = [ "max_level_trace" ] } + +[dev-dependencies] mutants = "0.0.3" [features] diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index 7c254f0384..be75337115 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -93,7 +93,6 @@ features = ["serde"] [dependencies.time] version = "0.2.23" features = ["std"] -mutants = "0.0.3" [dev-dependencies] assert-json-diff = "1.0.0" diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 6c20f79dcd..bceb484cd7 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -31,7 +31,6 @@ wsts = { workspace = true } rand = { workspace = true } rand_core = { workspace = true } hashbrown = { workspace = true } -mutants = "0.0.3" [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} From 99b49afc55542de44ae9bf7d594607950af0fc98 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 3 Jun 2024 17:39:16 +0300 Subject: [PATCH 0231/1400] migrate some mutants todos to issues --- clarity/src/vm/ast/parser/v1.rs | 10 +--------- clarity/src/vm/ast/parser/v2/mod.rs | 10 +--------- clarity/src/vm/ast/sugar_expander/mod.rs | 10 +--------- clarity/src/vm/costs/mod.rs | 7 ++----- clarity/src/vm/types/mod.rs | 8 ++------ libsigner/src/events.rs | 4 +--- pox-locking/src/events.rs | 3 +-- stackslib/src/burnchains/db.rs | 3 +-- stackslib/src/burnchains/mod.rs | 3 +-- 9 files changed, 11 insertions(+), 47 deletions(-) diff --git a/clarity/src/vm/ast/parser/v1.rs b/clarity/src/vm/ast/parser/v1.rs index 65f9b66b11..5c2715e9f7 100644 --- a/clarity/src/vm/ast/parser/v1.rs +++ b/clarity/src/vm/ast/parser/v1.rs @@ -508,15 +508,7 @@ fn handle_expression( } } -// TODO: #4587 create default for `SymbolicExpression`, then check if mutation tests are caught for these cases: -// ParseResult::from(vec![Default::default()]) -// ParseResult::from(vec![]) -// ParseResult::new() -// ParseResult::from_iter([vec![Default::default()]]) -// ParseResult::new(vec![Default::default()]) -// ParseResult::new(vec![]) -// ParseResult::from_iter([vec![]]) -// Or keep the skip and remove the comment +// TODO: add tests from mutation testing results #4828 #[cfg_attr(test, mutants::skip)] pub fn parse_lexed(input: Vec<(LexItem, u32, u32)>) -> ParseResult> { let mut parse_stack = Vec::new(); diff --git a/clarity/src/vm/ast/parser/v2/mod.rs b/clarity/src/vm/ast/parser/v2/mod.rs index 8f039eedb1..f97aeb6b72 100644 --- a/clarity/src/vm/ast/parser/v2/mod.rs +++ b/clarity/src/vm/ast/parser/v2/mod.rs @@ -206,15 +206,7 @@ impl<'a> Parser<'a> { } } - // TODO: #4587 create default for `SymbolicExpression`, then check if mutation tests are caught for these cases: - // ParseResult::from_iter([Some(Default::default())]) - // ParseResult::new(None) - // ParseResult::from_iter([None]) - // ParseResult::new(Some(Default::default())) - // ParseResult::from(None) - // ParseResult::from(Some(Default::default())) - // ParseResult::new() - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4829 #[cfg_attr(test, mutants::skip)] /// Process a new child node for an AST expression that is open and waiting for children nodes. For example, /// a list or tuple expression that is waiting for child expressions. diff --git a/clarity/src/vm/ast/sugar_expander/mod.rs b/clarity/src/vm/ast/sugar_expander/mod.rs index 01528700fa..670796cf4c 100644 --- a/clarity/src/vm/ast/sugar_expander/mod.rs +++ b/clarity/src/vm/ast/sugar_expander/mod.rs @@ -58,15 +58,7 @@ impl SugarExpander { Ok(()) } - // TODO: #4587 create default for `SymbolicExpression`, then check if mutation tests are caught for these cases: - // ParseResult::from(vec![Default::default()]) - // ParseResult::from(vec![]) - // ParseResult::new() - // ParseResult::from_iter([vec![Default::default()]]) - // ParseResult::new(vec![Default::default()]) - // ParseResult::new(vec![]) - // ParseResult::from_iter([vec![]]) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4830 #[cfg_attr(test, mutants::skip)] pub fn transform( &self, diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 17e052d97e..0751822ed0 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -782,8 +782,7 @@ impl LimitedCostTracker { } impl TrackerData { - // TODO: #4587 add test for Err cases - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4831 #[cfg_attr(test, mutants::skip)] /// `apply_updates` - tells this function to look for any changes in the cost voting contract /// which would need to be applied. if `false`, just load the last computed cost state in this @@ -957,9 +956,7 @@ fn parse_cost( } } -// TODO: #4587 create default for `ExecutionCost`, then check if mutation tests are caught for these case: -// Ok(Default::default()) -// Or keep the skip and remove the comment +// TODO: add tests from mutation testing results #4832 #[cfg_attr(test, mutants::skip)] fn compute_cost( cost_tracker: &mut TrackerData, diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 98ed421828..d04586504f 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -1516,9 +1516,7 @@ impl TupleData { self.data_map.is_empty() } - // TODO: #4587 create default for TupleData, then check if the mutation tests are caught for the case: - // Ok((Default::default())) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4833 #[cfg_attr(test, mutants::skip)] pub fn from_data(data: Vec<(ClarityName, Value)>) -> Result { let mut type_map = BTreeMap::new(); @@ -1536,9 +1534,7 @@ impl TupleData { Self::new(TupleTypeSignature::try_from(type_map)?, data_map) } - // TODO: #4587 create default for TupleData, then check if the mutation tests are caught for the case: - // Ok((Default::default())) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4834 #[cfg_attr(test, mutants::skip)] pub fn from_data_typed( epoch: &StacksEpochId, diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index a24219b1ce..aadfd81e64 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -375,9 +375,7 @@ fn ack_dispatcher(request: HttpRequest) { }; } -// TODO: #4587 create default for `SignerEvent` and `EventError`, then check if mutation tests are caught for these case: -// Ok(Default::default()) -// Or keep the skip and remove the comment +// TODO: add tests from mutation testing results #4835 #[cfg_attr(test, mutants::skip)] /// Process a stackerdb event from the node fn process_stackerdb_event( diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index 2146a15688..0a1dc9d3c4 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -105,8 +105,7 @@ fn create_event_info_aggregation_code(function_name: &str) -> String { ) } -// TODO: #4587 add tests for `String::new()` and `"xyzzy".into()` returning case. -// Or keep the skip and remove the comment +// TODO: add tests from mutation testing results #4836 #[cfg_attr(test, mutants::skip)] /// Craft the code snippet to generate the method-specific `data` payload fn create_event_info_data_code( diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 1c6d0d2c61..79e34b3539 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -946,8 +946,7 @@ impl<'a> BurnchainDBTransaction<'a> { BurnchainDB::inner_get_canonical_chain_tip(&self.sql_tx) } - // TODO: #4587 add tests for `Ok(())` returning case - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4837 #[cfg_attr(test, mutants::skip)] /// You'd only do this in network emergencies, where node operators are expected to declare an /// anchor block missing (or present). Ideally there'd be a smart contract somewhere for this. diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 5f47ddb5ed..228f7895ee 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -468,8 +468,7 @@ impl PoxConstants { ) // total liquid supply is 40000000000000000 µSTX } - // TODO: #4587 create default for PoxConstants, then check if the mutation tests are caught: - // Default::default() + // TODO: add tests from mutation testing results #4838 #[cfg_attr(test, mutants::skip)] pub fn regtest_default() -> PoxConstants { PoxConstants::new( From 2c1a229201d3daa3f634bc16cf79314371071ee3 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 3 Jun 2024 12:54:24 -0500 Subject: [PATCH 0232/1400] add burn_view to StacksHeaderInfo for nakamoto blocks, use in append_block --- stackslib/src/chainstate/burn/db/sortdb.rs | 15 +- .../chainstate/nakamoto/coordinator/mod.rs | 9 +- stackslib/src/chainstate/nakamoto/mod.rs | 164 +++++++++++++----- stackslib/src/chainstate/nakamoto/tenure.rs | 66 ++----- stackslib/src/chainstate/stacks/db/mod.rs | 19 +- stackslib/src/chainstate/stacks/miner.rs | 1 + 6 files changed, 152 insertions(+), 122 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 1c6fe26606..db7c15dfea 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2677,17 +2677,10 @@ impl SortitionDB { // if its a nakamoto block, we want to use the burnchain view of the block let burn_view = match &header.anchored_header { StacksBlockHeaderTypes::Epoch2(_) => header.consensus_hash, - StacksBlockHeaderTypes::Nakamoto(_) => { - NakamotoChainState::get_tenure_for_block(chainstate.db(), &header) - .map_err(|e| { - warn!( - "Failed to get tenure for block header: {:?}", e; - "block_id" => %stacks_block_id, - ); - db_error::NotFoundError - })? - .burn_view_consensus_hash - } + StacksBlockHeaderTypes::Nakamoto(_) => header.burn_view.ok_or_else(|| { + error!("Loaded nakamoto block header without a burn view"; "block_id" => %stacks_block_id); + db_error::Other("Nakamoto block header without burn view".into()) + })?, }; let snapshot = SortitionDB::get_block_snapshot_consensus(&self.conn(), &burn_view)? diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index f399615c80..2bca7609a3 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -573,13 +573,10 @@ impl< loop { // process at most one block per loop pass - let mut sortdb_handle = self - .sortition_db - .tx_handle_begin(&canonical_sortition_tip)?; - let mut processed_block_receipt = match NakamotoChainState::process_next_nakamoto_block( &mut self.chain_state_db, - &mut sortdb_handle, + &mut self.sortition_db, + &canonical_sortition_tip, self.dispatcher, ) { Ok(receipt_opt) => receipt_opt, @@ -606,8 +603,6 @@ impl< } }; - sortdb_handle.commit()?; - let Some(block_receipt) = processed_block_receipt.take() else { // out of blocks debug!("No more blocks to process (no receipts)"); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 4fec156e6f..4929a38c32 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -39,7 +39,7 @@ use stacks_common::consts::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, MINER_REWARD_MATURITY, }; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, VRFSeed, }; use stacks_common::types::{PrivateKey, StacksEpochId}; @@ -221,6 +221,13 @@ lazy_static! { NAKAMOTO_TENURES_SCHEMA_2.into(), r#" UPDATE db_config SET version = "5"; + "#.into(), + // make burn_view NULLable. We could use a default value, but NULL should be safer (because it will error). + // there should be no entries in nakamoto_block_headers with a NULL entry when this column is added, because + // nakamoto blocks have not been produced yet. + r#" + ALTER TABLE nakamoto_block_headers + ADD COLUMN burn_view TEXT; "#.into(), ]; } @@ -1273,16 +1280,21 @@ impl NakamotoChainState { /// If there exists a ready Nakamoto block, then this method returns Ok(Some(..)) with the /// receipt. Otherwise, it returns Ok(None). /// + /// Canonical sortition tip is a pointer to the current canonical sortition tip. + /// this is used to store block processed information in the sortition db. + /// /// It returns Err(..) on DB error, or if the child block does not connect to the parent. /// The caller should keep calling this until it gets Ok(None) pub fn process_next_nakamoto_block<'a, T: BlockEventDispatcher>( stacks_chain_state: &mut StacksChainState, - sort_tx: &mut SortitionHandleTx, + sort_db: &mut SortitionDB, + canonical_sortition_tip: &SortitionId, dispatcher_opt: Option<&'a T>, ) -> Result, ChainstateError> { let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); - let Some((next_ready_block, block_size)) = - nakamoto_blocks_db.next_ready_nakamoto_block(stacks_chain_state.db(), sort_tx)? + let sortition_handle = sort_db.index_handle(canonical_sortition_tip); + let Some((next_ready_block, block_size)) = nakamoto_blocks_db + .next_ready_nakamoto_block(stacks_chain_state.db(), &sortition_handle)? else { // no more blocks test_debug!("No more Nakamoto blocks to process"); @@ -1293,7 +1305,7 @@ impl NakamotoChainState { // find corresponding snapshot let next_ready_block_snapshot = SortitionDB::get_block_snapshot_consensus( - sort_tx, + sort_db.conn(), &next_ready_block.header.consensus_hash, )? .unwrap_or_else(|| { @@ -1350,26 +1362,69 @@ impl NakamotoChainState { // (2) the same as parent block id let burnchain_view = if let Some(tenure_change) = next_ready_block.get_tenure_tx_payload() { + if let Some(ref parent_burn_view) = parent_header_info.burn_view { + // check that the tenure_change's burn view descends from the parent + let parent_burn_view_sn = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + parent_burn_view, + )? + .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "block_id" => %next_ready_block.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })?; + let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; + let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? + .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "block_id" => %next_ready_block.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })?; + if connected_sort_id != parent_burn_view_sn.sortition_id { + warn!( + "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "block_id" => %next_ready_block.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + return Err(ChainstateError::InvalidStacksBlock( + "Does not connect to burn view of parent block ID".into(), + )); + } + } tenure_change.burn_view_consensus_hash } else { - let Some(current_tenure) = Self::get_highest_nakamoto_tenure_change_by_tenure_id( - &chainstate_tx.tx, - &next_ready_block.header.consensus_hash, - )? - else { + parent_header_info.burn_view.clone().ok_or_else(|| { warn!( - "Cannot process Nakamoto block: failed to find active tenure"; + "Cannot process Nakamoto block: parent block does not have a burnchain view and current block has no tenure tx"; "consensus_hash" => %next_ready_block.header.consensus_hash, "block_hash" => %next_ready_block.header.block_hash(), + "block_id" => %next_ready_block.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - return Ok(None); - }; - current_tenure.burn_view_consensus_hash + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })? }; - let Some(burnchain_view_sortid) = - SortitionDB::get_sortition_id_by_consensus(sort_tx.tx(), &burnchain_view)? + let Some(burnchain_view_sn) = + SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &burnchain_view)? else { + // This should be checked already during block acceptance and parent block processing + // - The check for expected burns returns `NoSuchBlockError` if the burnchain view + // could not be found for a block with a tenure tx. + // We error here anyways, but the check during block acceptance makes sure that the staging + // db doesn't get into a situation where it continuously tries to retry such a block (because + // such a block shouldn't land in the staging db). warn!( "Cannot process Nakamoto block: failed to find Sortition ID associated with burnchain view"; "consensus_hash" => %next_ready_block.header.consensus_hash, @@ -1388,24 +1443,22 @@ impl NakamotoChainState { let (commit_burn, sortition_burn) = if new_tenure { // find block-commit to get commit-burn - let block_commit = sort_tx - .get_block_commit( - &next_ready_block_snapshot.winning_block_txid, - &next_ready_block_snapshot.sortition_id, - )? - .expect("FATAL: no block-commit for tenure-start block"); + let block_commit = SortitionDB::get_block_commit( + sort_db.conn(), + &next_ready_block_snapshot.winning_block_txid, + &next_ready_block_snapshot.sortition_id, + )? + .expect("FATAL: no block-commit for tenure-start block"); - let sort_burn = SortitionDB::get_block_burn_amount( - sort_tx.deref().deref(), - &next_ready_block_snapshot, - )?; + let sort_burn = + SortitionDB::get_block_burn_amount(sort_db.conn(), &next_ready_block_snapshot)?; (block_commit.burn_fee, sort_burn) } else { (0, 0) }; // attach the block to the chain state and calculate the next chain tip. - let pox_constants = sort_tx.context.pox_constants.clone(); + let pox_constants = sort_db.pox_constants.clone(); // NOTE: because block status is updated in a separate transaction, we need `chainstate_tx` // and `clarity_instance` to go out of scope before we can issue the it (since we need a @@ -1419,12 +1472,12 @@ impl NakamotoChainState { // set the sortition tx's tip to the burnchain view -- we must unset this after appending the block, // so we wrap this call in a closure to make sure that the unsetting is infallible - let prior_sort_tip = - std::mem::replace(&mut sort_tx.context.chain_tip, burnchain_view_sortid); + let mut burn_view_handle = sort_db.index_handle(&burnchain_view_sn.sortition_id); let (ok_opt, err_opt) = (|clarity_instance| match NakamotoChainState::append_block( &mut chainstate_tx, clarity_instance, - sort_tx, + &mut burn_view_handle, + &burnchain_view, &pox_constants, &parent_header_info, &next_ready_block_snapshot.burn_header_hash, @@ -1442,8 +1495,6 @@ impl NakamotoChainState { Err(e) => (None, Some(e)), })(clarity_instance); - sort_tx.context.chain_tip = prior_sort_tip; - if let Some(e) = err_opt { // force rollback drop(ok_opt); @@ -1478,6 +1529,7 @@ impl NakamotoChainState { ); // set stacks block accepted + let mut sort_tx = sort_db.tx_handle_begin(canonical_sortition_tip)?; sort_tx.set_stacks_block_accepted( &next_ready_block.header.consensus_hash, &next_ready_block.header.block_hash(), @@ -1529,6 +1581,14 @@ impl NakamotoChainState { ); } + sort_tx + .commit() + .unwrap_or_else(|e| { + error!("Failed to commit sortition db transaction after committing chainstate and clarity block. The chainstate database is now corrupted."; + "error" => ?e); + panic!() + }); + Ok(Some(receipt)) } @@ -1545,7 +1605,7 @@ impl NakamotoChainState { /// however, will flag a block as invalid in this case, because the parent must be available in /// order to process a block. pub(crate) fn get_expected_burns( - sort_handle: &mut SH, + sort_handle: &SH, chainstate_conn: &Connection, block: &NakamotoBlock, ) -> Result, ChainstateError> { @@ -2310,6 +2370,15 @@ impl NakamotoChainState { if tenure_changed { &1i64 } else { &0i64 }, &vrf_proof_bytes.as_ref(), &header.signer_bitvec, + tip_info.burn_view.as_ref().ok_or_else(|| { + error!( + "Attempted to store nakamoto block header information without burnchain view"; + "block_id" => %index_block_hash, + ); + ChainstateError::DBError(DBError::Other( + "Nakamoto block StacksHeaderInfo did not set burnchain view".into(), + )) + })?, ]; chainstate_tx.execute( @@ -2330,9 +2399,10 @@ impl NakamotoChainState { parent_block_id, tenure_changed, vrf_proof, - signer_bitvec + signer_bitvec, + burn_view ) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24)", + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24, ?25)", args )?; @@ -2362,6 +2432,7 @@ impl NakamotoChainState { burn_vote_for_aggregate_key_ops: Vec, new_tenure: bool, block_fees: u128, + burn_view: &ConsensusHash, ) -> Result { if new_tip.parent_block_id != StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) @@ -2409,6 +2480,7 @@ impl NakamotoChainState { burn_header_height: new_burnchain_height, burn_header_timestamp: new_burnchain_timestamp, anchored_block_size: block_size, + burn_view: Some(burn_view.clone()), }; let tenure_fees = block_fees @@ -2792,7 +2864,8 @@ impl NakamotoChainState { fn append_block<'a>( chainstate_tx: &mut ChainstateTx, clarity_instance: &'a mut ClarityInstance, - burn_dbconn: &mut SortitionHandleTx, + burn_dbconn: &mut SortitionHandleConn, + burnchain_view: &ConsensusHash, pox_constants: &PoxConstants, parent_chain_tip: &StacksHeaderInfo, chain_tip_burn_header_hash: &BurnchainHeaderHash, @@ -2942,18 +3015,18 @@ impl NakamotoChainState { // (note that we can't check this earlier, since we need the parent tenure to have been // processed) if new_tenure && parent_chain_tip.is_nakamoto_block() && !block.is_first_mined() { - let tenure_block_commit = burn_dbconn - .get_block_commit( - &tenure_block_snapshot.winning_block_txid, - &tenure_block_snapshot.sortition_id, - )? - .ok_or_else(|| { - warn!("Invalid Nakamoto block: has no block-commit in its sortition"; + let tenure_block_commit = SortitionDB::get_block_commit( + burn_dbconn.conn(), + &tenure_block_snapshot.winning_block_txid, + &tenure_block_snapshot.sortition_id, + )? + .ok_or_else(|| { + warn!("Invalid Nakamoto block: has no block-commit in its sortition"; "block_id" => %block.header.block_id(), "sortition_id" => %tenure_block_snapshot.sortition_id, "block_commit_txid" => %tenure_block_snapshot.winning_block_txid); - ChainstateError::NoSuchBlockError - })?; + ChainstateError::NoSuchBlockError + })?; let parent_tenure_start_header = Self::get_nakamoto_tenure_start_block_header(chainstate_tx.tx(), &parent_ch)? @@ -3198,6 +3271,7 @@ impl NakamotoChainState { burn_vote_for_aggregate_key_ops, new_tenure, block_fees, + burnchain_view, ) .expect("FATAL: failed to advance chain tip"); diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index f68e0ee90a..5793994c80 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -89,7 +89,9 @@ use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; use wsts::curve::point::Point; use crate::burnchains::{PoxConstants, Txid}; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle, SortitionHandleTx}; +use crate::chainstate::burn::db::sortdb::{ + SortitionDB, SortitionHandle, SortitionHandleConn, SortitionHandleTx, +}; use crate::chainstate::burn::{BlockSnapshot, SortitionHash}; use crate::chainstate::coordinator::{BlockEventDispatcher, Error}; use crate::chainstate::nakamoto::{ @@ -595,53 +597,6 @@ impl NakamotoChainState { Ok(tenure_opt) } - /// Get the tenure change that was active for a given block header - /// If a tenure change occurred during this block, it will be returned - pub fn get_tenure_for_block( - headers_conn: &Connection, - block_header: &StacksHeaderInfo, - ) -> Result { - let sql = "SELECT * FROM nakamoto_tenures WHERE block_id = ? LIMIT 1"; - let tenure_opt: Option = - query_row(headers_conn, sql, &[block_header.index_block_hash()])?; - if let Some(tenure) = tenure_opt { - return Ok(tenure); - } - // there wasn't a tenure change at that block, so we need to figure out the active tenure - // use the "tenure height" to query for `num_blocks_confirmed` - let block_height = block_header.stacks_block_height; - let tenure_start_height = Self::get_nakamoto_tenure_start_block_header( - headers_conn, - &block_header.consensus_hash, - )? - .ok_or_else(|| ChainstateError::NoSuchBlockError)? - .stacks_block_height; - let blocks_confirmed = u64_to_sql(block_height.saturating_sub(tenure_start_height))?; - // querying by blocks confirmed doesn't work if cause is blockfound, - // so don't try and instead failback to directly querying it - let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ? - AND num_blocks_confirmed <= ? - AND cause <> ? - ORDER BY num_blocks_confirmed DESC LIMIT 1"; - if let Some(tenure) = query_row( - headers_conn, - sql, - params![ - &block_header.consensus_hash, - blocks_confirmed, - TenureChangeCause::BlockFound.as_u8() - ], - )? { - return Ok(tenure); - } - // failback to the BlockFound tenure change - Self::get_highest_nakamoto_tenure_change_by_tenure_id( - headers_conn, - &block_header.consensus_hash, - )? - .ok_or_else(|| ChainstateError::NoSuchBlockError) - } - /// Get the highest non-empty processed tenure on the canonical sortition history. pub fn get_highest_nakamoto_tenure( headers_conn: &Connection, @@ -893,9 +848,9 @@ impl NakamotoChainState { /// tenure-change tx, or just parent_coinbase_height if there was a tenure-extend tx or no tenure /// txs at all). /// TODO: unit test - pub(crate) fn advance_nakamoto_tenure( + pub(crate) fn advance_nakamoto_tenure( headers_tx: &mut StacksDBTx, - sort_tx: &mut SortitionHandleTx, + handle: &mut SH, block: &NakamotoBlock, parent_coinbase_height: u64, ) -> Result { @@ -918,7 +873,7 @@ impl NakamotoChainState { }; let Some(processed_tenure) = - Self::check_nakamoto_tenure(headers_tx, sort_tx, &block.header, tenure_payload)? + Self::check_nakamoto_tenure(headers_tx, handle, &block.header, tenure_payload)? else { return Err(ChainstateError::InvalidStacksTransaction( "Invalid tenure tx".into(), @@ -991,7 +946,7 @@ impl NakamotoChainState { /// TODO: unit test pub(crate) fn calculate_scheduled_tenure_reward( chainstate_tx: &mut ChainstateTx, - burn_dbconn: &mut SortitionHandleTx, + burn_dbconn: &SortitionHandleConn, block: &NakamotoBlock, evaluated_epoch: StacksEpochId, parent_coinbase_height: u64, @@ -1004,7 +959,7 @@ impl NakamotoChainState { // figure out if there any accumulated rewards by // getting the snapshot that elected this block. let accumulated_rewards = SortitionDB::get_block_snapshot_consensus( - burn_dbconn.tx(), + burn_dbconn.conn(), &block.header.consensus_hash, )? .expect("CORRUPTION: failed to load snapshot that elected processed block") @@ -1076,7 +1031,7 @@ impl NakamotoChainState { /// particular burnchain fork. /// Return the block snapshot if so. pub(crate) fn check_sortition_exists( - burn_dbconn: &mut SortitionHandleTx, + burn_dbconn: &SortitionHandleConn, block_consensus_hash: &ConsensusHash, ) -> Result { // check that the burnchain block that this block is associated with has been processed. @@ -1092,9 +1047,8 @@ impl NakamotoChainState { ChainstateError::NoSuchBlockError })?; - let sortition_tip = burn_dbconn.context.chain_tip.clone(); let snapshot = burn_dbconn - .get_block_snapshot(&burn_header_hash, &sortition_tip)? + .get_block_snapshot(&burn_header_hash)? .ok_or_else(|| { warn!( "Tried to process Nakamoto block before its burn view was processed"; diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 865758ed01..d9b6d47775 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -196,6 +196,9 @@ pub struct StacksHeaderInfo { pub burn_header_timestamp: u64, /// Size of the block corresponding to `anchored_header` in bytes pub anchored_block_size: u64, + /// The burnchain tip that is passed to Clarity while processing this block. + /// This should always be `Some()` for Nakamoto blocks and `None` for 2.x blocks + pub burn_view: Option, } #[derive(Debug, Clone, PartialEq)] @@ -387,6 +390,7 @@ impl StacksHeaderInfo { consensus_hash: ConsensusHash::empty(), burn_header_timestamp: 0, anchored_block_size: 0, + burn_view: None, } } @@ -406,6 +410,7 @@ impl StacksHeaderInfo { consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), burn_header_timestamp: first_burnchain_block_timestamp, anchored_block_size: 0, + burn_view: None, } } @@ -452,15 +457,21 @@ impl FromRow for StacksHeaderInfo { .parse::() .map_err(|_| db_error::ParseError)?; + let header_type: HeaderTypeNames = row + .get("header_type") + .unwrap_or_else(|_e| HeaderTypeNames::Epoch2); let stacks_header: StacksBlockHeaderTypes = { - let header_type: HeaderTypeNames = row - .get("header_type") - .unwrap_or_else(|_e| HeaderTypeNames::Epoch2); match header_type { HeaderTypeNames::Epoch2 => StacksBlockHeader::from_row(row)?.into(), HeaderTypeNames::Nakamoto => NakamotoBlockHeader::from_row(row)?.into(), } }; + let burn_view = { + match header_type { + HeaderTypeNames::Epoch2 => None, + HeaderTypeNames::Nakamoto => Some(ConsensusHash::from_column(row, "burn_view")?), + } + }; if block_height != stacks_header.height() { return Err(db_error::ParseError); @@ -476,6 +487,7 @@ impl FromRow for StacksHeaderInfo { burn_header_height: burn_header_height as u32, burn_header_timestamp, anchored_block_size, + burn_view, }) } } @@ -2617,6 +2629,7 @@ impl StacksChainState { burn_header_height: new_burnchain_height, burn_header_timestamp: new_burnchain_timestamp, anchored_block_size: anchor_block_size, + burn_view: None, }; StacksChainState::insert_stacks_block_header( diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 22fdb782b2..0ec979c623 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1495,6 +1495,7 @@ impl StacksBlockBuilder { burn_header_timestamp: genesis_burn_header_timestamp, burn_header_height: genesis_burn_header_height, anchored_block_size: 0, + burn_view: None, }; let mut builder = StacksBlockBuilder::from_parent_pubkey_hash( From 55567d777bc6ea3edbfd7a89e79b770d32c75ff7 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 3 Jun 2024 11:56:35 -0700 Subject: [PATCH 0233/1400] fix: return errors instead of panics in miner thread --- .../stacks-node/src/nakamoto_node/miner.rs | 27 +++++++++++++------ .../stacks-node/src/nakamoto_node/relayer.rs | 2 +- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 7d4e54b1d5..b324ec561d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -132,15 +132,22 @@ impl BlockMinerThread { } /// Stop a miner tenure by blocking the miner and then joining the tenure thread - pub fn stop_miner(globals: &Globals, prior_miner: JoinHandle<()>) { + pub fn stop_miner( + globals: &Globals, + prior_miner: JoinHandle>, + ) -> Result<(), NakamotoNodeError> { globals.block_miner(); prior_miner .join() - .expect("FATAL: IO failure joining prior mining thread"); + .map_err(|_| NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))??; globals.unblock_miner(); + Ok(()) } - pub fn run_miner(mut self, prior_miner: Option>) { + pub fn run_miner( + mut self, + prior_miner: Option>>, + ) -> Result<(), NakamotoNodeError> { // when starting a new tenure, block the mining thread if its currently running. // the new mining thread will join it (so that the new mining thread stalls, not the relayer) debug!( @@ -150,10 +157,10 @@ impl BlockMinerThread { "thread_id" => ?thread::current().id(), ); if let Some(prior_miner) = prior_miner { - Self::stop_miner(&self.globals, prior_miner); + Self::stop_miner(&self.globals, prior_miner)?; } let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) - .expect("FATAL: failed to connect to stacker DB"); + .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::NetError(e)))?; let mut attempts = 0; // now, actually run this tenure @@ -176,7 +183,9 @@ impl BlockMinerThread { } Err(e) => { warn!("Failed to mine block: {e:?}"); - return; + return Err(NakamotoNodeError::MiningFailure( + ChainstateError::MinerAborted, + )); } } }; @@ -193,7 +202,9 @@ impl BlockMinerThread { error!( "Unrecoverable error while gathering signatures: {e:?}. Ending tenure." ); - return; + return Err(NakamotoNodeError::MiningFailure( + ChainstateError::MinerAborted, + )); } }; @@ -234,7 +245,7 @@ impl BlockMinerThread { while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); if self.check_burn_tip_changed(&sort_db).is_err() { - return; + return Err(NakamotoNodeError::BurnchainTipChanged); } } } diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index fc4ca1ae0d..d0ff26acdb 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -154,7 +154,7 @@ pub struct RelayerThread { relayer: Relayer, /// handle to the subordinate miner thread - miner_thread: Option>, + miner_thread: Option>>, /// The relayer thread reads directives from the relay_rcv, but it also periodically wakes up /// to check if it should issue a block commit or try to register a VRF key next_initiative: Instant, From 0e592954101807134ca4adaf447a8f5664ddb072 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 3 Jun 2024 12:34:04 -0700 Subject: [PATCH 0234/1400] fix: cargo check --tests errors after merge --- stackslib/src/net/tests/download/nakamoto.rs | 22 ++++++++++++++----- .../src/tests/nakamoto_integrations.rs | 4 ++-- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 6e1e4c1bcb..c084527336 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -915,10 +915,22 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // Does not consume blocks beyond the highest processed block ID { let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); + utd.confirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); + utd.unconfirmed_signer_keys = Some( + current_reward_sets + .get(&tip_rc) + .cloned() + .unwrap() + .known_selected_anchor_block_owned() + .unwrap(), + ); assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); @@ -945,7 +957,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { &sort_tip, peer.chainstate(), tenure_tip.clone(), - &agg_pubkeys, + ¤t_reward_sets, ) .unwrap(); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 4ecddf6e78..7c57e8c14c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3938,7 +3938,7 @@ fn nakamoto_attempt_time() { return; } - let signers = TestSigners::default(); + let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let password = "12345".to_string(); naka_conf.connection_options.block_proposal_token = Some(password.clone()); @@ -4019,7 +4019,7 @@ fn nakamoto_attempt_time() { &blocks_processed, &[stacker_sk], &[sender_signer_sk], - Some(&signers), + &mut Some(&mut signers), &mut btc_regtest_controller, ); From 737cf14b336c7fcbf846b7da6387728abde95ea2 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 3 Jun 2024 16:00:47 -0500 Subject: [PATCH 0235/1400] cleanup --- stackslib/src/chainstate/nakamoto/mod.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 4929a38c32..1f29cda7cf 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1470,10 +1470,8 @@ impl NakamotoChainState { // to access `stacks_chain_state` again. In the `Ok(..)` case, it's instead sufficient so // simply commit the block before beginning the second transaction to mark it processed. - // set the sortition tx's tip to the burnchain view -- we must unset this after appending the block, - // so we wrap this call in a closure to make sure that the unsetting is infallible let mut burn_view_handle = sort_db.index_handle(&burnchain_view_sn.sortition_id); - let (ok_opt, err_opt) = (|clarity_instance| match NakamotoChainState::append_block( + let (ok_opt, err_opt) = match NakamotoChainState::append_block( &mut chainstate_tx, clarity_instance, &mut burn_view_handle, @@ -1493,7 +1491,7 @@ impl NakamotoChainState { ) { Ok(next_chain_tip_info) => (Some(next_chain_tip_info), None), Err(e) => (None, Some(e)), - })(clarity_instance); + }; if let Some(e) = err_opt { // force rollback From e22a62dd2707a1b9b00cbf8912baf2e58f74c2ef Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 3 Jun 2024 17:43:35 -0400 Subject: [PATCH 0236/1400] fix: method docs --- stackslib/src/chainstate/burn/db/sortdb.rs | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 3ee746971f..d4304a0e2f 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3457,8 +3457,10 @@ impl SortitionDB { Ok(()) } - /// Wrapper around SortitionDBConn::get_prepare_phase_end_sortition_id_for_reward_cycle(). - /// See that method for details. + /// Get the prepare phase end sortition ID of a reward cycle. This is the last prepare + /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned + /// sortition will be in the preceding reward cycle) + /// Wrapper around SortitionDBConn::get_prepare_phase_end_sortition_id_for_reward_ccyle() pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( &self, tip: &SortitionId, @@ -3473,8 +3475,10 @@ impl SortitionDB { ) } + /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare + /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned + /// sortition will be in the preceding reward cycle) /// Wrapper around SortitionDBConn::get_prepare_phase_start_sortition_id_for_reward_cycle(). - /// See that method for details. pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( &self, tip: &SortitionId, @@ -3489,8 +3493,11 @@ impl SortitionDB { ) } + /// Figure out the reward cycle for `tip` and lookup the preprocessed + /// reward set (if it exists) for the active reward cycle during `tip`. + /// Returns the reward cycle info on success. + /// Returns Error on DB errors, as well as if the reward set is not yet processed. /// Wrapper around SortitionDBConn::get_preprocessed_reward_set_for_reward_cycle(). - /// See that method for details. pub fn get_preprocessed_reward_set_for_reward_cycle( &self, tip: &SortitionId, @@ -3505,8 +3512,11 @@ impl SortitionDB { ) } + /// Figure out the reward cycle for `tip` and lookup the preprocessed + /// reward set (if it exists) for the active reward cycle during `tip`. + /// Returns the reward cycle info on success. + /// Returns Error on DB errors, as well as if the reward set is not yet processed. /// Wrapper around SortitionDBConn::get_preprocessed_reward_set_of(). - /// See that method for details. pub fn get_preprocessed_reward_set_of( &self, tip: &SortitionId, From ded87549fb5d68b14fb301938697e8c479ecd68b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 4 Jun 2024 08:29:57 -0500 Subject: [PATCH 0237/1400] address PR reviews --- stacks-signer/src/client/stacks_client.rs | 18 +++++++++--------- stackslib/src/net/api/get_tenures_fork_info.rs | 3 ++- stackslib/src/net/api/getsortition.rs | 5 ++--- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 8a7ade028c..f24679ff69 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -1,3 +1,4 @@ +use std::collections::VecDeque; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -370,26 +371,25 @@ impl StacksClient { chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, ) -> Result, ClientError> { - let mut tenures = self.get_tenure_forking_info_step(chosen_parent, last_sortition)?; + let mut tenures: VecDeque = + self.get_tenure_forking_info_step(chosen_parent, last_sortition)?; if tenures.is_empty() { - return Ok(tenures); + return Ok(vec![]); } - while tenures.last().map(|x| &x.consensus_hash) != Some(chosen_parent) { - let new_start = tenures.last().ok_or_else(|| { + while tenures.back().map(|x| &x.consensus_hash) != Some(chosen_parent) { + let new_start = tenures.back().ok_or_else(|| { ClientError::InvalidResponse( "Should have tenure data in forking info response".into(), ) })?; let mut next_results = self.get_tenure_forking_info_step(chosen_parent, &new_start.consensus_hash)?; - if next_results.is_empty() { + if next_results.pop_front().is_none() { return Err(ClientError::InvalidResponse( "Could not fetch forking info all the way back to the requested chosen_parent" .into(), )); } - // SAFETY check: next_results isn't empty, because of the above check. otherwise, remove(0) could panic. - next_results.remove(0); if next_results.is_empty() { return Err(ClientError::InvalidResponse( "Could not fetch forking info all the way back to the requested chosen_parent" @@ -399,14 +399,14 @@ impl StacksClient { tenures.extend(next_results.into_iter()); } - Ok(tenures) + Ok(tenures.into_iter().collect()) } fn get_tenure_forking_info_step( &self, chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, - ) -> Result, ClientError> { + ) -> Result, ClientError> { let send_request = || { self.stacks_node_client .get(self.tenure_forking_info_path(chosen_parent, last_sortition)) diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs index 4abc8ab6e1..778e4cbf68 100644 --- a/stackslib/src/net/api/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -233,7 +233,8 @@ impl RPCRequestHandler for GetTenuresForkInfo { if height_bound >= cursor.block_height { return Err(ChainError::NotInSameFork); } - cursor = handle.get_last_snapshot_with_sortition(cursor.block_height - 1)?; + cursor = handle + .get_last_snapshot_with_sortition(cursor.block_height.saturating_sub(1))?; results.push(TenureForkingInfo::from_snapshot( &cursor, sortdb, chainstate, )?); diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 1e2551eb83..73789c3f6a 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -53,8 +53,8 @@ pub enum QuerySpecifier { Latest, } -pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortition"; -static PATH_REGEX: &str = "^/v3/sortition(/(?P[a-z_]{1,15})/(?P[0-9a-f]{1,64}))?$"; +pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortitions"; +static PATH_REGEX: &str = "^/v3/sortitions(/(?P[a-z_]{1,15})/(?P[0-9a-f]{1,64}))?$"; /// Struct for sortition information returned via the GetSortition API call #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] @@ -167,7 +167,6 @@ impl HttpRequest for GetSortitionHandler { let req_contents = HttpRequestContents::new().query_string(query); self.query = QuerySpecifier::Latest; - eprintln!("{captures:?}"); if let (Some(key), Some(value)) = (captures.name("key"), captures.name("value")) { self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; } From 3b15824577a1fad29005c928c0e31262ab0dc091 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 4 Jun 2024 08:37:36 -0500 Subject: [PATCH 0238/1400] chore: cargo fmt --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f596340c55..98cdca63d7 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -50,8 +50,7 @@ use stacks::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, Transa use stacks::chainstate::stacks::{ SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangePayload, TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, - TransactionPublicKeyEncoding, TransactionSpendingCondition, TransactionVersion, - MAX_BLOCK_LEN + TransactionPublicKeyEncoding, TransactionSpendingCondition, TransactionVersion, MAX_BLOCK_LEN, }; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ @@ -84,8 +83,8 @@ use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; -use stacks_signer::signerdb::{BlockInfo, SignerDb}; use stacks_signer::chainstate::SortitionsView; +use stacks_signer::signerdb::{BlockInfo, SignerDb}; use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; From 73eacaed8532bb846d15fd1db0ac864bd65f5b0b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 4 Jun 2024 12:14:20 -0400 Subject: [PATCH 0239/1400] WIP: no idea what I am doing Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- .../stacks-node/src/nakamoto_node/miner.rs | 115 ++++++++---------- .../stacks-node/src/nakamoto_node/relayer.rs | 76 ++++-------- .../src/tests/nakamoto_integrations.rs | 54 ++++++-- 4 files changed, 122 insertions(+), 125 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 2cdf93eef5..302ac5a6e6 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -732,7 +732,7 @@ impl NakamotoBlock { // discontinuous warn!( "Invalid block -- discontiguous"; - "previosu_tenure_end" => %tc_payload.previous_tenure_end, + "previous_tenure_end" => %tc_payload.previous_tenure_end, "parent_block_id" => %self.header.parent_block_id ); return Err(()); diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index d20abe0ea5..9f424a7379 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -38,7 +38,6 @@ use stacks::net::stackerdb::StackerDBs; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; -use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::VRFProof; use wsts::curve::point::Point; use wsts::curve::scalar::Scalar; @@ -68,18 +67,21 @@ pub enum MinerDirective { burnchain_tip: BlockSnapshot, }, /// The miner should try to continue their tenure if they are the active miner - ContinueTenure { new_burn_view: ConsensusHash }, + ContinueTenure { + parent_tenure_start: StacksBlockId, + new_burn_view: ConsensusHash, + }, /// The miner did not win sortition StopTenure, } #[derive(PartialEq, Debug, Clone)] /// Tenure info needed to construct a tenure change or tenure extend transaction -pub struct ParentTenureInfo { +struct ParentTenureInfo { /// The number of blocks in the parent tenure - pub parent_tenure_blocks: u64, + parent_tenure_blocks: u64, /// The consensus hash of the parent tenure - pub parent_tenure_consensus_hash: ConsensusHash, + parent_tenure_consensus_hash: ConsensusHash, } /// Metadata required for beginning a new tenure @@ -98,8 +100,9 @@ pub enum MinerReason { BlockFound, /// The miner thread was spawned to extend an existing tenure Extended { - /// The parent tenure info to extend - parent_tenure_info: ParentTenureInfo, + /// Current consensus hash on the underlying burnchain. Corresponds to the last-seen + /// sortition. + burn_view_consensus_hash: ConsensusHash, /// Wether the tenure change transaction was mined tenure_change_mined: bool, }, @@ -109,9 +112,9 @@ impl std::fmt::Display for MinerReason { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { MinerReason::BlockFound => write!(f, "BlockFound"), - MinerReason::Extended { parent_tenure_info, tenure_change_mined } => write!( + MinerReason::Extended { burn_view_consensus_hash, tenure_change_mined } => write!( f, - "Extended: tenure_info = {parent_tenure_info:?}, tenure_change_mined = {tenure_change_mined:?}", + "Extended: burn_view_consensus_hash = {burn_view_consensus_hash:?}, tenure_change_mined = {tenure_change_mined}", ), } } @@ -248,7 +251,10 @@ impl BlockMinerThread { } = &mut self.reason { // We should not issue multiple tenure change transactions for the same tenure - *tenure_change_mined = true; + if !*tenure_change_mined { + debug!("Miner: Tenure change mined"); + *tenure_change_mined = true; + } } } @@ -545,24 +551,11 @@ impl BlockMinerThread { fn generate_tenure_change_tx( &self, nonce: u64, - parent_block_id: StacksBlockId, - parent_tenure_consensus_hash: ConsensusHash, - parent_tenure_blocks: u64, - miner_pkh: Hash160, - cause: TenureChangeCause, + payload: TenureChangePayload, ) -> Result { let is_mainnet = self.config.is_mainnet(); let chain_id = self.config.burnchain.chain_id; - let tenure_change_tx_payload = TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: self.burn_block.consensus_hash.clone(), - prev_tenure_consensus_hash: parent_tenure_consensus_hash, - burn_view_consensus_hash: self.burn_block.consensus_hash.clone(), - previous_tenure_end: parent_block_id, - previous_tenure_blocks: u32::try_from(parent_tenure_blocks) - .expect("FATAL: more than u32 blocks in a tenure"), - cause, - pubkey_hash: miner_pkh, - }); + let tenure_change_tx_payload = TransactionPayload::TenureChange(payload); let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); tx_auth.set_origin_nonce(nonce); @@ -847,53 +840,45 @@ impl BlockMinerThread { vrf_proof: VRFProof, target_epoch_id: StacksEpochId, ) -> Result { + debug!("MAKING TENURE START INFO"); let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let current_miner_nonce = parent_block_info.coinbase_nonce; - let (coinbase_tx, tenure_change_tx) = match &self.reason { - MinerReason::BlockFound => { - // create our coinbase if this is the first block we've mined this tenure - if let Some(ref par_tenure_info) = parent_block_info.parent_tenure { - let tenure_change_tx = self.generate_tenure_change_tx( - current_miner_nonce, - parent_block_id, - par_tenure_info.parent_tenure_consensus_hash, - par_tenure_info.parent_tenure_blocks, - self.keychain.get_nakamoto_pkh(), - TenureChangeCause::BlockFound, - )?; - let coinbase_tx = self.generate_coinbase_tx( - current_miner_nonce + 1, - target_epoch_id, - vrf_proof, - ); - (Some(coinbase_tx), Some(tenure_change_tx)) - } else { - (None, None) - } - } - MinerReason::Extended { - parent_tenure_info, + let (tenure_change_tx, coinbase_tx) = if let Some(ref parent_tenure_info) = + parent_block_info.parent_tenure + { + debug!("Miner: Constructing tenure change and coinbase transactions"); + let num_blocks_so_far = u32::try_from(parent_tenure_info.parent_tenure_blocks) + .expect("FATAL: more than u32 blocks in a tenure"); + let mut payload = TenureChangePayload { + tenure_consensus_hash: self.burn_block.consensus_hash.clone(), + prev_tenure_consensus_hash: parent_tenure_info.parent_tenure_consensus_hash, + burn_view_consensus_hash: self.burn_block.consensus_hash.clone(), + previous_tenure_end: parent_block_id, + previous_tenure_blocks: num_blocks_so_far, + cause: TenureChangeCause::BlockFound, + pubkey_hash: self.keychain.get_nakamoto_pkh(), + }; + if let MinerReason::Extended { + burn_view_consensus_hash, tenure_change_mined, - } => { - if !tenure_change_mined { - let tenure_change_tx = self.generate_tenure_change_tx( - current_miner_nonce, + } = &self.reason + { + debug!("Tenure change mined {tenure_change_mined}"); + if !*tenure_change_mined { + debug!("Miner: Extending tenure"; "burn_view_consensus_hash" => %burn_view_consensus_hash, "parent_block_id" => %parent_block_id, "num_blocks_so_far" => num_blocks_so_far); + payload = payload.extend( + *burn_view_consensus_hash, parent_block_id, - parent_tenure_info.parent_tenure_consensus_hash, - parent_tenure_info.parent_tenure_blocks, - self.keychain.get_nakamoto_pkh(), - TenureChangeCause::Extended, - )?; - let coinbase_tx = self.generate_coinbase_tx( - current_miner_nonce + 1, - target_epoch_id, - vrf_proof, + num_blocks_so_far, ); - (Some(coinbase_tx), Some(tenure_change_tx)) - } else { - (None, None) } } + let tenure_change_tx = self.generate_tenure_change_tx(current_miner_nonce, payload)?; + let coinbase_tx = + self.generate_coinbase_tx(current_miner_nonce + 1, target_epoch_id, vrf_proof); + (Some(tenure_change_tx), Some(coinbase_tx)) + } else { + (None, None) }; Ok(NakamotoTenureInfo { diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 2bb83c8db2..c2a86e0942 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -57,7 +57,7 @@ use super::{ BLOCK_PROCESSOR_STACK_SIZE, }; use crate::burnchains::BurnchainController; -use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective, ParentTenureInfo}; +use crate::nakamoto_node::miner::{BlockMinerThread, MinerDirective}; use crate::neon_node::{ fault_injection_skip_mining, open_chainstate_with_faults, LeaderKeyRegistrationState, }; @@ -302,7 +302,7 @@ impl RelayerThread { /// Given the pointer to a recently processed sortition, see if we won the sortition. /// - /// Returns `true` if we won this last sortition. + /// Returns a directive to the relayer thread to either start, stop, or continue a tenure. pub fn process_sortition( &mut self, consensus_hash: ConsensusHash, @@ -341,6 +341,7 @@ impl RelayerThread { } } else { MinerDirective::ContinueTenure { + parent_tenure_start: committed_index_hash, new_burn_view: consensus_hash, } } @@ -546,6 +547,9 @@ impl RelayerThread { parent_tenure_id: StacksBlockId, reason: MinerReason, ) -> Result { + debug!("Relayer: creating block miner thread"; + "reason" => %reason + ); if fault_injection_skip_mining(&self.config.node.rpc_bind, last_burn_block.block_height) { debug!( "Relayer: fault injection skip mining at block height {}", @@ -560,7 +564,7 @@ impl RelayerThread { let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); - if burn_chain_tip != burn_header_hash { + if burn_chain_tip != burn_header_hash && matches!(reason, MinerReason::BlockFound) { debug!( "Relayer: Drop stale RunTenure for {}: current sortition is for {}", &burn_header_hash, &burn_chain_tip @@ -647,7 +651,11 @@ impl RelayerThread { Ok(()) } - fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> { + fn continue_tenure( + &mut self, + parent_tenure_start: StacksBlockId, + new_burn_view: ConsensusHash, + ) -> Result<(), NakamotoNodeError> { if let Err(e) = self.stop_tenure() { error!("Relayer: Failed to stop tenure: {:?}", e); return Ok(()); @@ -669,45 +677,14 @@ impl RelayerThread { "block_snapshot_winning_block_txid" => %block_snapshot.winning_block_txid ); return Ok(()); - }; - - let block_header = NakamotoChainState::get_block_header_by_consensus_hash( - self.chainstate.db(), - &block_snapshot.consensus_hash, - ) - .map_err(|e| { - error!("Relayer: failed to get block header for the last sortition snapshsot: {e:?}"); - NakamotoNodeError::MissingTenureStartBlockHeader - })? - .ok_or_else(|| { - error!("Relayer: failed to find block header for the last sortition snapshsot"); - NakamotoNodeError::MissingTenureStartBlockHeader - })?; - - let last_parent_tenure_header = - NakamotoChainState::get_nakamoto_tenure_finish_block_header( - self.chainstate.db(), - &block_header.consensus_hash, - ) - .map_err(|e| { - error!("Relayer: failed to get last block of parent tenure: {e:?}"); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!("Relayer: failed to find block header for parent tenure"); - NakamotoNodeError::ParentNotFound - })?; - - let parent_tenure_info = ParentTenureInfo { - parent_tenure_blocks: 1 + last_parent_tenure_header.stacks_block_height - - block_header.stacks_block_height, - parent_tenure_consensus_hash: new_burn_view, + } else { + debug!("Relayer: the miner won the last sortition. Continuing tenure."); }; match self.start_new_tenure( - block_header.index_block_hash(), + parent_tenure_start, block_snapshot, MinerReason::Extended { - parent_tenure_info, + burn_view_consensus_hash: new_burn_view, tenure_change_mined: false, }, ) { @@ -746,17 +723,18 @@ impl RelayerThread { error!("Relayer: Failed to start new tenure: {:?}", e); } }, - MinerDirective::ContinueTenure { new_burn_view } => { - match self.continue_tenure(new_burn_view) { - Ok(()) => { - debug!("Relayer: successfully handled continue tenure."); - } - Err(e) => { - error!("Relayer: Failed to continue tenure: {:?}", e); - return false; - } + MinerDirective::ContinueTenure { + new_burn_view, + parent_tenure_start, + } => match self.continue_tenure(parent_tenure_start, new_burn_view) { + Ok(()) => { + debug!("Relayer: successfully handled continue tenure."); } - } + Err(e) => { + error!("Relayer: Failed to continue tenure: {:?}", e); + return false; + } + }, MinerDirective::StopTenure => match self.stop_tenure() { Ok(()) => { debug!("Relayer: successfully stopped tenure."); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index d1d9f5ebcb..cf615d6391 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4021,7 +4021,7 @@ fn continue_tenure_extend() { }) .unwrap(); - // Mine a regular nakamoto tenures + // Mine a regular nakamoto tenure next_block_and_mine_commit( &mut btc_regtest_controller, 60, @@ -4037,9 +4037,11 @@ fn continue_tenure_extend() { &signers, ); + info!("Pausing commit op for the next block"); TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); - next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); signer_vote_if_needed( &btc_regtest_controller, @@ -4048,7 +4050,6 @@ fn continue_tenure_extend() { &signers, ); - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(false); // Submit a TX let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); @@ -4072,14 +4073,47 @@ fn continue_tenure_extend() { &StacksEpochId::Epoch30, ) .unwrap(); - // Mine 15 more nakamoto tenures + + debug!("MINING A STACKS BLOCK"); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + debug!("MINING THE NEXT BLOCK"); + next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + + debug!("Unpausing commit op"); + TEST_SKIP_COMMIT_OP.lock().unwrap().replace(false); + + debug!("MINING THE NEXT TENURES"); + // Mine 15 more regular nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(commits_count > commits_before && blocks_processed > blocks_processed_before) + }) .unwrap(); signer_vote_if_needed( From 7b1ad1e1a11225b72b6f1b88289b12907e719a42 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 4 Jun 2024 11:17:25 -0500 Subject: [PATCH 0240/1400] fix issues from merge, fix an upstream bug in miner thread hand-offs --- stacks-signer/src/client/stacks_client.rs | 3 +-- stackslib/src/net/api/getsortition.rs | 2 +- stackslib/src/net/stackerdb/config.rs | 2 +- testnet/stacks-node/src/nakamoto_node/miner.rs | 12 ++++++++++-- .../stacks-node/src/tests/nakamoto_integrations.rs | 14 +++++++++++++- 5 files changed, 26 insertions(+), 7 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index f24679ff69..17a5916f6f 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -445,8 +445,7 @@ impl StacksClient { pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { let send_request = || { self.stacks_node_client - .get(self.sortition_info_path()) - .query(&[("consensus", ch.to_hex().as_str())]) + .get(format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex())) .send() .map_err(|e| { warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 73789c3f6a..5df67e3636 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -243,7 +243,7 @@ impl RPCRequestHandler for GetSortitionHandler { stacks_parent_sn.consensus_hash.clone() } else { // we actually need to perform the marf lookup - let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; + let last_sortition = handle.get_last_snapshot_with_sortition(stacks_parent_sn.block_height)?; last_sortition.consensus_hash }; diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index 5545aa46cd..8e88086f0a 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -385,7 +385,7 @@ impl StackerDBConfig { } if max_neighbors > u128::from(local_max_neighbors) { - warn!( + debug!( "Contract {} stipulates a maximum number of neighbors ({}) beyond locally-configured maximum {}; defaulting to locally-configured maximum", contract_id, max_neighbors, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index b8adc92643..5be483a9af 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -137,9 +137,16 @@ impl BlockMinerThread { prior_miner: JoinHandle>, ) -> Result<(), NakamotoNodeError> { globals.block_miner(); - prior_miner + let prior_miner_result = prior_miner .join() - .map_err(|_| NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))??; + .map_err(|_| NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))?; + if let Err(e) = prior_miner_result { + // it's okay if the prior miner thread exited with an error. + // in many cases this is expected (i.e., a burnchain block occurred) + // if some error condition should be handled though, this is the place + // to do that handling. + debug!("Prior mining thread exited with: {e:?}"); + } globals.unblock_miner(); Ok(()) } @@ -155,6 +162,7 @@ impl BlockMinerThread { "had_prior_miner" => prior_miner.is_some(), "parent_tenure_id" => %self.parent_tenure_id, "thread_id" => ?thread::current().id(), + "burn_block_consensus_hash" => %self.burn_block.consensus_hash, ); if let Some(prior_miner) = prior_miner { Self::stop_miner(&self.globals, prior_miner)?; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 98cdca63d7..8080721933 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4402,7 +4402,19 @@ fn signer_chainstate() { } } - let proposal = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); + // make sure we're getting a proposal from the current sortition (not 100% guaranteed by + // `next_block_and_mine_commit`) by looping + let time_start = Instant::now(); + let proposal = loop { + let proposal = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); + if proposal.0.header.consensus_hash == sortitions_view.latest_consensus_hash { + break proposal; + } + if time_start.elapsed() > Duration::from_secs(20) { + panic!("Timed out waiting for block proposal from the current bitcoin block"); + } + thread::sleep(Duration::from_secs(1)); + }; let valid = sortitions_view .check_proposal(&signer_client, &signer_db, &proposal.0, &proposal.1) From 7d0557805fe3b6733675eaed706aa474990de874 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 4 Jun 2024 11:52:18 -0500 Subject: [PATCH 0241/1400] fix test build failures --- .../src/chainstate/nakamoto/coordinator/tests.rs | 15 ++++++++------- stackslib/src/chainstate/nakamoto/tests/mod.rs | 4 ++++ stackslib/src/core/tests/mod.rs | 1 + stackslib/src/cost_estimates/tests/common.rs | 1 + 4 files changed, 14 insertions(+), 7 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 0f3abe5c29..648584991d 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -635,18 +635,19 @@ fn test_nakamoto_chainstate_getters() { // scope this to drop the chainstate ref and db tx let chainstate = &peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); - let mut sort_tx = sort_db.tx_handle_begin(&sort_tip.sortition_id).unwrap(); + let sort_handle = sort_db.index_handle(&sort_tip.sortition_id); // no tenures yet - assert!( - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), sort_tx.sqlite()) - .unwrap() - .is_none() - ); + assert!(NakamotoChainState::get_highest_nakamoto_tenure( + chainstate.db(), + sort_handle.sqlite() + ) + .unwrap() + .is_none()); // sortition-existence-check works assert_eq!( - NakamotoChainState::check_sortition_exists(&mut sort_tx, &sort_tip.consensus_hash) + NakamotoChainState::check_sortition_exists(&sort_handle, &sort_tip.consensus_hash) .unwrap(), sort_tip ); diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 4a1b0ad714..959377b7c3 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -678,6 +678,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_height: 100, burn_header_timestamp: 1000, anchored_block_size: 12345, + burn_view: None, }; let epoch2_execution_cost = ExecutionCost { @@ -778,6 +779,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_height: 200, burn_header_timestamp: 1001, anchored_block_size: 123, + burn_view: Some(nakamoto_header.consensus_hash), }; let epoch2_block = StacksBlock { @@ -822,6 +824,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_height: 200, burn_header_timestamp: 1001, anchored_block_size: 123, + burn_view: Some(nakamoto_header_2.consensus_hash), }; let nakamoto_block_2 = NakamotoBlock { @@ -861,6 +864,7 @@ pub fn test_load_store_update_nakamoto_blocks() { burn_header_height: 200, burn_header_timestamp: 1001, anchored_block_size: 123, + burn_view: Some(nakamoto_header_3.consensus_hash), }; let nakamoto_block_3 = NakamotoBlock { diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 905f788dc2..6a3b700186 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -128,6 +128,7 @@ pub fn make_block( burn_header_height: burn_height as u32, burn_header_timestamp: 0, anchored_block_size: 1, + burn_view: None, }; c_tx.commit_block(); diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index 6fd21b0676..fe6527ff53 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -39,6 +39,7 @@ pub fn make_block_receipt(tx_receipts: Vec) -> StacksE burn_header_height: 2, burn_header_timestamp: 2, anchored_block_size: 1, + burn_view: None, }, tx_receipts, matured_rewards: vec![], From 7c8df315cb1d73560f07429f7a93934da7aa37a6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 4 Jun 2024 13:09:05 -0500 Subject: [PATCH 0242/1400] fix: miner should allow prior miner thread to error --- testnet/stacks-node/src/nakamoto_node/miner.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index b324ec561d..0d04d12537 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -137,9 +137,16 @@ impl BlockMinerThread { prior_miner: JoinHandle>, ) -> Result<(), NakamotoNodeError> { globals.block_miner(); - prior_miner + let prior_miner_result = prior_miner .join() - .map_err(|_| NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))??; + .map_err(|_| NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))?; + if let Err(e) = prior_miner_result { + // it's okay if the prior miner thread exited with an error. + // in many cases this is expected (i.e., a burnchain block occurred) + // if some error condition should be handled though, this is the place + // to do that handling. + debug!("Prior mining thread exited with: {e:?}"); + } globals.unblock_miner(); Ok(()) } From 2f0d7cd08297a535bee8826adfeeda7407e32f6b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 4 Jun 2024 14:17:42 -0400 Subject: [PATCH 0243/1400] WIP: burnchain tip has changed Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/miner.rs | 20 ++++++++++--------- .../src/tests/nakamoto_integrations.rs | 3 +-- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 9f424a7379..09bab45751 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -745,19 +745,20 @@ impl BlockMinerThread { .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)? .expect("FATAL: no epoch defined") .epoch_id; + debug!("HERE WE GO"); let mut parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; let vrf_proof = self .make_vrf_proof() .ok_or_else(|| NakamotoNodeError::BadVrfConstruction)?; - if self.mined_blocks.is_empty() { - if parent_block_info.parent_tenure.is_none() { - warn!( - "Miner should be starting a new tenure, but failed to load parent tenure info" - ); - return Err(NakamotoNodeError::ParentNotFound); - } - } + if self.mined_blocks.is_empty() && parent_block_info.parent_tenure.is_none() { + warn!( + "Miner should be starting a new tenure, but failed to load parent tenure info" + ); + return Err(NakamotoNodeError::ParentNotFound); + }; + + debug!("Parent block info parent tenure: {:?} and {:?} mined blocks", parent_block_info.parent_tenure, self.mined_blocks.len()); // create our coinbase if this is the first block we've mined this tenure let tenure_start_info = @@ -846,7 +847,7 @@ impl BlockMinerThread { let (tenure_change_tx, coinbase_tx) = if let Some(ref parent_tenure_info) = parent_block_info.parent_tenure { - debug!("Miner: Constructing tenure change and coinbase transactions"); + debug!("Miner: Constructing tenure change and coinbase transactions: {}", self.reason); let num_blocks_so_far = u32::try_from(parent_tenure_info.parent_tenure_blocks) .expect("FATAL: more than u32 blocks in a tenure"); let mut payload = TenureChangePayload { @@ -878,6 +879,7 @@ impl BlockMinerThread { self.generate_coinbase_tx(current_miner_nonce + 1, target_epoch_id, vrf_proof); (Some(tenure_change_tx), Some(coinbase_tx)) } else { + debug!("Miner: NOT Constructing tenure change and coinbase transactions"); (None, None) }; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index cf615d6391..6ec75bc0af 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4040,8 +4040,7 @@ fn continue_tenure_extend() { info!("Pausing commit op for the next block"); TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); - next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) - .unwrap(); + next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); signer_vote_if_needed( &btc_regtest_controller, From 386d24380167c45b9338384a09d13b30142423de Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 4 Jun 2024 21:55:47 +0300 Subject: [PATCH 0244/1400] migrate the rest of the mutants todos to issues --- clarity/src/vm/ast/parser/v2/mod.rs | 10 +------- stackslib/src/chainstate/burn/db/sortdb.rs | 7 +----- .../chainstate/burn/operations/stack_stx.rs | 11 ++------- stackslib/src/chainstate/coordinator/mod.rs | 3 +-- stackslib/src/chainstate/stacks/boot/mod.rs | 15 ++++-------- stackslib/src/chainstate/stacks/db/blocks.rs | 15 +++--------- stackslib/src/chainstate/stacks/miner.rs | 16 ++++--------- .../burnchains/bitcoin_regtest_controller.rs | 24 +++++-------------- testnet/stacks-node/src/config.rs | 7 ++---- .../stacks-node/src/nakamoto_node/miner.rs | 12 +++------- testnet/stacks-node/src/neon_node.rs | 13 ++++------ testnet/stacks-node/src/tenure.rs | 4 ---- 12 files changed, 32 insertions(+), 105 deletions(-) diff --git a/clarity/src/vm/ast/parser/v2/mod.rs b/clarity/src/vm/ast/parser/v2/mod.rs index f97aeb6b72..4c46e76a4d 100644 --- a/clarity/src/vm/ast/parser/v2/mod.rs +++ b/clarity/src/vm/ast/parser/v2/mod.rs @@ -277,15 +277,7 @@ impl<'a> Parser<'a> { } } - // TODO: #4587 create default for `SymbolicExpression`, then check if mutation tests are caught for these cases: - // ParseResult::from_iter([Some(Default::default())]) - // ParseResult::new(None) - // ParseResult::from_iter([None]) - // ParseResult::new(Some(Default::default())) - // ParseResult::from(None) - // ParseResult::from(Some(Default::default())) - // ParseResult::new() - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4848 #[cfg_attr(test, mutants::skip)] fn handle_open_tuple( &mut self, diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 64ed6b9772..b494e58a7f 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -307,10 +307,6 @@ impl FromRow for LeaderBlockCommitOp { } impl FromRow for StackStxOp { - // TODO: #4587 create default for `StackStxOp`, then check if mutation tests are caught for these case: - // Ok(Default::default()) - // Or keep the skip and remove the comment - #[cfg_attr(test, mutants::skip)] fn from_row<'a>(row: &'a Row) -> Result { let txid = Txid::from_column(row, "txid")?; let vtxindex: u32 = row.get_unwrap("vtxindex"); @@ -5132,8 +5128,7 @@ impl SortitionDB { query_row(conn, sql, args) } - // TODO: #4587 add test for the `None` case returning Ok(false) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4849 #[cfg_attr(test, mutants::skip)] /// Are microblocks disabled by Epoch 2.5 at the height specified /// in `at_burn_height`? diff --git a/stackslib/src/chainstate/burn/operations/stack_stx.rs b/stackslib/src/chainstate/burn/operations/stack_stx.rs index 3e213e8811..c4c54b9737 100644 --- a/stackslib/src/chainstate/burn/operations/stack_stx.rs +++ b/stackslib/src/chainstate/burn/operations/stack_stx.rs @@ -181,9 +181,7 @@ impl StackStxOp { } } - // TODO: #4587 create default for `ParsedData`, then check if mutation tests are caught for these case: - // Some(Default::default()) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4850 #[cfg_attr(test, mutants::skip)] fn parse_data(data: &Vec) -> Option { /* @@ -275,9 +273,7 @@ impl StackStxOp { ) } - // TODO: #4587 create default for `StackStxOp` and `op_error`, then check if mutation tests are caught for these case: - // Ok(Default::default()) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4851 #[cfg_attr(test, mutants::skip)] /// parse a StackStxOp /// `pox_sunset_ht` is the height at which PoX *disables* @@ -403,9 +399,6 @@ impl StacksMessageCodec for StackStxOp { } impl StackStxOp { - // TODO: #4587 add tests for `Ok(())` returning case. - // Or keep the skip and remove the comment - #[cfg_attr(test, mutants::skip)] pub fn check(&self) -> Result<(), op_error> { if self.stacked_ustx == 0 { warn!("Invalid StackStxOp, must have positive ustx"); diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index c34eaaafab..ac47d9d92b 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -2414,8 +2414,7 @@ impl< return false; } - // TODO: #4587,create default for BlockHeaderHash, then check if mutation tests are caught for these cases: - // Ok(Some(Default::default)) + // TODO: add tests from mutation testing results #4852 #[cfg_attr(test, mutants::skip)] /// Handle a new burnchain block, optionally rolling back the canonical PoX sortition history /// and setting it up to be replayed in the event the network affirms a different history. If diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 96ab7a6639..c79e28cb8b 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -423,8 +423,7 @@ impl StacksChainState { result } - // TODO: #4587 create default for StacksTransactionEvent , then check if mutation tests are caught for these cases: - // Ok(vec![Default::default()]) + // TODO: add tests from mutation testing results #4854 #[cfg_attr(test, mutants::skip)] /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. @@ -438,8 +437,7 @@ impl StacksChainState { Self::handle_pox_cycle_missed_unlocks(clarity, cycle_number, cycle_info, &PoxVersions::Pox2) } - // TODO: #4587 create default for StacksTransactionEvent , then check if mutation tests are caught for these cases: - // Ok(vec![Default::default()]) + // TODO: add tests from mutation testing results #4854 #[cfg_attr(test, mutants::skip)] /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. @@ -453,8 +451,7 @@ impl StacksChainState { Self::handle_pox_cycle_missed_unlocks(clarity, cycle_number, cycle_info, &PoxVersions::Pox3) } - // TODO: #4587 create default for StacksTransactionEvent , then check if mutation tests are caught for these cases: - // Ok(vec![Default::default()]) + // TODO: add tests from mutation testing results #4854 #[cfg_attr(test, mutants::skip)] /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. @@ -469,8 +466,7 @@ impl StacksChainState { Ok(vec![]) } - // TODO: #4587 create default for StacksTransactionEvent , then check if mutation tests are caught for these cases: - // Ok(vec![Default::default()]) + // TODO: add tests from mutation testing results #4854 #[cfg_attr(test, mutants::skip)] /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. @@ -764,8 +760,7 @@ impl StacksChainState { Some(signer_set) } - // TODO: #4587 create default for RewardSet , then check if mutation tests are caught for these cases: - // Default::default() + // TODO: add tests from mutation testing results #4855 #[cfg_attr(test, mutants::skip)] /// Given a threshold and set of registered addresses, return a reward set where /// every entry address has stacked more than the threshold, and addresses diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index e18421632d..de093d2234 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -4132,8 +4132,7 @@ impl StacksChainState { Ok((applied, receipts)) } - // TODO: #4587 create default for `StacksTransactionReceipt`, then check if mutation tests are caught for these case: - // vec![Default::default()] + // TODO: add tests from mutation testing results #4856 // Or keep the skip and remove the comment #[cfg_attr(test, mutants::skip)] /// Process any Stacking-related bitcoin operations @@ -4241,10 +4240,7 @@ impl StacksChainState { all_receipts } - // TODO: #4587 add test for `Ok(vec![])` returning case. - // TODO: #4587 create default for `Value`, then check if mutation tests are caught for these case: - // Ok(vec![Default::default()]) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4857 #[cfg_attr(test, mutants::skip)] pub fn collect_pox_4_stacking_args(op: &StackStxOp) -> Result, String> { let signer_key = match op.signer_key { @@ -6030,12 +6026,7 @@ impl StacksChainState { Ok(next_microblocks) } - // TODO: #4587 add default for `StacksEpochReceipt` and `TransactionPayload`, then check if mutation tests are caught for these cases: - // Ok((None, Some(Default::default()))) - // Ok((Some(Default::default()), None)) - // Ok((Some(Default::default()), Some(Default::default()))) - // This is caught: Ok(None, None) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4858 #[cfg_attr(test, mutants::skip)] /// Find and process the next staging block. /// Return the next chain tip if we processed this block, or None if we couldn't. diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 6fb2bc4be3..3db5ca89c0 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1801,9 +1801,7 @@ impl StacksBlockBuilder { } } - // TODO: #4587 create default for MinerEpochInfo, then check if mutation tests are caught for these case: - // Ok(Default::default()) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4859 #[cfg_attr(test, mutants::skip)] /// This function should be called before `epoch_begin`. /// It loads the parent microblock stream, sets the parent microblock, and returns @@ -2059,9 +2057,7 @@ impl StacksBlockBuilder { Ok((block, size, cost, mblock_opt)) } - // TODO: #4587 create default for `StacksBlockBuilder`, then check if mutation tests are caught for these case: - // Ok(Default::default()) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4860 #[cfg_attr(test, mutants::skip)] /// Create a block builder for mining pub fn make_block_builder( @@ -2117,9 +2113,7 @@ impl StacksBlockBuilder { Ok(builder) } - // TODO: #4587 create default for `StacksBlockBuilder`, then check if mutation tests are caught for these case: - // Ok(Default::default()) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4860 #[cfg_attr(test, mutants::skip)] /// Create a block builder for regtest mining pub fn make_regtest_block_builder( @@ -2401,9 +2395,7 @@ impl StacksBlockBuilder { Ok((blocked, tx_events)) } - // TODO: #4587 create default for `StacksBlock` and `ExecutionCost`, then check if mutation tests are caught for these cases: - // Ok((Default::default(), Default::default(), 1)) - // Ok((Default::default(), Default::default(), 0)) + // TODO: add tests from mutation testing results #4861 // Or keep the skip and remove the comment #[cfg_attr(test, mutants::skip)] /// Given access to the mempool, mine an anchored block with no more than the given execution cost. diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index bb6a319651..30f088a96f 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -125,9 +125,7 @@ pub fn addr2str(btc_addr: &BitcoinAddress) -> String { format!("{}", &btc_addr) } -// TODO: #4587 create default for `BurnchainParameters`, then check if mutation tests are caught for these case: -// Default::default() -// Or keep the skip and remove the comment +// TODO: add tests from mutation testing results #4862 #[cfg_attr(test, mutants::skip)] pub fn burnchain_params_from_config(config: &BurnchainConfig) -> BurnchainParameters { let (network, _) = config.get_bitcoin_network(); @@ -139,9 +137,7 @@ pub fn burnchain_params_from_config(config: &BurnchainConfig) -> BurnchainParame params } -// TODO: #4587 create default for `BitcoinIndexer`, then check if mutation tests are caught for these case: -// Default::default() -// Or keep the skip and remove the comment +// TODO: add tests from mutation testing results #4863 #[cfg_attr(test, mutants::skip)] /// Helper method to create a BitcoinIndexer pub fn make_bitcoin_indexer( @@ -280,9 +276,7 @@ impl BitcoinRegtestController { BitcoinRegtestController::with_burnchain(config, coordinator_channel, None, None) } - // TODO: #4587 create default for `BitcoinRegtestController`, then check if mutation tests are caught for these case: - // Default::default() - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4864 #[cfg_attr(test, mutants::skip)] pub fn with_burnchain( config: Config, @@ -353,9 +347,7 @@ impl BitcoinRegtestController { } } - // TODO: #4587 create default for `BitcoinRegtestController`, then check if mutation tests are caught for these case: - // Default::default() - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4864 #[cfg_attr(test, mutants::skip)] /// create a dummy bitcoin regtest controller. /// used just for submitting bitcoin ops. @@ -1634,9 +1626,7 @@ impl BitcoinRegtestController { } } - // TODO: #4587 create default for `Transaction` and `UTXOSet`, then check if mutation tests are caught for these case: - // Some((Default::default(), Default::default())) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4865 #[cfg_attr(test, mutants::skip)] fn prepare_tx( &mut self, @@ -2005,9 +1995,7 @@ impl BitcoinRegtestController { self.config.miner.segwit = segwit; } - // TODO: #4587 create default for `SerializedTx`, then check if mutation tests are caught for these case: - // Some(Default::default()) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4866 #[cfg_attr(test, mutants::skip)] pub fn make_operation_tx( &mut self, diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index c2e6193685..b028169729 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -827,9 +827,7 @@ impl Config { } } - // TODO: #4587 create default for `StacksEpoch`, then check if mutation tests are caught for these case: - // Ok(vec![Default::default()]) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4866 #[cfg_attr(test, mutants::skip)] fn make_epochs( conf_epochs: &[StacksEpochConfigFile], @@ -1278,8 +1276,7 @@ impl Config { } } - // TODO: #4587 create default for BlockBuilderSettings, then check if mutation tests are caught for these case: - // Default::default() + // TODO: add tests from mutation testing results #4867 #[cfg_attr(test, mutants::skip)] pub fn make_block_builder_settings( &self, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 1e17a19cdc..1bbdd25512 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -576,9 +576,7 @@ impl BlockMinerThread { tx_signer.get_tx().unwrap() } - // TODO: #4587 create default for `NakamotoNodeError` and `ParentStacksBlockInfo`, then check if mutation tests are caught for these case: - // Ok(Default::default()) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4869 #[cfg_attr(test, mutants::skip)] /// Load up the parent block info for mining. /// If there's no parent because this is the first block, then return the genesis block's info. @@ -683,9 +681,7 @@ impl BlockMinerThread { Some(vrf_proof) } - // TODO: #4587 create default for `NakamotoBlock` and `NakamotoNodeError`, then check if mutation tests are caught for these case: - // Ok(Default::default()) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4869 #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a /// burnchain block-commit transaction. If we succeed, then return the assembled block. @@ -840,9 +836,7 @@ impl BlockMinerThread { } impl ParentStacksBlockInfo { - // TODO: #4587 craete default for `NakamotoBlock` and `NakamotoNodeError`, then check if mutation tests are caught for these case: - // Ok(Default::default()) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4869 #[cfg_attr(test, mutants::skip)] /// Determine where in the set of forks to attempt to mine the next anchored block. /// `mine_tip_ch` and `mine_tip_bhh` identify the parent block on top of which to mine. diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 7c8295f9aa..8b149f0b1a 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1507,10 +1507,7 @@ impl BlockMinerThread { Some((*best_tip).clone()) } - // TODO: #4587 create default for `ParentStacksBlockInfo`, then check if mutation tests are caught for these cases: - // (Some(Default::default()), true) - // (Some(Default::default()), false) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4870 #[cfg_attr(test, mutants::skip)] /// Load up the parent block info for mining. /// If there's no parent because this is the first block, then return the genesis block's info. @@ -2227,9 +2224,7 @@ impl BlockMinerThread { return false; } - // TODO: #4587 create default for `MinerThreadResult`, then check if mutation tests are caught for these case: - // Some(Default::default()) - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4871 #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a /// burnchain block-commit transaction. If we succeed, then return the assembled block data as @@ -3105,7 +3100,7 @@ impl RelayerThread { (true, miner_tip) } - // TODO: #4587 add tests for `true` and `false` returning cases + // TODO: add tests from mutation testing results #4872 #[cfg_attr(test, mutants::skip)] /// Process all new tenures that we're aware of. /// Clear out stale tenure artifacts as well. @@ -3580,7 +3575,7 @@ impl RelayerThread { true } - // TODO: #4587 add tests for `true` and `false` returning cases + // TODO: add tests from mutation testing results #4872 #[cfg_attr(test, mutants::skip)] /// See if we should run a microblock tenure now. /// Return true if so; false if not diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index 30f49e1773..fd7683f569 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -72,10 +72,6 @@ impl<'a> Tenure { } } - // TODO: #4587 create default for `TenureArtifacts` , then check if mutation tests are caught for these case: - // Some(Default::default()) - // Or keep the skip and remove the comment - #[cfg_attr(test, mutants::skip)] pub fn run(&mut self, burn_dbconn: &SortitionDBConn) -> Option { info!("Node starting new tenure with VRF {:?}", self.vrf_seed); From 64598a221d581cbb0800cbb7510a84df6d4734fa Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 4 Jun 2024 22:04:09 +0300 Subject: [PATCH 0245/1400] migrate missed mutant todo to issue --- stackslib/src/chainstate/stacks/miner.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 3db5ca89c0..008de4afe1 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -170,9 +170,7 @@ pub struct BlockBuilderSettings { } impl BlockBuilderSettings { - // TODO: #4587 create default for BlockBuilderSettings, then check if mutation tests are caught for these case: - // Default::default() - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4873 #[cfg_attr(test, mutants::skip)] pub fn limited() -> BlockBuilderSettings { BlockBuilderSettings { @@ -183,9 +181,7 @@ impl BlockBuilderSettings { } } - // TODO: #4587 create default for BlockBuilderSettings, then check if mutation tests are caught for these case: - // Default::default() - // Or keep the skip and remove the comment + // TODO: add tests from mutation testing results #4873 #[cfg_attr(test, mutants::skip)] pub fn max_value() -> BlockBuilderSettings { BlockBuilderSettings { From ba0e654391f69ed3dc20de31a081e9a553053ff3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 4 Jun 2024 15:12:11 -0400 Subject: [PATCH 0246/1400] chore: fix build issue --- stackslib/src/net/tests/download/nakamoto.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index c0595301d4..719e901076 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -930,6 +930,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); @@ -938,6 +939,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .get(&tip_rc) .cloned() .unwrap() + .reward_cycle_info .known_selected_anchor_block_owned() .unwrap(), ); From ed15f3b6a3dc072cd3a5c983f5af4ea6899f633a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 4 Jun 2024 15:44:37 -0400 Subject: [PATCH 0247/1400] WIP: burnchain tip has changed due to not using selected burn block Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/miner.rs | 93 ++++++++++++------- .../stacks-node/src/nakamoto_node/relayer.rs | 59 ++++++++---- 2 files changed, 98 insertions(+), 54 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 09bab45751..8f2a4c3aab 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -68,7 +68,6 @@ pub enum MinerDirective { }, /// The miner should try to continue their tenure if they are the active miner ContinueTenure { - parent_tenure_start: StacksBlockId, new_burn_view: ConsensusHash, }, /// The miner did not win sortition @@ -134,6 +133,8 @@ pub struct BlockMinerThread { /// Copy of the node's registered VRF key registered_key: RegisteredKey, /// Burnchain block snapshot which elected this miner + burn_election_block: BlockSnapshot, + /// Current burnchain tip burn_block: BlockSnapshot, /// The start of the parent tenure for this tenure parent_tenure_id: StacksBlockId, @@ -148,6 +149,7 @@ impl BlockMinerThread { pub fn new( rt: &RelayerThread, registered_key: RegisteredKey, + burn_election_block: BlockSnapshot, burn_block: BlockSnapshot, parent_tenure_id: StacksBlockId, reason: MinerReason, @@ -159,6 +161,7 @@ impl BlockMinerThread { burnchain: rt.burnchain.clone(), mined_blocks: vec![], registered_key, + burn_election_block, burn_block, event_dispatcher: rt.event_dispatcher.clone(), parent_tenure_id, @@ -251,10 +254,7 @@ impl BlockMinerThread { } = &mut self.reason { // We should not issue multiple tenure change transactions for the same tenure - if !*tenure_change_mined { - debug!("Miner: Tenure change mined"); - *tenure_change_mined = true; - } + *tenure_change_mined = true; } } @@ -745,24 +745,30 @@ impl BlockMinerThread { .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)? .expect("FATAL: no epoch defined") .epoch_id; - debug!("HERE WE GO"); + debug!("HERE WE GO"); let mut parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; let vrf_proof = self .make_vrf_proof() .ok_or_else(|| NakamotoNodeError::BadVrfConstruction)?; if self.mined_blocks.is_empty() && parent_block_info.parent_tenure.is_none() { - warn!( - "Miner should be starting a new tenure, but failed to load parent tenure info" - ); + warn!("Miner should be starting a new tenure, but failed to load parent tenure info"); return Err(NakamotoNodeError::ParentNotFound); }; - debug!("Parent block info parent tenure: {:?} and {:?} mined blocks", parent_block_info.parent_tenure, self.mined_blocks.len()); + debug!( + "Parent block info parent tenure: {:?} and {:?} mined blocks", + parent_block_info.parent_tenure, + self.mined_blocks.len() + ); // create our coinbase if this is the first block we've mined this tenure - let tenure_start_info = - self.make_tenure_start_info(&parent_block_info, vrf_proof, target_epoch_id)?; + let tenure_start_info = self.make_tenure_start_info( + &chain_state, + &parent_block_info, + vrf_proof, + target_epoch_id, + )?; parent_block_info.stacks_parent_header.microblock_tail = None; @@ -778,8 +784,8 @@ impl BlockMinerThread { &burn_db.index_conn(), &mut mem_pool, &parent_block_info.stacks_parent_header, - &self.burn_block.consensus_hash, - self.burn_block.total_burn, + &self.burn_election_block.consensus_hash, + self.burn_election_block.total_burn, tenure_start_info, self.config .make_nakamoto_block_builder_settings(self.globals.get_miner_status()), @@ -837,6 +843,7 @@ impl BlockMinerThread { /// Create the tenure start info for the block we're going to build fn make_tenure_start_info( &self, + chainstate: &StacksChainState, parent_block_info: &ParentStacksBlockInfo, vrf_proof: VRFProof, target_epoch_id: StacksEpochId, @@ -847,37 +854,53 @@ impl BlockMinerThread { let (tenure_change_tx, coinbase_tx) = if let Some(ref parent_tenure_info) = parent_block_info.parent_tenure { - debug!("Miner: Constructing tenure change and coinbase transactions: {}", self.reason); - let num_blocks_so_far = u32::try_from(parent_tenure_info.parent_tenure_blocks) - .expect("FATAL: more than u32 blocks in a tenure"); + debug!( + "Miner: Constructing tenure change and coinbase transactions: {}", + self.reason + ); let mut payload = TenureChangePayload { tenure_consensus_hash: self.burn_block.consensus_hash.clone(), prev_tenure_consensus_hash: parent_tenure_info.parent_tenure_consensus_hash, burn_view_consensus_hash: self.burn_block.consensus_hash.clone(), previous_tenure_end: parent_block_id, - previous_tenure_blocks: num_blocks_so_far, + previous_tenure_blocks: u32::try_from(parent_tenure_info.parent_tenure_blocks) + .expect("FATAL: more than u32 blocks in a tenure"), cause: TenureChangeCause::BlockFound, pubkey_hash: self.keychain.get_nakamoto_pkh(), }; - if let MinerReason::Extended { - burn_view_consensus_hash, - tenure_change_mined, - } = &self.reason - { - debug!("Tenure change mined {tenure_change_mined}"); - if !*tenure_change_mined { - debug!("Miner: Extending tenure"; "burn_view_consensus_hash" => %burn_view_consensus_hash, "parent_block_id" => %parent_block_id, "num_blocks_so_far" => num_blocks_so_far); - payload = payload.extend( - *burn_view_consensus_hash, - parent_block_id, - num_blocks_so_far, - ); + + match &self.reason { + MinerReason::BlockFound => { + debug!("Miner: Constructing tenure change and coinbase transactions"); + let tenure_change_tx = self.generate_tenure_change_tx(current_miner_nonce, payload)?; + let coinbase_tx = + self.generate_coinbase_tx(current_miner_nonce + 1, target_epoch_id, vrf_proof); + (Some(tenure_change_tx), Some(coinbase_tx)) + }, + MinerReason::Extended { + burn_view_consensus_hash, + tenure_change_mined, + } => { + let num_blocks_so_far = NakamotoChainState::get_nakamoto_tenure_length( + chainstate.db(), + &self.burn_election_block.consensus_hash, + ) + .map_err(NakamotoNodeError::MiningFailure)?; + debug!("Tenure change mined {tenure_change_mined}"); + if !*tenure_change_mined { + debug!("Miner: Extending tenure"; "burn_view_consensus_hash" => %burn_view_consensus_hash, "parent_block_id" => %parent_block_id, "num_blocks_so_far" => num_blocks_so_far); + payload = payload.extend( + *burn_view_consensus_hash, + parent_block_id, + num_blocks_so_far, + ); + let tenure_change_tx = self.generate_tenure_change_tx(current_miner_nonce, payload)?; + (Some(tenure_change_tx), None) + } else { + (None, None) + } } } - let tenure_change_tx = self.generate_tenure_change_tx(current_miner_nonce, payload)?; - let coinbase_tx = - self.generate_coinbase_tx(current_miner_nonce + 1, target_epoch_id, vrf_proof); - (Some(tenure_change_tx), Some(coinbase_tx)) } else { debug!("Miner: NOT Constructing tenure change and coinbase transactions"); (None, None) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index c2a86e0942..aa52ce43ca 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -341,7 +341,6 @@ impl RelayerThread { } } else { MinerDirective::ContinueTenure { - parent_tenure_start: committed_index_hash, new_burn_view: consensus_hash, } } @@ -543,28 +542,29 @@ impl RelayerThread { fn create_block_miner( &mut self, registered_key: RegisteredKey, - last_burn_block: BlockSnapshot, + burn_tip: BlockSnapshot, + burn_election_block: BlockSnapshot, parent_tenure_id: StacksBlockId, reason: MinerReason, ) -> Result { debug!("Relayer: creating block miner thread"; "reason" => %reason ); - if fault_injection_skip_mining(&self.config.node.rpc_bind, last_burn_block.block_height) { + if fault_injection_skip_mining(&self.config.node.rpc_bind, burn_tip.block_height) { debug!( "Relayer: fault injection skip mining at block height {}", - last_burn_block.block_height + burn_tip.block_height ); return Err(NakamotoNodeError::FaultInjection); } - let burn_header_hash = last_burn_block.burn_header_hash.clone(); + let burn_header_hash = burn_tip.burn_header_hash.clone(); let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); - if burn_chain_tip != burn_header_hash && matches!(reason, MinerReason::BlockFound) { + if burn_chain_tip != burn_header_hash { debug!( "Relayer: Drop stale RunTenure for {}: current sortition is for {}", &burn_header_hash, &burn_chain_tip @@ -575,16 +575,19 @@ impl RelayerThread { debug!( "Relayer: Spawn tenure thread"; - "height" => last_burn_block.block_height, + "height" => burn_tip.block_height, "burn_header_hash" => %burn_header_hash, "parent_tenure_id" => %parent_tenure_id, - "reason" => %reason + "reason" => %reason, + "burn_election_block.consensus_hash" => %burn_election_block.consensus_hash, + "burn_tip.consensus_hash" => %burn_tip.consensus_hash, ); let miner_thread_state = BlockMinerThread::new( self, registered_key, - last_burn_block, + burn_election_block, + burn_tip, parent_tenure_id, reason, ); @@ -595,6 +598,7 @@ impl RelayerThread { &mut self, parent_tenure_start: StacksBlockId, burn_tip: BlockSnapshot, + block_election_snapshot: BlockSnapshot, reason: MinerReason, ) -> Result<(), NakamotoNodeError> { // when starting a new tenure, block the mining thread if its currently running. @@ -608,8 +612,13 @@ impl RelayerThread { warn!("Trying to start new tenure, but no VRF key active"); NakamotoNodeError::NoVRFKeyActive })?; - let new_miner_state = - self.create_block_miner(vrf_key, burn_tip, parent_tenure_start, reason)?; + let new_miner_state = self.create_block_miner( + vrf_key, + burn_tip, + block_election_snapshot, + parent_tenure_start, + reason, + )?; let new_miner_handle = std::thread::Builder::new() .name(format!("miner.{parent_tenure_start}")) @@ -653,7 +662,6 @@ impl RelayerThread { fn continue_tenure( &mut self, - parent_tenure_start: StacksBlockId, new_burn_view: ConsensusHash, ) -> Result<(), NakamotoNodeError> { if let Err(e) = self.stop_tenure() { @@ -662,7 +670,7 @@ impl RelayerThread { } debug!("Relayer: successfully stopped tenure."); // Check if we should undergo a tenure change to switch to the new burn view - let block_snapshot = self + let block_election_snapshot = self .sortdb .index_handle_at_tip() .get_last_snapshot_with_sortition_from_tip() @@ -671,18 +679,31 @@ impl RelayerThread { NakamotoNodeError::SnapshotNotFoundForChainTip })?; - if Some(block_snapshot.winning_block_txid) != self.current_mining_commit_tx { + if Some(block_election_snapshot.winning_block_txid) != self.current_mining_commit_tx { debug!("Relayer: the miner did not win the last sortition. No tenure to continue."; "current_mining_commit_tx" => %self.current_mining_commit_tx.unwrap_or(Txid([0u8; 32])), - "block_snapshot_winning_block_txid" => %block_snapshot.winning_block_txid + "block_snapshot_winning_block_txid" => %block_election_snapshot.winning_block_txid ); return Ok(()); } else { debug!("Relayer: the miner won the last sortition. Continuing tenure."); }; + + let burn_tip = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &new_burn_view) + .map_err(|e| { + error!("Relayer: failed to get block snapshot for new burn view: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? + .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for new burn view"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + match self.start_new_tenure( - parent_tenure_start, - block_snapshot, + burn_tip.get_canonical_stacks_block_id(), // For tenure extend, we should be extending off the canonical tip + burn_tip, + block_election_snapshot, MinerReason::Extended { burn_view_consensus_hash: new_burn_view, tenure_change_mined: false, @@ -713,6 +734,7 @@ impl RelayerThread { burnchain_tip, } => match self.start_new_tenure( parent_tenure_start, + burnchain_tip.clone(), burnchain_tip, MinerReason::BlockFound, ) { @@ -725,8 +747,7 @@ impl RelayerThread { }, MinerDirective::ContinueTenure { new_burn_view, - parent_tenure_start, - } => match self.continue_tenure(parent_tenure_start, new_burn_view) { + } => match self.continue_tenure(new_burn_view) { Ok(()) => { debug!("Relayer: successfully handled continue tenure."); } From fd0c97913cfb64516c62b54bdf720ee036bd59a5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 4 Jun 2024 15:00:34 -0500 Subject: [PATCH 0248/1400] fix tests --- stackslib/src/net/api/tests/get_tenures_fork_info.rs | 2 +- stackslib/src/net/api/tests/getsortition.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/tests/get_tenures_fork_info.rs b/stackslib/src/net/api/tests/get_tenures_fork_info.rs index 88e3d875ff..2b5abcfb36 100644 --- a/stackslib/src/net/api/tests/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/tests/get_tenures_fork_info.rs @@ -31,7 +31,7 @@ fn make_preamble(start: &T, stop: &R) -> HttpRequestPrea HttpRequestPreamble { version: HttpVersion::Http11, verb: "GET".into(), - path_and_query_str: format!("/v3/tenures_fork_info/{start}/{stop}"), + path_and_query_str: format!("/v3/tenures/fork_info/{start}/{stop}"), host: PeerHost::DNS("localhost".into(), 0), content_type: None, content_length: Some(0), diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs index d48bc54a3a..8541b73eb6 100644 --- a/stackslib/src/net/api/tests/getsortition.rs +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -29,7 +29,7 @@ fn make_preamble(query: &str) -> HttpRequestPreamble { HttpRequestPreamble { version: HttpVersion::Http11, verb: "GET".into(), - path_and_query_str: format!("/v3/sortition{query}"), + path_and_query_str: format!("/v3/sortitions{query}"), host: PeerHost::DNS("localhost".into(), 0), content_type: None, content_length: Some(0), From a2c0553f0b7655521e7761f976defd4c41529335 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 4 Jun 2024 16:19:19 -0400 Subject: [PATCH 0249/1400] WIP: cargo fmt stuff Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/miner.rs | 33 ++++++++++--------- .../stacks-node/src/nakamoto_node/relayer.rs | 25 +++++++------- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 8f2a4c3aab..fdff434044 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -67,9 +67,7 @@ pub enum MinerDirective { burnchain_tip: BlockSnapshot, }, /// The miner should try to continue their tenure if they are the active miner - ContinueTenure { - new_burn_view: ConsensusHash, - }, + ContinueTenure { new_burn_view: ConsensusHash }, /// The miner did not win sortition StopTenure, } @@ -859,9 +857,9 @@ impl BlockMinerThread { self.reason ); let mut payload = TenureChangePayload { - tenure_consensus_hash: self.burn_block.consensus_hash.clone(), + tenure_consensus_hash: self.burn_election_block.consensus_hash.clone(), prev_tenure_consensus_hash: parent_tenure_info.parent_tenure_consensus_hash, - burn_view_consensus_hash: self.burn_block.consensus_hash.clone(), + burn_view_consensus_hash: self.burn_election_block.consensus_hash.clone(), previous_tenure_end: parent_block_id, previous_tenure_blocks: u32::try_from(parent_tenure_info.parent_tenure_blocks) .expect("FATAL: more than u32 blocks in a tenure"), @@ -872,29 +870,34 @@ impl BlockMinerThread { match &self.reason { MinerReason::BlockFound => { debug!("Miner: Constructing tenure change and coinbase transactions"); - let tenure_change_tx = self.generate_tenure_change_tx(current_miner_nonce, payload)?; - let coinbase_tx = - self.generate_coinbase_tx(current_miner_nonce + 1, target_epoch_id, vrf_proof); + let tenure_change_tx = + self.generate_tenure_change_tx(current_miner_nonce, payload)?; + let coinbase_tx = self.generate_coinbase_tx( + current_miner_nonce + 1, + target_epoch_id, + vrf_proof, + ); (Some(tenure_change_tx), Some(coinbase_tx)) - }, + } MinerReason::Extended { burn_view_consensus_hash, tenure_change_mined, } => { - let num_blocks_so_far = NakamotoChainState::get_nakamoto_tenure_length( - chainstate.db(), - &self.burn_election_block.consensus_hash, - ) - .map_err(NakamotoNodeError::MiningFailure)?; debug!("Tenure change mined {tenure_change_mined}"); if !*tenure_change_mined { + let num_blocks_so_far = NakamotoChainState::get_nakamoto_tenure_length( + chainstate.db(), + &self.burn_election_block.consensus_hash, + ) + .map_err(NakamotoNodeError::MiningFailure)?; debug!("Miner: Extending tenure"; "burn_view_consensus_hash" => %burn_view_consensus_hash, "parent_block_id" => %parent_block_id, "num_blocks_so_far" => num_blocks_so_far); payload = payload.extend( *burn_view_consensus_hash, parent_block_id, num_blocks_so_far, ); - let tenure_change_tx = self.generate_tenure_change_tx(current_miner_nonce, payload)?; + let tenure_change_tx = + self.generate_tenure_change_tx(current_miner_nonce, payload)?; (Some(tenure_change_tx), None) } else { (None, None) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index aa52ce43ca..62026add20 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -660,10 +660,7 @@ impl RelayerThread { Ok(()) } - fn continue_tenure( - &mut self, - new_burn_view: ConsensusHash, - ) -> Result<(), NakamotoNodeError> { + fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> { if let Err(e) = self.stop_tenure() { error!("Relayer: Failed to stop tenure: {:?}", e); return Ok(()); @@ -745,17 +742,17 @@ impl RelayerThread { error!("Relayer: Failed to start new tenure: {:?}", e); } }, - MinerDirective::ContinueTenure { - new_burn_view, - } => match self.continue_tenure(new_burn_view) { - Ok(()) => { - debug!("Relayer: successfully handled continue tenure."); + MinerDirective::ContinueTenure { new_burn_view } => { + match self.continue_tenure(new_burn_view) { + Ok(()) => { + debug!("Relayer: successfully handled continue tenure."); + } + Err(e) => { + error!("Relayer: Failed to continue tenure: {:?}", e); + return false; + } } - Err(e) => { - error!("Relayer: Failed to continue tenure: {:?}", e); - return false; - } - }, + } MinerDirective::StopTenure => match self.stop_tenure() { Ok(()) => { debug!("Relayer: successfully stopped tenure."); From 5a11ee13d9614b095af9de7c645c49917d73197d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 3 Jun 2024 20:07:06 -0400 Subject: [PATCH 0250/1400] feat: add `timestamp` to nakamoto block header `(get-block-info? time block-height)` uses this new timestamp in Clarity 3, instead of using the timestamp from the burn block. --- clarity/src/vm/database/clarity_db.rs | 16 +++- clarity/src/vm/docs/mod.rs | 12 ++- clarity/src/vm/functions/database.rs | 19 +++-- clarity/src/vm/test_util/mod.rs | 3 + clarity/src/vm/version.rs | 4 + stackslib/src/chainstate/nakamoto/mod.rs | 23 ++++- .../src/chainstate/nakamoto/tests/mod.rs | 10 +++ .../chainstate/stacks/boot/contract_tests.rs | 5 ++ stackslib/src/clarity_cli.rs | 9 ++ stackslib/src/clarity_vm/database/mod.rs | 18 ++++ stackslib/src/clarity_vm/tests/contracts.rs | 83 +++++++++++++++++++ stackslib/src/net/tests/download/nakamoto.rs | 3 + .../stacks-node/src/nakamoto_node/miner.rs | 2 +- testnet/stacks-node/src/tests/signer/v1.rs | 2 + 14 files changed, 198 insertions(+), 11 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 7a1aa3e3bc..0e23443cec 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -93,6 +93,7 @@ pub trait HeadersDB { -> Option; fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option; fn get_vrf_seed_for_block(&self, id_bhh: &StacksBlockId) -> Option; + fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option; fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option; fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option; fn get_miner_address(&self, id_bhh: &StacksBlockId) -> Option; @@ -169,6 +170,9 @@ impl HeadersDB for &dyn HeadersDB { fn get_burn_block_time_for_block(&self, bhh: &StacksBlockId) -> Option { (*self).get_burn_block_time_for_block(bhh) } + fn get_block_time_for_block(&self, bhh: &StacksBlockId) -> Option { + (*self).get_block_time_for_block(bhh) + } fn get_burn_block_height_for_block(&self, bhh: &StacksBlockId) -> Option { (*self).get_burn_block_height_for_block(bhh) } @@ -312,6 +316,9 @@ impl HeadersDB for NullHeadersDB { None } } + fn get_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option { + None + } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { @@ -993,13 +1000,20 @@ impl<'a> ClarityDatabase<'a> { .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } - pub fn get_block_time(&mut self, block_height: u32) -> Result { + pub fn get_burn_block_time(&mut self, block_height: u32) -> Result { let id_bhh = self.get_index_block_header_hash(block_height)?; self.headers_db .get_burn_block_time_for_block(&id_bhh) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } + pub fn get_block_time(&mut self, block_height: u32) -> Result { + let id_bhh = self.get_index_block_header_hash(block_height)?; + self.headers_db + .get_block_time_for_block(&id_bhh) + .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) + } + pub fn get_burnchain_block_header_hash( &mut self, block_height: u32, diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 940b2f2f6a..91ec6741f1 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -1746,8 +1746,15 @@ be the same `principal` that received the block reward, since Stacks 2.1 support the address of the `principal` that produced the block. `time`: This property returns a `uint` value of the block header time field. This is a Unix epoch timestamp in seconds -which roughly corresponds to when the block was mined. **Note**: this does not increase monotonically with each block +which roughly corresponds to when the block was mined. + In Clarity 2, this timestamp comes from the burnchain block. **Note**: this does not increase monotonically with each block and block times are accurate only to within two hours. See [BIP113](https://github.com/bitcoin/bips/blob/master/bip-0113.mediawiki) for more information. + In Clarity 3, which activates with epoch 3.0 (Nakamoto), this timestamp comes from the Stacks block itself. **Note**: this is the time, according to the miner, when +the block started to be mined, but is not guaranteed to be accurate. It will be validated by the signers to be: + - Greater than the timestamp of the previous block + - Less than 15 seconds into the future (according to their own local clocks) + +`vrf-seed`: This property returns a `(buff 32)` value of the VRF seed for the corresponding block. New in Stacks 2.1: @@ -2740,6 +2747,9 @@ mod test { fn get_burn_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option { Some(1557860301) } + fn get_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option { + Some(1557860302) + } fn get_burn_block_height_for_block(&self, _id_bhh: &StacksBlockId) -> Option { Some(567890) } diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index b047faf682..c84d037aca 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -34,7 +34,7 @@ use crate::vm::types::{ BlockInfoProperty, BuffData, BurnBlockInfoProperty, OptionalData, PrincipalData, SequenceData, TupleData, TypeSignature, Value, BUFF_32, }; -use crate::vm::{eval, Environment, LocalContext}; +use crate::vm::{eval, ClarityVersion, Environment, LocalContext}; switch_on_global_epoch!(special_fetch_variable( special_fetch_variable_v200, @@ -732,11 +732,10 @@ pub fn special_get_block_info( .match_atom() .ok_or(CheckErrors::GetBlockInfoExpectPropertyName)?; - let block_info_prop = BlockInfoProperty::lookup_by_name_at_version( - property_name, - env.contract_context.get_clarity_version(), - ) - .ok_or(CheckErrors::GetBlockInfoExpectPropertyName)?; + let version = env.contract_context.get_clarity_version(); + + let block_info_prop = BlockInfoProperty::lookup_by_name_at_version(property_name, version) + .ok_or(CheckErrors::GetBlockInfoExpectPropertyName)?; // Handle the block-height input arg clause. let height_eval = eval(&args[1], env, context)?; @@ -757,7 +756,13 @@ pub fn special_get_block_info( let result = match block_info_prop { BlockInfoProperty::Time => { - let block_time = env.global_context.database.get_block_time(height_value)?; + let block_time = if version.uses_nakamoto_block_timestamp() { + env.global_context.database.get_block_time(height_value)? + } else { + env.global_context + .database + .get_burn_block_time(height_value)? + }; Value::UInt(u128::from(block_time)) } BlockInfoProperty::VrfSeed => { diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index b7e58919aa..e2516a8e30 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -161,6 +161,9 @@ impl HeadersDB for UnitTestHeaderDB { Some(1 + 10 * (id_bhh.as_bytes()[0] as u64)) } } + fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + Some(1713799973 + 10 * (id_bhh.as_bytes()[0] as u64)) + } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index 4c437d52cc..d26ecd44ff 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -42,6 +42,10 @@ impl ClarityVersion { StacksEpochId::Epoch30 => ClarityVersion::Clarity3, } } + + pub fn uses_nakamoto_block_timestamp(&self) -> bool { + self >= &ClarityVersion::Clarity3 + } } impl FromStr for ClarityVersion { diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index c106132b34..92c4448f38 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -223,6 +223,10 @@ lazy_static! { pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_2: Vec = vec![ NAKAMOTO_TENURES_SCHEMA_2.into(), r#" + ALTER TABLE nakamoto_block_headers + ADD COLUMN timestamp INTEGER NOT NULL; + "#.into(), + r#" UPDATE db_config SET version = "5"; "#.into(), ]; @@ -313,6 +317,11 @@ pub struct NakamotoBlockHeader { pub tx_merkle_root: Sha512Trunc256Sum, /// The MARF trie root hash after this block has been processed pub state_index_root: TrieHash, + /// A Unix time timestamp of when this block was mined, according to the miner. + /// For the signers to consider a block valid, this timestamp must be: + /// * Greater than the timestamp of its parent block + /// * Less than 15 seconds into the future + pub timestamp: u64, /// Recoverable ECDSA signature from the tenure's miner. pub miner_signature: MessageSignature, /// The set of recoverable ECDSA signatures over @@ -337,6 +346,8 @@ impl FromRow for NakamotoBlockHeader { let parent_block_id = row.get("parent_block_id")?; let tx_merkle_root = row.get("tx_merkle_root")?; let state_index_root = row.get("state_index_root")?; + let timestamp_i64: i64 = row.get("timestamp")?; + let timestamp = timestamp_i64.try_into().map_err(|_| DBError::ParseError)?; let miner_signature = row.get("miner_signature")?; let signer_bitvec = row.get("signer_bitvec")?; let signer_signature_json: String = row.get("signer_signature")?; @@ -351,6 +362,7 @@ impl FromRow for NakamotoBlockHeader { parent_block_id, tx_merkle_root, state_index_root, + timestamp, signer_signature, miner_signature, signer_bitvec, @@ -402,6 +414,7 @@ impl StacksMessageCodec for NakamotoBlockHeader { write_next(fd, &self.parent_block_id)?; write_next(fd, &self.tx_merkle_root)?; write_next(fd, &self.state_index_root)?; + write_next(fd, &self.timestamp)?; write_next(fd, &self.miner_signature)?; write_next(fd, &self.signer_signature)?; write_next(fd, &self.signer_bitvec)?; @@ -418,6 +431,7 @@ impl StacksMessageCodec for NakamotoBlockHeader { parent_block_id: read_next(fd)?, tx_merkle_root: read_next(fd)?, state_index_root: read_next(fd)?, + timestamp: read_next(fd)?, miner_signature: read_next(fd)?, signer_signature: read_next(fd)?, signer_bitvec: read_next(fd)?, @@ -452,6 +466,7 @@ impl NakamotoBlockHeader { write_next(fd, &self.parent_block_id)?; write_next(fd, &self.tx_merkle_root)?; write_next(fd, &self.state_index_root)?; + write_next(fd, &self.timestamp)?; Ok(Sha512Trunc256Sum::from_hasher(hasher)) } @@ -467,6 +482,7 @@ impl NakamotoBlockHeader { write_next(fd, &self.parent_block_id)?; write_next(fd, &self.tx_merkle_root)?; write_next(fd, &self.state_index_root)?; + write_next(fd, &self.timestamp)?; write_next(fd, &self.miner_signature)?; write_next(fd, &self.signer_bitvec)?; Ok(Sha512Trunc256Sum::from_hasher(hasher)) @@ -615,6 +631,7 @@ impl NakamotoBlockHeader { parent_block_id, tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), + timestamp: get_epoch_time_secs(), miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::ones(bitvec_len) @@ -632,6 +649,7 @@ impl NakamotoBlockHeader { parent_block_id: StacksBlockId([0u8; 32]), tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), + timestamp: 0, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), @@ -648,6 +666,7 @@ impl NakamotoBlockHeader { parent_block_id: StacksBlockId(BOOT_BLOCK_HASH.0.clone()), tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), + timestamp: get_epoch_time_secs(), miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), @@ -2279,6 +2298,7 @@ impl NakamotoChainState { &signer_signature, &header.tx_merkle_root, &header.state_index_root, + &u64_to_sql(header.timestamp)?, &block_hash, &index_block_hash, block_cost, @@ -2299,6 +2319,7 @@ impl NakamotoChainState { header_type, version, chain_length, burn_spent, miner_signature, signer_signature, tx_merkle_root, state_index_root, + timestamp, block_hash, index_block_hash, @@ -2310,7 +2331,7 @@ impl NakamotoChainState { vrf_proof, signer_bitvec ) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24)", + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24, ?25)", args )?; diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index abc9cf203b..f03cad424a 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -235,6 +235,7 @@ fn codec_nakamoto_header() { parent_block_id: StacksBlockId([0x05; 32]), tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![MessageSignature::from_bytes(&[0x01; 65]).unwrap()], signer_bitvec: BitVec::zeros(8).unwrap(), @@ -286,6 +287,7 @@ pub fn test_nakamoto_first_tenure_block_syntactic_validation() { parent_block_id: StacksBlockId([0x05; 32]), tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), @@ -848,6 +850,7 @@ pub fn test_load_store_update_nakamoto_blocks() { parent_block_id: epoch2_parent_block_id.clone(), tx_merkle_root: nakamoto_tx_merkle_root, state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: header_signatures.clone(), signer_bitvec: BitVec::zeros(1).unwrap(), @@ -892,6 +895,7 @@ pub fn test_load_store_update_nakamoto_blocks() { parent_block_id: nakamoto_header.block_id(), tx_merkle_root: nakamoto_tx_merkle_root_2, state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), @@ -931,6 +935,7 @@ pub fn test_load_store_update_nakamoto_blocks() { parent_block_id: nakamoto_header_2.block_id(), tx_merkle_root: nakamoto_tx_merkle_root_3, state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), @@ -1609,6 +1614,7 @@ fn test_nakamoto_block_static_verification() { parent_block_id: StacksBlockId([0x03; 32]), tx_merkle_root: nakamoto_tx_merkle_root, state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), @@ -1628,6 +1634,7 @@ fn test_nakamoto_block_static_verification() { parent_block_id: StacksBlockId([0x03; 32]), tx_merkle_root: nakamoto_tx_merkle_root_bad_ch, state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), @@ -1647,6 +1654,7 @@ fn test_nakamoto_block_static_verification() { parent_block_id: StacksBlockId([0x03; 32]), tx_merkle_root: nakamoto_tx_merkle_root_bad_miner_sig, state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), @@ -1800,6 +1808,7 @@ pub fn test_get_highest_nakamoto_tenure() { .unwrap_or(FIRST_STACKS_BLOCK_ID.clone()), tx_merkle_root: Sha512Trunc256Sum([0x00; 32]), state_index_root: TrieHash([0x00; 32]), + timestamp: get_epoch_time_secs(), miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), @@ -2104,6 +2113,7 @@ fn test_make_miners_stackerdb_config() { parent_block_id: StacksBlockId([0x05; 32]), tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 5d8588836e..c30f974259 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -565,6 +565,11 @@ impl HeadersDB for TestSimHeadersDB { } } + fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + let block_height = test_sim_hash_to_height(&id_bhh.0)?; + Some(1713799973 + block_height) + } + fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { if *id_bhh == *FIRST_INDEX_BLOCK_HASH { Some(BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u32) diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index c14e14aad4..7e508602e3 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -703,6 +703,15 @@ impl HeadersDB for CLIHeadersDB { } } + fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + let conn = self.conn(); + if let Some(height) = get_cli_block_height(&conn, id_bhh) { + Some((height * 10 + 1713799973) as u64) + } else { + None + } + } + fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { let conn = self.conn(); if let Some(height) = get_cli_block_height(&conn, id_bhh) { diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index be25078521..cf1fda7e03 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -71,6 +71,12 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { }) } + fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + get_stacks_header_column(self.0, id_bhh, "timestamp", |r| { + u64::from_row(r).expect("FATAL: malformed timestamp") + }) + } + fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { get_stacks_header_column(self.0, id_bhh, "burn_header_height", |r| { u64::from_row(r) @@ -145,6 +151,12 @@ impl<'a> HeadersDB for ChainstateTx<'a> { }) } + fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + get_stacks_header_column(self.deref().deref(), id_bhh, "timestamp", |r| { + u64::from_row(r).expect("FATAL: malformed timestamp") + }) + } + fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { get_stacks_header_column(self.deref().deref(), id_bhh, "burn_header_height", |r| { u64::from_row(r) @@ -222,6 +234,12 @@ impl HeadersDB for MARF { }) } + fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + get_stacks_header_column(self.sqlite_conn(), id_bhh, "timestamp", |r| { + u64::from_row(r).expect("FATAL: malformed timestamp") + }) + } + fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { get_stacks_header_column(self.sqlite_conn(), id_bhh, "burn_header_height", |r| { u64::from_row(r) diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index 0cdc1ad8bf..116443c524 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -1597,3 +1597,86 @@ fn test_block_heights_at_block() { ); }); } + +#[test] +fn test_get_block_info_time() { + let mut sim = ClarityTestSim::new(); + sim.epoch_bounds = vec![0, 1, 2, 3, 4, 5, 6, 7]; + + let contract_identifier2 = QualifiedContractIdentifier::local("test-contract-2").unwrap(); + let contract_identifier3 = QualifiedContractIdentifier::local("test-contract-3").unwrap(); + + // Advance to epoch 3.0 + while sim.block_height <= 7 { + sim.execute_next_block(|_env| {}); + } + + let block_height = sim.block_height as u128; + sim.execute_next_block_as_conn(|conn| { + let epoch = conn.get_epoch(); + assert_eq!(epoch, StacksEpochId::Epoch30); + + let contract2 = "(define-private (get-time) (get-block-info? time (- block-height u1)))"; + let contract3 = + "(define-private (get-time) (get-block-info? time (- stacks-block-height u1)))"; + + conn.as_transaction(|clarity_db| { + // Analyze the contract as Clarity 2 + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_identifier2, + ClarityVersion::Clarity2, + &contract2, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the contract as Clarity 2 + clarity_db + .initialize_smart_contract( + &contract_identifier2, + ClarityVersion::Clarity2, + &ast, + contract2, + None, + |_, _| false, + ) + .unwrap(); + + // Analyze the contract as Clarity 3 + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_identifier3, + ClarityVersion::Clarity3, + &contract3, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the contract as Clarity 3 + clarity_db + .initialize_smart_contract( + &contract_identifier3, + ClarityVersion::Clarity3, + &ast, + contract3, + None, + |_, _| false, + ) + .unwrap(); + }); + + // Call the contracts and validate the results + let mut tx = conn.start_transaction_processing(); + assert_eq!( + Value::some(Value::UInt(8)).unwrap(), + tx.eval_read_only(&contract_identifier2, "(get-time)") + .unwrap() + ); + assert_eq!( + Value::some(Value::UInt(1713799981)).unwrap(), + tx.eval_read_only(&contract_identifier3, "(get-time)") + .unwrap() + ); + }); +} diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index c084527336..cac8dda840 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -102,6 +102,7 @@ fn test_nakamoto_tenure_downloader() { parent_block_id: StacksBlockId([0x05; 32]), tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), @@ -169,6 +170,7 @@ fn test_nakamoto_tenure_downloader() { parent_block_id: last_block.header.block_id(), tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), @@ -190,6 +192,7 @@ fn test_nakamoto_tenure_downloader() { parent_block_id: blocks.last().unwrap().header.block_id(), tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), state_index_root: TrieHash([0x08; 32]), + timestamp: 9, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 0d04d12537..90bd680d88 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -901,7 +901,7 @@ impl BlockMinerThread { // last chance -- confirm that the stacks tip is unchanged (since it could have taken long // enough to build this block that another block could have arrived), and confirm that all - // Stacks blocks with heights higher than the canoincal tip are processed. + // Stacks blocks with heights higher than the canonical tip are processed. self.check_burn_tip_changed(&burn_db)?; Ok(block) } diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 30f499caae..3b2d2c140c 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -532,6 +532,7 @@ fn sign_request_rejected() { parent_block_id: StacksBlockId([0x05; 32]), tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), @@ -559,6 +560,7 @@ fn sign_request_rejected() { parent_block_id: StacksBlockId([0x06; 32]), tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), state_index_root: TrieHash([0x08; 32]), + timestamp: 9, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), From ad4339977fa3a6b91bd690b5438639106f369900 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 3 Jun 2024 20:55:49 -0400 Subject: [PATCH 0251/1400] feat: add timestamp check for block validation --- stackslib/src/net/api/postblock_proposal.rs | 25 +++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 2669c64356..3965e6dfde 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -30,9 +30,9 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::net::PeerHost; use stacks_common::types::StacksPublicKeyBuffer; -use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha256Sum, Sha512Trunc256Sum}; use stacks_common::util::retry::BoundReader; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::Txid; @@ -40,7 +40,7 @@ use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; -use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction, TransactionResult}; use crate::chainstate::stacks::{ Error as ChainError, StacksBlock, StacksBlockHeader, StacksTransaction, TransactionPayload, @@ -236,6 +236,27 @@ impl NakamotoBlockProposal { reason_code: ValidateRejectCode::InvalidBlock, reason: "Invalid parent block".into(), })?; + + // Validate the block's timestamp. It must be: + // - Greater than the parent block's timestamp + // - Less than 15 seconds into the future + if let StacksBlockHeaderTypes::Nakamoto(parent_nakamoto_header) = + &parent_stacks_header.anchored_header + { + if self.block.header.timestamp <= parent_nakamoto_header.timestamp { + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidBlock, + reason: "Block timestamp is not greater than parent block".into(), + }); + } + } + if self.block.header.timestamp > get_epoch_time_secs() + 15 { + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidBlock, + reason: "Block timestamp is too far into the future".into(), + }); + } + let tenure_change = self .block .txs From ddbb51fed6dddef20d393fe40a3f5b1e71b08636 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 4 Jun 2024 22:54:44 -0400 Subject: [PATCH 0252/1400] test: add tests for block proposal endpoint --- stackslib/src/chainstate/stacks/mod.rs | 107 ++++++++ stackslib/src/net/api/tests/mod.rs | 22 +- .../src/net/api/tests/postblock_proposal.rs | 237 ++++++++++++++++++ stackslib/src/net/tests/mod.rs | 1 + 4 files changed, 363 insertions(+), 4 deletions(-) create mode 100644 stackslib/src/net/api/tests/postblock_proposal.rs diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 4cb958a248..6a10c15463 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1126,12 +1126,15 @@ pub const MAX_MICROBLOCK_SIZE: u32 = 65536; #[cfg(test)] pub mod test { + use clarity::util::get_epoch_time_secs; use clarity::vm::representations::{ClarityName, ContractName}; use clarity::vm::ClarityVersion; + use stacks_common::bitvec::BitVec; use stacks_common::util::hash::*; use stacks_common::util::log; use super::*; + use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use crate::chainstate::stacks::{StacksPublicKey as PubKey, *}; use crate::core::*; use crate::net::codec::test::check_codec_and_corruption; @@ -1657,6 +1660,110 @@ pub mod test { } } + pub fn make_codec_test_nakamoto_block( + num_txs: usize, + epoch_id: StacksEpochId, + ) -> NakamotoBlock { + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + + let privk = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let origin_auth = TransactionAuth::Standard( + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( + &privk, + )) + .unwrap(), + ); + let mut tx_coinbase = StacksTransaction::new( + TransactionVersion::Mainnet, + origin_auth.clone(), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), + ); + let tx_coinbase_proof = StacksTransaction::new( + TransactionVersion::Mainnet, + origin_auth.clone(), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, Some(proof.clone())), + ); + + tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; + + let tx_tenure_change = StacksTransaction::new( + TransactionVersion::Mainnet, + origin_auth.clone(), + TransactionPayload::TenureChange(TenureChangePayload { + tenure_consensus_hash: ConsensusHash([0x01; 20]), + prev_tenure_consensus_hash: ConsensusHash([0x02; 20]), + burn_view_consensus_hash: ConsensusHash([0x03; 20]), + previous_tenure_end: StacksBlockId([0x05; 32]), + previous_tenure_blocks: 0, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0x00; 20]), + }), + ); + + let mut all_txs = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::OnChainOnly, + &TransactionPostConditionMode::Allow, + epoch_id, + ); + + // remove all coinbases, except for an initial coinbase + let mut txs_anchored = vec![]; + + if epoch_id >= StacksEpochId::Epoch30 { + txs_anchored.push(tx_tenure_change); + txs_anchored.push(tx_coinbase_proof); + } else { + txs_anchored.push(tx_coinbase); + } + + for tx in all_txs.drain(..) { + match tx.payload { + TransactionPayload::Coinbase(..) => { + continue; + } + _ => {} + } + txs_anchored.push(tx); + if txs_anchored.len() >= num_txs { + break; + } + } + + let txid_vecs = txs_anchored + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + let merkle_tree = MerkleTree::::new(&txid_vecs); + let tx_merkle_root = merkle_tree.root(); + let tr = tx_merkle_root.as_bytes().to_vec(); + + let header = NakamotoBlockHeader { + version: 0x01, + chain_length: 2, + burn_spent: 3, + consensus_hash: ConsensusHash([4u8; 20]), + parent_block_id: StacksBlockId([5u8; 32]), + tx_merkle_root, + state_index_root: TrieHash([8u8; 32]), + timestamp: get_epoch_time_secs(), + miner_signature: MessageSignature::empty(), + signer_signature: Vec::new(), + signer_bitvec: BitVec::zeros(8).unwrap(), + }; + + NakamotoBlock { + header, + txs: txs_anchored, + } + } + pub fn make_codec_test_microblock(num_txs: usize) -> StacksMicroblock { let privk = StacksPrivateKey::from_hex( "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index ce67147a9e..259223a8da 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -47,7 +47,8 @@ use crate::net::rpc::ConversationHttp; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs; use crate::net::{ - Attachment, AttachmentInstance, RPCHandlerArgs, StackerDBConfig, StacksNodeState, UrlString, + Attachment, AttachmentInstance, MemPoolEventDispatcher, RPCHandlerArgs, StackerDBConfig, + StacksNodeState, UrlString, }; mod callreadonly; @@ -77,6 +78,7 @@ mod gettenureinfo; mod gettransaction_unconfirmed; mod liststackerdbreplicas; mod postblock; +mod postblock_proposal; mod postfeerate; mod postmempoolquery; mod postmicroblock; @@ -258,6 +260,7 @@ impl<'a> TestRPC<'a> { runtime: 2000000, }; peer_1_config.connection_opts.maximum_call_argument_size = 4096; + peer_1_config.connection_opts.block_proposal_token = Some("password".to_string()); peer_2_config.connection_opts.read_only_call_limit = ExecutionCost { write_length: 0, @@ -267,6 +270,7 @@ impl<'a> TestRPC<'a> { runtime: 2000000, }; peer_2_config.connection_opts.maximum_call_argument_size = 4096; + peer_2_config.connection_opts.block_proposal_token = Some("password".to_string()); // stacker DBs get initialized thru reconfiguration when the above block gets processed peer_1_config.add_stacker_db( @@ -907,9 +911,17 @@ impl<'a> TestRPC<'a> { } } + pub fn run(self, requests: Vec) -> Vec { + self.run_with_observer(requests, None) + } + /// Run zero or more HTTP requests on this setup RPC test harness. /// Return the list of responses. - pub fn run(self, requests: Vec) -> Vec { + pub fn run_with_observer( + self, + requests: Vec, + event_observer: Option<&dyn MemPoolEventDispatcher>, + ) -> Vec { let mut peer_1 = self.peer_1; let mut peer_2 = self.peer_2; let peer_1_indexer = self.peer_1_indexer; @@ -943,7 +955,8 @@ impl<'a> TestRPC<'a> { } { - let rpc_args = RPCHandlerArgs::default(); + let mut rpc_args = RPCHandlerArgs::default(); + rpc_args.event_observer = event_observer; let mut node_state = StacksNodeState::new( &mut peer_1.network, &peer_1_sortdb, @@ -985,7 +998,8 @@ impl<'a> TestRPC<'a> { } { - let rpc_args = RPCHandlerArgs::default(); + let mut rpc_args = RPCHandlerArgs::default(); + rpc_args.event_observer = event_observer; let mut node_state = StacksNodeState::new( &mut peer_2.network, &peer_2_sortdb, diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs new file mode 100644 index 0000000000..fedecfc689 --- /dev/null +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -0,0 +1,237 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use mempool::{MemPoolEventDispatcher, ProposalCallbackReceiver}; +use postblock_proposal::NakamotoBlockProposal; +use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::{Address, StacksEpochId}; + +use super::TestRPC; +use crate::chainstate::stacks::test::{make_codec_test_block, make_codec_test_nakamoto_block}; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::test::TestEventObserver; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let block = make_codec_test_nakamoto_block(3, StacksEpochId::Epoch30); + let proposal = NakamotoBlockProposal { + block: block.clone(), + chain_id: 0x80000000, + }; + let mut request = StacksHttpRequest::new_for_peer( + addr.into(), + "POST".into(), + "/v2/block_proposal".into(), + HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), + ) + .expect("failed to construct request"); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = + postblock_proposal::RPCBlockProposalRequestHandler::new(Some("password".into())); + + // missing authorization header + let bad_request = http.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ); + match bad_request { + Err(crate::net::Error::Http(crate::net::http::Error::Http(err_code, message))) => { + assert_eq!(err_code, 401); + assert_eq!(message, "Unauthorized"); + } + _ => panic!("expected error"), + } + + // add the authorization header + request.add_header("authorization".into(), "password".into()); + let bytes = request.try_serialize().unwrap(); + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!( + handler.block_proposal, + Some(NakamotoBlockProposal { + block, + chain_id: 0x80000000 + }) + ); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + // but the authorization header should still be there + parsed_request.add_header("authorization".into(), "password".into()); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.auth.is_some()); + assert!(handler.block_proposal.is_none()); +} + +struct NullObserver; +impl MemPoolEventDispatcher for NullObserver { + fn get_proposal_callback_receiver(&self) -> Option> { + Some(Box::new(NullObserver {})) + } + + fn mempool_txs_dropped(&self, txids: Vec, reason: mempool::MemPoolDropReason) {} + + fn mined_block_event( + &self, + target_burn_height: u64, + block: &crate::chainstate::stacks::StacksBlock, + block_size_bytes: u64, + consumed: &ExecutionCost, + confirmed_microblock_cost: &ExecutionCost, + tx_results: Vec, + ) { + } + + fn mined_microblock_event( + &self, + microblock: &StacksMicroblock, + tx_results: Vec, + anchor_block_consensus_hash: ConsensusHash, + anchor_block: BlockHeaderHash, + ) { + } + + fn mined_nakamoto_block_event( + &self, + target_burn_height: u64, + block: &crate::chainstate::nakamoto::NakamotoBlock, + block_size_bytes: u64, + consumed: &ExecutionCost, + tx_results: Vec, + ) { + } +} + +impl ProposalCallbackReceiver for NullObserver { + fn notify_proposal_result( + &self, + result: Result< + postblock_proposal::BlockValidateOk, + postblock_proposal::BlockValidateReject, + >, + ) { + } +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let test_observer = TestEventObserver::new(); + let rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); + let mut requests = vec![]; + + let block = make_codec_test_nakamoto_block(3, StacksEpochId::Epoch30); + + // post the block proposal + let proposal = NakamotoBlockProposal { + block: block.clone(), + chain_id: 0x80000000, + }; + println!( + "Peer1 host: {:?} {}", + rpc_test.peer_1.to_peer_host(), + rpc_test.peer_1.config.http_port + ); + println!( + "Peer2 host: {:?} {}", + rpc_test.peer_2.to_peer_host(), + rpc_test.peer_2.config.http_port + ); + let mut request = StacksHttpRequest::new_for_peer( + rpc_test.peer_1.to_peer_host(), + "POST".into(), + "/v2/block_proposal".into(), + HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), + ) + .expect("failed to construct request"); + request.add_header("authorization".into(), "password".into()); + requests.push(request); + + // // idempotent + // let request = + // StacksHttpRequest::new_post_block(addr.into(), next_block.0.clone(), next_block.1.clone()); + // requests.push(request); + + // // fails if the consensus hash is not recognized + // let request = StacksHttpRequest::new_post_block( + // addr.into(), + // ConsensusHash([0x11; 20]), + // next_block.1.clone(), + // ); + // requests.push(request); + + let observer = NullObserver {}; + let mut responses = rpc_test.run_with_observer(requests, Some(&observer)); + + let response = responses.remove(0); + println!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + // let resp = response.decode_stacks_block_accepted().unwrap(); + // assert_eq!(resp.accepted, true); + // assert_eq!(resp.stacks_block_id, stacks_block_id); + + // let response = responses.remove(0); + // debug!( + // "Response:\n{}\n", + // std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + // ); + + // let resp = response.decode_stacks_block_accepted().unwrap(); + // assert_eq!(resp.accepted, false); + // assert_eq!(resp.stacks_block_id, stacks_block_id); + + // let response = responses.remove(0); + // debug!( + // "Response:\n{}\n", + // std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + // ); + + // let (preamble, body) = response.destruct(); + // assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 5e2cb3e6cc..30fc7aa16e 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -290,6 +290,7 @@ impl NakamotoBootPlan { peer_config .initial_balances .append(&mut self.initial_balances.clone()); + peer_config.connection_opts.block_proposal_token = Some("password".to_string()); // Create some balances for test Stackers // They need their stacking amount + enough to pay fees From 0061683696142fbf00c4b756d87bbb192258257e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 5 Jun 2024 09:42:47 -0400 Subject: [PATCH 0253/1400] test: fix codec unit test --- stackslib/src/chainstate/nakamoto/tests/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index f03cad424a..1b37f3817b 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -256,7 +256,8 @@ fn codec_nakamoto_header() { 0x06, 0x06, // state index root 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, - 0x07, 0x07, // miner signature + 0x07, 0x07, // timestamp + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, // miner signature 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, From ade4db7820b44c25fe4de5d2bfa4efe5611a9b8b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 5 Jun 2024 09:42:46 -0500 Subject: [PATCH 0254/1400] test: speedup the nakamoto_attempt_time integration test --- .../src/tests/nakamoto_integrations.rs | 112 +++++++++--------- 1 file changed, 59 insertions(+), 53 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 376ef1409b..9c98d4ab33 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4095,8 +4095,8 @@ fn nakamoto_attempt_time() { // ----- Setup boilerplate finished, test block proposal API endpoint ----- let mut sender_nonce = 0; - let tenure_count = 3; - let inter_blocks_per_tenure = 10; + let tenure_count = 2; + let inter_blocks_per_tenure = 3; // Subtest 1 // Mine nakamoto tenures with a few transactions @@ -4127,16 +4127,24 @@ fn nakamoto_attempt_time() { submit_tx(&http_origin, &transfer_tx); } - // Sleep a bit longer than what our max block time should be - thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms + 100)); - // Miner should have made a new block by now - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - - assert!(blocks_processed > blocks_processed_before); + let wait_start = Instant::now(); + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + // wait a little longer than what the max block time should be + if wait_start.elapsed() > Duration::from_millis(nakamoto_attempt_time_ms + 100) { + panic!( + "A block should have been produced within {nakamoto_attempt_time_ms} ms" + ); + } + thread::sleep(Duration::from_secs(1)); + } let info = get_chain_info_result(&naka_conf).unwrap(); assert_ne!(info.stacks_tip, last_tip); @@ -4184,57 +4192,55 @@ fn nakamoto_attempt_time() { // Subtest 3 // Add more than `nakamoto_attempt_time_ms` worth of transactions into mempool // Multiple blocks should be mined - for _ in 0..tenure_count { - let info_before = get_chain_info_result(&naka_conf).unwrap(); + let info_before = get_chain_info_result(&naka_conf).unwrap(); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); - let tx_limit = 10000; - let tx_fee = 500; - let amount = 500; - let mut tx_total_size = 0; - let mut tx_count = 0; - let mut acct_idx = 0; - - // Submit max # of txs from each account to reach tx_limit - 'submit_txs: loop { - let acct = &mut account[acct_idx]; - for _ in 0..MAXIMUM_MEMPOOL_TX_CHAINING { - let transfer_tx = - make_stacks_transfer(&acct.privk, acct.nonce, tx_fee, &recipient, amount); - submit_tx(&http_origin, &transfer_tx); - tx_total_size += transfer_tx.len(); - tx_count += 1; - acct.nonce += 1; - if tx_count >= tx_limit { - break 'submit_txs; - } + let tx_limit = 10000; + let tx_fee = 500; + let amount = 500; + let mut tx_total_size = 0; + let mut tx_count = 0; + let mut acct_idx = 0; + + // Submit max # of txs from each account to reach tx_limit + 'submit_txs: loop { + let acct = &mut account[acct_idx]; + for _ in 0..MAXIMUM_MEMPOOL_TX_CHAINING { + let transfer_tx = + make_stacks_transfer(&acct.privk, acct.nonce, tx_fee, &recipient, amount); + submit_tx(&http_origin, &transfer_tx); + tx_total_size += transfer_tx.len(); + tx_count += 1; + acct.nonce += 1; + if tx_count >= tx_limit { + break 'submit_txs; } - acct_idx += 1; } + acct_idx += 1; + } - // Make sure that these transactions *could* fit into a single block - assert!(tx_total_size < MAX_BLOCK_LEN as usize); + // Make sure that these transactions *could* fit into a single block + assert!(tx_total_size < MAX_BLOCK_LEN as usize); - // Wait long enough for 2 blocks to be made - thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms * 2 + 100)); + // Wait long enough for 2 blocks to be made + thread::sleep(Duration::from_millis(nakamoto_attempt_time_ms * 2 + 100)); - // Check that 2 blocks were made - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); + // Check that 2 blocks were made + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); - let blocks_mined = blocks_processed - blocks_processed_before; - assert!(blocks_mined > 2); + let blocks_mined = blocks_processed - blocks_processed_before; + assert!(blocks_mined > 2); - let info = get_chain_info_result(&naka_conf).unwrap(); - assert_ne!(info.stacks_tip, info_before.stacks_tip); - assert_ne!(info.stacks_tip_height, info_before.stacks_tip_height); - } + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, info_before.stacks_tip); + assert_ne!(info.stacks_tip_height, info_before.stacks_tip_height); // ----- Clean up ----- coord_channel From ff40a27249483bf82cf922217669480df3813203 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 5 Jun 2024 16:09:51 -0400 Subject: [PATCH 0255/1400] WIP: Can't figure out why block election snapshot does not match between new block tenure and following continue tenure Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/nakamoto/miner.rs | 9 +++ stackslib/src/chainstate/nakamoto/mod.rs | 65 ++++++++----------- stackslib/src/chainstate/nakamoto/tenure.rs | 15 ++++- .../stacks-node/src/nakamoto_node/relayer.rs | 10 +-- 4 files changed, 56 insertions(+), 43 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index ab9ae6a5f9..6b434cfb6d 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -79,6 +79,7 @@ use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::Error as DBError; /// Nakamaoto tenure information +#[derive(Debug)] pub struct NakamotoTenureInfo { /// Coinbase tx, if this is a new tenure pub coinbase_tx: Option, @@ -410,6 +411,13 @@ impl NakamotoBlockBuilder { signer_transactions: Vec, signer_bitvec_len: u16, ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec), Error> { + let parent_block_id = parent_stacks_header.index_block_hash(); + info!("Building a nakamoto block"; + "parent_block_id" => %parent_block_id, + "tenure_id_consensus_hash" => %tenure_id_consensus_hash, + "parent_consensus_hash" => %parent_stacks_header.consensus_hash, + "tenure_info" => ?tenure_info + ); let (tip_consensus_hash, tip_block_hash, tip_height) = ( parent_stacks_header.consensus_hash.clone(), parent_stacks_header.anchored_header.block_hash(), @@ -503,6 +511,7 @@ impl NakamotoBlockBuilder { "execution_consumed" => %consumed, "%-full" => block_limit.proportion_largest_dimension(&consumed), "assembly_time_ms" => ts_end.saturating_sub(ts_start), + "consensus_hash" => %block.header.consensus_hash ); Ok((block, consumed, size, tx_events)) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 302ac5a6e6..cd97c2a5ed 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2784,6 +2784,10 @@ impl NakamotoChainState { ) }; + error!( + "Processing block: block.header.consensus_hash {:?}, parent_ch {:?}", + block.header.consensus_hash, parent_ch + ); let parent_block_id = StacksBlockId::new(&parent_ch, &parent_block_hash); if parent_block_id != block.header.parent_block_id { warn!("Error processing nakamoto block: Parent consensus hash does not match db view"; @@ -2802,44 +2806,31 @@ impl NakamotoChainState { let burn_header_height = tenure_block_snapshot.block_height; let block_hash = block.header.block_hash(); - let new_tenure = match block.is_wellformed_tenure_start_block() { - Ok(true) => true, - Ok(false) => { - // this block is mined in the ongoing tenure. - if !Self::check_tenure_continuity( - chainstate_tx, - burn_dbconn.sqlite(), - &parent_ch, - &block.header, - )? { - // this block is not part of the ongoing tenure; it's invalid - return Err(ChainstateError::ExpectedTenureChange); - } - false - } - Err(_) => { - return Err(ChainstateError::InvalidStacksBlock( - "Invalid tenure changes in nakamoto block".into(), - )); - } - }; + let new_tenure = block.is_wellformed_tenure_start_block().map_err(|_| { + ChainstateError::InvalidStacksBlock("Invalid tenure changes in nakamoto block".into()) + })?; + // this block is mined in the ongoing tenure. + if !new_tenure + && !Self::check_tenure_continuity( + chainstate_tx, + burn_dbconn.sqlite(), + &parent_ch, + &block.header, + )? + { + warn!("FAILED"); + // this block is not part of the ongoing tenure; it's invalid + return Err(ChainstateError::ExpectedTenureChange); + } + let tenure_extend = block.is_wellformed_tenure_extend_block().map_err(|_| { + ChainstateError::InvalidStacksBlock("Invalid tenure changes in nakamoto block".into()) + })?; - let tenure_extend = match block.is_wellformed_tenure_extend_block() { - Ok(true) => { - if new_tenure { - return Err(ChainstateError::InvalidStacksBlock( - "Both started and extended tenure".into(), - )); - } - true - } - Ok(false) => false, - Err(_) => { - return Err(ChainstateError::InvalidStacksBlock( - "Invalid tenure extend in nakamoto block".into(), - )); - } - }; + if tenure_extend && new_tenure { + return Err(ChainstateError::InvalidStacksBlock( + "Both started and extended tenure".into(), + )); + } let parent_coinbase_height = if block.is_first_mined() { 0 diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index c9e5c0cf59..ed1b3dd35e 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -853,7 +853,7 @@ impl NakamotoChainState { } /// Check that this block is in the same tenure as its parent, and that this tenure is the - /// highest-seen tenure. Use this to check blocks that do _not_ have tenure-changes. + /// highest-seen tenure. Use this to check blocks that do _not_ have BlockFound tenure-changes. /// /// Returns Ok(bool) to indicate whether or not this block is in the same tenure as its parent. /// Returns Err(..) on DB error @@ -865,17 +865,30 @@ impl NakamotoChainState { ) -> Result { // block must have the same consensus hash as its parent if block_header.is_first_mined() || parent_ch != &block_header.consensus_hash { + error!( + "BLOCK HEADER IS FIRST MINED: {}", + block_header.is_first_mined() + ); + error!("Block is not in the same tenure as its parent"; + "parent_ch" => %parent_ch, + "block_header.consensus_hash" => %block_header.consensus_hash, + "block_header" => ?block_header); return Ok(false); } // block must be in the same tenure as the highest-processed tenure. let Some(highest_tenure) = Self::get_highest_nakamoto_tenure(headers_conn, sortdb_conn)? else { + error!("No tenure found"); // no tenures yet, so definitely not continuous return Ok(false); }; if &highest_tenure.tenure_id_consensus_hash != parent_ch { + error!("Block is not in the highest-known tenure"; + "highest_tenure" => %highest_tenure.tenure_id_consensus_hash, + "block_header.consensus_hash" => %block_header.consensus_hash, + "block_header" => ?block_header); // this block is not in the highest-known tenure, so it can't be continuous return Ok(false); } diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 62026add20..8da99d1e36 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -542,8 +542,8 @@ impl RelayerThread { fn create_block_miner( &mut self, registered_key: RegisteredKey, - burn_tip: BlockSnapshot, burn_election_block: BlockSnapshot, + burn_tip: BlockSnapshot, parent_tenure_id: StacksBlockId, reason: MinerReason, ) -> Result { @@ -573,7 +573,7 @@ impl RelayerThread { return Err(NakamotoNodeError::MissedMiningOpportunity); } - debug!( + info!( "Relayer: Spawn tenure thread"; "height" => burn_tip.block_height, "burn_header_hash" => %burn_header_hash, @@ -597,8 +597,8 @@ impl RelayerThread { fn start_new_tenure( &mut self, parent_tenure_start: StacksBlockId, - burn_tip: BlockSnapshot, block_election_snapshot: BlockSnapshot, + burn_tip: BlockSnapshot, reason: MinerReason, ) -> Result<(), NakamotoNodeError> { // when starting a new tenure, block the mining thread if its currently running. @@ -614,8 +614,8 @@ impl RelayerThread { })?; let new_miner_state = self.create_block_miner( vrf_key, - burn_tip, block_election_snapshot, + burn_tip, parent_tenure_start, reason, )?; @@ -699,8 +699,8 @@ impl RelayerThread { match self.start_new_tenure( burn_tip.get_canonical_stacks_block_id(), // For tenure extend, we should be extending off the canonical tip - burn_tip, block_election_snapshot, + burn_tip, MinerReason::Extended { burn_view_consensus_hash: new_burn_view, tenure_change_mined: false, From 27e206c45143a2ceccb6a6da258a72ac80b54e03 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 5 Jun 2024 17:27:40 -0400 Subject: [PATCH 0256/1400] WIP: fetch the bitcoin snapshot that elected the current canconical tip Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/nakamoto/miner.rs | 2 +- .../stacks-node/src/nakamoto_node/relayer.rs | 52 +++++++++++-------- 2 files changed, 32 insertions(+), 22 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 6b434cfb6d..32402325b1 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -412,7 +412,7 @@ impl NakamotoBlockBuilder { signer_bitvec_len: u16, ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec), Error> { let parent_block_id = parent_stacks_header.index_block_hash(); - info!("Building a nakamoto block"; + info!("Building a nakamoto block"; "parent_block_id" => %parent_block_id, "tenure_id_consensus_hash" => %tenure_id_consensus_hash, "parent_consensus_hash" => %parent_stacks_header.consensus_hash, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 8da99d1e36..75653d98ae 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -44,7 +44,7 @@ use stacks::net::db::LocalPeer; use stacks::net::relay::Relayer; use stacks::net::NetworkResult; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, VRFSeed, + BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksPublicKey, VRFSeed, }; use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_ms; @@ -667,25 +667,6 @@ impl RelayerThread { } debug!("Relayer: successfully stopped tenure."); // Check if we should undergo a tenure change to switch to the new burn view - let block_election_snapshot = self - .sortdb - .index_handle_at_tip() - .get_last_snapshot_with_sortition_from_tip() - .map_err(|e| { - error!("Relayer: failed to get last sortition snapshot: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; - - if Some(block_election_snapshot.winning_block_txid) != self.current_mining_commit_tx { - debug!("Relayer: the miner did not win the last sortition. No tenure to continue."; - "current_mining_commit_tx" => %self.current_mining_commit_tx.unwrap_or(Txid([0u8; 32])), - "block_snapshot_winning_block_txid" => %block_election_snapshot.winning_block_txid - ); - return Ok(()); - } else { - debug!("Relayer: the miner won the last sortition. Continuing tenure."); - }; - let burn_tip = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &new_burn_view) .map_err(|e| { @@ -697,8 +678,37 @@ impl RelayerThread { NakamotoNodeError::SnapshotNotFoundForChainTip })?; + let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap(); + let canonical_stacks_tip = + StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); + let block_election_snapshot = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) + .map_err(|e| { + error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? + .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for canonical tip"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + let Some(ref mining_key) = self.config.miner.mining_key else { + return Ok(()); + }; + let mining_pkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(mining_key)); + if block_election_snapshot.miner_pk_hash != Some(mining_pkh) { + debug!("Relayer: the miner did not win the last sortition. No tenure to continue."; + "current_mining_pkh" => %mining_pkh, + "block_snapshot.miner_pk_hash" => ?block_election_snapshot.miner_pk_hash, + ); + return Ok(()); + } else { + debug!("Relayer: the miner won the last sortition. Continuing tenure."); + } + match self.start_new_tenure( - burn_tip.get_canonical_stacks_block_id(), // For tenure extend, we should be extending off the canonical tip + canonical_stacks_tip, // For tenure extend, we should be extending off the canonical tip block_election_snapshot, burn_tip, MinerReason::Extended { From 2d32ba08ce311542510ac42f7c71204b5ad5aba0 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 6 Jun 2024 09:34:25 -0400 Subject: [PATCH 0257/1400] Fix test to only care about the 15 tenures we mine Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/nakamoto/miner.rs | 7 -- stackslib/src/chainstate/nakamoto/mod.rs | 5 -- stackslib/src/chainstate/nakamoto/tenure.rs | 13 ---- .../stacks-node/src/nakamoto_node/miner.rs | 66 ++++++------------- .../stacks-node/src/nakamoto_node/relayer.rs | 6 +- .../src/tests/nakamoto_integrations.rs | 14 +--- 6 files changed, 23 insertions(+), 88 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 32402325b1..233304adb2 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -411,13 +411,6 @@ impl NakamotoBlockBuilder { signer_transactions: Vec, signer_bitvec_len: u16, ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec), Error> { - let parent_block_id = parent_stacks_header.index_block_hash(); - info!("Building a nakamoto block"; - "parent_block_id" => %parent_block_id, - "tenure_id_consensus_hash" => %tenure_id_consensus_hash, - "parent_consensus_hash" => %parent_stacks_header.consensus_hash, - "tenure_info" => ?tenure_info - ); let (tip_consensus_hash, tip_block_hash, tip_height) = ( parent_stacks_header.consensus_hash.clone(), parent_stacks_header.anchored_header.block_hash(), diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index cd97c2a5ed..c7a0fd3c95 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2784,10 +2784,6 @@ impl NakamotoChainState { ) }; - error!( - "Processing block: block.header.consensus_hash {:?}, parent_ch {:?}", - block.header.consensus_hash, parent_ch - ); let parent_block_id = StacksBlockId::new(&parent_ch, &parent_block_hash); if parent_block_id != block.header.parent_block_id { warn!("Error processing nakamoto block: Parent consensus hash does not match db view"; @@ -2818,7 +2814,6 @@ impl NakamotoChainState { &block.header, )? { - warn!("FAILED"); // this block is not part of the ongoing tenure; it's invalid return Err(ChainstateError::ExpectedTenureChange); } diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index ed1b3dd35e..45a0149b64 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -865,30 +865,17 @@ impl NakamotoChainState { ) -> Result { // block must have the same consensus hash as its parent if block_header.is_first_mined() || parent_ch != &block_header.consensus_hash { - error!( - "BLOCK HEADER IS FIRST MINED: {}", - block_header.is_first_mined() - ); - error!("Block is not in the same tenure as its parent"; - "parent_ch" => %parent_ch, - "block_header.consensus_hash" => %block_header.consensus_hash, - "block_header" => ?block_header); return Ok(false); } // block must be in the same tenure as the highest-processed tenure. let Some(highest_tenure) = Self::get_highest_nakamoto_tenure(headers_conn, sortdb_conn)? else { - error!("No tenure found"); // no tenures yet, so definitely not continuous return Ok(false); }; if &highest_tenure.tenure_id_consensus_hash != parent_ch { - error!("Block is not in the highest-known tenure"; - "highest_tenure" => %highest_tenure.tenure_id_consensus_hash, - "block_header.consensus_hash" => %block_header.consensus_hash, - "block_header" => ?block_header); // this block is not in the highest-known tenure, so it can't be continuous return Ok(false); } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index fdff434044..ae16ca3ff1 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -100,8 +100,6 @@ pub enum MinerReason { /// Current consensus hash on the underlying burnchain. Corresponds to the last-seen /// sortition. burn_view_consensus_hash: ConsensusHash, - /// Wether the tenure change transaction was mined - tenure_change_mined: bool, }, } @@ -109,9 +107,11 @@ impl std::fmt::Display for MinerReason { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { MinerReason::BlockFound => write!(f, "BlockFound"), - MinerReason::Extended { burn_view_consensus_hash, tenure_change_mined } => write!( + MinerReason::Extended { + burn_view_consensus_hash, + } => write!( f, - "Extended: burn_view_consensus_hash = {burn_view_consensus_hash:?}, tenure_change_mined = {tenure_change_mined}", + "Extended: burn_view_consensus_hash = {burn_view_consensus_hash:?}", ), } } @@ -246,14 +246,6 @@ impl BlockMinerThread { "consensus_hash" => %new_block.header.consensus_hash, ); self.globals.coord().announce_new_stacks_block(); - if let MinerReason::Extended { - tenure_change_mined, - .. - } = &mut self.reason - { - // We should not issue multiple tenure change transactions for the same tenure - *tenure_change_mined = true; - } } self.globals.counters.bump_naka_mined_blocks(); @@ -743,7 +735,6 @@ impl BlockMinerThread { .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)? .expect("FATAL: no epoch defined") .epoch_id; - debug!("HERE WE GO"); let mut parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; let vrf_proof = self .make_vrf_proof() @@ -754,12 +745,6 @@ impl BlockMinerThread { return Err(NakamotoNodeError::ParentNotFound); }; - debug!( - "Parent block info parent tenure: {:?} and {:?} mined blocks", - parent_block_info.parent_tenure, - self.mined_blocks.len() - ); - // create our coinbase if this is the first block we've mined this tenure let tenure_start_info = self.make_tenure_start_info( &chain_state, @@ -783,7 +768,7 @@ impl BlockMinerThread { &mut mem_pool, &parent_block_info.stacks_parent_header, &self.burn_election_block.consensus_hash, - self.burn_election_block.total_burn, + self.burn_block.total_burn, tenure_start_info, self.config .make_nakamoto_block_builder_settings(self.globals.get_miner_status()), @@ -846,16 +831,11 @@ impl BlockMinerThread { vrf_proof: VRFProof, target_epoch_id: StacksEpochId, ) -> Result { - debug!("MAKING TENURE START INFO"); let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let current_miner_nonce = parent_block_info.coinbase_nonce; let (tenure_change_tx, coinbase_tx) = if let Some(ref parent_tenure_info) = parent_block_info.parent_tenure { - debug!( - "Miner: Constructing tenure change and coinbase transactions: {}", - self.reason - ); let mut payload = TenureChangePayload { tenure_consensus_hash: self.burn_election_block.consensus_hash.clone(), prev_tenure_consensus_hash: parent_tenure_info.parent_tenure_consensus_hash, @@ -869,7 +849,6 @@ impl BlockMinerThread { match &self.reason { MinerReason::BlockFound => { - debug!("Miner: Constructing tenure change and coinbase transactions"); let tenure_change_tx = self.generate_tenure_change_tx(current_miner_nonce, payload)?; let coinbase_tx = self.generate_coinbase_tx( @@ -881,31 +860,24 @@ impl BlockMinerThread { } MinerReason::Extended { burn_view_consensus_hash, - tenure_change_mined, } => { - debug!("Tenure change mined {tenure_change_mined}"); - if !*tenure_change_mined { - let num_blocks_so_far = NakamotoChainState::get_nakamoto_tenure_length( - chainstate.db(), - &self.burn_election_block.consensus_hash, - ) - .map_err(NakamotoNodeError::MiningFailure)?; - debug!("Miner: Extending tenure"; "burn_view_consensus_hash" => %burn_view_consensus_hash, "parent_block_id" => %parent_block_id, "num_blocks_so_far" => num_blocks_so_far); - payload = payload.extend( - *burn_view_consensus_hash, - parent_block_id, - num_blocks_so_far, - ); - let tenure_change_tx = - self.generate_tenure_change_tx(current_miner_nonce, payload)?; - (Some(tenure_change_tx), None) - } else { - (None, None) - } + let num_blocks_so_far = NakamotoChainState::get_nakamoto_tenure_length( + chainstate.db(), + &self.burn_election_block.consensus_hash, + ) + .map_err(NakamotoNodeError::MiningFailure)?; + debug!("Miner: Extending tenure"; "burn_view_consensus_hash" => %burn_view_consensus_hash, "parent_block_id" => %parent_block_id, "num_blocks_so_far" => num_blocks_so_far); + payload = payload.extend( + *burn_view_consensus_hash, + parent_block_id, + num_blocks_so_far, + ); + let tenure_change_tx = + self.generate_tenure_change_tx(current_miner_nonce, payload)?; + (Some(tenure_change_tx), None) } } } else { - debug!("Miner: NOT Constructing tenure change and coinbase transactions"); (None, None) }; diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 75653d98ae..5ae749faac 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -547,9 +547,6 @@ impl RelayerThread { parent_tenure_id: StacksBlockId, reason: MinerReason, ) -> Result { - debug!("Relayer: creating block miner thread"; - "reason" => %reason - ); if fault_injection_skip_mining(&self.config.node.rpc_bind, burn_tip.block_height) { debug!( "Relayer: fault injection skip mining at block height {}", @@ -573,7 +570,7 @@ impl RelayerThread { return Err(NakamotoNodeError::MissedMiningOpportunity); } - info!( + debug!( "Relayer: Spawn tenure thread"; "height" => burn_tip.block_height, "burn_header_hash" => %burn_header_hash, @@ -713,7 +710,6 @@ impl RelayerThread { burn_tip, MinerReason::Extended { burn_view_consensus_hash: new_burn_view, - tenure_change_mined: false, }, ) { Ok(()) => { diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6ec75bc0af..63c95070a7 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4037,7 +4037,7 @@ fn continue_tenure_extend() { &signers, ); - info!("Pausing commit op for the next block"); + info!("Pausing commit ops to trigger a tenure extend."); TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); @@ -4073,7 +4073,6 @@ fn continue_tenure_extend() { ) .unwrap(); - debug!("MINING A STACKS BLOCK"); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -4084,7 +4083,6 @@ fn continue_tenure_extend() { &signers, ); - debug!("MINING THE NEXT BLOCK"); next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); signer_vote_if_needed( @@ -4094,10 +4092,9 @@ fn continue_tenure_extend() { &signers, ); - debug!("Unpausing commit op"); + info!("Resuming commit ops to mine regular tenures."); TEST_SKIP_COMMIT_OP.lock().unwrap().replace(false); - debug!("MINING THE NEXT TENURES"); // Mine 15 more regular nakamoto tenures for _i in 0..15 { let commits_before = commits_submitted.load(Ordering::SeqCst); @@ -4127,11 +4124,6 @@ fn continue_tenure_extend() { let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); - info!( - "Latest tip"; - "height" => tip.stacks_block_height, - "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), - ); // assert that the transfer tx was observed let transfer_tx_included = test_observer::get_blocks() @@ -4152,7 +4144,7 @@ fn continue_tenure_extend() { ); assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); - assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 15); // make sure prometheus returns an updated height #[cfg(feature = "monitoring_prom")] From 309cafe9b87ebe6cc580cf06b6a64dddafd2c462 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 6 Jun 2024 10:11:51 -0400 Subject: [PATCH 0258/1400] Fix bad merge Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/miner.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 94f2ad7016..42f540540a 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -886,8 +886,6 @@ impl BlockMinerThread { return Err(NakamotoNodeError::ParentNotFound); }; - let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); - // create our coinbase if this is the first block we've mined this tenure let tenure_start_info = self.make_tenure_start_info( &chain_state, @@ -1021,6 +1019,7 @@ impl BlockMinerThread { }); }; + let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let mut payload = TenureChangePayload { tenure_consensus_hash: self.burn_election_block.consensus_hash.clone(), prev_tenure_consensus_hash: parent_tenure_info.parent_tenure_consensus_hash, From 24b5e790f4b8bff2b31a4253ff5bc7c82182338d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 6 Jun 2024 12:35:51 -0400 Subject: [PATCH 0259/1400] Cleanup stale data from prior commits Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node.rs | 2 -- testnet/stacks-node/src/nakamoto_node/relayer.rs | 6 ++---- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index a60370f612..8a1d80de32 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -103,8 +103,6 @@ pub enum Error { SigningCoordinatorFailure(String), // The thread that we tried to send to has closed ChannelClosed, - /// The block header for the tenure start is missing - MissingTenureStartBlockHeader, } impl StacksNode { diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 073787f565..ddad31ad72 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -163,8 +163,6 @@ pub struct RelayerThread { /// This is the last snapshot in which the relayer committed, and the parent_tenure_id /// which was committed to last_committed: Option<(BlockSnapshot, StacksBlockId)>, - /// The last commit that the relayer submitted which won the sortition - current_mining_commit_tx: Option, } impl RelayerThread { @@ -222,7 +220,6 @@ impl RelayerThread { is_miner, next_initiative: Instant::now() + Duration::from_millis(next_initiative_delay), last_committed: None, - current_mining_commit_tx: None, } } @@ -315,7 +312,9 @@ impl RelayerThread { .expect("FATAL: unknown consensus hash"); self.globals.set_last_sortition(sn.clone()); + let won_sortition = sn.sortition && self.last_commits.remove(&sn.winning_block_txid); + info!( "Relayer: Process sortition"; "sortition_ch" => %consensus_hash, @@ -328,7 +327,6 @@ impl RelayerThread { if won_sortition { increment_stx_blocks_mined_counter(); - self.current_mining_commit_tx = Some(sn.winning_block_txid); } if sn.sortition { From 2f7aedd5bac2778a0cc0da6bd61936c6ba503dbc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:20:40 -0400 Subject: [PATCH 0260/1400] chore: move existing relayer tests into net/tests/relay/epoch2x.rs, to keep them separate from new Nakamoto functionality --- stackslib/src/net/tests/relay/epoch2x.rs | 3719 ++++++++++++++++++++++ stackslib/src/net/tests/relay/mod.rs | 18 + 2 files changed, 3737 insertions(+) create mode 100644 stackslib/src/net/tests/relay/epoch2x.rs create mode 100644 stackslib/src/net/tests/relay/mod.rs diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs new file mode 100644 index 0000000000..e7409ac779 --- /dev/null +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -0,0 +1,3719 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::cell::RefCell; +use std::collections::HashMap; + +use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; +use clarity::vm::ast::ASTRules; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityVersion, MAX_CALL_STACK_DEPTH}; +use rand::Rng; +use stacks_common::address::AddressHashMode; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, StacksWorkScore, TrieHash}; +use stacks_common::types::Address; +use stacks_common::util::hash::{MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::sleep_ms; +use stacks_common::util::vrf::VRFProof; + +use crate::burnchains::tests::TestMiner; +use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; +use crate::chainstate::stacks::test::codec_all_transactions; +use crate::chainstate::stacks::tests::{ + make_coinbase, make_coinbase_with_nonce, make_smart_contract_with_version, + make_user_stacks_transfer, +}; +use crate::chainstate::stacks::{Error as ChainstateError, *}; +use crate::clarity_vm::clarity::ClarityConnection; +use crate::core::*; +use crate::net::api::getinfo::RPCPeerInfoData; +use crate::net::asn::*; +use crate::net::chat::*; +use crate::net::codec::*; +use crate::net::db::PeerDB; +use crate::net::download::*; +use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; +use crate::net::httpcore::StacksHttpMessage; +use crate::net::inv::inv2x::*; +use crate::net::p2p::*; +use crate::net::relay::*; +use crate::net::test::*; +use crate::net::tests::download::epoch2x::run_get_blocks_and_microblocks; +use crate::net::{Error as net_error, *}; +use crate::util_lib::test::*; + +#[test] +fn test_sample_neighbors() { + let neighbors: Vec<_> = (0..10) + .map(|i| { + let nk = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: i, + }; + nk + }) + .collect(); + + let neighbors_set: HashSet<_> = neighbors.clone().into_iter().collect(); + + let empty_distribution: HashMap = HashMap::new(); + + assert_eq!( + RelayerStats::sample_neighbors(empty_distribution.clone(), 0).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(empty_distribution.clone(), 1).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(empty_distribution.clone(), 5).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(empty_distribution.clone(), 10).len(), + 0 + ); + + let flat_distribution: HashMap<_, _> = neighbors.iter().map(|nk| (nk.clone(), 1)).collect(); + + assert_eq!( + RelayerStats::sample_neighbors(flat_distribution.clone(), 0).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(flat_distribution.clone(), 1).len(), + 1 + ); + + let flat_full_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(flat_distribution.clone(), 10) + .into_iter() + .collect(); + + assert_eq!(flat_full_sample_set, neighbors_set); + + let flat_partial_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(flat_distribution.clone(), 5) + .into_iter() + .collect(); + + assert_eq!(flat_partial_sample_set.len(), 5); + + let flat_unit_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(flat_distribution.clone(), 1) + .into_iter() + .collect(); + + assert_eq!(flat_unit_sample_set.len(), 1); + + let biased_distribution: HashMap<_, _> = neighbors + .iter() + .enumerate() + .map(|(i, nk)| (nk.clone(), if i == 0 { 10 } else { 1 })) + .collect(); + + assert_eq!( + RelayerStats::sample_neighbors(biased_distribution.clone(), 0).len(), + 0 + ); + assert_eq!( + RelayerStats::sample_neighbors(biased_distribution.clone(), 1).len(), + 1 + ); + + let flat_full_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(biased_distribution.clone(), 10) + .into_iter() + .collect(); + + assert_eq!(flat_full_sample_set, neighbors_set); + + let flat_partial_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(biased_distribution.clone(), 5) + .into_iter() + .collect(); + + assert_eq!(flat_partial_sample_set.len(), 5); + + let flat_unit_sample_set: HashSet<_> = + RelayerStats::sample_neighbors(biased_distribution.clone(), 1) + .into_iter() + .collect(); + + assert_eq!(flat_unit_sample_set.len(), 1); +} + +#[test] +fn test_relayer_stats_add_relyed_messages() { + let mut relay_stats = RelayerStats::new(); + + let all_transactions = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::Any, + &TransactionPostConditionMode::Allow, + ); + assert!(all_transactions.len() > MAX_RECENT_MESSAGES); + + eprintln!("Test with {} transactions", all_transactions.len()); + + let nk = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54321, + }; + + // never overflow recent messages for a neighbor + for (i, tx) in all_transactions.iter().enumerate() { + relay_stats.add_relayed_message(nk.clone(), tx); + + assert_eq!(relay_stats.recent_messages.len(), 1); + assert!(relay_stats.recent_messages.get(&nk).unwrap().len() <= MAX_RECENT_MESSAGES); + + assert_eq!(relay_stats.recent_updates.len(), 1); + } + + assert_eq!( + relay_stats.recent_messages.get(&nk).unwrap().len(), + MAX_RECENT_MESSAGES + ); + + for i in (all_transactions.len() - MAX_RECENT_MESSAGES)..MAX_RECENT_MESSAGES { + let digest = all_transactions[i].get_digest(); + let mut found = false; + for (_, hash) in relay_stats.recent_messages.get(&nk).unwrap().iter() { + found = found || (*hash == digest); + } + if !found { + assert!(false); + } + } + + // never overflow number of neighbors tracked + for i in 0..(MAX_RELAYER_STATS + 1) { + let mut new_nk = nk.clone(); + new_nk.peer_version += i as u32; + + relay_stats.add_relayed_message(new_nk, &all_transactions[0]); + + assert!(relay_stats.recent_updates.len() <= i + 1); + assert!(relay_stats.recent_updates.len() <= MAX_RELAYER_STATS); + } +} + +#[test] +fn test_relayer_merge_stats() { + let mut relayer_stats = RelayerStats::new(); + + let na = NeighborAddress { + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54321, + public_key_hash: Hash160([0u8; 20]), + }; + + let relay_stats = RelayStats { + num_messages: 1, + num_bytes: 1, + last_seen: 1, + }; + + let mut rs = HashMap::new(); + rs.insert(na.clone(), relay_stats.clone()); + + relayer_stats.merge_relay_stats(rs); + assert_eq!(relayer_stats.relay_stats.len(), 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().last_seen, 1); + assert_eq!(relayer_stats.relay_updates.len(), 1); + + let now = get_epoch_time_secs() + 60; + + let relay_stats_2 = RelayStats { + num_messages: 2, + num_bytes: 2, + last_seen: now, + }; + + let mut rs = HashMap::new(); + rs.insert(na.clone(), relay_stats_2.clone()); + + relayer_stats.merge_relay_stats(rs); + assert_eq!(relayer_stats.relay_stats.len(), 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 3); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 3); + assert!( + relayer_stats.relay_stats.get(&na).unwrap().last_seen < now + && relayer_stats.relay_stats.get(&na).unwrap().last_seen >= get_epoch_time_secs() + ); + assert_eq!(relayer_stats.relay_updates.len(), 1); + + let relay_stats_3 = RelayStats { + num_messages: 3, + num_bytes: 3, + last_seen: 0, + }; + + let mut rs = HashMap::new(); + rs.insert(na.clone(), relay_stats_3.clone()); + + relayer_stats.merge_relay_stats(rs); + assert_eq!(relayer_stats.relay_stats.len(), 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 3); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 3); + assert!( + relayer_stats.relay_stats.get(&na).unwrap().last_seen < now + && relayer_stats.relay_stats.get(&na).unwrap().last_seen >= get_epoch_time_secs() + ); + assert_eq!(relayer_stats.relay_updates.len(), 1); + + for i in 0..(MAX_RELAYER_STATS + 1) { + let na = NeighborAddress { + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 14321 + (i as u16), + public_key_hash: Hash160([0u8; 20]), + }; + + let now = get_epoch_time_secs() + (i as u64) + 1; + + let relay_stats = RelayStats { + num_messages: 1, + num_bytes: 1, + last_seen: now, + }; + + let mut rs = HashMap::new(); + rs.insert(na.clone(), relay_stats.clone()); + + relayer_stats.merge_relay_stats(rs); + assert!(relayer_stats.relay_stats.len() <= MAX_RELAYER_STATS); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 1); + assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().last_seen, now); + } +} + +#[test] +fn test_relay_inbound_peer_rankings() { + let mut relay_stats = RelayerStats::new(); + + let all_transactions = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::Any, + &TransactionPostConditionMode::Allow, + ); + assert!(all_transactions.len() > MAX_RECENT_MESSAGES); + + let nk_1 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54321, + }; + + let nk_2 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54322, + }; + + let nk_3 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), + port: 54323, + }; + + let dups = relay_stats.count_relay_dups(&all_transactions[0]); + assert_eq!(dups.len(), 0); + + relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); + + let dups = relay_stats.count_relay_dups(&all_transactions[0]); + assert_eq!(dups.len(), 1); + assert_eq!(*dups.get(&nk_1).unwrap(), 3); + + relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); + relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); + + let dups = relay_stats.count_relay_dups(&all_transactions[0]); + assert_eq!(dups.len(), 2); + assert_eq!(*dups.get(&nk_1).unwrap(), 3); + assert_eq!(*dups.get(&nk_2).unwrap(), 4); + + // total dups == 7 + let dist = relay_stats.get_inbound_relay_rankings( + &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], + &all_transactions[0], + 0, + ); + assert_eq!(*dist.get(&nk_1).unwrap(), 7 - 3 + 1); + assert_eq!(*dist.get(&nk_2).unwrap(), 7 - 4 + 1); + assert_eq!(*dist.get(&nk_3).unwrap(), 7 + 1); + + // high warmup period + let dist = relay_stats.get_inbound_relay_rankings( + &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], + &all_transactions[0], + 100, + ); + assert_eq!(*dist.get(&nk_1).unwrap(), 100 + 1); + assert_eq!(*dist.get(&nk_2).unwrap(), 100 + 1); + assert_eq!(*dist.get(&nk_3).unwrap(), 100 + 1); +} + +#[test] +fn test_relay_outbound_peer_rankings() { + let relay_stats = RelayerStats::new(); + + let asn1 = ASEntry4 { + prefix: 0x10000000, + mask: 8, + asn: 1, + org: 1, + }; + + let asn2 = ASEntry4 { + prefix: 0x20000000, + mask: 8, + asn: 2, + org: 2, + }; + + let nk_1 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x10, 0x11, 0x12, 0x13, + ]), + port: 54321, + }; + + let nk_2 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x20, 0x21, 0x22, 0x23, + ]), + port: 54322, + }; + + let nk_3 = NeighborKey { + peer_version: 12345, + network_id: 0x80000000, + addrbytes: PeerAddress([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x20, 0x21, 0x22, 0x24, + ]), + port: 54323, + }; + + let n1 = Neighbor { + addr: nk_1.clone(), + public_key: Secp256k1PublicKey::from_hex( + "0260569384baa726f877d47045931e5310383f18d0b243a9b6c095cee6ef19abd6", + ) + .unwrap(), + expire_block: 4302, + last_contact_time: 0, + allowed: 0, + denied: 0, + asn: 1, + org: 1, + in_degree: 0, + out_degree: 0, + }; + + let n2 = Neighbor { + addr: nk_2.clone(), + public_key: Secp256k1PublicKey::from_hex( + "02465f9ff58dfa8e844fec86fa5fc3fd59c75ea807e20d469b0a9f885d2891fbd4", + ) + .unwrap(), + expire_block: 4302, + last_contact_time: 0, + allowed: 0, + denied: 0, + asn: 2, + org: 2, + in_degree: 0, + out_degree: 0, + }; + + let n3 = Neighbor { + addr: nk_3.clone(), + public_key: Secp256k1PublicKey::from_hex( + "032d8a1ea2282c1514fdc1a6f21019561569d02a225cf7c14b4f803b0393cef031", + ) + .unwrap(), + expire_block: 4302, + last_contact_time: 0, + allowed: 0, + denied: 0, + asn: 2, + org: 2, + in_degree: 0, + out_degree: 0, + }; + + let peerdb = PeerDB::connect_memory( + 0x80000000, + 0, + 4032, + UrlString::try_from("http://foo.com").unwrap(), + &vec![asn1, asn2], + &vec![n1.clone(), n2.clone(), n3.clone()], + ) + .unwrap(); + + let asn_count = RelayerStats::count_ASNs( + peerdb.conn(), + &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], + ) + .unwrap(); + assert_eq!(asn_count.len(), 3); + assert_eq!(*asn_count.get(&nk_1).unwrap(), 1); + assert_eq!(*asn_count.get(&nk_2).unwrap(), 2); + assert_eq!(*asn_count.get(&nk_3).unwrap(), 2); + + let ranking = relay_stats + .get_outbound_relay_rankings(&peerdb, &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()]) + .unwrap(); + assert_eq!(ranking.len(), 3); + assert_eq!(*ranking.get(&nk_1).unwrap(), 5 - 1 + 1); + assert_eq!(*ranking.get(&nk_2).unwrap(), 5 - 2 + 1); + assert_eq!(*ranking.get(&nk_3).unwrap(), 5 - 2 + 1); + + let ranking = relay_stats + .get_outbound_relay_rankings(&peerdb, &vec![nk_2.clone(), nk_3.clone()]) + .unwrap(); + assert_eq!(ranking.len(), 2); + assert_eq!(*ranking.get(&nk_2).unwrap(), 4 - 2 + 1); + assert_eq!(*ranking.get(&nk_3).unwrap(), 4 - 2 + 1); +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_3_peers_push_available() { + with_timeout(600, || { + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_3_peers_push_available", + 4200, + 3, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 3); + + // peer 0 produces the blocks + peer_configs[0].connection_opts.disable_chat_neighbors = true; + + // peer 1 downloads the blocks from peer 0, and sends + // BlocksAvailable and MicroblocksAvailable messages to + // peer 2. + peer_configs[1].connection_opts.disable_chat_neighbors = true; + + // peer 2 learns about the blocks and microblocks from peer 1's + // BlocksAvaiable and MicroblocksAvailable messages, but + // not from inv syncs. + peer_configs[2].connection_opts.disable_chat_neighbors = true; + peer_configs[2].connection_opts.disable_inv_sync = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + peer_configs[2].connection_opts.disable_natpunch = true; + + // do not push blocks and microblocks; only announce them + peer_configs[0].connection_opts.disable_block_push = true; + peer_configs[1].connection_opts.disable_block_push = true; + peer_configs[2].connection_opts.disable_block_push = true; + + peer_configs[0].connection_opts.disable_microblock_push = true; + peer_configs[1].connection_opts.disable_microblock_push = true; + peer_configs[2].connection_opts.disable_microblock_push = true; + + // generous timeouts + peer_configs[0].connection_opts.connect_timeout = 180; + peer_configs[1].connection_opts.connect_timeout = 180; + peer_configs[2].connection_opts.connect_timeout = 180; + peer_configs[0].connection_opts.timeout = 180; + peer_configs[1].connection_opts.timeout = 180; + peer_configs[2].connection_opts.timeout = 180; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + let peer_2 = peer_configs[2].to_neighbor(); + + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + peer_configs[2].add_neighbor(&peer_1); + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + // only produce blocks for a single reward + // cycle, since pushing block/microblock + // announcements in reward cycles the remote + // peer doesn't know about won't work. + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + + assert_eq!(block_data.len(), 5); + + block_data + }, + |ref mut peers| { + // make sure peer 2's inv has an entry for peer 1, even + // though it's not doing an inv sync. This is required for the downloader to + // work, and for (Micro)BlocksAvailable messages to be accepted + let peer_1_nk = peers[1].to_neighbor().addr; + let peer_2_nk = peers[2].to_neighbor().addr; + let bc = peers[1].config.burnchain.clone(); + match peers[2].network.inv_state { + Some(ref mut inv_state) => { + if inv_state.get_stats(&peer_1_nk).is_none() { + test_debug!("initialize inv statistics for peer 1 in peer 2"); + inv_state.add_peer(peer_1_nk.clone(), true); + if let Some(ref mut stats) = inv_state.get_stats_mut(&peer_1_nk) { + stats.scans = 1; + stats.inv.merge_pox_inv(&bc, 0, 6, vec![0xff], false); + stats.inv.merge_blocks_inv( + 0, + 30, + vec![0, 0, 0, 0, 0], + vec![0, 0, 0, 0, 0], + false, + ); + } else { + panic!("Unable to instantiate inv stats for {:?}", &peer_1_nk); + } + } else { + test_debug!("peer 2 has inv state for peer 1"); + } + } + None => { + test_debug!("No inv state for peer 1"); + } + } + + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + let peer_1_nk = peers[1].to_neighbor().addr; + match peers[2].network.inv_state { + Some(ref mut inv_state) => { + if inv_state.get_stats(&peer_1_nk).is_none() { + test_debug!("initialize inv statistics for peer 1 in peer 2"); + inv_state.add_peer(peer_1_nk.clone(), true); + + inv_state + .get_stats_mut(&peer_1_nk) + .unwrap() + .inv + .num_reward_cycles = this_reward_cycle; + inv_state.get_stats_mut(&peer_1_nk).unwrap().inv.pox_inv = vec![0x3f]; + } else { + test_debug!("peer 2 has inv state for peer 1"); + } + } + None => { + test_debug!("No inv state for peer 2"); + } + } + + // peer 2 should never see a BlocksInv + // message. That would imply it asked for an inv + for (_, convo) in peers[2].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::BlocksInv), + 0 + ); + } + }, + |ref peer| { + // check peer health + // TODO + true + }, + |_| true, + ); + }) +} + +fn is_peer_connected(peer: &TestPeer, dest: &NeighborKey) -> bool { + let event_id = match peer.network.events.get(dest) { + Some(evid) => *evid, + None => { + return false; + } + }; + + match peer.network.peers.get(&event_id) { + Some(convo) => { + return convo.is_authenticated(); + } + None => { + return false; + } + } +} + +fn push_message( + peer: &mut TestPeer, + dest: &NeighborKey, + relay_hints: Vec, + msg: StacksMessageType, +) -> bool { + let event_id = match peer.network.events.get(dest) { + Some(evid) => *evid, + None => { + panic!("Unreachable peer: {:?}", dest); + } + }; + + let relay_msg = match peer.network.peers.get_mut(&event_id) { + Some(convo) => convo + .sign_relay_message( + &peer.network.local_peer, + &peer.network.chain_view, + relay_hints, + msg, + ) + .unwrap(), + None => { + panic!("No such event ID {} from neighbor {}", event_id, dest); + } + }; + + match peer.network.relay_signed_message(dest, relay_msg.clone()) { + Ok(_) => { + return true; + } + Err(net_error::OutboxOverflow) => { + test_debug!( + "{:?} outbox overflow; try again later", + &peer.to_neighbor().addr + ); + return false; + } + Err(net_error::SendError(msg)) => { + warn!( + "Failed to send to {:?}: SendError({})", + &peer.to_neighbor().addr, + msg + ); + return false; + } + Err(e) => { + test_debug!( + "{:?} encountered fatal error when forwarding: {:?}", + &peer.to_neighbor().addr, + &e + ); + assert!(false); + unreachable!(); + } + } +} + +fn http_rpc(peer_http: u16, request: StacksHttpRequest) -> Result { + use std::net::TcpStream; + + let mut sock = TcpStream::connect( + &format!("127.0.0.1:{}", peer_http) + .parse::() + .unwrap(), + ) + .unwrap(); + + let request_bytes = request.try_serialize().unwrap(); + match sock.write_all(&request_bytes) { + Ok(_) => {} + Err(e) => { + test_debug!("Client failed to write: {:?}", &e); + return Err(net_error::WriteError(e)); + } + } + + let mut resp = vec![]; + match sock.read_to_end(&mut resp) { + Ok(_) => { + if resp.len() == 0 { + test_debug!("Client did not receive any data"); + return Err(net_error::PermanentlyDrained); + } + } + Err(e) => { + test_debug!("Client failed to read: {:?}", &e); + return Err(net_error::ReadError(e)); + } + } + + test_debug!("Client received {} bytes", resp.len()); + let response = StacksHttp::parse_response( + &request.preamble().verb, + &request.preamble().path_and_query_str, + &resp, + ) + .unwrap(); + match response { + StacksHttpMessage::Response(x) => Ok(x), + _ => { + panic!("Did not receive a Response"); + } + } +} + +pub fn broadcast_message( + broadcaster: &mut TestPeer, + relay_hints: Vec, + msg: StacksMessageType, +) -> bool { + let request = NetworkRequest::Broadcast(relay_hints, msg); + match broadcaster.network.dispatch_request(request) { + Ok(_) => true, + Err(e) => { + error!("Failed to broadcast: {:?}", &e); + false + } + } +} + +fn push_block( + peer: &mut TestPeer, + dest: &NeighborKey, + relay_hints: Vec, + consensus_hash: ConsensusHash, + block: StacksBlock, +) -> bool { + test_debug!( + "{:?}: Push block {}/{} to {:?}", + peer.to_neighbor().addr, + &consensus_hash, + block.block_hash(), + dest + ); + + let sn = SortitionDB::get_block_snapshot_consensus( + peer.sortdb.as_ref().unwrap().conn(), + &consensus_hash, + ) + .unwrap() + .unwrap(); + let consensus_hash = sn.consensus_hash; + + let msg = StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum(consensus_hash, block)], + }); + push_message(peer, dest, relay_hints, msg) +} + +fn broadcast_block( + peer: &mut TestPeer, + relay_hints: Vec, + consensus_hash: ConsensusHash, + block: StacksBlock, +) -> bool { + test_debug!( + "{:?}: Broadcast block {}/{}", + peer.to_neighbor().addr, + &consensus_hash, + block.block_hash(), + ); + + let sn = SortitionDB::get_block_snapshot_consensus( + peer.sortdb.as_ref().unwrap().conn(), + &consensus_hash, + ) + .unwrap() + .unwrap(); + let consensus_hash = sn.consensus_hash; + + let msg = StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum(consensus_hash, block)], + }); + broadcast_message(peer, relay_hints, msg) +} + +fn push_microblocks( + peer: &mut TestPeer, + dest: &NeighborKey, + relay_hints: Vec, + consensus_hash: ConsensusHash, + block_hash: BlockHeaderHash, + microblocks: Vec, +) -> bool { + test_debug!( + "{:?}: Push {} microblocksblock {}/{} to {:?}", + peer.to_neighbor().addr, + microblocks.len(), + &consensus_hash, + &block_hash, + dest + ); + let msg = StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash), + microblocks: microblocks, + }); + push_message(peer, dest, relay_hints, msg) +} + +fn broadcast_microblocks( + peer: &mut TestPeer, + relay_hints: Vec, + consensus_hash: ConsensusHash, + block_hash: BlockHeaderHash, + microblocks: Vec, +) -> bool { + test_debug!( + "{:?}: broadcast {} microblocksblock {}/{}", + peer.to_neighbor().addr, + microblocks.len(), + &consensus_hash, + &block_hash, + ); + let msg = StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash), + microblocks: microblocks, + }); + broadcast_message(peer, relay_hints, msg) +} + +fn push_transaction( + peer: &mut TestPeer, + dest: &NeighborKey, + relay_hints: Vec, + tx: StacksTransaction, +) -> bool { + test_debug!( + "{:?}: Push tx {} to {:?}", + peer.to_neighbor().addr, + tx.txid(), + dest + ); + let msg = StacksMessageType::Transaction(tx); + push_message(peer, dest, relay_hints, msg) +} + +fn broadcast_transaction( + peer: &mut TestPeer, + relay_hints: Vec, + tx: StacksTransaction, +) -> bool { + test_debug!("{:?}: broadcast tx {}", peer.to_neighbor().addr, tx.txid(),); + let msg = StacksMessageType::Transaction(tx); + broadcast_message(peer, relay_hints, msg) +} + +fn http_get_info(http_port: u16) -> RPCPeerInfoData { + let mut request = HttpRequestPreamble::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), http_port), + "GET".to_string(), + "/v2/info".to_string(), + ); + request.keep_alive = false; + let getinfo = StacksHttpRequest::new(request, HttpRequestContents::new()); + let response = http_rpc(http_port, getinfo).unwrap(); + let peer_info = response.decode_peer_info().unwrap(); + peer_info +} + +fn http_post_block(http_port: u16, consensus_hash: &ConsensusHash, block: &StacksBlock) -> bool { + test_debug!( + "upload block {}/{} to localhost:{}", + consensus_hash, + block.block_hash(), + http_port + ); + let mut request = HttpRequestPreamble::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), http_port), + "POST".to_string(), + "/v2/blocks".to_string(), + ); + request.keep_alive = false; + let post_block = + StacksHttpRequest::new(request, HttpRequestContents::new().payload_stacks(block)); + + let response = http_rpc(http_port, post_block).unwrap(); + let accepted = response.decode_stacks_block_accepted().unwrap(); + accepted.accepted +} + +fn http_post_microblock( + http_port: u16, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + mblock: &StacksMicroblock, +) -> bool { + test_debug!( + "upload microblock {}/{}-{} to localhost:{}", + consensus_hash, + block_hash, + mblock.block_hash(), + http_port + ); + let mut request = HttpRequestPreamble::new_for_peer( + PeerHost::from_host_port("127.0.0.1".to_string(), http_port), + "POST".to_string(), + "/v2/microblocks".to_string(), + ); + request.keep_alive = false; + let tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); + let post_microblock = StacksHttpRequest::new( + request, + HttpRequestContents::new() + .payload_stacks(mblock) + .for_specific_tip(tip), + ); + + let response = http_rpc(http_port, post_microblock).unwrap(); + let payload = response.get_http_payload_ok().unwrap(); + let bhh: BlockHeaderHash = serde_json::from_value(payload.try_into().unwrap()).unwrap(); + return true; +} + +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( + outbound_test: bool, + disable_push: bool, +) { + with_timeout(600, move || { + let original_blocks_and_microblocks = RefCell::new(vec![]); + let blocks_and_microblocks = RefCell::new(vec![]); + let idx = RefCell::new(0); + let sent_blocks = RefCell::new(false); + let sent_microblocks = RefCell::new(false); + + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks", + 4210, + 2, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 2); + + // peer 0 produces the blocks and pushes them to peer 1 + // peer 1 receives the blocks and microblocks. It + // doesn't download them, nor does it try to get invs + peer_configs[0].connection_opts.disable_block_advertisement = true; + + peer_configs[1].connection_opts.disable_inv_sync = true; + peer_configs[1].connection_opts.disable_block_download = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + + // force usage of blocksavailable/microblocksavailable? + if disable_push { + peer_configs[0].connection_opts.disable_block_push = true; + peer_configs[0].connection_opts.disable_microblock_push = true; + peer_configs[1].connection_opts.disable_block_push = true; + peer_configs[1].connection_opts.disable_microblock_push = true; + } + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + + peer_configs[0].add_neighbor(&peer_1); + + if outbound_test { + // neighbor relationship is symmetric -- peer 1 has an outbound connection + // to peer 0. + peer_configs[1].add_neighbor(&peer_0); + } + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + let saved_copy: Vec<(ConsensusHash, StacksBlock, Vec)> = + block_data + .clone() + .drain(..) + .map(|(ch, blk_opt, mblocks_opt)| { + (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) + }) + .collect(); + *blocks_and_microblocks.borrow_mut() = saved_copy.clone(); + *original_blocks_and_microblocks.borrow_mut() = saved_copy; + block_data + }, + |ref mut peers| { + if !disable_push { + for peer in peers.iter_mut() { + // force peers to keep trying to process buffered data + peer.network.burnchain_tip.burn_header_hash = + BurnchainHeaderHash([0u8; 32]); + } + } + + // make sure peer 1's inv has an entry for peer 0, even + // though it's not doing an inv sync. This is required for the downloader to + // work + let peer_0_nk = peers[0].to_neighbor().addr; + let peer_1_nk = peers[1].to_neighbor().addr; + match peers[1].network.inv_state { + Some(ref mut inv_state) => { + if inv_state.get_stats(&peer_0_nk).is_none() { + test_debug!("initialize inv statistics for peer 0 in peer 1"); + inv_state.add_peer(peer_0_nk.clone(), true); + } else { + test_debug!("peer 1 has inv state for peer 0"); + } + } + None => { + test_debug!("No inv state for peer 1"); + } + } + + if is_peer_connected(&peers[0], &peer_1_nk) { + // randomly push a block and/or microblocks to peer 1. + let mut block_data = blocks_and_microblocks.borrow_mut(); + let original_block_data = original_blocks_and_microblocks.borrow(); + let mut next_idx = idx.borrow_mut(); + let data_to_push = { + if block_data.len() > 0 { + let (consensus_hash, block, microblocks) = + block_data[*next_idx].clone(); + Some((consensus_hash, block, microblocks)) + } else { + // start over (can happen if a message gets + // dropped due to a timeout) + test_debug!("Reset block transmission (possible timeout)"); + *block_data = (*original_block_data).clone(); + *next_idx = thread_rng().gen::() % block_data.len(); + let (consensus_hash, block, microblocks) = + block_data[*next_idx].clone(); + Some((consensus_hash, block, microblocks)) + } + }; + + if let Some((consensus_hash, block, microblocks)) = data_to_push { + test_debug!( + "Push block {}/{} and microblocks", + &consensus_hash, + block.block_hash() + ); + + let block_hash = block.block_hash(); + let mut sent_blocks = sent_blocks.borrow_mut(); + let mut sent_microblocks = sent_microblocks.borrow_mut(); + + let pushed_block = if !*sent_blocks { + push_block( + &mut peers[0], + &peer_1_nk, + vec![], + consensus_hash.clone(), + block, + ) + } else { + true + }; + + *sent_blocks = pushed_block; + + if pushed_block { + let pushed_microblock = if !*sent_microblocks { + push_microblocks( + &mut peers[0], + &peer_1_nk, + vec![], + consensus_hash, + block_hash, + microblocks, + ) + } else { + true + }; + + *sent_microblocks = pushed_microblock; + + if pushed_block && pushed_microblock { + block_data.remove(*next_idx); + if block_data.len() > 0 { + *next_idx = thread_rng().gen::() % block_data.len(); + } + *sent_blocks = false; + *sent_microblocks = false; + } + } + test_debug!("{} blocks/microblocks remaining", block_data.len()); + } + } + + // peer 0 should never see a GetBlocksInv message. + // peer 1 should never see a BlocksInv message + for (_, convo) in peers[0].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::GetBlocksInv), + 0 + ); + } + for (_, convo) in peers[1].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::BlocksInv), + 0 + ); + } + }, + |ref peer| { + // check peer health + // nothing should break + // TODO + true + }, + |_| true, + ); + }) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound() { + // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. + // nodes rely on blocksavailable/microblocksavailable to discover blocks + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, true) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound() { + // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT + // nodes rely on blocksavailable/microblocksavailable to discover blocks + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, true) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound_direct() { + // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. + // nodes may push blocks and microblocks directly to each other + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, false) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound_direct() { + // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT + // nodes may push blocks and microblocks directly to each other + test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, false) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_upload_blocks_http() { + with_timeout(600, || { + let (port_sx, port_rx) = std::sync::mpsc::sync_channel(1); + let (block_sx, block_rx) = std::sync::mpsc::sync_channel(1); + + std::thread::spawn(move || loop { + eprintln!("Get port"); + let remote_port: u16 = port_rx.recv().unwrap(); + eprintln!("Got port {}", remote_port); + + eprintln!("Send getinfo"); + let peer_info = http_get_info(remote_port); + eprintln!("Got getinfo! {:?}", &peer_info); + let idx = peer_info.stacks_tip_height as usize; + + eprintln!("Get blocks and microblocks"); + let blocks_and_microblocks: Vec<( + ConsensusHash, + Option, + Option>, + )> = block_rx.recv().unwrap(); + eprintln!("Got blocks and microblocks!"); + + if idx >= blocks_and_microblocks.len() { + eprintln!("Out of blocks to send!"); + return; + } + + eprintln!( + "Upload block {}", + &blocks_and_microblocks[idx].1.as_ref().unwrap().block_hash() + ); + http_post_block( + remote_port, + &blocks_and_microblocks[idx].0, + blocks_and_microblocks[idx].1.as_ref().unwrap(), + ); + for mblock in blocks_and_microblocks[idx].2.as_ref().unwrap().iter() { + eprintln!("Upload microblock {}", mblock.block_hash()); + http_post_microblock( + remote_port, + &blocks_and_microblocks[idx].0, + &blocks_and_microblocks[idx].1.as_ref().unwrap().block_hash(), + mblock, + ); + } + }); + + let original_blocks_and_microblocks = RefCell::new(vec![]); + let port_sx_cell = RefCell::new(port_sx); + let block_sx_cell = RefCell::new(block_sx); + + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_upload_blocks_http", + 4250, + 2, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 2); + + // peer 0 produces the blocks + peer_configs[0].connection_opts.disable_chat_neighbors = true; + + // peer 0 sends them to peer 1 + peer_configs[1].connection_opts.disable_chat_neighbors = true; + peer_configs[1].connection_opts.disable_inv_sync = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + + // generous timeouts + peer_configs[0].connection_opts.timeout = 180; + peer_configs[1].connection_opts.timeout = 180; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + // only produce blocks for a single reward + // cycle, since pushing block/microblock + // announcements in reward cycles the remote + // peer doesn't know about won't work. + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + + assert_eq!(block_data.len(), 5); + + *original_blocks_and_microblocks.borrow_mut() = block_data.clone(); + + block_data + }, + |ref mut peers| { + let blocks_and_microblocks = original_blocks_and_microblocks.borrow().clone(); + let remote_port = peers[1].config.http_port; + + let port_sx = port_sx_cell.borrow_mut(); + let block_sx = block_sx_cell.borrow_mut(); + + let _ = (*port_sx).try_send(remote_port); + let _ = (*block_sx).try_send(blocks_and_microblocks); + }, + |ref peer| { + // check peer health + // TODO + true + }, + |_| true, + ); + }) +} + +fn make_test_smart_contract_transaction( + peer: &mut TestPeer, + name: &str, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, +) -> StacksTransaction { + // make a smart contract + let contract = " + (define-data-var bar int 0) + (define-public (get-bar) (ok (var-get bar))) + (define-public (set-bar (x int) (y int)) + (begin (var-set bar (/ x y)) (ok (var-get bar))))"; + + let cost_limits = peer.config.connection_opts.read_only_call_limit.clone(); + + let tx_contract = peer + .with_mining_state( + |ref mut sortdb, ref mut miner, ref mut spending_account, ref mut stacks_node| { + let mut tx_contract = StacksTransaction::new( + TransactionVersion::Testnet, + spending_account.as_transaction_auth().unwrap().into(), + TransactionPayload::new_smart_contract( + &name.to_string(), + &contract.to_string(), + None, + ) + .unwrap(), + ); + + let chain_tip = + StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); + let cur_nonce = stacks_node + .chainstate + .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_nonce( + &spending_account.origin_address().unwrap().into(), + ) + .unwrap() + }) + }) + .unwrap(); + + test_debug!( + "Nonce of {:?} is {} at {}/{}", + &spending_account.origin_address().unwrap(), + cur_nonce, + consensus_hash, + block_hash + ); + + // spending_account.set_nonce(cur_nonce + 1); + + tx_contract.chain_id = 0x80000000; + tx_contract.auth.set_origin_nonce(cur_nonce); + tx_contract.set_tx_fee(MINIMUM_TX_FEE_RATE_PER_BYTE * 500); + + let mut tx_signer = StacksTransactionSigner::new(&tx_contract); + spending_account.sign_as_origin(&mut tx_signer); + + let tx_contract_signed = tx_signer.get_tx().unwrap(); + + test_debug!( + "make transaction {:?} off of {:?}/{:?}: {:?}", + &tx_contract_signed.txid(), + consensus_hash, + block_hash, + &tx_contract_signed + ); + + Ok(tx_contract_signed) + }, + ) + .unwrap(); + + tx_contract +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_push_transactions() { + with_timeout(600, || { + let blocks_and_microblocks = RefCell::new(vec![]); + let blocks_idx = RefCell::new(0); + let sent_txs = RefCell::new(vec![]); + let done = RefCell::new(false); + + let peers = run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_2_peers_push_transactions", + 4220, + 2, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 2); + + // peer 0 generates blocks and microblocks, and pushes + // them to peer 1. Peer 0 also generates transactions + // and pushes them to peer 1. + peer_configs[0].connection_opts.disable_block_advertisement = true; + + // let peer 0 drive this test, as before, by controlling + // when peer 1 sees blocks. + peer_configs[1].connection_opts.disable_inv_sync = true; + peer_configs[1].connection_opts.disable_block_download = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + peer_configs[0].connection_opts.outbox_maxlen = 100; + peer_configs[1].connection_opts.inbox_maxlen = 100; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + + let initial_balances = vec![ + ( + PrincipalData::from( + peer_configs[0].spending_account.origin_address().unwrap(), + ), + 1000000, + ), + ( + PrincipalData::from( + peer_configs[1].spending_account.origin_address().unwrap(), + ), + 1000000, + ), + ]; + + peer_configs[0].initial_balances = initial_balances.clone(); + peer_configs[1].initial_balances = initial_balances.clone(); + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for b in 0..num_blocks { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + if b == 0 { + // prime with first block + peers[i].process_stacks_epoch_at_tip(&stacks_block, &vec![]); + } + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + *blocks_and_microblocks.borrow_mut() = block_data + .clone() + .drain(..) + .map(|(ch, blk_opt, mblocks_opt)| (ch, blk_opt.unwrap(), mblocks_opt.unwrap())) + .collect(); + block_data + }, + |ref mut peers| { + let peer_0_nk = peers[0].to_neighbor().addr; + let peer_1_nk = peers[1].to_neighbor().addr; + + // peers must be connected to each other + let mut peer_0_to_1 = false; + let mut peer_1_to_0 = false; + for (nk, event_id) in peers[0].network.events.iter() { + match peers[0].network.peers.get(event_id) { + Some(convo) => { + if *nk == peer_1_nk { + peer_0_to_1 = true; + } + } + None => {} + } + } + for (nk, event_id) in peers[1].network.events.iter() { + match peers[1].network.peers.get(event_id) { + Some(convo) => { + if *nk == peer_0_nk { + peer_1_to_0 = true; + } + } + None => {} + } + } + + if !peer_0_to_1 || !peer_1_to_0 { + test_debug!( + "Peers not bi-directionally connected: 0->1 = {}, 1->0 = {}", + peer_0_to_1, + peer_1_to_0 + ); + return; + } + + // make sure peer 2's inv has an entry for peer 1, even + // though it's not doing an inv sync. + match peers[1].network.inv_state { + Some(ref mut inv_state) => { + if inv_state.get_stats(&peer_0_nk).is_none() { + test_debug!("initialize inv statistics for peer 0 in peer 1"); + inv_state.add_peer(peer_0_nk, true); + } else { + test_debug!("peer 1 has inv state for peer 0"); + } + } + None => { + test_debug!("No inv state for peer 1"); + } + } + + let done_flag = *done.borrow(); + if is_peer_connected(&peers[0], &peer_1_nk) { + // only submit the next transaction if the previous + // one is accepted + let has_last_transaction = { + let expected_txs: std::cell::Ref<'_, Vec> = + sent_txs.borrow(); + if let Some(tx) = (*expected_txs).last() { + let txid = tx.txid(); + if !peers[1].mempool.as_ref().unwrap().has_tx(&txid) { + debug!("Peer 1 still waiting for transaction {}", &txid); + push_transaction(&mut peers[0], &peer_1_nk, vec![], (*tx).clone()); + false + } else { + true + } + } else { + true + } + }; + + if has_last_transaction { + // push blocks and microblocks in order, and push a + // transaction that can only be validated once the + // block and microblocks are processed. + let ( + ( + block_consensus_hash, + block, + microblocks_consensus_hash, + microblocks_block_hash, + microblocks, + ), + idx, + ) = { + let block_data = blocks_and_microblocks.borrow(); + let mut idx = blocks_idx.borrow_mut(); + + let microblocks = block_data[*idx].2.clone(); + let microblocks_consensus_hash = block_data[*idx].0.clone(); + let microblocks_block_hash = block_data[*idx].1.block_hash(); + + *idx += 1; + if *idx >= block_data.len() { + *idx = 1; + } + + let block = block_data[*idx].1.clone(); + let block_consensus_hash = block_data[*idx].0.clone(); + ( + ( + block_consensus_hash, + block, + microblocks_consensus_hash, + microblocks_block_hash, + microblocks, + ), + *idx, + ) + }; + + if !done_flag { + test_debug!( + "Push microblocks built by {}/{} (idx={})", + µblocks_consensus_hash, + µblocks_block_hash, + idx + ); + + let block_hash = block.block_hash(); + push_microblocks( + &mut peers[0], + &peer_1_nk, + vec![], + microblocks_consensus_hash, + microblocks_block_hash, + microblocks, + ); + + test_debug!( + "Push block {}/{} and microblocks (idx = {})", + &block_consensus_hash, + block.block_hash(), + idx + ); + push_block( + &mut peers[0], + &peer_1_nk, + vec![], + block_consensus_hash.clone(), + block, + ); + + // create a transaction against the resulting + // (anchored) chain tip + let tx = make_test_smart_contract_transaction( + &mut peers[0], + &format!("test-contract-{}", &block_hash.to_hex()[0..10]), + &block_consensus_hash, + &block_hash, + ); + + // push or post + push_transaction(&mut peers[0], &peer_1_nk, vec![], tx.clone()); + + let mut expected_txs = sent_txs.borrow_mut(); + expected_txs.push(tx); + } else { + test_debug!("Done pushing data"); + } + } + } + + // peer 0 should never see a GetBlocksInv message. + // peer 1 should never see a BlocksInv message + for (_, convo) in peers[0].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::GetBlocksInv), + 0 + ); + } + for (_, convo) in peers[1].network.peers.iter() { + assert_eq!( + convo + .stats + .get_message_recv_count(StacksMessageID::BlocksInv), + 0 + ); + } + }, + |ref peer| { + // check peer health + // nothing should break + // TODO + true + }, + |ref mut peers| { + // all blocks downloaded. only stop if peer 1 has + // all the transactions + let mut done_flag = done.borrow_mut(); + *done_flag = true; + + let txs = + MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); + test_debug!("Peer 1 has {} txs", txs.len()); + txs.len() == sent_txs.borrow().len() + }, + ); + + // peer 1 should have all the transactions + let blocks_and_microblocks = blocks_and_microblocks.into_inner(); + + let txs = MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); + let expected_txs = sent_txs.into_inner(); + for tx in txs.iter() { + let mut found = false; + for expected_tx in expected_txs.iter() { + if tx.tx.txid() == expected_tx.txid() { + found = true; + break; + } + } + if !found { + panic!("Transaction not found: {:?}", &tx.tx); + } + } + + // peer 1 should have 1 tx per chain tip + for ((consensus_hash, block, _), sent_tx) in + blocks_and_microblocks.iter().zip(expected_txs.iter()) + { + let block_hash = block.block_hash(); + let tx_infos = MemPoolDB::get_txs_after( + peers[1].mempool.as_ref().unwrap().conn(), + consensus_hash, + &block_hash, + 0, + 1000, + ) + .unwrap(); + test_debug!( + "Check {}/{} (height {}): expect {}", + &consensus_hash, + &block_hash, + block.header.total_work.work, + &sent_tx.txid() + ); + assert_eq!(tx_infos.len(), 1); + assert_eq!(tx_infos[0].tx.txid(), sent_tx.txid()); + } + }) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_peers_broadcast() { + with_timeout(600, || { + let blocks_and_microblocks = RefCell::new(vec![]); + let blocks_idx = RefCell::new(0); + let sent_txs = RefCell::new(vec![]); + let done = RefCell::new(false); + let num_peers = 3; + let privk = StacksPrivateKey::new(); + + let peers = run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_peers_broadcast", + 4230, + num_peers, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), num_peers); + + // peer 0 generates blocks and microblocks, and pushes + // them to peers 1..n. Peer 0 also generates transactions + // and broadcasts them to the network. + + peer_configs[0].connection_opts.disable_inv_sync = true; + peer_configs[0].connection_opts.disable_inv_chat = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state. + for i in 0..peer_configs.len() { + peer_configs[i].connection_opts.disable_natpunch = true; + peer_configs[i].connection_opts.disable_network_prune = true; + peer_configs[i].connection_opts.timeout = 600; + peer_configs[i].connection_opts.connect_timeout = 600; + + // do one walk + peer_configs[i].connection_opts.num_initial_walks = 0; + peer_configs[i].connection_opts.walk_retry_count = 0; + peer_configs[i].connection_opts.walk_interval = 600; + + // don't throttle downloads + peer_configs[i].connection_opts.download_interval = 0; + peer_configs[i].connection_opts.inv_sync_interval = 0; + + let max_inflight = peer_configs[i].connection_opts.max_inflight_blocks; + peer_configs[i].connection_opts.max_clients_per_host = + ((num_peers + 1) as u64) * max_inflight; + peer_configs[i].connection_opts.soft_max_clients_per_host = + ((num_peers + 1) as u64) * max_inflight; + peer_configs[i].connection_opts.num_neighbors = (num_peers + 1) as u64; + peer_configs[i].connection_opts.soft_num_neighbors = (num_peers + 1) as u64; + } + + let initial_balances = vec![( + PrincipalData::from(peer_configs[0].spending_account.origin_address().unwrap()), + 1000000, + )]; + + for i in 0..peer_configs.len() { + peer_configs[i].initial_balances = initial_balances.clone(); + } + + // connectivity + let peer_0 = peer_configs[0].to_neighbor(); + for i in 1..peer_configs.len() { + peer_configs[i].add_neighbor(&peer_0); + let peer_i = peer_configs[i].to_neighbor(); + peer_configs[0].add_neighbor(&peer_i); + } + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + *blocks_and_microblocks.borrow_mut() = block_data + .clone() + .drain(..) + .map(|(ch, blk_opt, mblocks_opt)| (ch, blk_opt.unwrap(), mblocks_opt.unwrap())) + .collect(); + block_data + }, + |ref mut peers| { + for peer in peers.iter_mut() { + // force peers to keep trying to process buffered data + peer.network.burnchain_tip.burn_header_hash = BurnchainHeaderHash([0u8; 32]); + } + + let done_flag = *done.borrow(); + + let mut connectivity_0_to_n = HashSet::new(); + let mut connectivity_n_to_0 = HashSet::new(); + + let peer_0_nk = peers[0].to_neighbor().addr; + + for (nk, event_id) in peers[0].network.events.iter() { + if let Some(convo) = peers[0].network.peers.get(event_id) { + if convo.is_authenticated() { + connectivity_0_to_n.insert(nk.clone()); + } + } + } + for i in 1..peers.len() { + for (nk, event_id) in peers[i].network.events.iter() { + if *nk != peer_0_nk { + continue; + } + + if let Some(convo) = peers[i].network.peers.get(event_id) { + if convo.is_authenticated() { + if let Some(inv_state) = &peers[i].network.inv_state { + if let Some(inv_stats) = inv_state.block_stats.get(&peer_0_nk) { + if inv_stats.inv.num_reward_cycles >= 5 { + connectivity_n_to_0.insert(peers[i].to_neighbor().addr); + } + } + } + } + } + } + } + + if connectivity_0_to_n.len() < peers.len() - 1 + || connectivity_n_to_0.len() < peers.len() - 1 + { + test_debug!( + "Network not connected: 0 --> N = {}, N --> 0 = {}", + connectivity_0_to_n.len(), + connectivity_n_to_0.len() + ); + return; + } + + let ((tip_consensus_hash, tip_block, _), idx) = { + let block_data = blocks_and_microblocks.borrow(); + let idx = blocks_idx.borrow(); + (block_data[(*idx as usize).saturating_sub(1)].clone(), *idx) + }; + + if idx > 0 { + let mut caught_up = true; + for i in 1..peers.len() { + peers[i] + .with_db_state(|sortdb, chainstate, relayer, mempool| { + let (canonical_consensus_hash, canonical_block_hash) = + SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) + .unwrap(); + + if canonical_consensus_hash != tip_consensus_hash + || canonical_block_hash != tip_block.block_hash() + { + debug!( + "Peer {} is not caught up yet (at {}/{}, need {}/{})", + i + 1, + &canonical_consensus_hash, + &canonical_block_hash, + &tip_consensus_hash, + &tip_block.block_hash() + ); + caught_up = false; + } + Ok(()) + }) + .unwrap(); + } + if !caught_up { + return; + } + } + + // caught up! + // find next block + let ((consensus_hash, block, microblocks), idx) = { + let block_data = blocks_and_microblocks.borrow(); + let mut idx = blocks_idx.borrow_mut(); + if *idx >= block_data.len() { + test_debug!("Out of blocks and microblocks to push"); + return; + } + + let ret = block_data[*idx].clone(); + *idx += 1; + (ret, *idx) + }; + + if !done_flag { + test_debug!( + "Broadcast block {}/{} and microblocks (idx = {})", + &consensus_hash, + block.block_hash(), + idx + ); + + let block_hash = block.block_hash(); + + // create a transaction against the current + // (anchored) chain tip + let tx = make_test_smart_contract_transaction( + &mut peers[0], + &format!("test-contract-{}", &block_hash.to_hex()[0..10]), + &tip_consensus_hash, + &tip_block.block_hash(), + ); + + let mut expected_txs = sent_txs.borrow_mut(); + expected_txs.push(tx.clone()); + + test_debug!( + "Broadcast {}/{} and its microblocks", + &consensus_hash, + &block.block_hash() + ); + // next block + broadcast_block(&mut peers[0], vec![], consensus_hash.clone(), block); + broadcast_microblocks( + &mut peers[0], + vec![], + consensus_hash, + block_hash, + microblocks, + ); + + // NOTE: first transaction will be dropped since the other nodes haven't + // processed the first-ever Stacks block when their relayer code gets + // around to considering it. + broadcast_transaction(&mut peers[0], vec![], tx); + } else { + test_debug!("Done pushing data"); + } + }, + |ref peer| { + // check peer health -- no message errors + // (i.e. no relay cycles) + for (_, convo) in peer.network.peers.iter() { + assert_eq!(convo.stats.msgs_err, 0); + } + true + }, + |ref mut peers| { + // all blocks downloaded. only stop if peer 1 has + // all the transactions + let mut done_flag = done.borrow_mut(); + *done_flag = true; + + let mut ret = true; + for i in 1..peers.len() { + let txs = + MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); + test_debug!("Peer {} has {} txs", i + 1, txs.len()); + ret = ret && txs.len() == sent_txs.borrow().len() - 1; + } + ret + }, + ); + + // peers 1..n should have all the transactions + let blocks_and_microblocks = blocks_and_microblocks.into_inner(); + let expected_txs = sent_txs.into_inner(); + + for i in 1..peers.len() { + let txs = MemPoolDB::get_all_txs(peers[i].mempool.as_ref().unwrap().conn()).unwrap(); + for tx in txs.iter() { + let mut found = false; + for expected_tx in expected_txs.iter() { + if tx.tx.txid() == expected_tx.txid() { + found = true; + break; + } + } + if !found { + panic!("Transaction not found: {:?}", &tx.tx); + } + } + + // peers 1..n should have 1 tx per chain tip (except for the first block) + for ((consensus_hash, block, _), sent_tx) in + blocks_and_microblocks.iter().zip(expected_txs[1..].iter()) + { + let block_hash = block.block_hash(); + let tx_infos = MemPoolDB::get_txs_after( + peers[i].mempool.as_ref().unwrap().conn(), + consensus_hash, + &block_hash, + 0, + 1000, + ) + .unwrap(); + assert_eq!(tx_infos.len(), 1); + assert_eq!(tx_infos[0].tx.txid(), sent_tx.txid()); + } + } + }) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_antientropy() { + with_timeout(600, move || { + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_2_peers_antientropy", + 4240, + 2, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 2); + + // peer 0 mines blocks, but does not advertize them nor announce them as + // available via its inventory. It only uses its anti-entropy protocol to + // discover that peer 1 doesn't have them, and sends them to peer 1 that way. + peer_configs[0].connection_opts.disable_block_advertisement = true; + peer_configs[0].connection_opts.disable_block_download = true; + + peer_configs[1].connection_opts.disable_block_download = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + + // permit anti-entropy protocol even if nat'ed + peer_configs[0].connection_opts.antientropy_public = true; + peer_configs[1].connection_opts.antientropy_public = true; + peer_configs[0].connection_opts.antientropy_retry = 1; + peer_configs[1].connection_opts.antientropy_retry = 1; + + // make peer 0 go slowly + peer_configs[0].connection_opts.max_block_push = 2; + peer_configs[0].connection_opts.max_microblock_push = 2; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + + // peer 0 is inbound to peer 1 + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + if peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap() + != this_reward_cycle + { + continue; + } + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + + // cap with an empty sortition, so the antientropy protocol picks up all stacks + // blocks + let (_, burn_header_hash, consensus_hash) = peers[0].next_burnchain_block(vec![]); + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(vec![]); + } + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push((sn.consensus_hash.clone(), None, None)); + + block_data + }, + |ref mut peers| { + for peer in peers.iter_mut() { + // force peers to keep trying to process buffered data + peer.network.burnchain_tip.burn_header_hash = BurnchainHeaderHash([0u8; 32]); + } + + let tip_opt = peers[1] + .with_db_state(|sortdb, chainstate, _, _| { + let tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); + Ok(tip_opt) + }) + .unwrap(); + }, + |ref peer| { + // check peer health + // nothing should break + // TODO + true + }, + |_| true, + ); + }) +} + +#[test] +#[ignore] +fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { + with_timeout(600, move || { + let sortitions = RefCell::new(vec![]); + let blocks_and_microblocks = RefCell::new(vec![]); + let idx = RefCell::new(0usize); + let pushed_idx = RefCell::new(0usize); + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_2_peers_buffered_messages", + 4242, + 2, + |ref mut peer_configs| { + // build initial network topology. + assert_eq!(peer_configs.len(), 2); + + // peer 0 mines blocks, but it does not present its inventory. + peer_configs[0].connection_opts.disable_inv_chat = true; + peer_configs[0].connection_opts.disable_block_download = true; + + peer_configs[1].connection_opts.disable_block_download = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + // disable nat punches -- disconnect/reconnect + // clears inv state + peer_configs[0].connection_opts.disable_natpunch = true; + peer_configs[1].connection_opts.disable_natpunch = true; + + // peer 0 ignores peer 1's handshakes + peer_configs[0].connection_opts.disable_inbound_handshakes = true; + + // disable anti-entropy + peer_configs[0].connection_opts.max_block_push = 0; + peer_configs[0].connection_opts.max_microblock_push = 0; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + + // peer 0 is inbound to peer 1 + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + }, + |num_blocks, ref mut peers| { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let this_reward_cycle = peers[0] + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // build up block data to replicate + let mut block_data = vec![]; + for block_num in 0..num_blocks { + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + if block_num == 0 { + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + peers[i].process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + } else { + let mut all_sortitions = sortitions.borrow_mut(); + all_sortitions.push(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + *blocks_and_microblocks.borrow_mut() = block_data.clone()[1..] + .to_vec() + .drain(..) + .map(|(ch, blk_opt, mblocks_opt)| (ch, blk_opt.unwrap(), mblocks_opt.unwrap())) + .collect(); + block_data + }, + |ref mut peers| { + for peer in peers.iter_mut() { + // force peers to keep trying to process buffered data + peer.network.burnchain_tip.burn_header_hash = BurnchainHeaderHash([0u8; 32]); + } + + let mut i = idx.borrow_mut(); + let mut pushed_i = pushed_idx.borrow_mut(); + let all_sortitions = sortitions.borrow(); + let all_blocks_and_microblocks = blocks_and_microblocks.borrow(); + let peer_0_nk = peers[0].to_neighbor().addr; + let peer_1_nk = peers[1].to_neighbor().addr; + + let tip_opt = peers[1] + .with_db_state(|sortdb, chainstate, _, _| { + let tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); + Ok(tip_opt) + }) + .unwrap(); + + if !is_peer_connected(&peers[0], &peer_1_nk) { + debug!("Peer 0 not connected to peer 1"); + return; + } + + if let Some(tip) = tip_opt { + debug!( + "Push at {}, need {}", + tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1, + *pushed_i + ); + if tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1 + == *pushed_i as u64 + { + // next block + push_block( + &mut peers[0], + &peer_1_nk, + vec![], + (*all_blocks_and_microblocks)[*pushed_i].0.clone(), + (*all_blocks_and_microblocks)[*pushed_i].1.clone(), + ); + push_microblocks( + &mut peers[0], + &peer_1_nk, + vec![], + (*all_blocks_and_microblocks)[*pushed_i].0.clone(), + (*all_blocks_and_microblocks)[*pushed_i].1.block_hash(), + (*all_blocks_and_microblocks)[*pushed_i].2.clone(), + ); + *pushed_i += 1; + } + debug!( + "Sortition at {}, need {}", + tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1, + *i + ); + if tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1 + == *i as u64 + { + let event_id = { + let mut ret = 0; + for (nk, event_id) in peers[1].network.events.iter() { + ret = *event_id; + break; + } + if ret == 0 { + return; + } + ret + }; + let mut update_sortition = false; + for (event_id, pending) in peers[1].network.pending_messages.iter() { + debug!("Pending at {} is ({}, {})", *i, event_id, pending.len()); + if pending.len() >= 1 { + update_sortition = true; + } + } + if update_sortition { + debug!("Advance sortition!"); + peers[1].next_burnchain_block_raw((*all_sortitions)[*i].clone()); + *i += 1; + } + } + } + }, + |ref peer| { + // check peer health + // nothing should break + // TODO + true + }, + |_| true, + ); + }) +} + +pub fn make_contract_tx( + sender: &StacksPrivateKey, + cur_nonce: u64, + tx_fee: u64, + name: &str, + contract: &str, +) -> StacksTransaction { + let sender_spending_condition = + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sender)) + .expect("Failed to create p2pkh spending condition from public key."); + + let spending_auth = TransactionAuth::Standard(sender_spending_condition); + + let mut tx_contract = StacksTransaction::new( + TransactionVersion::Testnet, + spending_auth.clone(), + TransactionPayload::new_smart_contract(&name.to_string(), &contract.to_string(), None) + .unwrap(), + ); + + tx_contract.chain_id = 0x80000000; + tx_contract.auth.set_origin_nonce(cur_nonce); + tx_contract.set_tx_fee(tx_fee); + + let mut tx_signer = StacksTransactionSigner::new(&tx_contract); + tx_signer.sign_origin(sender).unwrap(); + + let tx_contract_signed = tx_signer.get_tx().unwrap(); + tx_contract_signed +} + +#[test] +fn test_static_problematic_tests() { + let spender_sk_1 = StacksPrivateKey::new(); + let spender_sk_2 = StacksPrivateKey::new(); + let spender_sk_3 = StacksPrivateKey::new(); + + let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1; + let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); + let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize); + let tx_edge_body = format!("{}u1 {}", tx_edge_body_start, tx_edge_body_end); + + let tx_edge = make_contract_tx( + &spender_sk_1, + 0, + (tx_edge_body.len() * 100) as u64, + "test-edge", + &tx_edge_body, + ); + + // something just over the limit of the expression depth + let exceeds_repeat_factor = edge_repeat_factor + 1; + let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + + let tx_exceeds = make_contract_tx( + &spender_sk_2, + 0, + (tx_exceeds_body.len() * 100) as u64, + "test-exceeds", + &tx_exceeds_body, + ); + + // something stupidly high over the expression depth + let high_repeat_factor = 128 * 1024; + let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); + let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); + let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + + let tx_high = make_contract_tx( + &spender_sk_3, + 0, + (tx_high_body.len() * 100) as u64, + "test-high", + &tx_high_body, + ); + assert!(Relayer::static_check_problematic_relayed_tx( + false, + StacksEpochId::Epoch2_05, + &tx_edge, + ASTRules::Typical + ) + .is_ok()); + assert!(Relayer::static_check_problematic_relayed_tx( + false, + StacksEpochId::Epoch2_05, + &tx_exceeds, + ASTRules::Typical + ) + .is_ok()); + assert!(Relayer::static_check_problematic_relayed_tx( + false, + StacksEpochId::Epoch2_05, + &tx_high, + ASTRules::Typical + ) + .is_ok()); + + assert!(Relayer::static_check_problematic_relayed_tx( + false, + StacksEpochId::Epoch2_05, + &tx_edge, + ASTRules::Typical + ) + .is_ok()); + assert!(!Relayer::static_check_problematic_relayed_tx( + false, + StacksEpochId::Epoch2_05, + &tx_exceeds, + ASTRules::PrecheckSize + ) + .is_ok()); + assert!(!Relayer::static_check_problematic_relayed_tx( + false, + StacksEpochId::Epoch2_05, + &tx_high, + ASTRules::PrecheckSize + ) + .is_ok()); +} + +#[test] +fn process_new_blocks_rejects_problematic_asts() { + let privk = StacksPrivateKey::from_hex( + "42faca653724860da7a41bfcef7e6ba78db55146f6900de8cb2a9f760ffac70c01", + ) + .unwrap(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk)], + ) + .unwrap(); + + let initial_balances = vec![(addr.to_account_principal(), 100000000000)]; + + let mut peer_config = TestPeerConfig::new(function_name!(), 32019, 32020); + peer_config.initial_balances = initial_balances; + peer_config.epochs = Some(vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 1, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 1, + end_height: i64::MAX as u64, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + ]); + let burnchain = peer_config.burnchain.clone(); + + // activate new AST rules right away + let mut peer = TestPeer::new(peer_config); + let mut sortdb = peer.sortdb.take().unwrap(); + { + let mut tx = sortdb + .tx_begin() + .expect("FATAL: failed to begin tx on sortition DB"); + SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 1) + .expect("FATAL: failed to override AST PrecheckSize rule height"); + tx.commit() + .expect("FATAL: failed to commit sortition DB transaction"); + } + peer.sortdb = Some(sortdb); + + let chainstate_path = peer.chainstate_path.clone(); + + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + }; + + let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; + let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); + + let high_repeat_factor = 128 * 1024; + let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); + let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); + let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + + let bad_tx = make_contract_tx( + &privk, + 0, + (tx_high_body.len() * 100) as u64, + "test-high", + &tx_high_body, + ); + let bad_txid = bad_tx.txid(); + let bad_tx_len = { + let mut bytes = vec![]; + bad_tx.consensus_serialize(&mut bytes).unwrap(); + bytes.len() as u64 + }; + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + + let mblock_privk = StacksPrivateKey::new(); + + // make one tenure with a valid block, but problematic microblocks + let (burn_ops, block, microblocks) = peer.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let coinbase_tx = make_coinbase(miner, 0); + + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &burnchain, + &parent_tip, + vrf_proof.clone(), + tip.total_burn, + Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), + ) + .unwrap(); + + let block = StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx.clone()], + ) + .unwrap() + .0; + + (block, vec![]) + }, + ); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + peer.process_stacks_epoch(&block, &consensus_hash, &vec![]); + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + + let (burn_ops, bad_block, mut microblocks) = peer.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_header_hash, + ); + let coinbase_tx = make_coinbase(miner, 0); + + let mblock_privk = miner.next_microblock_privkey(); + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &burnchain, + &parent_tip, + vrf_proof.clone(), + tip.total_burn, + Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), + ) + .unwrap(); + + // this tx would be problematic without our checks + if let Err(ChainstateError::ProblematicTransaction(txid)) = + StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx.clone(), bad_tx.clone()], + ) + { + assert_eq!(txid, bad_txid); + } else { + panic!("Did not get Error::ProblematicTransaction"); + } + + // make a bad block anyway + // don't worry about the state root + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &burnchain, + &parent_tip, + vrf_proof.clone(), + tip.total_burn, + Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), + ) + .unwrap(); + let bad_block = StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx.clone()], + ) + .unwrap(); + + let mut bad_block = bad_block.0; + bad_block.txs.push(bad_tx.clone()); + + let txid_vecs = bad_block + .txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + let merkle_tree = MerkleTree::::new(&txid_vecs); + bad_block.header.tx_merkle_root = merkle_tree.root(); + + let sort_ic = sortdb.index_conn(); + chainstate + .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) + .unwrap(); + + // make a bad microblock + let mut microblock_builder = StacksMicroblockBuilder::new( + parent_header_hash.clone(), + parent_consensus_hash.clone(), + chainstate, + &sort_ic, + BlockBuilderSettings::max_value(), + ) + .unwrap(); + + // miner should fail with just the bad tx, since it's problematic + let mblock_err = microblock_builder + .mine_next_microblock_from_txs(vec![(bad_tx.clone(), bad_tx_len)], &mblock_privk) + .unwrap_err(); + if let ChainstateError::NoTransactionsToMine = mblock_err { + } else { + panic!("Did not get NoTransactionsToMine"); + } + + let token_transfer = + make_user_stacks_transfer(&privk, 0, 200, &recipient.to_account_principal(), 123); + let tt_len = { + let mut bytes = vec![]; + token_transfer.consensus_serialize(&mut bytes).unwrap(); + bytes.len() as u64 + }; + + let mut bad_mblock = microblock_builder + .mine_next_microblock_from_txs( + vec![(token_transfer, tt_len), (bad_tx.clone(), bad_tx_len)], + &mblock_privk, + ) + .unwrap(); + + // miner shouldn't include the bad tx, since it's problematic + assert_eq!(bad_mblock.txs.len(), 1); + bad_mblock.txs.push(bad_tx.clone()); + + // force it in anyway + let txid_vecs = bad_mblock + .txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + let merkle_tree = MerkleTree::::new(&txid_vecs); + bad_mblock.header.tx_merkle_root = merkle_tree.root(); + bad_mblock.sign(&mblock_privk).unwrap(); + + (bad_block, vec![bad_mblock]) + }, + ); + + let bad_mblock = microblocks.pop().unwrap(); + let (_, _, new_consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + peer.process_stacks_epoch(&bad_block, &new_consensus_hash, &vec![]); + + // stuff them all into each possible field of NetworkResult + // p2p messages + let nk = NeighborKey { + peer_version: 1, + network_id: 2, + addrbytes: PeerAddress([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), + port: 19, + }; + let preamble = Preamble { + peer_version: 1, + network_id: 2, + seq: 3, + burn_block_height: 4, + burn_block_hash: BurnchainHeaderHash([5u8; 32]), + burn_stable_block_height: 6, + burn_stable_block_hash: BurnchainHeaderHash([7u8; 32]), + additional_data: 8, + signature: MessageSignature([9u8; 65]), + payload_len: 10, + }; + let bad_msgs = vec![ + StacksMessage { + preamble: preamble.clone(), + relayers: vec![], + payload: StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum(new_consensus_hash.clone(), bad_block.clone())], + }), + }, + StacksMessage { + preamble: preamble.clone(), + relayers: vec![], + payload: StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: StacksBlockId::new( + &new_consensus_hash, + &bad_block.block_hash(), + ), + microblocks: vec![bad_mblock.clone()], + }), + }, + StacksMessage { + preamble: preamble.clone(), + relayers: vec![], + payload: StacksMessageType::Transaction(bad_tx.clone()), + }, + ]; + let mut unsolicited = HashMap::new(); + unsolicited.insert(nk.clone(), bad_msgs.clone()); + + let mut network_result = + NetworkResult::new(0, 0, 0, 0, 0, ConsensusHash([0x01; 20]), HashMap::new()); + network_result.consume_unsolicited(unsolicited); + + assert!(network_result.has_blocks()); + assert!(network_result.has_microblocks()); + assert!(network_result.has_transactions()); + + network_result.consume_http_uploads( + bad_msgs + .into_iter() + .map(|msg| msg.payload) + .collect::>(), + ); + + assert!(network_result.has_blocks()); + assert!(network_result.has_microblocks()); + assert!(network_result.has_transactions()); + + assert_eq!(network_result.uploaded_transactions.len(), 1); + assert_eq!(network_result.uploaded_blocks.len(), 1); + assert_eq!(network_result.uploaded_microblocks.len(), 1); + assert_eq!(network_result.pushed_transactions.len(), 1); + assert_eq!(network_result.pushed_blocks.len(), 1); + assert_eq!(network_result.pushed_microblocks.len(), 1); + + network_result + .blocks + .push((new_consensus_hash.clone(), bad_block.clone(), 123)); + network_result.confirmed_microblocks.push(( + new_consensus_hash.clone(), + vec![bad_mblock.clone()], + 234, + )); + + let mut sortdb = peer.sortdb.take().unwrap(); + let (processed_blocks, processed_mblocks, relay_mblocks, bad_neighbors) = + Relayer::process_new_blocks( + &mut network_result, + &mut sortdb, + &mut peer.stacks_node.as_mut().unwrap().chainstate, + None, + ) + .unwrap(); + + // despite this data showing up in all aspects of the network result, none of it actually + // gets relayed + assert_eq!(processed_blocks.len(), 0); + assert_eq!(processed_mblocks.len(), 0); + assert_eq!(relay_mblocks.len(), 0); + assert_eq!(bad_neighbors.len(), 0); + + let txs_relayed = Relayer::process_transactions( + &mut network_result, + &sortdb, + &mut peer.stacks_node.as_mut().unwrap().chainstate, + &mut peer.mempool.as_mut().unwrap(), + None, + ) + .unwrap(); + assert_eq!(txs_relayed.len(), 0); +} + +#[test] +fn test_block_pay_to_contract_gated_at_v210() { + let mut peer_config = TestPeerConfig::new(function_name!(), 4246, 4247); + let epochs = vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 28, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ]; + peer_config.epochs = Some(epochs); + let burnchain = peer_config.burnchain.clone(); + + let mut peer = TestPeer::new(peer_config); + + let mut make_tenure = + |miner: &mut TestMiner, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + vrfproof: VRFProof, + parent_opt: Option<&StacksBlock>, + microblock_parent_opt: Option<&StacksMicroblockHeader>| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb).unwrap(); + let parent_tip = match stacks_tip_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(header_tip) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &header_tip.anchored_header.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_header_hash, + ); + + let coinbase_tx = make_coinbase_with_nonce( + miner, + parent_tip.stacks_block_height as usize, + 0, + Some(PrincipalData::Contract( + QualifiedContractIdentifier::parse("ST000000000000000000002AMW42H.bns") + .unwrap(), + )), + ); + + let mut mblock_pubkey_hash_bytes = [0u8; 20]; + mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); + + let builder = StacksBlockBuilder::make_block_builder( + &burnchain, + chainstate.mainnet, + &parent_tip, + vrfproof, + tip.total_burn, + Hash160(mblock_pubkey_hash_bytes), + ) + .unwrap(); + + let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( + builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx], + ) + .unwrap(); + + (anchored_block.0, vec![]) + }; + + // tenures 26 and 27 should fail, since the block is a pay-to-contract block + // Pay-to-contract should only be supported if the block is in epoch 2.1, which + // activates at tenure 27. + for i in 0..2 { + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + panic!("Stored pay-to-contract stacks block before epoch 2.1"); + } + Err(chainstate_error::InvalidStacksBlock(_)) => {} + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); + } + + // *now* it should succeed, since tenure 28 was in epoch 2.1 + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + assert!(x, "Failed to process valid pay-to-contract block"); + } + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); +} + +#[test] +fn test_block_versioned_smart_contract_gated_at_v210() { + let mut peer_config = TestPeerConfig::new(function_name!(), 4248, 4249); + + let initial_balances = vec![( + PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), + 1000000, + )]; + + let epochs = vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 28, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ]; + + peer_config.epochs = Some(epochs); + peer_config.initial_balances = initial_balances; + let burnchain = peer_config.burnchain.clone(); + + let mut peer = TestPeer::new(peer_config); + + let mut make_tenure = + |miner: &mut TestMiner, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + vrfproof: VRFProof, + parent_opt: Option<&StacksBlock>, + microblock_parent_opt: Option<&StacksMicroblockHeader>| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb).unwrap(); + let parent_tip = match stacks_tip_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(header_tip) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &header_tip.anchored_header.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_header_hash, + ); + + let coinbase_tx = + make_coinbase_with_nonce(miner, parent_tip.stacks_block_height as usize, 0, None); + + let versioned_contract = make_smart_contract_with_version( + miner, + 1, + tip.block_height.try_into().unwrap(), + 0, + Some(ClarityVersion::Clarity1), + Some(1000), + ); + + let mut mblock_pubkey_hash_bytes = [0u8; 20]; + mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); + + let builder = StacksBlockBuilder::make_block_builder( + &burnchain, + chainstate.mainnet, + &parent_tip, + vrfproof, + tip.total_burn, + Hash160(mblock_pubkey_hash_bytes), + ) + .unwrap(); + + let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( + builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx, versioned_contract], + ) + .unwrap(); + + eprintln!("{:?}", &anchored_block.0); + (anchored_block.0, vec![]) + }; + + // tenures 26 and 27 should fail, since the block contains a versioned smart contract. + // Versioned smart contracts should only be supported if the block is in epoch 2.1, which + // activates at tenure 27. + for i in 0..2 { + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + eprintln!("{:?}", &stacks_block); + panic!("Stored pay-to-contract stacks block before epoch 2.1"); + } + Err(chainstate_error::InvalidStacksBlock(_)) => {} + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); + } + + // *now* it should succeed, since tenure 28 was in epoch 2.1 + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + assert!(x, "Failed to process valid versioned smart contract block"); + } + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); +} + +#[test] +fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { + let mut peer_config = TestPeerConfig::new(function_name!(), 4250, 4251); + + let initial_balances = vec![( + PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), + 1000000, + )]; + + let epochs = vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 28, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ]; + + peer_config.epochs = Some(epochs); + peer_config.initial_balances = initial_balances; + let burnchain = peer_config.burnchain.clone(); + + let mut peer = TestPeer::new(peer_config); + let versioned_contract_opt: RefCell> = RefCell::new(None); + let nonce: RefCell = RefCell::new(0); + + let mut make_tenure = + |miner: &mut TestMiner, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + vrfproof: VRFProof, + parent_opt: Option<&StacksBlock>, + microblock_parent_opt: Option<&StacksMicroblockHeader>| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb).unwrap(); + let parent_tip = match stacks_tip_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(header_tip) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &header_tip.anchored_header.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_header_hash, + ); + + let next_nonce = *nonce.borrow(); + let coinbase_tx = make_coinbase_with_nonce( + miner, + parent_tip.stacks_block_height as usize, + next_nonce, + None, + ); + + let versioned_contract = make_smart_contract_with_version( + miner, + next_nonce + 1, + tip.block_height.try_into().unwrap(), + 0, + Some(ClarityVersion::Clarity1), + Some(1000), + ); + + *versioned_contract_opt.borrow_mut() = Some(versioned_contract); + *nonce.borrow_mut() = next_nonce + 1; + + let mut mblock_pubkey_hash_bytes = [0u8; 20]; + mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); + + let builder = StacksBlockBuilder::make_block_builder( + &burnchain, + chainstate.mainnet, + &parent_tip, + vrfproof, + tip.total_burn, + Hash160(mblock_pubkey_hash_bytes), + ) + .unwrap(); + + let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( + builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx], + ) + .unwrap(); + + eprintln!("{:?}", &anchored_block.0); + (anchored_block.0, vec![]) + }; + + for i in 0..2 { + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + + // the empty block should be accepted + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + assert!(x, "Did not accept valid block"); + } + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + + // process it + peer.coord.handle_new_stacks_block().unwrap(); + + // the mempool would reject a versioned contract transaction, since we're not yet at + // tenure 28 + let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); + let versioned_contract_len = versioned_contract.serialize_to_vec().len(); + match node.chainstate.will_admit_mempool_tx( + &sortdb.index_conn(), + &consensus_hash, + &stacks_block.block_hash(), + &versioned_contract, + versioned_contract_len as u64, + ) { + Err(MemPoolRejection::Other(msg)) => { + assert!(msg.find("not supported in this epoch").is_some()); + } + Err(e) => { + panic!("will_admit_mempool_tx {:?}", &e); + } + Ok(_) => { + panic!("will_admit_mempool_tx succeeded"); + } + }; + + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); + } + + // *now* it should succeed, since tenure 28 was in epoch 2.1 + let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + match Relayer::process_new_anchored_block( + &sortdb.index_conn(), + &mut node.chainstate, + &consensus_hash, + &stacks_block, + 123, + ) { + Ok(x) => { + assert!(x, "Failed to process valid versioned smart contract block"); + } + Err(e) => { + panic!("Got unexpected error {:?}", &e); + } + }; + + // process it + peer.coord.handle_new_stacks_block().unwrap(); + + // the mempool would accept a versioned contract transaction, since we're not yet at + // tenure 28 + let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); + let versioned_contract_len = versioned_contract.serialize_to_vec().len(); + match node.chainstate.will_admit_mempool_tx( + &sortdb.index_conn(), + &consensus_hash, + &stacks_block.block_hash(), + &versioned_contract, + versioned_contract_len as u64, + ) { + Err(e) => { + panic!("will_admit_mempool_tx {:?}", &e); + } + Ok(_) => {} + }; + + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); +} + +// TODO: process bans +// TODO: test sending invalid blocks-available and microblocks-available (should result in a ban) +// TODO: test sending invalid transactions (should result in a ban) +// TODO: test bandwidth limits (sending too much should result in a nack, and then a ban) diff --git a/stackslib/src/net/tests/relay/mod.rs b/stackslib/src/net/tests/relay/mod.rs new file mode 100644 index 0000000000..04e8e0fd4f --- /dev/null +++ b/stackslib/src/net/tests/relay/mod.rs @@ -0,0 +1,18 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +pub mod epoch2x; +pub mod nakamoto; From b25685817fb194e5d8c89c5cb866d1e97f55269f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:21:29 -0400 Subject: [PATCH 0261/1400] chore: move unsolicited message handling logic into its own file (net/unsolicited.rs), and implement unsolicited NakamotoBlock validation and handling --- stackslib/src/net/p2p.rs | 778 +---------------------- stackslib/src/net/unsolicited.rs | 1000 ++++++++++++++++++++++++++++++ 2 files changed, 1020 insertions(+), 758 deletions(-) create mode 100644 stackslib/src/net/unsolicited.rs diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 4e358128ff..6c82950b55 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -284,7 +284,7 @@ pub struct PeerNetwork { // work state -- we can be walking, fetching block inventories, fetching blocks, pruning, etc. pub work_state: PeerNetworkWorkState, pub nakamoto_work_state: PeerNetworkWorkState, - have_data_to_download: bool, + pub(crate) have_data_to_download: bool, // neighbor walk state pub walk: Option>, @@ -960,13 +960,15 @@ impl PeerNetwork { })? } - /// Broadcast a message to a list of neighbors + /// Broadcast a message to a list of neighbors. + /// Neighbors in the `relay_hints` vec will *not* receive data, since they were the one(s) that + /// sent this peer the message in the first place. pub fn broadcast_message( &mut self, mut neighbor_keys: Vec, relay_hints: Vec, message_payload: StacksMessageType, - ) -> () { + ) { debug!( "{:?}: Will broadcast '{}' to up to {} neighbors; relayed by {:?}", &self.local_peer, @@ -1292,7 +1294,7 @@ impl PeerNetwork { match request { NetworkRequest::Ban(neighbor_keys) => { for neighbor_key in neighbor_keys.iter() { - debug!("Request to ban {:?}", neighbor_key); + info!("Request to ban {:?}", neighbor_key); match self.events.get(neighbor_key) { Some(event_id) => { debug!("Will ban {:?} (event {})", neighbor_key, event_id); @@ -1344,6 +1346,18 @@ impl PeerNetwork { } Ok(all_neighbors.into_iter().collect()) } + StacksMessageType::NakamotoBlocks(ref data) => { + // send to each neighbor that needs one + let mut all_neighbors = HashSet::new(); + for nakamoto_block in data.blocks.iter() { + let mut neighbors = + self.sample_broadcast_peers(&relay_hints, nakamoto_block)?; + for nk in neighbors.drain(..) { + all_neighbors.insert(nk); + } + } + Ok(all_neighbors.into_iter().collect()) + } StacksMessageType::Transaction(ref data) => { self.sample_broadcast_peers(&relay_hints, data) } @@ -4366,759 +4380,6 @@ impl PeerNetwork { Some(outbound_neighbor_key) } - /// Update a peer's inventory state to indicate that the given block is available. - /// If updated, return the sortition height of the bit in the inv that was set. - /// Only valid for epoch 2.x - fn handle_unsolicited_inv_update_epoch2x( - &mut self, - sortdb: &SortitionDB, - event_id: usize, - outbound_neighbor_key: &NeighborKey, - consensus_hash: &ConsensusHash, - microblocks: bool, - ) -> Result, net_error> { - let epoch = self.get_current_epoch(); - if epoch.epoch_id >= StacksEpochId::Epoch30 { - info!( - "{:?}: Ban peer event {} for sending an inv 2.x update for {} in epoch 3.x", - event_id, - self.get_local_peer(), - consensus_hash - ); - self.bans.insert(event_id); - - if let Some(outbound_event_id) = self.events.get(&outbound_neighbor_key) { - self.bans.insert(*outbound_event_id); - } - return Ok(None); - } - - let block_sortition_height = match self.inv_state { - Some(ref mut inv) => { - let res = if microblocks { - inv.set_microblocks_available( - &self.burnchain, - outbound_neighbor_key, - sortdb, - consensus_hash, - ) - } else { - inv.set_block_available( - &self.burnchain, - outbound_neighbor_key, - sortdb, - consensus_hash, - ) - }; - - match res { - Ok(Some(block_height)) => block_height, - Ok(None) => { - debug!( - "{:?}: We already know the inventory state in {} for {}", - &self.local_peer, outbound_neighbor_key, consensus_hash - ); - return Ok(None); - } - Err(net_error::NotFoundError) => { - // is this remote node simply ahead of us? - if let Some(convo) = self.peers.get(&event_id) { - if self.chain_view.burn_block_height < convo.burnchain_tip_height { - debug!("{:?}: Unrecognized consensus hash {}; it is possible that {} is ahead of us", &self.local_peer, consensus_hash, outbound_neighbor_key); - return Err(net_error::NotFoundError); - } - } - // not ahead of us -- it's a bad consensus hash - debug!("{:?}: Unrecognized consensus hash {}; assuming that {} has a different chain view", &self.local_peer, consensus_hash, outbound_neighbor_key); - return Ok(None); - } - Err(net_error::InvalidMessage) => { - // punish this peer - info!( - "Peer {:?} sent an invalid update for {}", - &outbound_neighbor_key, - if microblocks { - "streamed microblocks" - } else { - "blocks" - } - ); - self.bans.insert(event_id); - - if let Some(outbound_event_id) = self.events.get(&outbound_neighbor_key) { - self.bans.insert(*outbound_event_id); - } - return Ok(None); - } - Err(e) => { - warn!( - "Failed to update inv state for {:?}: {:?}", - &outbound_neighbor_key, &e - ); - return Ok(None); - } - } - } - None => { - return Ok(None); - } - }; - Ok(Some(block_sortition_height)) - } - - /// Buffer a message for re-processing once the burnchain view updates - fn buffer_data_message(&mut self, event_id: usize, msg: StacksMessage) -> () { - if let Some(msgs) = self.pending_messages.get_mut(&event_id) { - // check limits: - // at most 1 BlocksAvailable - // at most 1 MicroblocksAvailable - // at most 1 BlocksData - // at most $self.connection_opts.max_buffered_microblocks MicroblocksDatas - let mut blocks_available = 0; - let mut microblocks_available = 0; - let mut blocks_data = 0; - let mut microblocks_data = 0; - for msg in msgs.iter() { - match &msg.payload { - StacksMessageType::BlocksAvailable(_) => { - blocks_available += 1; - } - StacksMessageType::MicroblocksAvailable(_) => { - microblocks_available += 1; - } - StacksMessageType::Blocks(_) => { - blocks_data += 1; - } - StacksMessageType::Microblocks(_) => { - microblocks_data += 1; - } - _ => {} - } - } - - if let StacksMessageType::BlocksAvailable(_) = &msg.payload { - if blocks_available >= self.connection_opts.max_buffered_blocks_available { - debug!( - "{:?}: Drop BlocksAvailable from event {} -- already have {} buffered", - &self.local_peer, event_id, blocks_available - ); - return; - } - } - if let StacksMessageType::MicroblocksAvailable(_) = &msg.payload { - if microblocks_available >= self.connection_opts.max_buffered_microblocks_available - { - debug!( - "{:?}: Drop MicroblocksAvailable from event {} -- already have {} buffered", - &self.local_peer, event_id, microblocks_available - ); - return; - } - } - if let StacksMessageType::Blocks(_) = &msg.payload { - if blocks_data >= self.connection_opts.max_buffered_blocks { - debug!( - "{:?}: Drop BlocksData from event {} -- already have {} buffered", - &self.local_peer, event_id, blocks_data - ); - return; - } - } - if let StacksMessageType::Microblocks(_) = &msg.payload { - if microblocks_data >= self.connection_opts.max_buffered_microblocks { - debug!( - "{:?}: Drop MicroblocksData from event {} -- already have {} buffered", - &self.local_peer, event_id, microblocks_data - ); - return; - } - } - msgs.push(msg); - debug!( - "{:?}: Event {} has {} messages buffered", - &self.local_peer, - event_id, - msgs.len() - ); - } else { - self.pending_messages.insert(event_id, vec![msg]); - debug!( - "{:?}: Event {} has 1 messages buffered", - &self.local_peer, event_id - ); - } - } - - /// Do we need a block or microblock stream, given its sortition's consensus hash? - fn need_block_or_microblock_stream( - sortdb: &SortitionDB, - chainstate: &StacksChainState, - consensus_hash: &ConsensusHash, - is_microblock: bool, - ) -> Result { - let sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &consensus_hash)? - .ok_or(chainstate_error::NoSuchBlockError)?; - let block_hash_opt = if sn.sortition { - Some(sn.winning_stacks_block_hash) - } else { - None - }; - - let inv = chainstate.get_blocks_inventory(&[(consensus_hash.clone(), block_hash_opt)])?; - if is_microblock { - // checking for microblock absence - Ok(inv.microblocks_bitvec[0] == 0) - } else { - // checking for block absence - Ok(inv.block_bitvec[0] == 0) - } - } - - /// Handle unsolicited BlocksAvailable. - /// Update our inv for this peer. - /// Mask errors. - /// Return whether or not we need to buffer this message - fn handle_unsolicited_BlocksAvailable( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - event_id: usize, - new_blocks: &BlocksAvailableData, - ibd: bool, - buffer: bool, - ) -> bool { - let outbound_neighbor_key = match self.find_outbound_neighbor(event_id) { - Some(onk) => onk, - None => { - return false; - } - }; - - debug!( - "{:?}: Process BlocksAvailable from {:?} with {} entries", - &self.local_peer, - outbound_neighbor_key, - new_blocks.available.len() - ); - - let mut to_buffer = false; - for (consensus_hash, block_hash) in new_blocks.available.iter() { - let block_sortition_height = match self.handle_unsolicited_inv_update_epoch2x( - sortdb, - event_id, - &outbound_neighbor_key, - consensus_hash, - false, - ) { - Ok(Some(bsh)) => bsh, - Ok(None) => { - continue; - } - Err(net_error::NotFoundError) => { - if buffer { - debug!("{:?}: Will buffer BlocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); - to_buffer = true; - } - continue; - } - Err(e) => { - info!( - "{:?}: Failed to handle BlocksAvailable({}/{}) from {}: {:?}", - &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e - ); - continue; - } - }; - - let need_block = match PeerNetwork::need_block_or_microblock_stream( - sortdb, - chainstate, - &consensus_hash, - false, - ) { - Ok(x) => x, - Err(e) => { - warn!( - "Failed to determine if we need block for consensus hash {}: {:?}", - &consensus_hash, &e - ); - false - } - }; - - debug!( - "Need block {}/{}? {}", - &consensus_hash, &block_hash, need_block - ); - - if need_block { - // have the downloader request this block if it's new and we don't have it - match self.block_downloader { - Some(ref mut downloader) => { - downloader.hint_block_sortition_height_available( - block_sortition_height, - ibd, - need_block, - ); - - // advance straight to download state if we're in inv state - if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.local_peer, block_sortition_height); - } - self.have_data_to_download = true; - } - None => {} - } - } - } - - to_buffer - } - - /// Handle unsolicited MicroblocksAvailable. - /// Update our inv for this peer. - /// Mask errors. - /// Return whether or not we need to buffer this message - fn handle_unsolicited_MicroblocksAvailable( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - event_id: usize, - new_mblocks: &BlocksAvailableData, - ibd: bool, - buffer: bool, - ) -> bool { - let outbound_neighbor_key = match self.find_outbound_neighbor(event_id) { - Some(onk) => onk, - None => { - return false; - } - }; - - debug!( - "{:?}: Process MicroblocksAvailable from {:?} with {} entries", - &self.local_peer, - outbound_neighbor_key, - new_mblocks.available.len() - ); - - let mut to_buffer = false; - for (consensus_hash, block_hash) in new_mblocks.available.iter() { - let mblock_sortition_height = match self.handle_unsolicited_inv_update_epoch2x( - sortdb, - event_id, - &outbound_neighbor_key, - consensus_hash, - true, - ) { - Ok(Some(bsh)) => bsh, - Ok(None) => { - continue; - } - Err(net_error::NotFoundError) => { - if buffer { - debug!("{:?}: Will buffer MicroblocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); - to_buffer = true; - } - continue; - } - Err(e) => { - info!( - "{:?}: Failed to handle MicroblocksAvailable({}/{}) from {}: {:?}", - &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e - ); - continue; - } - }; - - let need_microblock_stream = match PeerNetwork::need_block_or_microblock_stream( - sortdb, - chainstate, - &consensus_hash, - true, - ) { - Ok(x) => x, - Err(e) => { - warn!("Failed to determine if we need microblock stream for consensus hash {}: {:?}", &consensus_hash, &e); - false - } - }; - - debug!( - "Need microblock stream {}/{}? {}", - &consensus_hash, &block_hash, need_microblock_stream - ); - - if need_microblock_stream { - // have the downloader request this microblock stream if it's new to us - match self.block_downloader { - Some(ref mut downloader) => { - downloader.hint_microblock_sortition_height_available( - mblock_sortition_height, - ibd, - need_microblock_stream, - ); - - // advance straight to download state if we're in inv state - if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of microblock stream {}", &self.local_peer, mblock_sortition_height); - } - self.have_data_to_download = true; - } - None => {} - } - } - } - to_buffer - } - - /// Handle unsolicited BlocksData. - /// Don't (yet) validate the data, but do update our inv for the peer that sent it, if we have - /// an outbound connection to that peer. Accept the blocks data either way if it corresponds - /// to a winning sortition -- this will cause the blocks data to be fed into the relayer, which - /// will then decide whether or not it needs to be stored and/or forwarded. - /// Mask errors. - fn handle_unsolicited_BlocksData( - &mut self, - sortdb: &SortitionDB, - event_id: usize, - new_blocks: &BlocksData, - buffer: bool, - ) -> bool { - let (remote_neighbor_key, remote_is_authenticated) = match self.peers.get(&event_id) { - Some(convo) => (convo.to_neighbor_key(), convo.is_authenticated()), - None => { - test_debug!( - "{:?}: No such neighbor event={}", - &self.local_peer, - event_id - ); - return false; - } - }; - - if !remote_is_authenticated { - // drop -- a correct peer will have authenticated before sending this message - test_debug!( - "{:?}: Drop unauthenticated BlocksData from {:?}", - &self.local_peer, - &remote_neighbor_key - ); - return false; - } - - let outbound_neighbor_key_opt = self.find_outbound_neighbor(event_id); - - debug!( - "{:?}: Process BlocksData from {:?} with {} entries", - &self.local_peer, - outbound_neighbor_key_opt - .as_ref() - .unwrap_or(&remote_neighbor_key), - new_blocks.blocks.len() - ); - - let mut to_buffer = false; - - for BlocksDatum(consensus_hash, block) in new_blocks.blocks.iter() { - let sn = match SortitionDB::get_block_snapshot_consensus( - &sortdb.conn(), - &consensus_hash, - ) { - Ok(Some(sn)) => sn, - Ok(None) => { - if buffer { - debug!( - "{:?}: Will buffer unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", - &self.local_peer, - &consensus_hash, - &block.block_hash(), - StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block.block_hash() - ) - ); - to_buffer = true; - } else { - debug!( - "{:?}: Will drop unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", - &self.local_peer, - &consensus_hash, - &block.block_hash(), - StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block.block_hash() - ) - ); - } - continue; - } - Err(e) => { - info!( - "{:?}: Failed to query block snapshot for {}: {:?}", - &self.local_peer, consensus_hash, &e - ); - continue; - } - }; - - if !sn.pox_valid { - info!( - "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", - &self.local_peer, consensus_hash - ); - continue; - } - - if sn.winning_stacks_block_hash != block.block_hash() { - info!( - "{:?}: Ignoring block {} -- winning block was {} (sortition: {})", - &self.local_peer, - block.block_hash(), - sn.winning_stacks_block_hash, - sn.sortition - ); - continue; - } - - // only bother updating the inventory for this event's peer if we have an outbound - // connection to it. - if let Some(outbound_neighbor_key) = outbound_neighbor_key_opt.as_ref() { - let _ = self.handle_unsolicited_inv_update_epoch2x( - sortdb, - event_id, - &outbound_neighbor_key, - &sn.consensus_hash, - false, - ); - } - } - - to_buffer - } - - /// Handle unsolicited MicroblocksData. - /// Returns whether or not to buffer (if buffer is true) - /// Returns whether or not to pass to the relayer (if buffer is false). - fn handle_unsolicited_MicroblocksData( - &mut self, - chainstate: &StacksChainState, - event_id: usize, - new_microblocks: &MicroblocksData, - buffer: bool, - ) -> bool { - let (remote_neighbor_key, remote_is_authenticated) = match self.peers.get(&event_id) { - Some(convo) => (convo.to_neighbor_key(), convo.is_authenticated()), - None => { - test_debug!( - "{:?}: No such neighbor event={}", - &self.local_peer, - event_id - ); - return false; - } - }; - - if !remote_is_authenticated { - // drop -- a correct peer will have authenticated before sending this message - test_debug!( - "{:?}: Drop unauthenticated MicroblocksData from {:?}", - &self.local_peer, - &remote_neighbor_key - ); - return false; - } - - let outbound_neighbor_key_opt = self.find_outbound_neighbor(event_id); - - debug!( - "{:?}: Process MicroblocksData from {:?} for {} with {} entries", - &self.local_peer, - outbound_neighbor_key_opt - .as_ref() - .unwrap_or(&remote_neighbor_key), - &new_microblocks.index_anchor_block, - new_microblocks.microblocks.len() - ); - - // do we have the associated anchored block? - match chainstate.get_block_header_hashes(&new_microblocks.index_anchor_block) { - Ok(Some(_)) => { - // yup; can process now - debug!("{:?}: have microblock parent anchored block {}, so can process its microblocks", &self.local_peer, &new_microblocks.index_anchor_block); - !buffer - } - Ok(None) => { - if buffer { - debug!( - "{:?}: Will buffer unsolicited MicroblocksData({})", - &self.local_peer, &new_microblocks.index_anchor_block - ); - true - } else { - debug!( - "{:?}: Will not buffer unsolicited MicroblocksData({})", - &self.local_peer, &new_microblocks.index_anchor_block - ); - false - } - } - Err(e) => { - warn!( - "{:?}: Failed to get header hashes for {:?}: {:?}", - &self.local_peer, &new_microblocks.index_anchor_block, &e - ); - false - } - } - } - - /// Returns (true, x) if we should buffer the message and try again - /// Returns (x, true) if the relayer should receive the message - fn handle_unsolicited_message( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - event_id: usize, - preamble: &Preamble, - payload: &StacksMessageType, - ibd: bool, - buffer: bool, - ) -> (bool, bool) { - match payload { - // Update our inv state for this peer, but only do so if we have an - // outbound connection to it and it's authenticated (we don't synchronize inv - // state with inbound peers). Since we will have received this message - // from an _inbound_ conversation, we need to find the reciprocal _outbound_ - // conversation and use _that_ conversation's neighbor key to identify - // which inventory we need to update. - StacksMessageType::BlocksAvailable(ref new_blocks) => { - let to_buffer = self.handle_unsolicited_BlocksAvailable( - sortdb, chainstate, event_id, new_blocks, ibd, buffer, - ); - (to_buffer, false) - } - StacksMessageType::MicroblocksAvailable(ref new_mblocks) => { - let to_buffer = self.handle_unsolicited_MicroblocksAvailable( - sortdb, - chainstate, - event_id, - new_mblocks, - ibd, - buffer, - ); - (to_buffer, false) - } - StacksMessageType::Blocks(ref new_blocks) => { - // update inv state for this peer - let to_buffer = - self.handle_unsolicited_BlocksData(sortdb, event_id, new_blocks, buffer); - - // forward to relayer for processing - (to_buffer, true) - } - StacksMessageType::Microblocks(ref new_mblocks) => { - let to_buffer = self.handle_unsolicited_MicroblocksData( - chainstate, - event_id, - new_mblocks, - buffer, - ); - - // only forward to the relayer if we don't need to buffer it. - (to_buffer, true) - } - StacksMessageType::StackerDBPushChunk(ref data) => { - match self.handle_unsolicited_StackerDBPushChunk(event_id, preamble, data) { - Ok(x) => { - // don't buffer, but do reject if invalid - (false, x) - } - Err(e) => { - info!( - "{:?}: failed to handle unsolicited {:?}: {:?}", - &self.local_peer, payload, &e - ); - (false, false) - } - } - } - _ => (false, true), - } - } - - /// Handle unsolicited messages propagated up to us from our ongoing ConversationP2Ps. - /// Return messages that we couldn't handle here, but key them by neighbor, not event. - /// Drop invalid messages. - /// If buffer is true, then re-try handling this message once the burnchain view advances. - fn handle_unsolicited_messages( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - unsolicited: HashMap>, - ibd: bool, - buffer: bool, - ) -> HashMap> { - let mut unhandled: HashMap> = HashMap::new(); - for (event_id, messages) in unsolicited.into_iter() { - if messages.len() == 0 { - // no messages for this event - continue; - } - - let neighbor_key = if let Some(convo) = self.peers.get(&event_id) { - convo.to_neighbor_key() - } else { - debug!( - "{:?}: No longer such neighbor event={}, dropping {} unsolicited messages", - &self.local_peer, - event_id, - messages.len() - ); - continue; - }; - - debug!("{:?}: Process {} unsolicited messages from {:?}", &self.local_peer, messages.len(), &neighbor_key; "buffer" => %buffer); - - for message in messages.into_iter() { - if !buffer { - debug!( - "{:?}: Re-try handling buffered message {} from {:?}", - &self.local_peer, - &message.payload.get_message_description(), - &neighbor_key - ); - } - let (to_buffer, relay) = self.handle_unsolicited_message( - sortdb, - chainstate, - event_id, - &message.preamble, - &message.payload, - ibd, - buffer, - ); - if buffer && to_buffer { - self.buffer_data_message(event_id, message); - } else if relay { - // forward to relayer for processing - debug!( - "{:?}: Will forward message {} from {:?} to relayer", - &self.local_peer, - &message.payload.get_message_description(), - &neighbor_key - ); - if let Some(msgs) = unhandled.get_mut(&neighbor_key) { - msgs.push(message); - } else { - unhandled.insert(neighbor_key.clone(), vec![message]); - } - } - } - } - unhandled - } - /// Find unauthenticated inbound conversations fn find_unauthenticated_inbound_convos(&self) -> Vec { let mut ret = vec![]; @@ -6047,6 +5308,7 @@ impl PeerNetwork { self.num_state_machine_passes, self.num_inv_sync_passes, self.num_downloader_passes, + self.peers.len(), self.chain_view.burn_block_height, self.chain_view.rc_consensus_hash.clone(), self.get_stacker_db_configs_owned(), @@ -6126,8 +5388,8 @@ mod test { use crate::net::atlas::*; use crate::net::codec::*; use crate::net::db::*; - use crate::net::relay::test::make_contract_tx; use crate::net::test::*; + use crate::net::tests::relay::epoch2x::make_contract_tx; use crate::net::*; use crate::util_lib::test::*; diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs new file mode 100644 index 0000000000..29d9009f6f --- /dev/null +++ b/stackslib/src/net/unsolicited.rs @@ -0,0 +1,1000 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::HashMap; + +use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash}; + +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{Error as ChainstateError, StacksBlockHeader}; +use crate::net::p2p::{PeerNetwork, PeerNetworkWorkState}; +use crate::net::{ + BlocksAvailableData, BlocksData, BlocksDatum, Error as NetError, MicroblocksData, + NakamotoBlocksData, NeighborKey, Preamble, StacksMessage, StacksMessageType, +}; + +/// This module contains all of the code needed to handle unsolicited messages -- that is, messages +/// that get pushed to us. These include: +/// +/// * BlocksAvailable (epoch 2.x) +/// * MicroblocksAvailable (epoch 2.x) +/// * BlocksData (epoch 2.x) +/// * NakamotoBlocksData (epoch 3.x) +/// +/// Normally, the PeerNetwork will attempt to validate each message and pass it to the Relayer via +/// a NetworkResult. However, some kinds of messages (such as these) cannot be always be +/// validated, because validation depends on chainstate data that is not yet available. For +/// example, if this node is behind the burnchain chain tip, it will be unable to verify blocks +/// pushed to it for sortitions that have yet to be processed locally. +/// +/// In the event that a message cannot be validated, the PeerNetwork will instead store these +/// messages internally (in `self.pending_messages`), and try to validate them again once the +/// burnchain view changes. +/// +/// Transactions are not considered here, but are handled separately with the mempool +/// synchronization state machine. + +impl PeerNetwork { + /// Check that the sender is authenticated. + /// Returns Some(remote sender address) if so + /// Returns None otherwise + fn check_peer_authenticated(&self, event_id: usize) -> Option { + let Some((remote_neighbor_key, remote_is_authenticated)) = self + .peers + .get(&event_id) + .map(|convo| (convo.to_neighbor_key(), convo.is_authenticated())) + else { + test_debug!( + "{:?}: No such neighbor event={}", + &self.local_peer, + event_id + ); + return None; + }; + + if !remote_is_authenticated { + // drop -- a correct peer will have authenticated before sending this message + test_debug!( + "{:?}: Unauthenticated neighbor {:?}", + &self.local_peer, + &remote_neighbor_key + ); + return None; + } + Some(remote_neighbor_key) + } + + /// Update a peer's inventory state to indicate that the given block is available. + /// If updated, return the sortition height of the bit in the inv that was set. + /// Only valid for epoch 2.x + fn handle_unsolicited_inv_update_epoch2x( + &mut self, + sortdb: &SortitionDB, + event_id: usize, + outbound_neighbor_key: &NeighborKey, + consensus_hash: &ConsensusHash, + microblocks: bool, + ) -> Result, NetError> { + let Some(inv) = self.inv_state.as_mut() else { + return Ok(None); + }; + + let res = if microblocks { + inv.set_microblocks_available( + &self.burnchain, + outbound_neighbor_key, + sortdb, + consensus_hash, + ) + } else { + inv.set_block_available( + &self.burnchain, + outbound_neighbor_key, + sortdb, + consensus_hash, + ) + }; + + let block_sortition_height = match res { + Ok(Some(block_height)) => block_height, + Ok(None) => { + debug!( + "{:?}: We already know the inventory state in {} for {}", + &self.local_peer, outbound_neighbor_key, consensus_hash + ); + return Ok(None); + } + Err(NetError::NotFoundError) => { + // is this remote node simply ahead of us? + if let Some(convo) = self.peers.get(&event_id) { + if self.chain_view.burn_block_height < convo.burnchain_tip_height { + debug!("{:?}: Unrecognized consensus hash {}; it is possible that {} is ahead of us", &self.local_peer, consensus_hash, outbound_neighbor_key); + return Err(NetError::NotFoundError); + } + } + // not ahead of us -- it's a bad consensus hash + debug!("{:?}: Unrecognized consensus hash {}; assuming that {} has a different chain view", &self.local_peer, consensus_hash, outbound_neighbor_key); + return Ok(None); + } + Err(NetError::InvalidMessage) => { + // punish this peer + info!( + "Peer {:?} sent an invalid update for {}", + &outbound_neighbor_key, + if microblocks { + "streamed microblocks" + } else { + "blocks" + } + ); + self.bans.insert(event_id); + + if let Some(outbound_event_id) = self.events.get(&outbound_neighbor_key) { + self.bans.insert(*outbound_event_id); + } + return Ok(None); + } + Err(e) => { + warn!( + "Failed to update inv state for {:?}: {:?}", + &outbound_neighbor_key, &e + ); + return Ok(None); + } + }; + Ok(Some(block_sortition_height)) + } + + /// Buffer a message for re-processing once the burnchain view updates. + /// If there is no space for the message, then silently drop it. + fn buffer_data_message(&mut self, event_id: usize, msg: StacksMessage) { + let Some(msgs) = self.pending_messages.get_mut(&event_id) else { + self.pending_messages.insert(event_id, vec![msg]); + debug!( + "{:?}: Event {} has 1 messages buffered", + &self.local_peer, event_id + ); + return; + }; + + // check limits against connection opts, and if the limit is not met, then buffer up the + // message. + let mut blocks_available = 0; + let mut microblocks_available = 0; + let mut blocks_data = 0; + let mut microblocks_data = 0; + let mut nakamoto_blocks_data = 0; + for msg in msgs.iter() { + match &msg.payload { + StacksMessageType::BlocksAvailable(_) => { + blocks_available += 1; + if blocks_available >= self.connection_opts.max_buffered_blocks_available { + debug!( + "{:?}: Drop BlocksAvailable from event {} -- already have {} buffered", + &self.local_peer, event_id, blocks_available + ); + return; + } + } + StacksMessageType::MicroblocksAvailable(_) => { + microblocks_available += 1; + if microblocks_available + >= self.connection_opts.max_buffered_microblocks_available + { + debug!( + "{:?}: Drop MicroblocksAvailable from event {} -- already have {} buffered", + &self.local_peer, event_id, microblocks_available + ); + return; + } + } + StacksMessageType::Blocks(_) => { + blocks_data += 1; + if blocks_data >= self.connection_opts.max_buffered_blocks { + debug!( + "{:?}: Drop BlocksData from event {} -- already have {} buffered", + &self.local_peer, event_id, blocks_data + ); + return; + } + } + StacksMessageType::Microblocks(_) => { + microblocks_data += 1; + if microblocks_data >= self.connection_opts.max_buffered_microblocks { + debug!( + "{:?}: Drop MicroblocksData from event {} -- already have {} buffered", + &self.local_peer, event_id, microblocks_data + ); + return; + } + } + StacksMessageType::NakamotoBlocks(_) => { + nakamoto_blocks_data += 1; + if nakamoto_blocks_data >= self.connection_opts.max_buffered_nakamoto_blocks { + debug!( + "{:?}: Drop NakamotoBlocksData from event {} -- already have {} buffered", + &self.local_peer, event_id, nakamoto_blocks_data + ); + return; + } + } + _ => {} + } + } + + msgs.push(msg); + debug!( + "{:?}: Event {} has {} messages buffered", + &self.local_peer, + event_id, + msgs.len() + ); + } + + /// Do we need a block or microblock stream, given its sortition's consensus hash? + fn need_block_or_microblock_stream( + sortdb: &SortitionDB, + chainstate: &StacksChainState, + consensus_hash: &ConsensusHash, + is_microblock: bool, + ) -> Result { + let sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &consensus_hash)? + .ok_or(ChainstateError::NoSuchBlockError)?; + let block_hash_opt = if sn.sortition { + Some(sn.winning_stacks_block_hash) + } else { + None + }; + + let inv = chainstate.get_blocks_inventory(&[(consensus_hash.clone(), block_hash_opt)])?; + if is_microblock { + // checking for microblock absence + Ok(inv.microblocks_bitvec[0] == 0) + } else { + // checking for block absence + Ok(inv.block_bitvec[0] == 0) + } + } + + /// Handle unsolicited BlocksAvailable. If it is valid, and it represents a block that this + /// peer does not have, then hint to the epoch2x downloader that it needs to go and fetch it. + /// Also, update this peer's copy of the remote sender's inv to indicate that it has the block, + /// so the downloader can eventually request the block regardless of whether or not the hint is + /// effective. + /// + /// This function only accepts BlocksAvailable messages from outbound peers, since we only + /// track inventories for outbound peers. + /// + /// The caller can call this in one of two ways: with `buffer` set to `true` or `false`. If + /// `buffer` is `true`, then the caller is asking to know if the message can be buffered if it + /// cannot be handled. If it is instead `false`, then the caller is asking to simply try and + /// handle the given message. In both cases, the blocks' validity will be checked against the + /// sortition DB, and if they correspond to real sortitions, then the remote peer's inventory + /// will be updated and the local peer's downloader will be alerted to this block. + /// + /// Errors pertaining to the validity of the message are logged but not returned. + fn handle_unsolicited_BlocksAvailable( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + event_id: usize, + new_blocks: &BlocksAvailableData, + ibd: bool, + buffer: bool, + ) -> bool { + let Some(outbound_neighbor_key) = self.find_outbound_neighbor(event_id) else { + // we only accept BlocksAvailable from outbound peers, since we only crawl invs from + // outbound peers. + return false; + }; + + debug!( + "{:?}: Process BlocksAvailable from {:?} with {} entries", + &self.local_peer, + &outbound_neighbor_key, + new_blocks.available.len() + ); + + let mut to_buffer = false; + for (consensus_hash, block_hash) in new_blocks.available.iter() { + let block_sortition_height = match self.handle_unsolicited_inv_update_epoch2x( + sortdb, + event_id, + &outbound_neighbor_key, + consensus_hash, + false, + ) { + Ok(Some(bsh)) => bsh, + Ok(None) => { + continue; + } + Err(NetError::NotFoundError) => { + if buffer { + debug!("{:?}: Will buffer BlocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); + to_buffer = true; + } + continue; + } + Err(e) => { + info!( + "{:?}: Failed to handle BlocksAvailable({}/{}) from {}: {:?}", + &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e + ); + continue; + } + }; + + let need_block = match PeerNetwork::need_block_or_microblock_stream( + sortdb, + chainstate, + &consensus_hash, + false, + ) { + Ok(x) => x, + Err(e) => { + warn!( + "Failed to determine if we need block for consensus hash {}: {:?}", + &consensus_hash, &e + ); + false + } + }; + + debug!( + "Need block {}/{}? {}", + &consensus_hash, &block_hash, need_block + ); + + if need_block { + // have the downloader request this block if it's new and we don't have it + match self.block_downloader { + Some(ref mut downloader) => { + downloader.hint_block_sortition_height_available( + block_sortition_height, + ibd, + need_block, + ); + + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.local_peer, block_sortition_height); + } + self.have_data_to_download = true; + } + None => {} + } + } + } + + to_buffer + } + + /// Handle unsolicited MicroblocksAvailable. If it is valid, and it represents a microblock stream that this + /// peer does not have, then hint to the epoch2x downloader that it needs to go and fetch it. + /// Also, update this peer's copy of the remote sender's inv to indicate that it has the stream, + /// so the downloader can eventually request the stream regardless of whether or not the hint is + /// effective. + /// + /// This function only accepts MicroblocksAvailable messages from outbound peers, since we only + /// track inventories for outbound peers. + /// + /// The caller can call this in one of two ways: with `buffer` set to `true` or `false`. If + /// `buffer` is `true`, then the caller is asking to know if the message can be buffered if it + /// cannot be handled. If it is instead `false`, then the caller is asking to simply try and + /// handle the given message. In both cases, the remote peer's inventory will be updated and + /// the local peer's downloader will be alerted to the presence of these microblocks. + /// + /// Errors pertaining to the validity of the message are logged but not returned. + /// + /// Return whether or not we need to buffer this message for subsequent consideration. + fn handle_unsolicited_MicroblocksAvailable( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + event_id: usize, + new_mblocks: &BlocksAvailableData, + ibd: bool, + buffer: bool, + ) -> bool { + let Some(outbound_neighbor_key) = self.find_outbound_neighbor(event_id) else { + return false; + }; + + debug!( + "{:?}: Process MicroblocksAvailable from {:?} with {} entries", + &self.local_peer, + outbound_neighbor_key, + new_mblocks.available.len() + ); + + let mut to_buffer = false; + for (consensus_hash, block_hash) in new_mblocks.available.iter() { + let mblock_sortition_height = match self.handle_unsolicited_inv_update_epoch2x( + sortdb, + event_id, + &outbound_neighbor_key, + consensus_hash, + true, + ) { + Ok(Some(bsh)) => bsh, + Ok(None) => { + continue; + } + Err(NetError::NotFoundError) => { + if buffer { + debug!("{:?}: Will buffer MicroblocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); + to_buffer = true; + } + continue; + } + Err(e) => { + info!( + "{:?}: Failed to handle MicroblocksAvailable({}/{}) from {:?}: {:?}", + &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e + ); + continue; + } + }; + + let need_microblock_stream = match PeerNetwork::need_block_or_microblock_stream( + sortdb, + chainstate, + &consensus_hash, + true, + ) { + Ok(x) => x, + Err(e) => { + warn!("Failed to determine if we need microblock stream for consensus hash {}: {:?}", &consensus_hash, &e); + false + } + }; + + debug!( + "Need microblock stream {}/{}? {}", + &consensus_hash, &block_hash, need_microblock_stream + ); + + if need_microblock_stream { + // have the downloader request this microblock stream if it's new to us + if let Some(downloader) = self.block_downloader.as_mut() { + downloader.hint_microblock_sortition_height_available( + mblock_sortition_height, + ibd, + need_microblock_stream, + ); + + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of microblock stream {}", &self.local_peer, mblock_sortition_height); + } + self.have_data_to_download = true; + } + } + } + to_buffer + } + + /// Handle unsolicited BlocksData. + /// + /// Don't (yet) validate the data, but do update our inv for the peer that sent it, if we have + /// an outbound connection to that peer. + /// + /// Log but do nothing with errors in validation. + /// + /// The caller can call this in one of two ways: with `buffer` set to `true` or `false`. If + /// `buffer` is `true`, then the caller is asking to know if the message can be buffered if it + /// cannot be handled. If it is instead `false`, then the caller is asking to simply try and + /// handle the given message. In both cases, the block will be checked against the local + /// sortition DB, and if it corresponds to a sortition, the remote peer's inventory will be + /// updated to reflect that it has it. + /// + /// Returns true if we have to buffer this message; false if not. + fn handle_unsolicited_BlocksData( + &mut self, + sortdb: &SortitionDB, + event_id: usize, + new_blocks: &BlocksData, + buffer: bool, + ) -> bool { + let outbound_neighbor_key_opt = self.find_outbound_neighbor(event_id); + + debug!( + "{:?}: Process BlocksData from {:?} with {} entries", + &self.local_peer, + outbound_neighbor_key_opt + .clone() + .or_else(|| { self.check_peer_authenticated(event_id) }), + new_blocks.blocks.len() + ); + + let mut to_buffer = false; + + for BlocksDatum(consensus_hash, block) in new_blocks.blocks.iter() { + let sn = match SortitionDB::get_block_snapshot_consensus( + &sortdb.conn(), + &consensus_hash, + ) { + Ok(Some(sn)) => sn, + Ok(None) => { + if buffer { + debug!( + "{:?}: Will buffer unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", + &self.local_peer, + &consensus_hash, + &block.block_hash(), + StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &block.block_hash() + ) + ); + to_buffer = true; + } else { + debug!( + "{:?}: Will drop unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", + &self.local_peer, + &consensus_hash, + &block.block_hash(), + StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &block.block_hash() + ) + ); + } + continue; + } + Err(e) => { + info!( + "{:?}: Failed to query block snapshot for {}: {:?}", + &self.local_peer, consensus_hash, &e + ); + continue; + } + }; + + if !sn.pox_valid { + info!( + "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", + &self.local_peer, consensus_hash + ); + continue; + } + + if sn.winning_stacks_block_hash != block.block_hash() { + info!( + "{:?}: Ignoring block {} -- winning block was {} (sortition: {})", + &self.local_peer, + block.block_hash(), + sn.winning_stacks_block_hash, + sn.sortition + ); + continue; + } + + // only bother updating the inventory for this event's peer if we have an outbound + // connection to it. + if let Some(outbound_neighbor_key) = outbound_neighbor_key_opt.as_ref() { + let _ = self.handle_unsolicited_inv_update_epoch2x( + sortdb, + event_id, + &outbound_neighbor_key, + &sn.consensus_hash, + false, + ); + } + } + + to_buffer + } + + /// Handle unsolicited MicroblocksData. + /// + /// Don't (yet) validate the data; just verify that it connects to two existing StacksBlocks, + /// and if so, keep it to be passed on to the relayer. + /// + /// Log but do nothing with errors in validation. + /// + /// The caller can call this in one of two ways: with `buffer` set to `true` or `false`. If + /// `buffer` is `true`, then the caller is asking to know if the message can be buffered if it + /// cannot be handled. If it is instead `false`, then the caller is asking to simply try and + /// handle the given message. In both cases, the microblocks will be checked against the local + /// sortition DB and chainstate DB, and if they correspond to a missing stream between two known + /// StacksBlocks, the remote peer's inventory will be updated to reflect that it has this + /// stream. + /// + /// Returns whether or not to buffer. If the microblocks correspond to existing chain state, + /// then this method will indicate to the opposite of `buffer`, which ensures that the messages + /// will never be buffered but instead processed immediately. Otherwise, no buffering will + /// take place. + fn handle_unsolicited_MicroblocksData( + &mut self, + chainstate: &StacksChainState, + event_id: usize, + new_microblocks: &MicroblocksData, + buffer: bool, + ) -> bool { + let outbound_neighbor_key_opt = self.find_outbound_neighbor(event_id); + + debug!( + "{:?}: Process MicroblocksData from {:?} for {} with {} entries", + &self.local_peer, + outbound_neighbor_key_opt.or_else(|| { self.check_peer_authenticated(event_id) }), + &new_microblocks.index_anchor_block, + new_microblocks.microblocks.len() + ); + + // do we have the associated anchored block? + match chainstate.get_block_header_hashes(&new_microblocks.index_anchor_block) { + Ok(Some(_)) => { + // yup; can process now + debug!("{:?}: have microblock parent anchored block {}, so can process its microblocks", &self.local_peer, &new_microblocks.index_anchor_block); + !buffer + } + Ok(None) => { + if buffer { + debug!( + "{:?}: Will buffer unsolicited MicroblocksData({})", + &self.local_peer, &new_microblocks.index_anchor_block + ); + true + } else { + debug!( + "{:?}: Will not buffer unsolicited MicroblocksData({})", + &self.local_peer, &new_microblocks.index_anchor_block + ); + false + } + } + Err(e) => { + warn!( + "{:?}: Failed to get header hashes for {:?}: {:?}", + &self.local_peer, &new_microblocks.index_anchor_block, &e + ); + false + } + } + } + + /// Determine if an unsolicited NakamotoBlockData message contains data we can potentially + /// buffer + pub(crate) fn is_nakamoto_block_bufferable( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + nakamoto_block: &NakamotoBlock, + ) -> bool { + if chainstate + .nakamoto_blocks_db() + .has_nakamoto_block(&nakamoto_block.block_id()) + .unwrap_or(false) + { + debug!( + "{:?}: Aleady have Nakamoto block {}", + &self.local_peer, + &nakamoto_block.block_id() + ); + return false; + } + + let mut can_process = true; + let sn = match SortitionDB::get_block_snapshot_consensus( + &sortdb.conn(), + &nakamoto_block.header.consensus_hash, + ) { + Ok(Some(sn)) => sn, + Ok(None) => { + debug!( + "No sortition {} for block {}", + &nakamoto_block.header.consensus_hash, + &nakamoto_block.block_id() + ); + // we don't have the sortition for this, so we can't process it yet (i.e. we need + // to buffer) + can_process = false; + // load the tip so we can load the current reward set data + self.burnchain_tip.clone() + } + Err(e) => { + info!( + "{:?}: Failed to query block snapshot for {}: {:?}", + &self.local_peer, &nakamoto_block.header.consensus_hash, &e + ); + return false; + } + }; + + if !sn.pox_valid { + info!( + "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", + &self.local_peer, &nakamoto_block.header.consensus_hash + ); + return false; + } + + // block must be signed by reward set signers + // TODO + + // the block is well-formed, but we'd buffer if we can't process it yet + !can_process + } + + /// Handle an unsolicited NakamotoBlocksData message. + /// + /// Unlike Stacks epoch 2.x blocks, no change to the remote peer's inventory will take place. + /// This is because a 1-bit indicates the _entire_ tenure is present for a given sortition, and + /// this is usually impossible to tell here. Instead, this handler will return `true` if the + /// sortition identified by the block's consensus hash is known to this node (in which case, + /// the relayer can store it to staging). + /// + /// Returns true if this message should be buffered and re-processed + pub(crate) fn inner_handle_unsolicited_NakamotoBlocksData( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + remote_neighbor_key_opt: Option, + nakamoto_blocks: &NakamotoBlocksData, + ) -> bool { + debug!( + "{:?}: Process NakamotoBlocksData from {:?} with {} entries", + &self.local_peer, + &remote_neighbor_key_opt, + nakamoto_blocks.blocks.len() + ); + + let mut to_buffer = false; + for nakamoto_block in nakamoto_blocks.blocks.iter() { + if self.is_nakamoto_block_bufferable(sortdb, chainstate, nakamoto_block) { + debug!( + "{:?}: Will buffer unsolicited NakamotoBlocksData({}) ({})", + &self.local_peer, + &nakamoto_block.block_id(), + &nakamoto_block.header.consensus_hash, + ); + to_buffer = true; + }; + } + to_buffer + } + + /// Handle an unsolicited NakamotoBlocksData message. + /// + /// Unlike Stacks epoch 2.x blocks, no change to the remote peer's inventory will take place. + /// This is because a 1-bit indicates the _entire_ tenure is present for a given sortition, and + /// this is usually impossible to tell here. Instead, this handler will return `true` if the + /// sortition identified by the block's consensus hash is known to this node (in which case, + /// the relayer can store it to staging). + /// + /// Returns true if this message should be buffered and re-processed + /// + /// Wraps inner_handle_unsolicited_NakamotoBlocksData by resolving the event_id to the optional + /// neighbor key. + fn handle_unsolicited_NakamotoBlocksData( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + event_id: usize, + nakamoto_blocks: &NakamotoBlocksData, + ) -> bool { + let outbound_neighbor_key_opt = self + .find_outbound_neighbor(event_id) + .or_else(|| self.check_peer_authenticated(event_id)); + self.inner_handle_unsolicited_NakamotoBlocksData( + sortdb, + chainstate, + outbound_neighbor_key_opt, + nakamoto_blocks, + ) + } + + /// Handle an unsolicited message, with either the intention of just processing it (in which + /// case, `buffer` will be `false`), or with the intention of not only processing it, but also + /// determining if it can be bufferred and retried later (in which case, `buffer` will be + /// `true`). + /// + /// Returns (true, x) if we should buffer the message and try processing it again later. + /// Returns (false, x) if we should *not* buffer this message, because it *won't* be valid + /// later. + /// + /// Returns (x, true) if we should forward the message to the relayer, so it can be processed. + /// Returns (x, false) if we should *not* forward the message to the relayer, because it will + /// *not* be processed. + fn handle_unsolicited_message( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + event_id: usize, + preamble: &Preamble, + payload: &StacksMessageType, + ibd: bool, + buffer: bool, + ) -> (bool, bool) { + match payload { + // Update our inv state for this peer, but only do so if we have an + // outbound connection to it and it's authenticated (we don't synchronize inv + // state with inbound peers). Since we will have received this message + // from an _inbound_ conversation, we need to find the reciprocal _outbound_ + // conversation and use _that_ conversation's neighbor key to identify + // which inventory we need to update. + StacksMessageType::BlocksAvailable(ref new_blocks) => { + // no need to forward to relayer + let to_buffer = self.handle_unsolicited_BlocksAvailable( + sortdb, chainstate, event_id, new_blocks, ibd, buffer, + ); + (to_buffer, false) + } + StacksMessageType::MicroblocksAvailable(ref new_mblocks) => { + // no need to forward to relayer + let to_buffer = self.handle_unsolicited_MicroblocksAvailable( + sortdb, + chainstate, + event_id, + new_mblocks, + ibd, + buffer, + ); + (to_buffer, false) + } + StacksMessageType::Blocks(ref new_blocks) => { + // update inv state for this peer, and always forward to the relayer + let to_buffer = + self.handle_unsolicited_BlocksData(sortdb, event_id, new_blocks, buffer); + + // forward to relayer for processing + (to_buffer, true) + } + StacksMessageType::Microblocks(ref new_mblocks) => { + // update inv state for this peer, and optionally forward to the relayer. + // Note that if these microblocks can be processed *now*, then they *will not* be + // buffered + let to_buffer = self.handle_unsolicited_MicroblocksData( + chainstate, + event_id, + new_mblocks, + buffer, + ); + + // only forward to the relayer if we don't need to buffer it. + (to_buffer, true) + } + StacksMessageType::NakamotoBlocks(ref new_blocks) => { + let to_buffer = if buffer { + self.handle_unsolicited_NakamotoBlocksData( + sortdb, chainstate, event_id, new_blocks, + ) + } else { + // nothing to do if we're not querying about whether we can buffer this. + false + }; + + (to_buffer, true) + } + StacksMessageType::StackerDBPushChunk(ref data) => { + match self.handle_unsolicited_StackerDBPushChunk(event_id, preamble, data) { + Ok(x) => { + // don't buffer, but do reject if invalid + (false, x) + } + Err(e) => { + info!( + "{:?}: failed to handle unsolicited {:?}: {:?}", + &self.local_peer, payload, &e + ); + (false, false) + } + } + } + _ => (false, true), + } + } + + /// Handle unsolicited messages propagated up to us from our ongoing ConversationP2Ps. + /// Return messages that we couldn't handle here, but key them by neighbor, not event, so the + /// relayer can do something useful with them. + /// + /// Invalid messages are dropped silently, with an log message. + /// + /// If `buffer` is true, then this message will be buffered up and tried again in a subsequent + /// call if the handler for it deems the message valid. + /// + /// If `buffer` is false, then if the message handler deems the message valid, it will be + /// forwraded to the relayer. + pub fn handle_unsolicited_messages( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + unsolicited: HashMap>, + ibd: bool, + buffer: bool, + ) -> HashMap> { + let mut unhandled: HashMap> = HashMap::new(); + for (event_id, messages) in unsolicited.into_iter() { + if messages.len() == 0 { + // no messages for this event + continue; + } + if buffer && self.check_peer_authenticated(event_id).is_none() { + if cfg!(test) + && self + .connection_opts + .test_disable_unsolicited_message_authentication + { + test_debug!( + "{:?}: skip unsolicited message authentication", + &self.local_peer + ); + } else { + // do not buffer messages from unknown peers + // (but it's fine to process messages that were previosuly buffered, since the peer + // may have since disconnected) + debug!("Will not handle unsolicited messages from unauthenticated or dead event {}", event_id); + continue; + } + }; + + let neighbor_key = if let Some(convo) = self.peers.get(&event_id) { + convo.to_neighbor_key() + } else { + debug!( + "{:?}: No longer such neighbor event={}, dropping {} unsolicited messages", + &self.local_peer, + event_id, + messages.len() + ); + continue; + }; + + debug!("{:?}: Process {} unsolicited messages from {:?}", &self.local_peer, messages.len(), &neighbor_key; "buffer" => %buffer); + + for message in messages.into_iter() { + if !buffer { + debug!( + "{:?}: Re-try handling buffered message {} from {:?}", + &self.local_peer, + &message.payload.get_message_description(), + &neighbor_key + ); + } + let (to_buffer, relay) = self.handle_unsolicited_message( + sortdb, + chainstate, + event_id, + &message.preamble, + &message.payload, + ibd, + buffer, + ); + if buffer && to_buffer { + self.buffer_data_message(event_id, message); + } else if relay { + // forward to relayer for processing + debug!( + "{:?}: Will forward message {} from {:?} to relayer", + &self.local_peer, + &message.payload.get_message_description(), + &neighbor_key + ); + if let Some(msgs) = unhandled.get_mut(&neighbor_key) { + msgs.push(message); + } else { + unhandled.insert(neighbor_key.clone(), vec![message]); + } + } + } + } + unhandled + } +} From f6dd43cc9b56f13391fa26db6ddb9bfb1bd80741 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:22:06 -0400 Subject: [PATCH 0262/1400] feat: nakamoto block-push logic for handling newly-received Nakamoto blocks. Also, removes tests that are now in net/tests/relay/epoch2x.rs --- stackslib/src/net/relay.rs | 4343 +++++------------------------------- 1 file changed, 563 insertions(+), 3780 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 7236ef76e4..f8c5fae144 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -67,6 +67,8 @@ pub const RELAY_DUPLICATE_INFERENCE_WARMUP: usize = 128; pub struct Relayer { /// Connection to the p2p thread p2p: NetworkHandle, + /// connection options + connection_opts: ConnectionOptions, /// StackerDB connection stacker_dbs: StackerDBs, } @@ -77,12 +79,12 @@ pub struct RelayerStats { /// Note that we key on (addr, port), not the full NeighborAddress. /// (TODO: Nothing is done with this yet, but one day we'll use it to probe for network /// choke-points). - relay_stats: HashMap, - relay_updates: BTreeMap, + pub(crate) relay_stats: HashMap, + pub(crate) relay_updates: BTreeMap, /// Messages sent from each neighbor recently (includes duplicates) - recent_messages: HashMap>, - recent_updates: BTreeMap, + pub(crate) recent_messages: HashMap>, + pub(crate) recent_updates: BTreeMap, next_priority: u64, } @@ -93,6 +95,7 @@ pub struct ProcessedNetReceipts { pub num_new_blocks: u64, pub num_new_confirmed_microblocks: u64, pub num_new_unconfirmed_microblocks: u64, + pub num_new_nakamoto_blocks: u64, } /// A trait for implementing both mempool event observer methods and stackerdb methods. @@ -170,6 +173,16 @@ impl RelayPayload for StacksMicroblock { } } +impl RelayPayload for NakamotoBlock { + fn get_digest(&self) -> Sha512Trunc256Sum { + let h = self.block_id(); + Sha512Trunc256Sum(h.0) + } + fn get_id(&self) -> String { + format!("NakamotoBlock({})", self.block_id()) + } +} + impl RelayPayload for StacksTransaction { fn get_digest(&self) -> Sha512Trunc256Sum { let h = self.txid(); @@ -317,7 +330,7 @@ impl RelayerStats { } /// Map neighbors to the frequency of their AS numbers in the given neighbors list - fn count_ASNs( + pub(crate) fn count_ASNs( conn: &DBConn, neighbors: &[NeighborKey], ) -> Result, net_error> { @@ -442,7 +455,7 @@ impl RelayerStats { } for l in 0..count { - if norm <= 1 { + if norm == 0 { // just one option break; } @@ -461,8 +474,8 @@ impl RelayerStats { sampled += 1; // sample without replacement - rankings_vec[i].1 -= 1; - norm -= 1; + norm -= rankings_vec[i].1; + rankings_vec[i].1 = 0; break; } } @@ -475,19 +488,24 @@ impl RelayerStats { } impl Relayer { - pub fn new(handle: NetworkHandle, stacker_dbs: StackerDBs) -> Relayer { + pub fn new( + handle: NetworkHandle, + connection_opts: ConnectionOptions, + stacker_dbs: StackerDBs, + ) -> Relayer { Relayer { p2p: handle, + connection_opts, stacker_dbs, } } pub fn from_p2p(network: &mut PeerNetwork, stacker_dbs: StackerDBs) -> Relayer { let handle = network.new_handle(1024); - Relayer::new(handle, stacker_dbs) + Relayer::new(handle, network.connection_opts.clone(), stacker_dbs) } - /// Given blocks pushed to us, verify that they correspond to expected block data. + /// Given Stacks 2.x blocks pushed to us, verify that they correspond to expected block data. pub fn validate_blocks_push( conn: &SortitionDBConn, blocks_data: &BlocksData, @@ -518,10 +536,41 @@ impl Relayer { "No such sortition in block with consensus hash {}", consensus_hash ); + return Err(net_error::InvalidMessage); + } + } + Ok(()) + } + + /// Given Nakamoto blocks pushed to us, verify that they correspond to expected block data. + pub fn validate_nakamoto_blocks_push( + conn: &SortitionDBConn, + nakamoto_blocks_data: &NakamotoBlocksData, + ) -> Result<(), net_error> { + for nakamoto_block in nakamoto_blocks_data.blocks.iter() { + // is this the right Stacks block for this sortition? + let Some(sn) = SortitionDB::get_block_snapshot_consensus( + conn.conn(), + &nakamoto_block.header.consensus_hash, + )? + else { + // don't know this sortition yet + continue; + }; + + if !sn.pox_valid { + info!( + "Pushed block from consensus hash {} corresponds to invalid PoX state", + nakamoto_block.header.consensus_hash + ); + continue; + } - // TODO: once PoX is implemented, this can be permitted if we're missing the reward - // window's anchor block for the reward window in which this block lives. Until - // then, it's never okay -- this peer shall be considered broken. + if !sn.sortition { + info!( + "No such sortition in block with consensus hash {}", + &nakamoto_block.header.consensus_hash + ); return Err(net_error::InvalidMessage); } } @@ -668,7 +717,16 @@ impl Relayer { // do we have this block? don't lock the DB needlessly if so. if chainstate .nakamoto_blocks_db() - .has_nakamoto_block(&block.header.block_id())? + .has_nakamoto_block(&block.header.block_id()) + .map_err(|e| { + debug!( + "Failed to determine if we have Nakamoto block {}/{}: {:?}", + &block.header.consensus_hash, + &block.header.block_hash(), + &e + ); + e + })? { debug!("Already have Nakamoto block {}", &block.header.block_id()); return Ok(false); @@ -676,7 +734,13 @@ impl Relayer { let block_sn = SortitionDB::get_block_snapshot_consensus(sort_handle, &block.header.consensus_hash)? - .ok_or(chainstate_error::DBError(db_error::NotFoundError))?; + .ok_or_else(|| { + debug!( + "Failed to load snapshot for consensus hash {}", + &block.header.consensus_hash + ); + chainstate_error::DBError(db_error::NotFoundError) + })?; // NOTE: it's `+ 1` because the first Nakamoto block is built atop the last epoch 2.x // tenure, right after the last 2.x sortition @@ -757,14 +821,16 @@ impl Relayer { Ok(accepted) } - /// Process nakamoto blocks. + /// Process nakamoto blocks that we downloaded. /// Log errors but do not return them. - pub fn process_nakamoto_blocks( + /// Returns the list of blocks we accepted. + pub fn process_downloaded_nakamoto_blocks( sortdb: &SortitionDB, chainstate: &mut StacksChainState, blocks: impl Iterator, coord_comms: Option<&CoordinatorChannels>, - ) -> Result<(), chainstate_error> { + ) -> Result, chainstate_error> { + let mut accepted = vec![]; let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; let mut sort_handle = sortdb.index_handle(&tip.sortition_id); for block in blocks { @@ -773,13 +839,15 @@ impl Relayer { sortdb, &mut sort_handle, chainstate, - block, + block.clone(), coord_comms, ) { warn!("Failed to process Nakamoto block {}: {:?}", &block_id, &e); + } else { + accepted.push(block); } } - Ok(()) + Ok(accepted) } /// Coalesce a set of microblocks into relayer hints and MicroblocksData messages, as calculated by @@ -1343,6 +1411,91 @@ impl Relayer { Ok((mblock_datas, bad_neighbors)) } + /// Preprocess all pushed Nakamoto blocks + /// Return the Nakamoto blocks we can accept (and who relayed them), as well as the + /// list of peers that served us invalid data. + pub(crate) fn process_pushed_nakamoto_blocks( + network_result: &mut NetworkResult, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + coord_comms: Option<&CoordinatorChannels>, + ) -> Result<(Vec<(Vec, Vec)>, Vec), net_error> { + let mut new_blocks_and_relayers = vec![]; + let mut bad_neighbors = vec![]; + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + + // process Nakamoto blocks pushed to us. + // If a neighbor sends us an invalid Nakamoto block, then ban them. + for (neighbor_key, relayers_and_block_data) in network_result.pushed_nakamoto_blocks.iter() + { + for (relayers, nakamoto_blocks_data) in relayers_and_block_data.iter() { + let mut good = true; + let mut accepted_blocks = vec![]; + if let Err(_e) = Relayer::validate_nakamoto_blocks_push( + &sortdb.index_conn(), + nakamoto_blocks_data, + ) { + // punish this peer + bad_neighbors.push((*neighbor_key).clone()); + good = false; + } + + for nakamoto_block in nakamoto_blocks_data.blocks.iter() { + if !good { + break; + } + let block_id = nakamoto_block.block_id(); + debug!( + "Received pushed Nakamoto block {} from {}", + block_id, neighbor_key + ); + let mut sort_handle = sortdb.index_handle(&tip.sortition_id); + match Self::process_new_nakamoto_block( + sortdb, + &mut sort_handle, + chainstate, + nakamoto_block.clone(), + coord_comms, + ) { + Ok(accepted) => { + if accepted { + debug!( + "Accepted Nakamoto block {} from {}", + &block_id, neighbor_key + ); + accepted_blocks.push(nakamoto_block.clone()); + } else { + debug!( + "Rejected Nakamoto block {} from {}", + &block_id, &neighbor_key, + ); + } + } + Err(chainstate_error::InvalidStacksBlock(msg)) => { + warn!("Invalid pushed Nakamoto block {}: {}", &block_id, msg); + bad_neighbors.push((*neighbor_key).clone()); + good = false; + break; + } + Err(e) => { + warn!( + "Could not process pushed Nakamoto block {}: {:?}", + &block_id, &e + ); + good = false; + break; + } + } + } + if good && accepted_blocks.len() > 0 { + new_blocks_and_relayers.push((relayers.clone(), accepted_blocks)); + } + } + } + + Ok((new_blocks_and_relayers, bad_neighbors)) + } + /// Verify that a relayed transaction is not problematic. This is a static check -- we only /// look at the tx contents. /// @@ -1634,6 +1787,55 @@ impl Relayer { )) } + /// Process new Nakamoto blocks, both pushed and downloaded. + /// Returns the list of Nakamoto blocks we stored, as well as the list of bad neighbors that + /// sent us invalid blocks. + pub fn process_new_nakamoto_blocks( + network_result: &mut NetworkResult, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + coord_comms: Option<&CoordinatorChannels>, + ) -> Result<(Vec<(Vec, Vec)>, Vec), net_error> { + // process downloaded Nakamoto blocks. + // We treat them as singleton blocks fetched via zero relayers + let nakamoto_blocks = + std::mem::replace(&mut network_result.nakamoto_blocks, HashMap::new()); + let mut accepted_nakamoto_blocks_and_relayers = + match Self::process_downloaded_nakamoto_blocks( + sortdb, + chainstate, + nakamoto_blocks.into_values(), + coord_comms, + ) { + Ok(accepted) => accepted + .into_iter() + .map(|block| (vec![], vec![block])) + .collect(), + Err(e) => { + warn!("Failed to process downloaded Nakamoto blocks: {:?}", &e); + vec![] + } + }; + + // process pushed Nakamoto blocks + let (mut pushed_blocks_and_relayers, bad_neighbors) = + match Self::process_pushed_nakamoto_blocks( + network_result, + sortdb, + chainstate, + coord_comms, + ) { + Ok(x) => x, + Err(e) => { + warn!("Failed to process pushed Nakamoot blocks: {:?}", &e); + (vec![], vec![]) + } + }; + + accepted_nakamoto_blocks_and_relayers.append(&mut pushed_blocks_and_relayers); + Ok((accepted_nakamoto_blocks_and_relayers, bad_neighbors)) + } + /// Produce blocks-available messages from blocks we just got. pub fn load_blocks_available_data( sortdb: &SortitionDB, @@ -1717,7 +1919,7 @@ impl Relayer { /// Store all new transactions we received, and return the list of transactions that we need to /// forward (as well as their relay hints). Also, garbage-collect the mempool. - fn process_transactions( + pub(crate) fn process_transactions( network_result: &mut NetworkResult, sortdb: &SortitionDB, chainstate: &mut StacksChainState, @@ -2005,31 +2207,87 @@ impl Relayer { ) } - /// Given a network result, consume and store all data. - /// * Add all blocks and microblocks to staging. - /// * Forward BlocksAvailable messages to neighbors for newly-discovered anchored blocks - /// * Forward MicroblocksAvailable messages to neighbors for newly-discovered confirmed microblock streams - /// * Forward along unconfirmed microblocks that we didn't already have - /// * Add all transactions to the mempool. - /// * Forward transactions we didn't already have. - /// * Reload the unconfirmed state, if necessary. - /// Mask errors from invalid data -- all errors due to invalid blocks and invalid data should be captured, and - /// turned into peer bans. - pub fn process_network_result( + /// Relay epoch2 block data + fn relay_epoch2_blocks( + &mut self, + _local_peer: &LocalPeer, + sortdb: &SortitionDB, + new_blocks: HashMap, + new_confirmed_microblocks: HashMap)>, + new_microblocks: Vec<(Vec, MicroblocksData)>, + ) { + // have the p2p thread tell our neighbors about newly-discovered blocks + let new_block_chs = new_blocks.iter().map(|(ch, _)| ch.clone()).collect(); + let available = Relayer::load_blocks_available_data(sortdb, new_block_chs) + .unwrap_or(BlocksAvailableMap::new()); + if available.len() > 0 { + debug!("{:?}: Blocks available: {}", &_local_peer, available.len()); + if let Err(e) = self.p2p.advertize_blocks(available, new_blocks) { + warn!("Failed to advertize new blocks: {:?}", &e); + } + } + + // have the p2p thread tell our neighbors about newly-discovered confirmed microblock streams + let new_mblock_chs = new_confirmed_microblocks + .iter() + .map(|(ch, _)| ch.clone()) + .collect(); + let mblocks_available = Relayer::load_blocks_available_data(sortdb, new_mblock_chs) + .unwrap_or(BlocksAvailableMap::new()); + if mblocks_available.len() > 0 { + debug!( + "{:?}: Confirmed microblock streams available: {}", + &_local_peer, + mblocks_available.len() + ); + if let Err(e) = self + .p2p + .advertize_microblocks(mblocks_available, new_confirmed_microblocks) + { + warn!("Failed to advertize new confirmed microblocks: {:?}", &e); + } + } + + // have the p2p thread forward all new unconfirmed microblocks + if new_microblocks.len() > 0 { + debug!( + "{:?}: Unconfirmed microblocks: {}", + &_local_peer, + new_microblocks.len() + ); + for (relayers, mblocks_msg) in new_microblocks.into_iter() { + debug!( + "{:?}: Send {} microblocks for {}", + &_local_peer, + mblocks_msg.microblocks.len(), + &mblocks_msg.index_anchor_block + ); + let msg = StacksMessageType::Microblocks(mblocks_msg); + if let Err(e) = self.p2p.broadcast_message(relayers, msg) { + warn!("Failed to broadcast microblock: {:?}", &e); + } + } + } + } + + /// Process epoch2 block data. + /// Relays blocks and microblocks as needed + /// Returns (num new blocks, num new confirmed microblocks, num new unconfirmed microblocks) + fn process_new_epoch2_blocks( &mut self, _local_peer: &LocalPeer, network_result: &mut NetworkResult, sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, - mempool: &mut MemPoolDB, ibd: bool, coord_comms: Option<&CoordinatorChannels>, - event_observer: Option<&dyn RelayEventDispatcher>, - ) -> Result { + ) -> (u64, u64, u64) { let mut num_new_blocks = 0; let mut num_new_confirmed_microblocks = 0; let mut num_new_unconfirmed_microblocks = 0; - match Relayer::process_new_blocks(network_result, sortdb, chainstate, coord_comms) { + + // Process epoch2 data + match Self::process_new_blocks(network_result, sortdb, chainstate, coord_comms) { Ok((new_blocks, new_confirmed_microblocks, new_microblocks, bad_block_neighbors)) => { // report quantities of new data in the receipts num_new_blocks = new_blocks.len() as u64; @@ -2051,79 +2309,202 @@ impl Relayer { // only relay if not ibd if !ibd { - // have the p2p thread tell our neighbors about newly-discovered blocks - let new_block_chs = new_blocks.iter().map(|(ch, _)| ch.clone()).collect(); - let available = Relayer::load_blocks_available_data(sortdb, new_block_chs)?; - if available.len() > 0 { - debug!("{:?}: Blocks available: {}", &_local_peer, available.len()); - if let Err(e) = self.p2p.advertize_blocks(available, new_blocks) { - warn!("Failed to advertize new blocks: {:?}", &e); - } - } + self.relay_epoch2_blocks( + _local_peer, + sortdb, + new_blocks, + new_confirmed_microblocks, + new_microblocks, + ); + } + } + Err(e) => { + warn!("Failed to process new blocks: {:?}", &e); + } + } + ( + num_new_blocks, + num_new_confirmed_microblocks, + num_new_unconfirmed_microblocks, + ) + } - // have the p2p thread tell our neighbors about newly-discovered confirmed microblock streams - let new_mblock_chs = new_confirmed_microblocks - .iter() - .map(|(ch, _)| ch.clone()) - .collect(); - let mblocks_available = - Relayer::load_blocks_available_data(sortdb, new_mblock_chs)?; - if mblocks_available.len() > 0 { - debug!( - "{:?}: Confirmed microblock streams available: {}", - &_local_peer, - mblocks_available.len() + /// Get the last N sortitions, in order from the sortition tip to the n-1st ancestor + pub fn get_last_n_sortitions( + sortdb: &SortitionDB, + n: u64, + ) -> Result, chainstate_error> { + let mut ret = vec![]; + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + ret.push(sort_tip); + + for _i in 0..(n.saturating_sub(1)) { + let last_sn_parent_sortition_id = ret + .last() + .map(|sn| sn.parent_sortition_id.clone()) + .expect("Infallible -- ret is non-empty"); + let sn = SortitionDB::get_block_snapshot(sortdb.conn(), &last_sn_parent_sortition_id)? + .ok_or(db_error::NotFoundError)?; + ret.push(sn); + } + Ok(ret) + } + + /// Relay Nakamoto blocks. + /// By default, only sends them if we don't have them yet. + /// This can be overridden by setting `force_send` to true. + pub fn relay_epoch3_blocks( + &mut self, + _local_peer: &LocalPeer, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + nakamoto_blocks_and_relayers: Vec<(Vec, Vec)>, + force_send: bool, + ) { + debug!( + "{:?}: relay {} sets of Nakamoto blocks", + _local_peer, + nakamoto_blocks_and_relayers.len() + ); + + // the relay strategy is to only send blocks that are within + // `connection_opts.max_nakamoto_block_relay_age`, which is the number of + // burnchain sortitions that have happened since its tenure began. The + // intuition is that nodes that are in IBD will be downloading blocks anyway, + // but nodes that are at or near the chain tip would benefit from having blocks + // pushed to them. + let Ok(relay_sortitions) = + Self::get_last_n_sortitions(sortdb, self.connection_opts.max_nakamoto_block_relay_age) + .map_err(|e| warn!("Failed to load last N sortitions: {:?}", &e)) + else { + return; + }; + + let relay_tenures: HashSet<_> = relay_sortitions + .into_iter() + .map(|sn| sn.consensus_hash) + .collect(); + + for (relayers, blocks) in nakamoto_blocks_and_relayers.into_iter() { + let relay_blocks: Vec<_> = blocks + .into_iter() + .filter(|blk| { + // don't relay blocks for non-recent tenures + if !relay_tenures.contains(&blk.header.consensus_hash) { + test_debug!( + "Do not relay {} -- {} is not recent", + &blk.header.block_id(), + &blk.header.consensus_hash ); - if let Err(e) = self - .p2p - .advertize_microblocks(mblocks_available, new_confirmed_microblocks) - { - warn!("Failed to advertize new confirmed microblocks: {:?}", &e); - } + return false; + } + // don't relay blocks we already have. + // If we have a DB error in figuring this out, then don't relay by + // default (lest a faulty DB cause the node to spam the network). + if !force_send + && chainstate + .nakamoto_blocks_db() + .has_nakamoto_block(&blk.block_id()) + .unwrap_or(true) + { + return false; } + true + }) + .collect(); - // have the p2p thread forward all new unconfirmed microblocks - if new_microblocks.len() > 0 { - debug!( - "{:?}: Unconfirmed microblocks: {}", - &_local_peer, - new_microblocks.len() - ); - for (relayers, mblocks_msg) in new_microblocks.into_iter() { - debug!( - "{:?}: Send {} microblocks for {}", - &_local_peer, - mblocks_msg.microblocks.len(), - &mblocks_msg.index_anchor_block - ); - let msg = StacksMessageType::Microblocks(mblocks_msg); - if let Err(e) = self.p2p.broadcast_message(relayers, msg) { - warn!("Failed to broadcast microblock: {:?}", &e); - } - } + debug!( + "{:?}: Forward {} Nakamoto blocks from {:?}", + _local_peer, + relay_blocks.len(), + &relayers + ); + + if relay_blocks.len() == 0 { + continue; + } + + for _block in relay_blocks.iter() { + test_debug!( + "{:?}: Forward Nakamoto block {}/{}", + _local_peer, + &_block.header.consensus_hash, + &_block.header.block_hash() + ); + } + + let msg = StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: relay_blocks, + }); + if let Err(e) = self.p2p.broadcast_message(relayers, msg) { + warn!("Failed to broadcast Nakamoto blocks: {:?}", &e); + } + } + } + + /// Process epoch3 data + /// Relay new nakamoto blocks if not in ibd + /// Returns number of new nakamoto blocks + pub fn process_new_epoch3_blocks( + &mut self, + _local_peer: &LocalPeer, + network_result: &mut NetworkResult, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + ibd: bool, + coord_comms: Option<&CoordinatorChannels>, + ) -> u64 { + let mut num_new_nakamoto_blocks = 0; + + match Self::process_new_nakamoto_blocks(network_result, sortdb, chainstate, coord_comms) { + Ok((nakamoto_blocks_and_relayers, bad_neighbors)) => { + num_new_nakamoto_blocks = nakamoto_blocks_and_relayers + .iter() + .fold(0, |acc, (_relayers, blocks)| acc + blocks.len()) + as u64; + + // punish bad peers + if bad_neighbors.len() > 0 { + debug!("{:?}: Ban {} peers", &_local_peer, bad_neighbors.len()); + if let Err(e) = self.p2p.ban_peers(bad_neighbors) { + warn!("Failed to ban bad-block peers: {:?}", &e); } } + + // relay if not IBD + if !ibd && nakamoto_blocks_and_relayers.len() > 0 { + self.relay_epoch3_blocks( + _local_peer, + sortdb, + chainstate, + nakamoto_blocks_and_relayers, + false, + ); + } } Err(e) => { - warn!("Failed to process new blocks: {:?}", &e); + warn!("Failed to process new Nakamoto blocks: {:?}", &e); } - }; - - let nakamoto_blocks = - std::mem::replace(&mut network_result.nakamoto_blocks, HashMap::new()); - if let Err(e) = Relayer::process_nakamoto_blocks( - sortdb, - chainstate, - nakamoto_blocks.into_values(), - coord_comms, - ) { - warn!("Failed to process Nakamoto blocks: {:?}", &e); } + num_new_nakamoto_blocks + } + /// Process new transactions + /// Returns the list of accepted txs + pub fn process_new_transactions( + &mut self, + _local_peer: &LocalPeer, + network_result: &mut NetworkResult, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + mempool: &mut MemPoolDB, + ibd: bool, + event_observer: Option<&dyn RelayEventDispatcher>, + ) -> Vec { + // process new transactions let mut mempool_txs_added = vec![]; - - // only care about transaction forwarding if not IBD if !ibd { + // only care about transaction forwarding if not IBD. // store all transactions, and forward the novel ones to neighbors test_debug!( "{:?}: Process {} transaction(s)", @@ -2136,7 +2517,8 @@ impl Relayer { chainstate, mempool, event_observer.map(|obs| obs.as_mempool_event_dispatcher()), - )?; + ) + .unwrap_or(vec![]); if new_txs.len() > 0 { debug!( @@ -2155,24 +2537,79 @@ impl Relayer { } } } + mempool_txs_added + } - let mut processed_unconfirmed_state = Default::default(); - - // finally, refresh the unconfirmed chainstate, if need be. - // only bother if we're not in IBD; otherwise this is a waste of time - if network_result.has_microblocks() && !ibd { - processed_unconfirmed_state = Relayer::refresh_unconfirmed(chainstate, sortdb); - } - - // push events for HTTP-uploaded stacker DB chunks - Relayer::process_uploaded_stackerdb_chunks( - mem::replace(&mut network_result.uploaded_stackerdb_chunks, vec![]), - event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), - ); - - // store downloaded stacker DB chunks - Relayer::process_stacker_db_chunks( - &mut self.stacker_dbs, + /// Given a network result, consume and store all data. + /// * Add all blocks and microblocks to staging. + /// * Forward BlocksAvailable messages to neighbors for newly-discovered anchored blocks + /// * Forward MicroblocksAvailable messages to neighbors for newly-discovered confirmed microblock streams + /// * Forward along unconfirmed microblocks that we didn't already have + /// * Add all transactions to the mempool. + /// * Forward transactions we didn't already have. + /// * Reload the unconfirmed state, if necessary. + /// Mask errors from invalid data -- all errors due to invalid blocks and invalid data should be captured, and + /// turned into peer bans. + pub fn process_network_result( + &mut self, + _local_peer: &LocalPeer, + network_result: &mut NetworkResult, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + mempool: &mut MemPoolDB, + ibd: bool, + coord_comms: Option<&CoordinatorChannels>, + event_observer: Option<&dyn RelayEventDispatcher>, + ) -> Result { + // process epoch2 data + let (num_new_blocks, num_new_confirmed_microblocks, num_new_unconfirmed_microblocks) = self + .process_new_epoch2_blocks( + _local_peer, + network_result, + sortdb, + chainstate, + ibd, + coord_comms, + ); + + // process epoch3 data + let num_new_nakamoto_blocks = self.process_new_epoch3_blocks( + _local_peer, + network_result, + sortdb, + chainstate, + ibd, + coord_comms, + ); + + // process transactions + let mempool_txs_added = self.process_new_transactions( + _local_peer, + network_result, + sortdb, + chainstate, + mempool, + ibd, + event_observer, + ); + + // finally, refresh the unconfirmed chainstate, if need be. + // only bother if we're not in IBD; otherwise this is a waste of time + let processed_unconfirmed_state = if network_result.has_microblocks() && !ibd { + Relayer::refresh_unconfirmed(chainstate, sortdb) + } else { + Default::default() + }; + + // push events for HTTP-uploaded stacker DB chunks + Relayer::process_uploaded_stackerdb_chunks( + mem::replace(&mut network_result.uploaded_stackerdb_chunks, vec![]), + event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), + ); + + // store downloaded stacker DB chunks + Relayer::process_stacker_db_chunks( + &mut self.stacker_dbs, &network_result.stacker_db_configs, mem::replace(&mut network_result.stacker_db_sync_results, vec![]), event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), @@ -2192,6 +2629,7 @@ impl Relayer { num_new_blocks, num_new_confirmed_microblocks, num_new_unconfirmed_microblocks, + num_new_nakamoto_blocks, }; Ok(receipts) @@ -2609,6 +3047,15 @@ impl PeerNetwork { } } + for (nk, nakamoto_data) in network_result.pushed_nakamoto_blocks.iter() { + for (_, nakamoto_msg) in nakamoto_data.iter() { + for nakamoto_block in nakamoto_msg.blocks.iter() { + self.relayer_stats + .add_relayed_message((*nk).clone(), nakamoto_block); + } + } + } + for (nk, txs) in network_result.pushed_transactions.iter() { for (_, tx) in txs.iter() { self.relayer_stats.add_relayed_message((*nk).clone(), tx); @@ -2618,3668 +3065,4 @@ impl PeerNetwork { } #[cfg(test)] -pub mod test { - use std::cell::RefCell; - use std::collections::HashMap; - - use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; - use clarity::vm::ast::ASTRules; - use clarity::vm::costs::LimitedCostTracker; - use clarity::vm::database::ClarityDatabase; - use clarity::vm::types::QualifiedContractIdentifier; - use clarity::vm::{ClarityVersion, MAX_CALL_STACK_DEPTH}; - use stacks_common::address::AddressHashMode; - use stacks_common::types::chainstate::{ - BlockHeaderHash, StacksBlockId, StacksWorkScore, TrieHash, - }; - use stacks_common::types::Address; - use stacks_common::util::hash::MerkleTree; - use stacks_common::util::sleep_ms; - use stacks_common::util::vrf::VRFProof; - - use super::*; - use crate::burnchains::tests::TestMiner; - use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; - use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; - use crate::chainstate::stacks::test::codec_all_transactions; - use crate::chainstate::stacks::tests::{ - make_coinbase, make_coinbase_with_nonce, make_smart_contract_with_version, - make_user_stacks_transfer, - }; - use crate::chainstate::stacks::{Error as ChainstateError, *}; - use crate::clarity_vm::clarity::ClarityConnection; - use crate::core::*; - use crate::net::api::getinfo::RPCPeerInfoData; - use crate::net::asn::*; - use crate::net::chat::*; - use crate::net::codec::*; - use crate::net::download::*; - use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; - use crate::net::httpcore::StacksHttpMessage; - use crate::net::inv::inv2x::*; - use crate::net::test::*; - use crate::net::tests::download::epoch2x::run_get_blocks_and_microblocks; - use crate::net::*; - use crate::util_lib::test::*; - - #[test] - fn test_relayer_stats_add_relyed_messages() { - let mut relay_stats = RelayerStats::new(); - - let all_transactions = codec_all_transactions( - &TransactionVersion::Testnet, - 0x80000000, - &TransactionAnchorMode::Any, - &TransactionPostConditionMode::Allow, - ); - assert!(all_transactions.len() > MAX_RECENT_MESSAGES); - - eprintln!("Test with {} transactions", all_transactions.len()); - - let nk = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54321, - }; - - // never overflow recent messages for a neighbor - for (i, tx) in all_transactions.iter().enumerate() { - relay_stats.add_relayed_message(nk.clone(), tx); - - assert_eq!(relay_stats.recent_messages.len(), 1); - assert!(relay_stats.recent_messages.get(&nk).unwrap().len() <= MAX_RECENT_MESSAGES); - - assert_eq!(relay_stats.recent_updates.len(), 1); - } - - assert_eq!( - relay_stats.recent_messages.get(&nk).unwrap().len(), - MAX_RECENT_MESSAGES - ); - - for i in (all_transactions.len() - MAX_RECENT_MESSAGES)..MAX_RECENT_MESSAGES { - let digest = all_transactions[i].get_digest(); - let mut found = false; - for (_, hash) in relay_stats.recent_messages.get(&nk).unwrap().iter() { - found = found || (*hash == digest); - } - if !found { - assert!(false); - } - } - - // never overflow number of neighbors tracked - for i in 0..(MAX_RELAYER_STATS + 1) { - let mut new_nk = nk.clone(); - new_nk.peer_version += i as u32; - - relay_stats.add_relayed_message(new_nk, &all_transactions[0]); - - assert!(relay_stats.recent_updates.len() <= i + 1); - assert!(relay_stats.recent_updates.len() <= MAX_RELAYER_STATS); - } - } - - #[test] - fn test_relayer_merge_stats() { - let mut relayer_stats = RelayerStats::new(); - - let na = NeighborAddress { - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54321, - public_key_hash: Hash160([0u8; 20]), - }; - - let relay_stats = RelayStats { - num_messages: 1, - num_bytes: 1, - last_seen: 1, - }; - - let mut rs = HashMap::new(); - rs.insert(na.clone(), relay_stats.clone()); - - relayer_stats.merge_relay_stats(rs); - assert_eq!(relayer_stats.relay_stats.len(), 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().last_seen, 1); - assert_eq!(relayer_stats.relay_updates.len(), 1); - - let now = get_epoch_time_secs() + 60; - - let relay_stats_2 = RelayStats { - num_messages: 2, - num_bytes: 2, - last_seen: now, - }; - - let mut rs = HashMap::new(); - rs.insert(na.clone(), relay_stats_2.clone()); - - relayer_stats.merge_relay_stats(rs); - assert_eq!(relayer_stats.relay_stats.len(), 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 3); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 3); - assert!( - relayer_stats.relay_stats.get(&na).unwrap().last_seen < now - && relayer_stats.relay_stats.get(&na).unwrap().last_seen >= get_epoch_time_secs() - ); - assert_eq!(relayer_stats.relay_updates.len(), 1); - - let relay_stats_3 = RelayStats { - num_messages: 3, - num_bytes: 3, - last_seen: 0, - }; - - let mut rs = HashMap::new(); - rs.insert(na.clone(), relay_stats_3.clone()); - - relayer_stats.merge_relay_stats(rs); - assert_eq!(relayer_stats.relay_stats.len(), 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 3); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 3); - assert!( - relayer_stats.relay_stats.get(&na).unwrap().last_seen < now - && relayer_stats.relay_stats.get(&na).unwrap().last_seen >= get_epoch_time_secs() - ); - assert_eq!(relayer_stats.relay_updates.len(), 1); - - for i in 0..(MAX_RELAYER_STATS + 1) { - let na = NeighborAddress { - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 14321 + (i as u16), - public_key_hash: Hash160([0u8; 20]), - }; - - let now = get_epoch_time_secs() + (i as u64) + 1; - - let relay_stats = RelayStats { - num_messages: 1, - num_bytes: 1, - last_seen: now, - }; - - let mut rs = HashMap::new(); - rs.insert(na.clone(), relay_stats.clone()); - - relayer_stats.merge_relay_stats(rs); - assert!(relayer_stats.relay_stats.len() <= MAX_RELAYER_STATS); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_messages, 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().num_bytes, 1); - assert_eq!(relayer_stats.relay_stats.get(&na).unwrap().last_seen, now); - } - } - - #[test] - fn test_relay_inbound_peer_rankings() { - let mut relay_stats = RelayerStats::new(); - - let all_transactions = codec_all_transactions( - &TransactionVersion::Testnet, - 0x80000000, - &TransactionAnchorMode::Any, - &TransactionPostConditionMode::Allow, - ); - assert!(all_transactions.len() > MAX_RECENT_MESSAGES); - - let nk_1 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54321, - }; - - let nk_2 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54322, - }; - - let nk_3 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1]), - port: 54323, - }; - - let dups = relay_stats.count_relay_dups(&all_transactions[0]); - assert_eq!(dups.len(), 0); - - relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_1.clone(), &all_transactions[0]); - - let dups = relay_stats.count_relay_dups(&all_transactions[0]); - assert_eq!(dups.len(), 1); - assert_eq!(*dups.get(&nk_1).unwrap(), 3); - - relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); - relay_stats.add_relayed_message(nk_2.clone(), &all_transactions[0]); - - let dups = relay_stats.count_relay_dups(&all_transactions[0]); - assert_eq!(dups.len(), 2); - assert_eq!(*dups.get(&nk_1).unwrap(), 3); - assert_eq!(*dups.get(&nk_2).unwrap(), 4); - - // total dups == 7 - let dist = relay_stats.get_inbound_relay_rankings( - &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], - &all_transactions[0], - 0, - ); - assert_eq!(*dist.get(&nk_1).unwrap(), 7 - 3 + 1); - assert_eq!(*dist.get(&nk_2).unwrap(), 7 - 4 + 1); - assert_eq!(*dist.get(&nk_3).unwrap(), 7 + 1); - - // high warmup period - let dist = relay_stats.get_inbound_relay_rankings( - &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], - &all_transactions[0], - 100, - ); - assert_eq!(*dist.get(&nk_1).unwrap(), 100 + 1); - assert_eq!(*dist.get(&nk_2).unwrap(), 100 + 1); - assert_eq!(*dist.get(&nk_3).unwrap(), 100 + 1); - } - - #[test] - fn test_relay_outbound_peer_rankings() { - let relay_stats = RelayerStats::new(); - - let asn1 = ASEntry4 { - prefix: 0x10000000, - mask: 8, - asn: 1, - org: 1, - }; - - let asn2 = ASEntry4 { - prefix: 0x20000000, - mask: 8, - asn: 2, - org: 2, - }; - - let nk_1 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x10, 0x11, 0x12, 0x13, - ]), - port: 54321, - }; - - let nk_2 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x20, 0x21, 0x22, 0x23, - ]), - port: 54322, - }; - - let nk_3 = NeighborKey { - peer_version: 12345, - network_id: 0x80000000, - addrbytes: PeerAddress([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x20, 0x21, 0x22, 0x24, - ]), - port: 54323, - }; - - let n1 = Neighbor { - addr: nk_1.clone(), - public_key: Secp256k1PublicKey::from_hex( - "0260569384baa726f877d47045931e5310383f18d0b243a9b6c095cee6ef19abd6", - ) - .unwrap(), - expire_block: 4302, - last_contact_time: 0, - allowed: 0, - denied: 0, - asn: 1, - org: 1, - in_degree: 0, - out_degree: 0, - }; - - let n2 = Neighbor { - addr: nk_2.clone(), - public_key: Secp256k1PublicKey::from_hex( - "02465f9ff58dfa8e844fec86fa5fc3fd59c75ea807e20d469b0a9f885d2891fbd4", - ) - .unwrap(), - expire_block: 4302, - last_contact_time: 0, - allowed: 0, - denied: 0, - asn: 2, - org: 2, - in_degree: 0, - out_degree: 0, - }; - - let n3 = Neighbor { - addr: nk_3.clone(), - public_key: Secp256k1PublicKey::from_hex( - "032d8a1ea2282c1514fdc1a6f21019561569d02a225cf7c14b4f803b0393cef031", - ) - .unwrap(), - expire_block: 4302, - last_contact_time: 0, - allowed: 0, - denied: 0, - asn: 2, - org: 2, - in_degree: 0, - out_degree: 0, - }; - - let peerdb = PeerDB::connect_memory( - 0x80000000, - 0, - 4032, - UrlString::try_from("http://foo.com").unwrap(), - &vec![asn1, asn2], - &vec![n1.clone(), n2.clone(), n3.clone()], - ) - .unwrap(); - - let asn_count = RelayerStats::count_ASNs( - peerdb.conn(), - &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()], - ) - .unwrap(); - assert_eq!(asn_count.len(), 3); - assert_eq!(*asn_count.get(&nk_1).unwrap(), 1); - assert_eq!(*asn_count.get(&nk_2).unwrap(), 2); - assert_eq!(*asn_count.get(&nk_3).unwrap(), 2); - - let ranking = relay_stats - .get_outbound_relay_rankings(&peerdb, &vec![nk_1.clone(), nk_2.clone(), nk_3.clone()]) - .unwrap(); - assert_eq!(ranking.len(), 3); - assert_eq!(*ranking.get(&nk_1).unwrap(), 5 - 1 + 1); - assert_eq!(*ranking.get(&nk_2).unwrap(), 5 - 2 + 1); - assert_eq!(*ranking.get(&nk_3).unwrap(), 5 - 2 + 1); - - let ranking = relay_stats - .get_outbound_relay_rankings(&peerdb, &vec![nk_2.clone(), nk_3.clone()]) - .unwrap(); - assert_eq!(ranking.len(), 2); - assert_eq!(*ranking.get(&nk_2).unwrap(), 4 - 2 + 1); - assert_eq!(*ranking.get(&nk_3).unwrap(), 4 - 2 + 1); - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_3_peers_push_available() { - with_timeout(600, || { - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_3_peers_push_available", - 4200, - 3, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 3); - - // peer 0 produces the blocks - peer_configs[0].connection_opts.disable_chat_neighbors = true; - - // peer 1 downloads the blocks from peer 0, and sends - // BlocksAvailable and MicroblocksAvailable messages to - // peer 2. - peer_configs[1].connection_opts.disable_chat_neighbors = true; - - // peer 2 learns about the blocks and microblocks from peer 1's - // BlocksAvaiable and MicroblocksAvailable messages, but - // not from inv syncs. - peer_configs[2].connection_opts.disable_chat_neighbors = true; - peer_configs[2].connection_opts.disable_inv_sync = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - peer_configs[2].connection_opts.disable_natpunch = true; - - // do not push blocks and microblocks; only announce them - peer_configs[0].connection_opts.disable_block_push = true; - peer_configs[1].connection_opts.disable_block_push = true; - peer_configs[2].connection_opts.disable_block_push = true; - - peer_configs[0].connection_opts.disable_microblock_push = true; - peer_configs[1].connection_opts.disable_microblock_push = true; - peer_configs[2].connection_opts.disable_microblock_push = true; - - // generous timeouts - peer_configs[0].connection_opts.connect_timeout = 180; - peer_configs[1].connection_opts.connect_timeout = 180; - peer_configs[2].connection_opts.connect_timeout = 180; - peer_configs[0].connection_opts.timeout = 180; - peer_configs[1].connection_opts.timeout = 180; - peer_configs[2].connection_opts.timeout = 180; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - let peer_2 = peer_configs[2].to_neighbor(); - - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - peer_configs[2].add_neighbor(&peer_1); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - // only produce blocks for a single reward - // cycle, since pushing block/microblock - // announcements in reward cycles the remote - // peer doesn't know about won't work. - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - - assert_eq!(block_data.len(), 5); - - block_data - }, - |ref mut peers| { - // make sure peer 2's inv has an entry for peer 1, even - // though it's not doing an inv sync. This is required for the downloader to - // work, and for (Micro)BlocksAvailable messages to be accepted - let peer_1_nk = peers[1].to_neighbor().addr; - let peer_2_nk = peers[2].to_neighbor().addr; - let bc = peers[1].config.burnchain.clone(); - match peers[2].network.inv_state { - Some(ref mut inv_state) => { - if inv_state.get_stats(&peer_1_nk).is_none() { - test_debug!("initialize inv statistics for peer 1 in peer 2"); - inv_state.add_peer(peer_1_nk.clone(), true); - if let Some(ref mut stats) = inv_state.get_stats_mut(&peer_1_nk) { - stats.scans = 1; - stats.inv.merge_pox_inv(&bc, 0, 6, vec![0xff], false); - stats.inv.merge_blocks_inv( - 0, - 30, - vec![0, 0, 0, 0, 0], - vec![0, 0, 0, 0, 0], - false, - ); - } else { - panic!("Unable to instantiate inv stats for {:?}", &peer_1_nk); - } - } else { - test_debug!("peer 2 has inv state for peer 1"); - } - } - None => { - test_debug!("No inv state for peer 1"); - } - } - - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - let peer_1_nk = peers[1].to_neighbor().addr; - match peers[2].network.inv_state { - Some(ref mut inv_state) => { - if inv_state.get_stats(&peer_1_nk).is_none() { - test_debug!("initialize inv statistics for peer 1 in peer 2"); - inv_state.add_peer(peer_1_nk.clone(), true); - - inv_state - .get_stats_mut(&peer_1_nk) - .unwrap() - .inv - .num_reward_cycles = this_reward_cycle; - inv_state.get_stats_mut(&peer_1_nk).unwrap().inv.pox_inv = - vec![0x3f]; - } else { - test_debug!("peer 2 has inv state for peer 1"); - } - } - None => { - test_debug!("No inv state for peer 2"); - } - } - - // peer 2 should never see a BlocksInv - // message. That would imply it asked for an inv - for (_, convo) in peers[2].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::BlocksInv), - 0 - ); - } - }, - |ref peer| { - // check peer health - // TODO - true - }, - |_| true, - ); - }) - } - - fn is_peer_connected(peer: &TestPeer, dest: &NeighborKey) -> bool { - let event_id = match peer.network.events.get(dest) { - Some(evid) => *evid, - None => { - return false; - } - }; - - match peer.network.peers.get(&event_id) { - Some(convo) => { - return convo.is_authenticated(); - } - None => { - return false; - } - } - } - - fn push_message( - peer: &mut TestPeer, - dest: &NeighborKey, - relay_hints: Vec, - msg: StacksMessageType, - ) -> bool { - let event_id = match peer.network.events.get(dest) { - Some(evid) => *evid, - None => { - panic!("Unreachable peer: {:?}", dest); - } - }; - - let relay_msg = match peer.network.peers.get_mut(&event_id) { - Some(convo) => convo - .sign_relay_message( - &peer.network.local_peer, - &peer.network.chain_view, - relay_hints, - msg, - ) - .unwrap(), - None => { - panic!("No such event ID {} from neighbor {}", event_id, dest); - } - }; - - match peer.network.relay_signed_message(dest, relay_msg.clone()) { - Ok(_) => { - return true; - } - Err(net_error::OutboxOverflow) => { - test_debug!( - "{:?} outbox overflow; try again later", - &peer.to_neighbor().addr - ); - return false; - } - Err(net_error::SendError(msg)) => { - warn!( - "Failed to send to {:?}: SendError({})", - &peer.to_neighbor().addr, - msg - ); - return false; - } - Err(e) => { - test_debug!( - "{:?} encountered fatal error when forwarding: {:?}", - &peer.to_neighbor().addr, - &e - ); - assert!(false); - unreachable!(); - } - } - } - - fn http_rpc( - peer_http: u16, - request: StacksHttpRequest, - ) -> Result { - use std::net::TcpStream; - - let mut sock = TcpStream::connect( - &format!("127.0.0.1:{}", peer_http) - .parse::() - .unwrap(), - ) - .unwrap(); - - let request_bytes = request.try_serialize().unwrap(); - match sock.write_all(&request_bytes) { - Ok(_) => {} - Err(e) => { - test_debug!("Client failed to write: {:?}", &e); - return Err(net_error::WriteError(e)); - } - } - - let mut resp = vec![]; - match sock.read_to_end(&mut resp) { - Ok(_) => { - if resp.len() == 0 { - test_debug!("Client did not receive any data"); - return Err(net_error::PermanentlyDrained); - } - } - Err(e) => { - test_debug!("Client failed to read: {:?}", &e); - return Err(net_error::ReadError(e)); - } - } - - test_debug!("Client received {} bytes", resp.len()); - let response = StacksHttp::parse_response( - &request.preamble().verb, - &request.preamble().path_and_query_str, - &resp, - ) - .unwrap(); - match response { - StacksHttpMessage::Response(x) => Ok(x), - _ => { - panic!("Did not receive a Response"); - } - } - } - - fn broadcast_message( - broadcaster: &mut TestPeer, - relay_hints: Vec, - msg: StacksMessageType, - ) -> bool { - let request = NetworkRequest::Broadcast(relay_hints, msg); - match broadcaster.network.dispatch_request(request) { - Ok(_) => true, - Err(e) => { - error!("Failed to broadcast: {:?}", &e); - false - } - } - } - - fn push_block( - peer: &mut TestPeer, - dest: &NeighborKey, - relay_hints: Vec, - consensus_hash: ConsensusHash, - block: StacksBlock, - ) -> bool { - test_debug!( - "{:?}: Push block {}/{} to {:?}", - peer.to_neighbor().addr, - &consensus_hash, - block.block_hash(), - dest - ); - - let sn = SortitionDB::get_block_snapshot_consensus( - peer.sortdb.as_ref().unwrap().conn(), - &consensus_hash, - ) - .unwrap() - .unwrap(); - let consensus_hash = sn.consensus_hash; - - let msg = StacksMessageType::Blocks(BlocksData { - blocks: vec![BlocksDatum(consensus_hash, block)], - }); - push_message(peer, dest, relay_hints, msg) - } - - fn broadcast_block( - peer: &mut TestPeer, - relay_hints: Vec, - consensus_hash: ConsensusHash, - block: StacksBlock, - ) -> bool { - test_debug!( - "{:?}: Broadcast block {}/{}", - peer.to_neighbor().addr, - &consensus_hash, - block.block_hash(), - ); - - let sn = SortitionDB::get_block_snapshot_consensus( - peer.sortdb.as_ref().unwrap().conn(), - &consensus_hash, - ) - .unwrap() - .unwrap(); - let consensus_hash = sn.consensus_hash; - - let msg = StacksMessageType::Blocks(BlocksData { - blocks: vec![BlocksDatum(consensus_hash, block)], - }); - broadcast_message(peer, relay_hints, msg) - } - - fn push_microblocks( - peer: &mut TestPeer, - dest: &NeighborKey, - relay_hints: Vec, - consensus_hash: ConsensusHash, - block_hash: BlockHeaderHash, - microblocks: Vec, - ) -> bool { - test_debug!( - "{:?}: Push {} microblocksblock {}/{} to {:?}", - peer.to_neighbor().addr, - microblocks.len(), - &consensus_hash, - &block_hash, - dest - ); - let msg = StacksMessageType::Microblocks(MicroblocksData { - index_anchor_block: StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block_hash, - ), - microblocks: microblocks, - }); - push_message(peer, dest, relay_hints, msg) - } - - fn broadcast_microblocks( - peer: &mut TestPeer, - relay_hints: Vec, - consensus_hash: ConsensusHash, - block_hash: BlockHeaderHash, - microblocks: Vec, - ) -> bool { - test_debug!( - "{:?}: broadcast {} microblocksblock {}/{}", - peer.to_neighbor().addr, - microblocks.len(), - &consensus_hash, - &block_hash, - ); - let msg = StacksMessageType::Microblocks(MicroblocksData { - index_anchor_block: StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &block_hash, - ), - microblocks: microblocks, - }); - broadcast_message(peer, relay_hints, msg) - } - - fn push_transaction( - peer: &mut TestPeer, - dest: &NeighborKey, - relay_hints: Vec, - tx: StacksTransaction, - ) -> bool { - test_debug!( - "{:?}: Push tx {} to {:?}", - peer.to_neighbor().addr, - tx.txid(), - dest - ); - let msg = StacksMessageType::Transaction(tx); - push_message(peer, dest, relay_hints, msg) - } - - fn broadcast_transaction( - peer: &mut TestPeer, - relay_hints: Vec, - tx: StacksTransaction, - ) -> bool { - test_debug!("{:?}: broadcast tx {}", peer.to_neighbor().addr, tx.txid(),); - let msg = StacksMessageType::Transaction(tx); - broadcast_message(peer, relay_hints, msg) - } - - fn http_get_info(http_port: u16) -> RPCPeerInfoData { - let mut request = HttpRequestPreamble::new_for_peer( - PeerHost::from_host_port("127.0.0.1".to_string(), http_port), - "GET".to_string(), - "/v2/info".to_string(), - ); - request.keep_alive = false; - let getinfo = StacksHttpRequest::new(request, HttpRequestContents::new()); - let response = http_rpc(http_port, getinfo).unwrap(); - let peer_info = response.decode_peer_info().unwrap(); - peer_info - } - - fn http_post_block( - http_port: u16, - consensus_hash: &ConsensusHash, - block: &StacksBlock, - ) -> bool { - test_debug!( - "upload block {}/{} to localhost:{}", - consensus_hash, - block.block_hash(), - http_port - ); - let mut request = HttpRequestPreamble::new_for_peer( - PeerHost::from_host_port("127.0.0.1".to_string(), http_port), - "POST".to_string(), - "/v2/blocks".to_string(), - ); - request.keep_alive = false; - let post_block = - StacksHttpRequest::new(request, HttpRequestContents::new().payload_stacks(block)); - - let response = http_rpc(http_port, post_block).unwrap(); - let accepted = response.decode_stacks_block_accepted().unwrap(); - accepted.accepted - } - - fn http_post_microblock( - http_port: u16, - consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - mblock: &StacksMicroblock, - ) -> bool { - test_debug!( - "upload microblock {}/{}-{} to localhost:{}", - consensus_hash, - block_hash, - mblock.block_hash(), - http_port - ); - let mut request = HttpRequestPreamble::new_for_peer( - PeerHost::from_host_port("127.0.0.1".to_string(), http_port), - "POST".to_string(), - "/v2/microblocks".to_string(), - ); - request.keep_alive = false; - let tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); - let post_microblock = StacksHttpRequest::new( - request, - HttpRequestContents::new() - .payload_stacks(mblock) - .for_specific_tip(tip), - ); - - let response = http_rpc(http_port, post_microblock).unwrap(); - let payload = response.get_http_payload_ok().unwrap(); - let bhh: BlockHeaderHash = serde_json::from_value(payload.try_into().unwrap()).unwrap(); - return true; - } - - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks( - outbound_test: bool, - disable_push: bool, - ) { - with_timeout(600, move || { - let original_blocks_and_microblocks = RefCell::new(vec![]); - let blocks_and_microblocks = RefCell::new(vec![]); - let idx = RefCell::new(0); - let sent_blocks = RefCell::new(false); - let sent_microblocks = RefCell::new(false); - - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks", - 4210, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 produces the blocks and pushes them to peer 1 - // peer 1 receives the blocks and microblocks. It - // doesn't download them, nor does it try to get invs - peer_configs[0].connection_opts.disable_block_advertisement = true; - - peer_configs[1].connection_opts.disable_inv_sync = true; - peer_configs[1].connection_opts.disable_block_download = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - // force usage of blocksavailable/microblocksavailable? - if disable_push { - peer_configs[0].connection_opts.disable_block_push = true; - peer_configs[0].connection_opts.disable_microblock_push = true; - peer_configs[1].connection_opts.disable_block_push = true; - peer_configs[1].connection_opts.disable_microblock_push = true; - } - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - - peer_configs[0].add_neighbor(&peer_1); - - if outbound_test { - // neighbor relationship is symmetric -- peer 1 has an outbound connection - // to peer 0. - peer_configs[1].add_neighbor(&peer_0); - } - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - let saved_copy: Vec<(ConsensusHash, StacksBlock, Vec)> = - block_data - .clone() - .drain(..) - .map(|(ch, blk_opt, mblocks_opt)| { - (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) - }) - .collect(); - *blocks_and_microblocks.borrow_mut() = saved_copy.clone(); - *original_blocks_and_microblocks.borrow_mut() = saved_copy; - block_data - }, - |ref mut peers| { - if !disable_push { - for peer in peers.iter_mut() { - // force peers to keep trying to process buffered data - peer.network.burnchain_tip.burn_header_hash = - BurnchainHeaderHash([0u8; 32]); - } - } - - // make sure peer 1's inv has an entry for peer 0, even - // though it's not doing an inv sync. This is required for the downloader to - // work - let peer_0_nk = peers[0].to_neighbor().addr; - let peer_1_nk = peers[1].to_neighbor().addr; - match peers[1].network.inv_state { - Some(ref mut inv_state) => { - if inv_state.get_stats(&peer_0_nk).is_none() { - test_debug!("initialize inv statistics for peer 0 in peer 1"); - inv_state.add_peer(peer_0_nk.clone(), true); - } else { - test_debug!("peer 1 has inv state for peer 0"); - } - } - None => { - test_debug!("No inv state for peer 1"); - } - } - - if is_peer_connected(&peers[0], &peer_1_nk) { - // randomly push a block and/or microblocks to peer 1. - let mut block_data = blocks_and_microblocks.borrow_mut(); - let original_block_data = original_blocks_and_microblocks.borrow(); - let mut next_idx = idx.borrow_mut(); - let data_to_push = { - if block_data.len() > 0 { - let (consensus_hash, block, microblocks) = - block_data[*next_idx].clone(); - Some((consensus_hash, block, microblocks)) - } else { - // start over (can happen if a message gets - // dropped due to a timeout) - test_debug!("Reset block transmission (possible timeout)"); - *block_data = (*original_block_data).clone(); - *next_idx = thread_rng().gen::() % block_data.len(); - let (consensus_hash, block, microblocks) = - block_data[*next_idx].clone(); - Some((consensus_hash, block, microblocks)) - } - }; - - if let Some((consensus_hash, block, microblocks)) = data_to_push { - test_debug!( - "Push block {}/{} and microblocks", - &consensus_hash, - block.block_hash() - ); - - let block_hash = block.block_hash(); - let mut sent_blocks = sent_blocks.borrow_mut(); - let mut sent_microblocks = sent_microblocks.borrow_mut(); - - let pushed_block = if !*sent_blocks { - push_block( - &mut peers[0], - &peer_1_nk, - vec![], - consensus_hash.clone(), - block, - ) - } else { - true - }; - - *sent_blocks = pushed_block; - - if pushed_block { - let pushed_microblock = if !*sent_microblocks { - push_microblocks( - &mut peers[0], - &peer_1_nk, - vec![], - consensus_hash, - block_hash, - microblocks, - ) - } else { - true - }; - - *sent_microblocks = pushed_microblock; - - if pushed_block && pushed_microblock { - block_data.remove(*next_idx); - if block_data.len() > 0 { - *next_idx = thread_rng().gen::() % block_data.len(); - } - *sent_blocks = false; - *sent_microblocks = false; - } - } - test_debug!("{} blocks/microblocks remaining", block_data.len()); - } - } - - // peer 0 should never see a GetBlocksInv message. - // peer 1 should never see a BlocksInv message - for (_, convo) in peers[0].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::GetBlocksInv), - 0 - ); - } - for (_, convo) in peers[1].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::BlocksInv), - 0 - ); - } - }, - |ref peer| { - // check peer health - // nothing should break - // TODO - true - }, - |_| true, - ); - }) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound() { - // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. - // nodes rely on blocksavailable/microblocksavailable to discover blocks - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, true) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound() { - // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT - // nodes rely on blocksavailable/microblocksavailable to discover blocks - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, true) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_outbound_direct() { - // simulates node 0 pushing blocks to node 1, but node 0 is publicly routable. - // nodes may push blocks and microblocks directly to each other - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(true, false) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks_inbound_direct() { - // simulates node 0 pushing blocks to node 1, where node 0 is behind a NAT - // nodes may push blocks and microblocks directly to each other - test_get_blocks_and_microblocks_2_peers_push_blocks_and_microblocks(false, false) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_upload_blocks_http() { - with_timeout(600, || { - let (port_sx, port_rx) = std::sync::mpsc::sync_channel(1); - let (block_sx, block_rx) = std::sync::mpsc::sync_channel(1); - - std::thread::spawn(move || loop { - eprintln!("Get port"); - let remote_port: u16 = port_rx.recv().unwrap(); - eprintln!("Got port {}", remote_port); - - eprintln!("Send getinfo"); - let peer_info = http_get_info(remote_port); - eprintln!("Got getinfo! {:?}", &peer_info); - let idx = peer_info.stacks_tip_height as usize; - - eprintln!("Get blocks and microblocks"); - let blocks_and_microblocks: Vec<( - ConsensusHash, - Option, - Option>, - )> = block_rx.recv().unwrap(); - eprintln!("Got blocks and microblocks!"); - - if idx >= blocks_and_microblocks.len() { - eprintln!("Out of blocks to send!"); - return; - } - - eprintln!( - "Upload block {}", - &blocks_and_microblocks[idx].1.as_ref().unwrap().block_hash() - ); - http_post_block( - remote_port, - &blocks_and_microblocks[idx].0, - blocks_and_microblocks[idx].1.as_ref().unwrap(), - ); - for mblock in blocks_and_microblocks[idx].2.as_ref().unwrap().iter() { - eprintln!("Upload microblock {}", mblock.block_hash()); - http_post_microblock( - remote_port, - &blocks_and_microblocks[idx].0, - &blocks_and_microblocks[idx].1.as_ref().unwrap().block_hash(), - mblock, - ); - } - }); - - let original_blocks_and_microblocks = RefCell::new(vec![]); - let port_sx_cell = RefCell::new(port_sx); - let block_sx_cell = RefCell::new(block_sx); - - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_upload_blocks_http", - 4250, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 produces the blocks - peer_configs[0].connection_opts.disable_chat_neighbors = true; - - // peer 0 sends them to peer 1 - peer_configs[1].connection_opts.disable_chat_neighbors = true; - peer_configs[1].connection_opts.disable_inv_sync = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - // generous timeouts - peer_configs[0].connection_opts.timeout = 180; - peer_configs[1].connection_opts.timeout = 180; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - // only produce blocks for a single reward - // cycle, since pushing block/microblock - // announcements in reward cycles the remote - // peer doesn't know about won't work. - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - - assert_eq!(block_data.len(), 5); - - *original_blocks_and_microblocks.borrow_mut() = block_data.clone(); - - block_data - }, - |ref mut peers| { - let blocks_and_microblocks = original_blocks_and_microblocks.borrow().clone(); - let remote_port = peers[1].config.http_port; - - let port_sx = port_sx_cell.borrow_mut(); - let block_sx = block_sx_cell.borrow_mut(); - - let _ = (*port_sx).try_send(remote_port); - let _ = (*block_sx).try_send(blocks_and_microblocks); - }, - |ref peer| { - // check peer health - // TODO - true - }, - |_| true, - ); - }) - } - - fn make_test_smart_contract_transaction( - peer: &mut TestPeer, - name: &str, - consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - ) -> StacksTransaction { - // make a smart contract - let contract = " - (define-data-var bar int 0) - (define-public (get-bar) (ok (var-get bar))) - (define-public (set-bar (x int) (y int)) - (begin (var-set bar (/ x y)) (ok (var-get bar))))"; - - let cost_limits = peer.config.connection_opts.read_only_call_limit.clone(); - - let tx_contract = peer - .with_mining_state( - |ref mut sortdb, ref mut miner, ref mut spending_account, ref mut stacks_node| { - let mut tx_contract = StacksTransaction::new( - TransactionVersion::Testnet, - spending_account.as_transaction_auth().unwrap().into(), - TransactionPayload::new_smart_contract( - &name.to_string(), - &contract.to_string(), - None, - ) - .unwrap(), - ); - - let chain_tip = - StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); - let cur_nonce = stacks_node - .chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_nonce( - &spending_account.origin_address().unwrap().into(), - ) - .unwrap() - }) - }) - .unwrap(); - - test_debug!( - "Nonce of {:?} is {} at {}/{}", - &spending_account.origin_address().unwrap(), - cur_nonce, - consensus_hash, - block_hash - ); - - // spending_account.set_nonce(cur_nonce + 1); - - tx_contract.chain_id = 0x80000000; - tx_contract.auth.set_origin_nonce(cur_nonce); - tx_contract.set_tx_fee(MINIMUM_TX_FEE_RATE_PER_BYTE * 500); - - let mut tx_signer = StacksTransactionSigner::new(&tx_contract); - spending_account.sign_as_origin(&mut tx_signer); - - let tx_contract_signed = tx_signer.get_tx().unwrap(); - - test_debug!( - "make transaction {:?} off of {:?}/{:?}: {:?}", - &tx_contract_signed.txid(), - consensus_hash, - block_hash, - &tx_contract_signed - ); - - Ok(tx_contract_signed) - }, - ) - .unwrap(); - - tx_contract - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_push_transactions() { - with_timeout(600, || { - let blocks_and_microblocks = RefCell::new(vec![]); - let blocks_idx = RefCell::new(0); - let sent_txs = RefCell::new(vec![]); - let done = RefCell::new(false); - - let peers = run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_push_transactions", - 4220, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 generates blocks and microblocks, and pushes - // them to peer 1. Peer 0 also generates transactions - // and pushes them to peer 1. - peer_configs[0].connection_opts.disable_block_advertisement = true; - - // let peer 0 drive this test, as before, by controlling - // when peer 1 sees blocks. - peer_configs[1].connection_opts.disable_inv_sync = true; - peer_configs[1].connection_opts.disable_block_download = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - peer_configs[0].connection_opts.outbox_maxlen = 100; - peer_configs[1].connection_opts.inbox_maxlen = 100; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - let initial_balances = vec![ - ( - PrincipalData::from( - peer_configs[0].spending_account.origin_address().unwrap(), - ), - 1000000, - ), - ( - PrincipalData::from( - peer_configs[1].spending_account.origin_address().unwrap(), - ), - 1000000, - ), - ]; - - peer_configs[0].initial_balances = initial_balances.clone(); - peer_configs[1].initial_balances = initial_balances.clone(); - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for b in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - if b == 0 { - // prime with first block - peers[i].process_stacks_epoch_at_tip(&stacks_block, &vec![]); - } - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - *blocks_and_microblocks.borrow_mut() = block_data - .clone() - .drain(..) - .map(|(ch, blk_opt, mblocks_opt)| { - (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) - }) - .collect(); - block_data - }, - |ref mut peers| { - let peer_0_nk = peers[0].to_neighbor().addr; - let peer_1_nk = peers[1].to_neighbor().addr; - - // peers must be connected to each other - let mut peer_0_to_1 = false; - let mut peer_1_to_0 = false; - for (nk, event_id) in peers[0].network.events.iter() { - match peers[0].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_1_nk { - peer_0_to_1 = true; - } - } - None => {} - } - } - for (nk, event_id) in peers[1].network.events.iter() { - match peers[1].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_0_nk { - peer_1_to_0 = true; - } - } - None => {} - } - } - - if !peer_0_to_1 || !peer_1_to_0 { - test_debug!( - "Peers not bi-directionally connected: 0->1 = {}, 1->0 = {}", - peer_0_to_1, - peer_1_to_0 - ); - return; - } - - // make sure peer 2's inv has an entry for peer 1, even - // though it's not doing an inv sync. - match peers[1].network.inv_state { - Some(ref mut inv_state) => { - if inv_state.get_stats(&peer_0_nk).is_none() { - test_debug!("initialize inv statistics for peer 0 in peer 1"); - inv_state.add_peer(peer_0_nk, true); - } else { - test_debug!("peer 1 has inv state for peer 0"); - } - } - None => { - test_debug!("No inv state for peer 1"); - } - } - - let done_flag = *done.borrow(); - if is_peer_connected(&peers[0], &peer_1_nk) { - // only submit the next transaction if the previous - // one is accepted - let has_last_transaction = { - let expected_txs: std::cell::Ref<'_, Vec> = - sent_txs.borrow(); - if let Some(tx) = (*expected_txs).last() { - let txid = tx.txid(); - if !peers[1].mempool.as_ref().unwrap().has_tx(&txid) { - debug!("Peer 1 still waiting for transaction {}", &txid); - push_transaction( - &mut peers[0], - &peer_1_nk, - vec![], - (*tx).clone(), - ); - false - } else { - true - } - } else { - true - } - }; - - if has_last_transaction { - // push blocks and microblocks in order, and push a - // transaction that can only be validated once the - // block and microblocks are processed. - let ( - ( - block_consensus_hash, - block, - microblocks_consensus_hash, - microblocks_block_hash, - microblocks, - ), - idx, - ) = { - let block_data = blocks_and_microblocks.borrow(); - let mut idx = blocks_idx.borrow_mut(); - - let microblocks = block_data[*idx].2.clone(); - let microblocks_consensus_hash = block_data[*idx].0.clone(); - let microblocks_block_hash = block_data[*idx].1.block_hash(); - - *idx += 1; - if *idx >= block_data.len() { - *idx = 1; - } - - let block = block_data[*idx].1.clone(); - let block_consensus_hash = block_data[*idx].0.clone(); - ( - ( - block_consensus_hash, - block, - microblocks_consensus_hash, - microblocks_block_hash, - microblocks, - ), - *idx, - ) - }; - - if !done_flag { - test_debug!( - "Push microblocks built by {}/{} (idx={})", - µblocks_consensus_hash, - µblocks_block_hash, - idx - ); - - let block_hash = block.block_hash(); - push_microblocks( - &mut peers[0], - &peer_1_nk, - vec![], - microblocks_consensus_hash, - microblocks_block_hash, - microblocks, - ); - - test_debug!( - "Push block {}/{} and microblocks (idx = {})", - &block_consensus_hash, - block.block_hash(), - idx - ); - push_block( - &mut peers[0], - &peer_1_nk, - vec![], - block_consensus_hash.clone(), - block, - ); - - // create a transaction against the resulting - // (anchored) chain tip - let tx = make_test_smart_contract_transaction( - &mut peers[0], - &format!("test-contract-{}", &block_hash.to_hex()[0..10]), - &block_consensus_hash, - &block_hash, - ); - - // push or post - push_transaction(&mut peers[0], &peer_1_nk, vec![], tx.clone()); - - let mut expected_txs = sent_txs.borrow_mut(); - expected_txs.push(tx); - } else { - test_debug!("Done pushing data"); - } - } - } - - // peer 0 should never see a GetBlocksInv message. - // peer 1 should never see a BlocksInv message - for (_, convo) in peers[0].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::GetBlocksInv), - 0 - ); - } - for (_, convo) in peers[1].network.peers.iter() { - assert_eq!( - convo - .stats - .get_message_recv_count(StacksMessageID::BlocksInv), - 0 - ); - } - }, - |ref peer| { - // check peer health - // nothing should break - // TODO - true - }, - |ref mut peers| { - // all blocks downloaded. only stop if peer 1 has - // all the transactions - let mut done_flag = done.borrow_mut(); - *done_flag = true; - - let txs = - MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); - test_debug!("Peer 1 has {} txs", txs.len()); - txs.len() == sent_txs.borrow().len() - }, - ); - - // peer 1 should have all the transactions - let blocks_and_microblocks = blocks_and_microblocks.into_inner(); - - let txs = MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()).unwrap(); - let expected_txs = sent_txs.into_inner(); - for tx in txs.iter() { - let mut found = false; - for expected_tx in expected_txs.iter() { - if tx.tx.txid() == expected_tx.txid() { - found = true; - break; - } - } - if !found { - panic!("Transaction not found: {:?}", &tx.tx); - } - } - - // peer 1 should have 1 tx per chain tip - for ((consensus_hash, block, _), sent_tx) in - blocks_and_microblocks.iter().zip(expected_txs.iter()) - { - let block_hash = block.block_hash(); - let tx_infos = MemPoolDB::get_txs_after( - peers[1].mempool.as_ref().unwrap().conn(), - consensus_hash, - &block_hash, - 0, - 1000, - ) - .unwrap(); - test_debug!( - "Check {}/{} (height {}): expect {}", - &consensus_hash, - &block_hash, - block.header.total_work.work, - &sent_tx.txid() - ); - assert_eq!(tx_infos.len(), 1); - assert_eq!(tx_infos[0].tx.txid(), sent_tx.txid()); - } - }) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_peers_broadcast() { - with_timeout(600, || { - let blocks_and_microblocks = RefCell::new(vec![]); - let blocks_idx = RefCell::new(0); - let sent_txs = RefCell::new(vec![]); - let done = RefCell::new(false); - let num_peers = 3; - let privk = StacksPrivateKey::new(); - - let peers = run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_peers_broadcast", - 4230, - num_peers, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), num_peers); - - // peer 0 generates blocks and microblocks, and pushes - // them to peers 1..n. Peer 0 also generates transactions - // and broadcasts them to the network. - - peer_configs[0].connection_opts.disable_inv_sync = true; - peer_configs[0].connection_opts.disable_inv_chat = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state. - for i in 0..peer_configs.len() { - peer_configs[i].connection_opts.disable_natpunch = true; - peer_configs[i].connection_opts.disable_network_prune = true; - peer_configs[i].connection_opts.timeout = 600; - peer_configs[i].connection_opts.connect_timeout = 600; - - // do one walk - peer_configs[i].connection_opts.num_initial_walks = 0; - peer_configs[i].connection_opts.walk_retry_count = 0; - peer_configs[i].connection_opts.walk_interval = 600; - - // don't throttle downloads - peer_configs[i].connection_opts.download_interval = 0; - peer_configs[i].connection_opts.inv_sync_interval = 0; - - let max_inflight = peer_configs[i].connection_opts.max_inflight_blocks; - peer_configs[i].connection_opts.max_clients_per_host = - ((num_peers + 1) as u64) * max_inflight; - peer_configs[i].connection_opts.soft_max_clients_per_host = - ((num_peers + 1) as u64) * max_inflight; - peer_configs[i].connection_opts.num_neighbors = (num_peers + 1) as u64; - peer_configs[i].connection_opts.soft_num_neighbors = (num_peers + 1) as u64; - } - - let initial_balances = vec![( - PrincipalData::from( - peer_configs[0].spending_account.origin_address().unwrap(), - ), - 1000000, - )]; - - for i in 0..peer_configs.len() { - peer_configs[i].initial_balances = initial_balances.clone(); - } - - // connectivity - let peer_0 = peer_configs[0].to_neighbor(); - for i in 1..peer_configs.len() { - peer_configs[i].add_neighbor(&peer_0); - let peer_i = peer_configs[i].to_neighbor(); - peer_configs[0].add_neighbor(&peer_i); - } - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - *blocks_and_microblocks.borrow_mut() = block_data - .clone() - .drain(..) - .map(|(ch, blk_opt, mblocks_opt)| { - (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) - }) - .collect(); - block_data - }, - |ref mut peers| { - for peer in peers.iter_mut() { - // force peers to keep trying to process buffered data - peer.network.burnchain_tip.burn_header_hash = - BurnchainHeaderHash([0u8; 32]); - } - - let done_flag = *done.borrow(); - - let mut connectivity_0_to_n = HashSet::new(); - let mut connectivity_n_to_0 = HashSet::new(); - - let peer_0_nk = peers[0].to_neighbor().addr; - - for (nk, event_id) in peers[0].network.events.iter() { - if let Some(convo) = peers[0].network.peers.get(event_id) { - if convo.is_authenticated() { - connectivity_0_to_n.insert(nk.clone()); - } - } - } - for i in 1..peers.len() { - for (nk, event_id) in peers[i].network.events.iter() { - if *nk != peer_0_nk { - continue; - } - - if let Some(convo) = peers[i].network.peers.get(event_id) { - if convo.is_authenticated() { - if let Some(inv_state) = &peers[i].network.inv_state { - if let Some(inv_stats) = - inv_state.block_stats.get(&peer_0_nk) - { - if inv_stats.inv.num_reward_cycles >= 5 { - connectivity_n_to_0 - .insert(peers[i].to_neighbor().addr); - } - } - } - } - } - } - } - - if connectivity_0_to_n.len() < peers.len() - 1 - || connectivity_n_to_0.len() < peers.len() - 1 - { - test_debug!( - "Network not connected: 0 --> N = {}, N --> 0 = {}", - connectivity_0_to_n.len(), - connectivity_n_to_0.len() - ); - return; - } - - let ((tip_consensus_hash, tip_block, _), idx) = { - let block_data = blocks_and_microblocks.borrow(); - let idx = blocks_idx.borrow(); - (block_data[(*idx as usize).saturating_sub(1)].clone(), *idx) - }; - - if idx > 0 { - let mut caught_up = true; - for i in 1..peers.len() { - peers[i] - .with_db_state(|sortdb, chainstate, relayer, mempool| { - let (canonical_consensus_hash, canonical_block_hash) = - SortitionDB::get_canonical_stacks_chain_tip_hash( - sortdb.conn(), - ) - .unwrap(); - - if canonical_consensus_hash != tip_consensus_hash - || canonical_block_hash != tip_block.block_hash() - { - debug!( - "Peer {} is not caught up yet (at {}/{}, need {}/{})", - i + 1, - &canonical_consensus_hash, - &canonical_block_hash, - &tip_consensus_hash, - &tip_block.block_hash() - ); - caught_up = false; - } - Ok(()) - }) - .unwrap(); - } - if !caught_up { - return; - } - } - - // caught up! - // find next block - let ((consensus_hash, block, microblocks), idx) = { - let block_data = blocks_and_microblocks.borrow(); - let mut idx = blocks_idx.borrow_mut(); - if *idx >= block_data.len() { - test_debug!("Out of blocks and microblocks to push"); - return; - } - - let ret = block_data[*idx].clone(); - *idx += 1; - (ret, *idx) - }; - - if !done_flag { - test_debug!( - "Broadcast block {}/{} and microblocks (idx = {})", - &consensus_hash, - block.block_hash(), - idx - ); - - let block_hash = block.block_hash(); - - // create a transaction against the current - // (anchored) chain tip - let tx = make_test_smart_contract_transaction( - &mut peers[0], - &format!("test-contract-{}", &block_hash.to_hex()[0..10]), - &tip_consensus_hash, - &tip_block.block_hash(), - ); - - let mut expected_txs = sent_txs.borrow_mut(); - expected_txs.push(tx.clone()); - - test_debug!( - "Broadcast {}/{} and its microblocks", - &consensus_hash, - &block.block_hash() - ); - // next block - broadcast_block(&mut peers[0], vec![], consensus_hash.clone(), block); - broadcast_microblocks( - &mut peers[0], - vec![], - consensus_hash, - block_hash, - microblocks, - ); - - // NOTE: first transaction will be dropped since the other nodes haven't - // processed the first-ever Stacks block when their relayer code gets - // around to considering it. - broadcast_transaction(&mut peers[0], vec![], tx); - } else { - test_debug!("Done pushing data"); - } - }, - |ref peer| { - // check peer health -- no message errors - // (i.e. no relay cycles) - for (_, convo) in peer.network.peers.iter() { - assert_eq!(convo.stats.msgs_err, 0); - } - true - }, - |ref mut peers| { - // all blocks downloaded. only stop if peer 1 has - // all the transactions - let mut done_flag = done.borrow_mut(); - *done_flag = true; - - let mut ret = true; - for i in 1..peers.len() { - let txs = MemPoolDB::get_all_txs(peers[1].mempool.as_ref().unwrap().conn()) - .unwrap(); - test_debug!("Peer {} has {} txs", i + 1, txs.len()); - ret = ret && txs.len() == sent_txs.borrow().len() - 1; - } - ret - }, - ); - - // peers 1..n should have all the transactions - let blocks_and_microblocks = blocks_and_microblocks.into_inner(); - let expected_txs = sent_txs.into_inner(); - - for i in 1..peers.len() { - let txs = - MemPoolDB::get_all_txs(peers[i].mempool.as_ref().unwrap().conn()).unwrap(); - for tx in txs.iter() { - let mut found = false; - for expected_tx in expected_txs.iter() { - if tx.tx.txid() == expected_tx.txid() { - found = true; - break; - } - } - if !found { - panic!("Transaction not found: {:?}", &tx.tx); - } - } - - // peers 1..n should have 1 tx per chain tip (except for the first block) - for ((consensus_hash, block, _), sent_tx) in - blocks_and_microblocks.iter().zip(expected_txs[1..].iter()) - { - let block_hash = block.block_hash(); - let tx_infos = MemPoolDB::get_txs_after( - peers[i].mempool.as_ref().unwrap().conn(), - consensus_hash, - &block_hash, - 0, - 1000, - ) - .unwrap(); - assert_eq!(tx_infos.len(), 1); - assert_eq!(tx_infos[0].tx.txid(), sent_tx.txid()); - } - } - }) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_antientropy() { - with_timeout(600, move || { - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_antientropy", - 4240, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 mines blocks, but does not advertize them nor announce them as - // available via its inventory. It only uses its anti-entropy protocol to - // discover that peer 1 doesn't have them, and sends them to peer 1 that way. - peer_configs[0].connection_opts.disable_block_advertisement = true; - peer_configs[0].connection_opts.disable_block_download = true; - - peer_configs[1].connection_opts.disable_block_download = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - // permit anti-entropy protocol even if nat'ed - peer_configs[0].connection_opts.antientropy_public = true; - peer_configs[1].connection_opts.antientropy_public = true; - peer_configs[0].connection_opts.antientropy_retry = 1; - peer_configs[1].connection_opts.antientropy_retry = 1; - - // make peer 0 go slowly - peer_configs[0].connection_opts.max_block_push = 2; - peer_configs[0].connection_opts.max_microblock_push = 2; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - - // peer 0 is inbound to peer 1 - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - if peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap() - != this_reward_cycle - { - continue; - } - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - - // cap with an empty sortition, so the antientropy protocol picks up all stacks - // blocks - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(vec![]); - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(vec![]); - } - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push((sn.consensus_hash.clone(), None, None)); - - block_data - }, - |ref mut peers| { - for peer in peers.iter_mut() { - // force peers to keep trying to process buffered data - peer.network.burnchain_tip.burn_header_hash = - BurnchainHeaderHash([0u8; 32]); - } - - let tip_opt = peers[1] - .with_db_state(|sortdb, chainstate, _, _| { - let tip_opt = NakamotoChainState::get_canonical_block_header( - chainstate.db(), - sortdb, - ) - .unwrap(); - Ok(tip_opt) - }) - .unwrap(); - }, - |ref peer| { - // check peer health - // nothing should break - // TODO - true - }, - |_| true, - ); - }) - } - - #[test] - #[ignore] - fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { - with_timeout(600, move || { - let sortitions = RefCell::new(vec![]); - let blocks_and_microblocks = RefCell::new(vec![]); - let idx = RefCell::new(0usize); - let pushed_idx = RefCell::new(0usize); - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_buffered_messages", - 4242, - 2, - |ref mut peer_configs| { - // build initial network topology. - assert_eq!(peer_configs.len(), 2); - - // peer 0 mines blocks, but it does not present its inventory. - peer_configs[0].connection_opts.disable_inv_chat = true; - peer_configs[0].connection_opts.disable_block_download = true; - - peer_configs[1].connection_opts.disable_block_download = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - // disable nat punches -- disconnect/reconnect - // clears inv state - peer_configs[0].connection_opts.disable_natpunch = true; - peer_configs[1].connection_opts.disable_natpunch = true; - - // peer 0 ignores peer 1's handshakes - peer_configs[0].connection_opts.disable_inbound_handshakes = true; - - // disable anti-entropy - peer_configs[0].connection_opts.max_block_push = 0; - peer_configs[0].connection_opts.max_microblock_push = 0; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - - // peer 0 is inbound to peer 1 - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - }, - |num_blocks, ref mut peers| { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let this_reward_cycle = peers[0] - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - // build up block data to replicate - let mut block_data = vec![]; - for block_num in 0..num_blocks { - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - let (mut burn_ops, stacks_block, microblocks) = - peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - if block_num == 0 { - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - peers[i].process_stacks_epoch_at_tip(&stacks_block, µblocks); - } - } else { - let mut all_sortitions = sortitions.borrow_mut(); - all_sortitions.push(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - *blocks_and_microblocks.borrow_mut() = block_data.clone()[1..] - .to_vec() - .drain(..) - .map(|(ch, blk_opt, mblocks_opt)| { - (ch, blk_opt.unwrap(), mblocks_opt.unwrap()) - }) - .collect(); - block_data - }, - |ref mut peers| { - for peer in peers.iter_mut() { - // force peers to keep trying to process buffered data - peer.network.burnchain_tip.burn_header_hash = - BurnchainHeaderHash([0u8; 32]); - } - - let mut i = idx.borrow_mut(); - let mut pushed_i = pushed_idx.borrow_mut(); - let all_sortitions = sortitions.borrow(); - let all_blocks_and_microblocks = blocks_and_microblocks.borrow(); - let peer_0_nk = peers[0].to_neighbor().addr; - let peer_1_nk = peers[1].to_neighbor().addr; - - let tip_opt = peers[1] - .with_db_state(|sortdb, chainstate, _, _| { - let tip_opt = NakamotoChainState::get_canonical_block_header( - chainstate.db(), - sortdb, - ) - .unwrap(); - Ok(tip_opt) - }) - .unwrap(); - - if !is_peer_connected(&peers[0], &peer_1_nk) { - debug!("Peer 0 not connected to peer 1"); - return; - } - - if let Some(tip) = tip_opt { - debug!( - "Push at {}, need {}", - tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1, - *pushed_i - ); - if tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1 - == *pushed_i as u64 - { - // next block - push_block( - &mut peers[0], - &peer_1_nk, - vec![], - (*all_blocks_and_microblocks)[*pushed_i].0.clone(), - (*all_blocks_and_microblocks)[*pushed_i].1.clone(), - ); - push_microblocks( - &mut peers[0], - &peer_1_nk, - vec![], - (*all_blocks_and_microblocks)[*pushed_i].0.clone(), - (*all_blocks_and_microblocks)[*pushed_i].1.block_hash(), - (*all_blocks_and_microblocks)[*pushed_i].2.clone(), - ); - *pushed_i += 1; - } - debug!( - "Sortition at {}, need {}", - tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1, - *i - ); - if tip.anchored_header.height() - - peers[1].config.burnchain.first_block_height - - 1 - == *i as u64 - { - let event_id = { - let mut ret = 0; - for (nk, event_id) in peers[1].network.events.iter() { - ret = *event_id; - break; - } - if ret == 0 { - return; - } - ret - }; - let mut update_sortition = false; - for (event_id, pending) in peers[1].network.pending_messages.iter() { - debug!("Pending at {} is ({}, {})", *i, event_id, pending.len()); - if pending.len() >= 1 { - update_sortition = true; - } - } - if update_sortition { - debug!("Advance sortition!"); - peers[1].next_burnchain_block_raw((*all_sortitions)[*i].clone()); - *i += 1; - } - } - } - }, - |ref peer| { - // check peer health - // nothing should break - // TODO - true - }, - |_| true, - ); - }) - } - - pub fn make_contract_tx( - sender: &StacksPrivateKey, - cur_nonce: u64, - tx_fee: u64, - name: &str, - contract: &str, - ) -> StacksTransaction { - let sender_spending_condition = TransactionSpendingCondition::new_singlesig_p2pkh( - StacksPublicKey::from_private(sender), - ) - .expect("Failed to create p2pkh spending condition from public key."); - - let spending_auth = TransactionAuth::Standard(sender_spending_condition); - - let mut tx_contract = StacksTransaction::new( - TransactionVersion::Testnet, - spending_auth.clone(), - TransactionPayload::new_smart_contract(&name.to_string(), &contract.to_string(), None) - .unwrap(), - ); - - tx_contract.chain_id = 0x80000000; - tx_contract.auth.set_origin_nonce(cur_nonce); - tx_contract.set_tx_fee(tx_fee); - - let mut tx_signer = StacksTransactionSigner::new(&tx_contract); - tx_signer.sign_origin(sender).unwrap(); - - let tx_contract_signed = tx_signer.get_tx().unwrap(); - tx_contract_signed - } - - #[test] - fn test_static_problematic_tests() { - let spender_sk_1 = StacksPrivateKey::new(); - let spender_sk_2 = StacksPrivateKey::new(); - let spender_sk_3 = StacksPrivateKey::new(); - - let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1; - let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); - let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize); - let tx_edge_body = format!("{}u1 {}", tx_edge_body_start, tx_edge_body_end); - - let tx_edge = make_contract_tx( - &spender_sk_1, - 0, - (tx_edge_body.len() * 100) as u64, - "test-edge", - &tx_edge_body, - ); - - // something just over the limit of the expression depth - let exceeds_repeat_factor = edge_repeat_factor + 1; - let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); - - let tx_exceeds = make_contract_tx( - &spender_sk_2, - 0, - (tx_exceeds_body.len() * 100) as u64, - "test-exceeds", - &tx_exceeds_body, - ); - - // something stupidly high over the expression depth - let high_repeat_factor = 128 * 1024; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); - - let tx_high = make_contract_tx( - &spender_sk_3, - 0, - (tx_high_body.len() * 100) as u64, - "test-high", - &tx_high_body, - ); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_edge, - ASTRules::Typical - ) - .is_ok()); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_exceeds, - ASTRules::Typical - ) - .is_ok()); - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_high, - ASTRules::Typical - ) - .is_ok()); - - assert!(Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_edge, - ASTRules::Typical - ) - .is_ok()); - assert!(!Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_exceeds, - ASTRules::PrecheckSize - ) - .is_ok()); - assert!(!Relayer::static_check_problematic_relayed_tx( - false, - StacksEpochId::Epoch2_05, - &tx_high, - ASTRules::PrecheckSize - ) - .is_ok()); - } - - #[test] - fn process_new_blocks_rejects_problematic_asts() { - let privk = StacksPrivateKey::from_hex( - "42faca653724860da7a41bfcef7e6ba78db55146f6900de8cb2a9f760ffac70c01", - ) - .unwrap(); - let addr = StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(&privk)], - ) - .unwrap(); - - let initial_balances = vec![(addr.to_account_principal(), 100000000000)]; - - let mut peer_config = TestPeerConfig::new(function_name!(), 32019, 32020); - peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 1, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 1, - end_height: i64::MAX as u64, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - ]); - let burnchain = peer_config.burnchain.clone(); - - // activate new AST rules right away - let mut peer = TestPeer::new(peer_config); - let mut sortdb = peer.sortdb.take().unwrap(); - { - let mut tx = sortdb - .tx_begin() - .expect("FATAL: failed to begin tx on sortition DB"); - SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 1) - .expect("FATAL: failed to override AST PrecheckSize rule height"); - tx.commit() - .expect("FATAL: failed to commit sortition DB transaction"); - } - peer.sortdb = Some(sortdb); - - let chainstate_path = peer.chainstate_path.clone(); - - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height - }; - - let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; - let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - - let high_repeat_factor = 128 * 1024; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); - - let bad_tx = make_contract_tx( - &privk, - 0, - (tx_high_body.len() * 100) as u64, - "test-high", - &tx_high_body, - ); - let bad_txid = bad_tx.txid(); - let bad_tx_len = { - let mut bytes = vec![]; - bad_tx.consensus_serialize(&mut bytes).unwrap(); - bytes.len() as u64 - }; - - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); - - let mblock_privk = StacksPrivateKey::new(); - - // make one tenure with a valid block, but problematic microblocks - let (burn_ops, block, microblocks) = peer.make_tenure( - |ref mut miner, - ref mut sortdb, - ref mut chainstate, - vrf_proof, - ref parent_opt, - ref parent_microblock_header_opt| { - let parent_tip = match parent_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(block) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &block.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let coinbase_tx = make_coinbase(miner, 0); - - let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, - &parent_tip, - vrf_proof.clone(), - tip.total_burn, - Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), - ) - .unwrap(); - - let block = StacksBlockBuilder::make_anchored_block_from_txs( - block_builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx.clone()], - ) - .unwrap() - .0; - - (block, vec![]) - }, - ); - - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - peer.process_stacks_epoch(&block, &consensus_hash, &vec![]); - - let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); - - let (burn_ops, bad_block, mut microblocks) = peer.make_tenure( - |ref mut miner, - ref mut sortdb, - ref mut chainstate, - vrf_proof, - ref parent_opt, - ref parent_microblock_header_opt| { - let parent_tip = match parent_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(block) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &block.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - let coinbase_tx = make_coinbase(miner, 0); - - let mblock_privk = miner.next_microblock_privkey(); - let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, - &parent_tip, - vrf_proof.clone(), - tip.total_burn, - Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), - ) - .unwrap(); - - // this tx would be problematic without our checks - if let Err(ChainstateError::ProblematicTransaction(txid)) = - StacksBlockBuilder::make_anchored_block_from_txs( - block_builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx.clone(), bad_tx.clone()], - ) - { - assert_eq!(txid, bad_txid); - } else { - panic!("Did not get Error::ProblematicTransaction"); - } - - // make a bad block anyway - // don't worry about the state root - let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, - &parent_tip, - vrf_proof.clone(), - tip.total_burn, - Hash160::from_node_public_key(&StacksPublicKey::from_private(&mblock_privk)), - ) - .unwrap(); - let bad_block = StacksBlockBuilder::make_anchored_block_from_txs( - block_builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx.clone()], - ) - .unwrap(); - - let mut bad_block = bad_block.0; - bad_block.txs.push(bad_tx.clone()); - - let txid_vecs = bad_block - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - - let merkle_tree = MerkleTree::::new(&txid_vecs); - bad_block.header.tx_merkle_root = merkle_tree.root(); - - let sort_ic = sortdb.index_conn(); - chainstate - .reload_unconfirmed_state(&sort_ic, parent_index_hash.clone()) - .unwrap(); - - // make a bad microblock - let mut microblock_builder = StacksMicroblockBuilder::new( - parent_header_hash.clone(), - parent_consensus_hash.clone(), - chainstate, - &sort_ic, - BlockBuilderSettings::max_value(), - ) - .unwrap(); - - // miner should fail with just the bad tx, since it's problematic - let mblock_err = microblock_builder - .mine_next_microblock_from_txs( - vec![(bad_tx.clone(), bad_tx_len)], - &mblock_privk, - ) - .unwrap_err(); - if let ChainstateError::NoTransactionsToMine = mblock_err { - } else { - panic!("Did not get NoTransactionsToMine"); - } - - let token_transfer = make_user_stacks_transfer( - &privk, - 0, - 200, - &recipient.to_account_principal(), - 123, - ); - let tt_len = { - let mut bytes = vec![]; - token_transfer.consensus_serialize(&mut bytes).unwrap(); - bytes.len() as u64 - }; - - let mut bad_mblock = microblock_builder - .mine_next_microblock_from_txs( - vec![(token_transfer, tt_len), (bad_tx.clone(), bad_tx_len)], - &mblock_privk, - ) - .unwrap(); - - // miner shouldn't include the bad tx, since it's problematic - assert_eq!(bad_mblock.txs.len(), 1); - bad_mblock.txs.push(bad_tx.clone()); - - // force it in anyway - let txid_vecs = bad_mblock - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - - let merkle_tree = MerkleTree::::new(&txid_vecs); - bad_mblock.header.tx_merkle_root = merkle_tree.root(); - bad_mblock.sign(&mblock_privk).unwrap(); - - (bad_block, vec![bad_mblock]) - }, - ); - - let bad_mblock = microblocks.pop().unwrap(); - let (_, _, new_consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - peer.process_stacks_epoch(&bad_block, &new_consensus_hash, &vec![]); - - // stuff them all into each possible field of NetworkResult - // p2p messages - let nk = NeighborKey { - peer_version: 1, - network_id: 2, - addrbytes: PeerAddress([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), - port: 19, - }; - let preamble = Preamble { - peer_version: 1, - network_id: 2, - seq: 3, - burn_block_height: 4, - burn_block_hash: BurnchainHeaderHash([5u8; 32]), - burn_stable_block_height: 6, - burn_stable_block_hash: BurnchainHeaderHash([7u8; 32]), - additional_data: 8, - signature: MessageSignature([9u8; 65]), - payload_len: 10, - }; - let bad_msgs = vec![ - StacksMessage { - preamble: preamble.clone(), - relayers: vec![], - payload: StacksMessageType::Blocks(BlocksData { - blocks: vec![BlocksDatum(new_consensus_hash.clone(), bad_block.clone())], - }), - }, - StacksMessage { - preamble: preamble.clone(), - relayers: vec![], - payload: StacksMessageType::Microblocks(MicroblocksData { - index_anchor_block: StacksBlockId::new( - &new_consensus_hash, - &bad_block.block_hash(), - ), - microblocks: vec![bad_mblock.clone()], - }), - }, - StacksMessage { - preamble: preamble.clone(), - relayers: vec![], - payload: StacksMessageType::Transaction(bad_tx.clone()), - }, - ]; - let mut unsolicited = HashMap::new(); - unsolicited.insert(nk.clone(), bad_msgs.clone()); - - let mut network_result = - NetworkResult::new(0, 0, 0, 0, ConsensusHash([0x01; 20]), HashMap::new()); - network_result.consume_unsolicited(unsolicited); - - assert!(network_result.has_blocks()); - assert!(network_result.has_microblocks()); - assert!(network_result.has_transactions()); - - network_result.consume_http_uploads( - bad_msgs - .into_iter() - .map(|msg| msg.payload) - .collect::>(), - ); - - assert!(network_result.has_blocks()); - assert!(network_result.has_microblocks()); - assert!(network_result.has_transactions()); - - assert_eq!(network_result.uploaded_transactions.len(), 1); - assert_eq!(network_result.uploaded_blocks.len(), 1); - assert_eq!(network_result.uploaded_microblocks.len(), 1); - assert_eq!(network_result.pushed_transactions.len(), 1); - assert_eq!(network_result.pushed_blocks.len(), 1); - assert_eq!(network_result.pushed_microblocks.len(), 1); - - network_result - .blocks - .push((new_consensus_hash.clone(), bad_block.clone(), 123)); - network_result.confirmed_microblocks.push(( - new_consensus_hash.clone(), - vec![bad_mblock.clone()], - 234, - )); - - let mut sortdb = peer.sortdb.take().unwrap(); - let (processed_blocks, processed_mblocks, relay_mblocks, bad_neighbors) = - Relayer::process_new_blocks( - &mut network_result, - &mut sortdb, - &mut peer.stacks_node.as_mut().unwrap().chainstate, - None, - ) - .unwrap(); - - // despite this data showing up in all aspects of the network result, none of it actually - // gets relayed - assert_eq!(processed_blocks.len(), 0); - assert_eq!(processed_mblocks.len(), 0); - assert_eq!(relay_mblocks.len(), 0); - assert_eq!(bad_neighbors.len(), 0); - - let txs_relayed = Relayer::process_transactions( - &mut network_result, - &sortdb, - &mut peer.stacks_node.as_mut().unwrap().chainstate, - &mut peer.mempool.as_mut().unwrap(), - None, - ) - .unwrap(); - assert_eq!(txs_relayed.len(), 0); - } - - #[test] - fn test_block_pay_to_contract_gated_at_v210() { - let mut peer_config = TestPeerConfig::new(function_name!(), 4246, 4247); - let epochs = vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch10, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_1_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 0, - end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 28, - end_height: STACKS_EPOCH_MAX, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ]; - peer_config.epochs = Some(epochs); - let burnchain = peer_config.burnchain.clone(); - - let mut peer = TestPeer::new(peer_config); - - let mut make_tenure = - |miner: &mut TestMiner, - sortdb: &mut SortitionDB, - chainstate: &mut StacksChainState, - vrfproof: VRFProof, - parent_opt: Option<&StacksBlock>, - microblock_parent_opt: Option<&StacksMicroblockHeader>| { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - let stacks_tip_opt = - NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) - .unwrap(); - let parent_tip = match stacks_tip_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(header_tip) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &header_tip.anchored_header.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - - let coinbase_tx = make_coinbase_with_nonce( - miner, - parent_tip.stacks_block_height as usize, - 0, - Some(PrincipalData::Contract( - QualifiedContractIdentifier::parse("ST000000000000000000002AMW42H.bns") - .unwrap(), - )), - ); - - let mut mblock_pubkey_hash_bytes = [0u8; 20]; - mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); - - let builder = StacksBlockBuilder::make_block_builder( - &burnchain, - chainstate.mainnet, - &parent_tip, - vrfproof, - tip.total_burn, - Hash160(mblock_pubkey_hash_bytes), - ) - .unwrap(); - - let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( - builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx], - ) - .unwrap(); - - (anchored_block.0, vec![]) - }; - - // tenures 26 and 27 should fail, since the block is a pay-to-contract block - // Pay-to-contract should only be supported if the block is in epoch 2.1, which - // activates at tenure 27. - for i in 0..2 { - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - panic!("Stored pay-to-contract stacks block before epoch 2.1"); - } - Err(chainstate_error::InvalidStacksBlock(_)) => {} - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - // *now* it should succeed, since tenure 28 was in epoch 2.1 - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - assert!(x, "Failed to process valid pay-to-contract block"); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - #[test] - fn test_block_versioned_smart_contract_gated_at_v210() { - let mut peer_config = TestPeerConfig::new(function_name!(), 4248, 4249); - - let initial_balances = vec![( - PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), - 1000000, - )]; - - let epochs = vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch10, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_1_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 0, - end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 28, - end_height: STACKS_EPOCH_MAX, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ]; - - peer_config.epochs = Some(epochs); - peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); - - let mut peer = TestPeer::new(peer_config); - - let mut make_tenure = - |miner: &mut TestMiner, - sortdb: &mut SortitionDB, - chainstate: &mut StacksChainState, - vrfproof: VRFProof, - parent_opt: Option<&StacksBlock>, - microblock_parent_opt: Option<&StacksMicroblockHeader>| { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - let stacks_tip_opt = - NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) - .unwrap(); - let parent_tip = match stacks_tip_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(header_tip) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &header_tip.anchored_header.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - - let coinbase_tx = make_coinbase_with_nonce( - miner, - parent_tip.stacks_block_height as usize, - 0, - None, - ); - - let versioned_contract = make_smart_contract_with_version( - miner, - 1, - tip.block_height.try_into().unwrap(), - 0, - Some(ClarityVersion::Clarity1), - Some(1000), - ); - - let mut mblock_pubkey_hash_bytes = [0u8; 20]; - mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); - - let builder = StacksBlockBuilder::make_block_builder( - &burnchain, - chainstate.mainnet, - &parent_tip, - vrfproof, - tip.total_burn, - Hash160(mblock_pubkey_hash_bytes), - ) - .unwrap(); - - let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( - builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx, versioned_contract], - ) - .unwrap(); - - eprintln!("{:?}", &anchored_block.0); - (anchored_block.0, vec![]) - }; - - // tenures 26 and 27 should fail, since the block contains a versioned smart contract. - // Versioned smart contracts should only be supported if the block is in epoch 2.1, which - // activates at tenure 27. - for i in 0..2 { - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - eprintln!("{:?}", &stacks_block); - panic!("Stored pay-to-contract stacks block before epoch 2.1"); - } - Err(chainstate_error::InvalidStacksBlock(_)) => {} - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - // *now* it should succeed, since tenure 28 was in epoch 2.1 - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - assert!(x, "Failed to process valid versioned smart contract block"); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - #[test] - fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { - let mut peer_config = TestPeerConfig::new(function_name!(), 4250, 4251); - - let initial_balances = vec![( - PrincipalData::from(peer_config.spending_account.origin_address().unwrap()), - 1000000, - )]; - - let epochs = vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch10, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_1_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 0, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 0, - end_height: 28, // NOTE: the first 25 burnchain blocks have no sortition - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 28, - end_height: STACKS_EPOCH_MAX, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ]; - - peer_config.epochs = Some(epochs); - peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); - - let mut peer = TestPeer::new(peer_config); - let versioned_contract_opt: RefCell> = RefCell::new(None); - let nonce: RefCell = RefCell::new(0); - - let mut make_tenure = - |miner: &mut TestMiner, - sortdb: &mut SortitionDB, - chainstate: &mut StacksChainState, - vrfproof: VRFProof, - parent_opt: Option<&StacksBlock>, - microblock_parent_opt: Option<&StacksMicroblockHeader>| { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - let stacks_tip_opt = - NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) - .unwrap(); - let parent_tip = match stacks_tip_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(header_tip) => { - let ic = sortdb.index_conn(); - let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &header_tip.anchored_header.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - - let next_nonce = *nonce.borrow(); - let coinbase_tx = make_coinbase_with_nonce( - miner, - parent_tip.stacks_block_height as usize, - next_nonce, - None, - ); - - let versioned_contract = make_smart_contract_with_version( - miner, - next_nonce + 1, - tip.block_height.try_into().unwrap(), - 0, - Some(ClarityVersion::Clarity1), - Some(1000), - ); - - *versioned_contract_opt.borrow_mut() = Some(versioned_contract); - *nonce.borrow_mut() = next_nonce + 1; - - let mut mblock_pubkey_hash_bytes = [0u8; 20]; - mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); - - let builder = StacksBlockBuilder::make_block_builder( - &burnchain, - chainstate.mainnet, - &parent_tip, - vrfproof, - tip.total_burn, - Hash160(mblock_pubkey_hash_bytes), - ) - .unwrap(); - - let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( - builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx], - ) - .unwrap(); - - eprintln!("{:?}", &anchored_block.0); - (anchored_block.0, vec![]) - }; - - for i in 0..2 { - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - - // the empty block should be accepted - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - assert!(x, "Did not accept valid block"); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - - // process it - peer.coord.handle_new_stacks_block().unwrap(); - - // the mempool would reject a versioned contract transaction, since we're not yet at - // tenure 28 - let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); - let versioned_contract_len = versioned_contract.serialize_to_vec().len(); - match node.chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), - &consensus_hash, - &stacks_block.block_hash(), - &versioned_contract, - versioned_contract_len as u64, - ) { - Err(MemPoolRejection::Other(msg)) => { - assert!(msg.find("not supported in this epoch").is_some()); - } - Err(e) => { - panic!("will_admit_mempool_tx {:?}", &e); - } - Ok(_) => { - panic!("will_admit_mempool_tx succeeded"); - } - }; - - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - // *now* it should succeed, since tenure 28 was in epoch 2.1 - let (burn_ops, stacks_block, microblocks) = peer.make_tenure(&mut make_tenure); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - let sortdb = peer.sortdb.take().unwrap(); - let mut node = peer.stacks_node.take().unwrap(); - match Relayer::process_new_anchored_block( - &sortdb.index_conn(), - &mut node.chainstate, - &consensus_hash, - &stacks_block, - 123, - ) { - Ok(x) => { - assert!(x, "Failed to process valid versioned smart contract block"); - } - Err(e) => { - panic!("Got unexpected error {:?}", &e); - } - }; - - // process it - peer.coord.handle_new_stacks_block().unwrap(); - - // the mempool would accept a versioned contract transaction, since we're not yet at - // tenure 28 - let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); - let versioned_contract_len = versioned_contract.serialize_to_vec().len(); - match node.chainstate.will_admit_mempool_tx( - &sortdb.index_conn(), - &consensus_hash, - &stacks_block.block_hash(), - &versioned_contract, - versioned_contract_len as u64, - ) { - Err(e) => { - panic!("will_admit_mempool_tx {:?}", &e); - } - Ok(_) => {} - }; - - peer.sortdb = Some(sortdb); - peer.stacks_node = Some(node); - } - - // TODO: process bans - // TODO: test sending invalid blocks-available and microblocks-available (should result in a ban) - // TODO: test sending invalid transactions (should result in a ban) - // TODO: test bandwidth limits (sending too much should result in a nack, and then a ban) -} +pub mod test {} From 4934b345b1a9e7bf57dad357e2e97410fad99396 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:22:38 -0400 Subject: [PATCH 0263/1400] feat: unit test coverage for Nakamoto unsolicited message handling and block-push --- stackslib/src/net/tests/relay/nakamoto.rs | 954 ++++++++++++++++++++++ 1 file changed, 954 insertions(+) create mode 100644 stackslib/src/net/tests/relay/nakamoto.rs diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs new file mode 100644 index 0000000000..2f52654ae9 --- /dev/null +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -0,0 +1,954 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{HashMap, VecDeque}; +use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TryRecvError}; +use std::thread; +use std::thread::JoinHandle; + +use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; +use clarity::vm::ast::ASTRules; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::{ClarityVersion, MAX_CALL_STACK_DEPTH}; +use rand::Rng; +use stacks_common::address::AddressHashMode; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, StacksWorkScore, TrieHash}; +use stacks_common::types::Address; +use stacks_common::util::hash::MerkleTree; +use stacks_common::util::sleep_ms; +use stacks_common::util::vrf::VRFProof; + +use super::*; +use crate::burnchains::bitcoin::indexer::BitcoinIndexer; +use crate::burnchains::tests::TestMiner; +use crate::chainstate::burn::operations::BlockstackOperationType; +use crate::chainstate::nakamoto::coordinator::tests::{ + make_all_signers_vote_for_aggregate_key, make_token_transfer, +}; +use crate::chainstate::nakamoto::tests::get_account; +use crate::chainstate::stacks::boot::test::{ + key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, + make_signers_vote_for_aggregate_public_key, make_signers_vote_for_aggregate_public_key_value, + with_sortdb, +}; +use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; +use crate::chainstate::stacks::test::codec_all_transactions; +use crate::chainstate::stacks::tests::{ + make_coinbase, make_coinbase_with_nonce, make_smart_contract_with_version, + make_user_stacks_transfer, TestStacksNode, +}; +use crate::chainstate::stacks::{Error as ChainstateError, *}; +use crate::clarity_vm::clarity::ClarityConnection; +use crate::core::*; +use crate::net::api::getinfo::RPCPeerInfoData; +use crate::net::asn::*; +use crate::net::chat::*; +use crate::net::codec::*; +use crate::net::download::*; +use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; +use crate::net::httpcore::StacksHttpMessage; +use crate::net::inv::inv2x::*; +use crate::net::relay::{ProcessedNetReceipts, Relayer}; +use crate::net::test::*; +use crate::net::tests::download::epoch2x::run_get_blocks_and_microblocks; +use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs; +use crate::net::tests::relay::epoch2x::broadcast_message; +use crate::net::{Error as NetError, *}; +use crate::util_lib::test::*; + +/// Everything in a TestPeer, except the coordinator (which is encombered by the lifetime of its +/// chains coordinator's event observer) +struct ExitedPeer { + pub config: TestPeerConfig, + pub network: PeerNetwork, + pub sortdb: Option, + pub miner: TestMiner, + pub stacks_node: Option, + pub relayer: Relayer, + pub mempool: Option, + pub chainstate_path: String, + pub indexer: Option, +} + +impl ExitedPeer { + /// Instantiate the exited peer from the TestPeer + fn from_test_peer(peer: TestPeer) -> Self { + Self { + config: peer.config, + network: peer.network, + sortdb: peer.sortdb, + miner: peer.miner, + stacks_node: peer.stacks_node, + relayer: peer.relayer, + mempool: peer.mempool, + chainstate_path: peer.chainstate_path, + indexer: peer.indexer, + } + } + + /// Run the network stack of the exited peer, but no more block processing will take place. + pub fn run_with_ibd( + &mut self, + ibd: bool, + dns_client: Option<&mut DNSClient>, + ) -> Result<(NetworkResult, ProcessedNetReceipts), NetError> { + let mut sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.stacks_node.take().unwrap(); + let mut mempool = self.mempool.take().unwrap(); + let indexer = self.indexer.take().unwrap(); + + let net_result = self.network.run( + &indexer, + &mut sortdb, + &mut stacks_node.chainstate, + &mut mempool, + dns_client, + false, + ibd, + 100, + &RPCHandlerArgs::default(), + )?; + let receipts_res = self.relayer.process_network_result( + self.network.get_local_peer(), + &mut net_result.clone(), + &mut sortdb, + &mut stacks_node.chainstate, + &mut mempool, + ibd, + None, + None, + ); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(stacks_node); + self.mempool = Some(mempool); + self.indexer = Some(indexer); + + receipts_res.and_then(|receipts| Ok((net_result, receipts))) + } +} + +/// Messages passed to the unit test from the seed node thread +enum SeedData { + BurnOps(Vec, ConsensusHash), + Blocks(Vec), + Exit(ExitedPeer), +} + +/// Messages passed from the unit test to the seed node thread +#[derive(Clone, Debug, PartialEq)] +enum SeedCommand { + Exit, +} + +/// Communication channels from the unit test to the seed node thread +struct FollowerComms { + data_receiver: Receiver, + command_sender: SyncSender, +} + +impl FollowerComms { + pub fn send_exit(&mut self) { + self.command_sender + .send(SeedCommand::Exit) + .expect("FATAL: seed node hangup"); + } + + pub fn try_recv(&mut self) -> Option { + match self.data_receiver.try_recv() { + Ok(data) => Some(data), + Err(TryRecvError::Empty) => None, + Err(_) => { + panic!("FATAL: seed node hangup"); + } + } + } +} + +/// Communication channels from the seed node thread to the unit test +struct SeedComms { + data_sender: SyncSender, + command_receiver: Receiver, +} + +struct SeedNode {} + +impl SeedNode { + /// Have `peer` produce two reward cycles of length `rc_len`, and forward all sortitions and + /// Nakamoto blocks back to the unit test. This consumes `peer`. + /// + /// The `peer` will process its blocks locally, and _push_ them to one or more followers. The + /// `peer` will wait for there to be at least one network conversation open before advancing, + /// thereby ensuring reliable delivery of the Nakamoto blocks to at least one follower. In + /// addition, the blocks and sortitions will be sent to the unit test via `comms`. + /// + /// The contents of `peer` will be sent back to the unit test via an `ExitedPeer` struct, so + /// the unit test can query it or even run its networking stack. + pub fn main(mut peer: TestPeer, rc_len: u64, comms: SeedComms) { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + let mut test_signers = peer.config.test_signers.take().unwrap(); + let test_stackers = peer.config.test_stackers.take().unwrap(); + + let mut all_blocks: Vec = vec![]; + let mut all_burn_ops = vec![]; + let mut rc_blocks = vec![]; + let mut rc_burn_ops = vec![]; + + // have the peer mine some blocks for two reward cycles + for i in 0..(2 * rc_len) { + debug!("Tenure {}", i); + let (burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + // pass along to the follower + if comms + .data_sender + .send(SeedData::BurnOps(burn_ops.clone(), consensus_hash.clone())) + .is_err() + { + warn!("Follower disconnected"); + break; + } + + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + debug!("Next burnchain block: {}", &consensus_hash); + + let num_blocks: usize = (thread_rng().gen::() % 10) + 1; + + let block_height = peer.get_burn_block_height(); + + // If we are in the prepare phase, check if we need to generate + // aggregate key votes + let txs = if peer.config.burnchain.is_in_prepare_phase(block_height) { + let cycle_id = peer + .config + .burnchain + .block_height_to_reward_cycle(block_height) + .unwrap(); + let next_cycle_id = cycle_id as u128 + 1; + + with_sortdb(&mut peer, |chainstate, sortdb| { + if let Some(tip) = all_blocks.last() { + // TODO: remove once #4796 closes + make_all_signers_vote_for_aggregate_key( + chainstate, + sortdb, + &tip.block_id(), + &mut test_signers, + &test_stackers, + next_cycle_id, + ) + } else { + vec![] + } + }) + } else { + vec![] + }; + + // do a stx transfer in each block to a given recipient + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + let blocks_and_sizes = peer.make_nakamoto_tenure( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |miner, chainstate, sortdb, blocks_so_far| { + // Include the aggregate key voting transactions in the first block. + let mut txs = if blocks_so_far.is_empty() { + txs.clone() + } else { + vec![] + }; + + if blocks_so_far.len() < num_blocks { + debug!("\n\nProduce block {}\n\n", all_blocks.len()); + + let account = get_account(chainstate, sortdb, &addr); + + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + txs.push(stx_transfer); + } + txs + }, + ); + + let mut blocks: Vec = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + // run network state machine until we have a connection + loop { + let network_result_res = peer.run_with_ibd(false, None); + if let Ok((network_result, _)) = network_result_res { + if network_result.num_connected_peers > 0 { + break; + } + } + } + + // relay these blocks + let local_peer = peer.network.get_local_peer().clone(); + let sortdb = peer.sortdb.take().unwrap(); + let stacks_node = peer.stacks_node.take().unwrap(); + + peer.relayer.relay_epoch3_blocks( + &local_peer, + &sortdb, + &stacks_node.chainstate, + vec![(vec![], blocks.clone())], + true, + ); + + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(stacks_node); + + // send the blocks to the unit test as well + if comms + .data_sender + .send(SeedData::Blocks(blocks.clone())) + .is_err() + { + warn!("Follower disconnected"); + break; + } + + // if we're starting a new reward cycle, then save the current one + let tip = { + let sort_db = peer.sortdb.as_mut().unwrap(); + SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() + }; + if peer + .config + .burnchain + .is_reward_cycle_start(tip.block_height) + { + rc_blocks.push(all_blocks.clone()); + rc_burn_ops.push(all_burn_ops.clone()); + + all_burn_ops.clear(); + all_blocks.clear(); + } + + all_blocks.append(&mut blocks); + all_burn_ops.push(burn_ops); + } + + peer.config.test_signers = Some(test_signers); + peer.config.test_stackers = Some(test_stackers); + + let exited_peer = ExitedPeer::from_test_peer(peer); + + // inform the follower that we're done, and pass along the final state of the peer + if comms.data_sender.send(SeedData::Exit(exited_peer)).is_err() { + panic!("Follower disconnected"); + } + + // wait for request to exit + let Ok(SeedCommand::Exit) = comms.command_receiver.recv() else { + panic!("FATAL: did not receive shutdown request (follower must have crashed)"); + }; + } + + /// Instantiate bidirectional communication channels between the unit test and seed node + pub fn comms() -> (SeedComms, FollowerComms) { + let (data_sender, data_receiver) = sync_channel(1024); + let (command_sender, command_receiver) = sync_channel(1024); + + let seed_comms = SeedComms { + data_sender, + command_receiver, + }; + + let follower_comms = FollowerComms { + data_receiver, + command_sender, + }; + + (seed_comms, follower_comms) + } +} + +/// Verify that Nakmaoto blocks whose sortitions are known will *not* be buffered, but instead +/// forwarded to the relayer for processing. +#[test] +fn test_no_buffer_ready_nakamoto_blocks() { + let observer = TestEventObserver::new(); + let bitvecs = vec![vec![ + true, true, true, true, true, true, true, true, true, true, + ]]; + + let rc_len = 10u64; + let (peer, mut followers) = make_nakamoto_peers_from_invs( + function_name!(), + &observer, + rc_len as u32, + 5, + bitvecs.clone(), + 1, + ); + let peer_nk = peer.to_neighbor().addr; + let mut follower = followers.pop().unwrap(); + + let test_path = TestPeer::make_test_path(&follower.config); + let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); + let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); + let mut follower_relayer = Relayer::from_p2p(&mut follower.network, follower_stacker_dbs); + + // disable the follower's ability to download blocks from the seed peer + follower.network.connection_opts.disable_block_download = true; + follower.config.connection_opts.disable_block_download = true; + + let (seed_comms, mut follower_comms) = SeedNode::comms(); + + thread::scope(|s| { + s.spawn(|| { + SeedNode::main(peer, rc_len, seed_comms); + }); + + let mut seed_exited = false; + let mut exited_peer = None; + let (mut follower_dns_client, follower_dns_thread_handle) = dns_thread_start(100); + + while !seed_exited { + let mut network_result = follower + .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) + .ok(); + + match follower_comms.try_recv() { + None => {} + Some(SeedData::BurnOps(burn_ops, consensus_hash)) => { + debug!("Follower got {}: {:?}", &consensus_hash, &burn_ops); + let (_, _, follower_consensus_hash) = + follower.next_burnchain_block(burn_ops.clone()); + assert_eq!(follower_consensus_hash, consensus_hash); + } + Some(SeedData::Blocks(blocks)) => { + debug!("Follower got Nakamoto blocks {:?}", &blocks); + + let mut sortdb = follower.sortdb.take().unwrap(); + let mut node = follower.stacks_node.take().unwrap(); + + // no need to buffer this because we can process it right away + let buffer = follower + .network + .inner_handle_unsolicited_NakamotoBlocksData( + &sortdb, + &node.chainstate, + Some(peer_nk.clone()), + &NakamotoBlocksData { + blocks: blocks.clone(), + }, + ); + assert!(!buffer); + + // we need these blocks, but we don't need to buffer them + for block in blocks.iter() { + assert!(!follower.network.is_nakamoto_block_bufferable( + &sortdb, + &node.chainstate, + block + )); + } + + // go process the blocks _as if_ they came from a network result + let mut unsolicited = HashMap::new(); + let msg = StacksMessage::from_chain_view( + follower.network.bound_neighbor_key().peer_version, + follower.network.bound_neighbor_key().network_id, + follower.network.get_chain_view(), + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: blocks.clone(), + }), + ); + unsolicited.insert(peer_nk.clone(), vec![msg]); + + if let Some(mut network_result) = network_result.take() { + network_result.consume_unsolicited(unsolicited); + let num_processed = follower_relayer.process_new_epoch3_blocks( + follower.network.get_local_peer(), + &mut network_result, + &mut sortdb, + &mut node.chainstate, + true, + None, + ); + + // because we process in order, they should all get processed + assert_eq!(num_processed, blocks.len() as u64); + } + + // no need to buffer if we already have the block + let buffer = follower + .network + .inner_handle_unsolicited_NakamotoBlocksData( + &sortdb, + &node.chainstate, + Some(peer_nk.clone()), + &NakamotoBlocksData { + blocks: blocks.clone(), + }, + ); + assert!(!buffer); + + // we don't need these blocks anymore + for block in blocks.iter() { + assert!(!follower.network.is_nakamoto_block_bufferable( + &sortdb, + &node.chainstate, + block + )); + } + + follower.stacks_node = Some(node); + follower.sortdb = Some(sortdb); + } + Some(SeedData::Exit(exited)) => { + debug!("Follower got seed exit"); + seed_exited = true; + exited_peer = Some(exited); + follower_comms.send_exit(); + } + } + + follower.coord.handle_new_burnchain_block().unwrap(); + follower.coord.handle_new_stacks_block().unwrap(); + follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + } + + // compare chain tips + let sortdb = follower.sortdb.take().unwrap(); + let stacks_node = follower.stacks_node.take().unwrap(); + let follower_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let follower_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + follower.stacks_node = Some(stacks_node); + follower.sortdb = Some(sortdb); + + let mut exited_peer = exited_peer.unwrap(); + let sortdb = exited_peer.sortdb.take().unwrap(); + let stacks_node = exited_peer.stacks_node.take().unwrap(); + let exited_peer_burn_tip = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let exited_peer_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + exited_peer.stacks_node = Some(stacks_node); + exited_peer.sortdb = Some(sortdb); + + assert_eq!(exited_peer_burn_tip, follower_burn_tip); + assert_eq!(exited_peer_stacks_tip, follower_stacks_tip); + }); +} + +/// Verify that Nakamoto blocks whose sortitions are not yet known will be buffered, and sent to +/// the relayer once the burnchain advances. +#[test] +fn test_buffer_nonready_nakamoto_blocks() { + let observer = TestEventObserver::new(); + let bitvecs = vec![vec![ + true, true, true, true, true, true, true, true, true, true, + ]]; + + let rc_len = 10u64; + let (peer, mut followers) = make_nakamoto_peers_from_invs( + function_name!(), + &observer, + rc_len as u32, + 5, + bitvecs.clone(), + 1, + ); + let peer_nk = peer.to_neighbor().addr; + let mut follower = followers.pop().unwrap(); + + let test_path = TestPeer::make_test_path(&follower.config); + let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); + let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); + let mut follower_relayer = Relayer::from_p2p(&mut follower.network, follower_stacker_dbs); + + // disable the follower's ability to download blocks from the seed peer + follower.network.connection_opts.disable_block_download = true; + follower.config.connection_opts.disable_block_download = true; + + // don't authenticate unsolicited messages, since this test directly pushes them + follower + .network + .connection_opts + .test_disable_unsolicited_message_authentication = true; + follower + .config + .connection_opts + .test_disable_unsolicited_message_authentication = true; + + let (seed_comms, mut follower_comms) = SeedNode::comms(); + + let mut buffered_burn_ops = VecDeque::new(); + let mut all_blocks = vec![]; + + thread::scope(|s| { + s.spawn(|| { + SeedNode::main(peer, rc_len, seed_comms); + }); + + let mut seed_exited = false; + let mut exited_peer = None; + let (mut follower_dns_client, follower_dns_thread_handle) = dns_thread_start(100); + + while !seed_exited { + let mut network_result = follower + .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) + .ok(); + + match follower_comms.try_recv() { + None => {} + Some(SeedData::BurnOps(burn_ops, consensus_hash)) => { + debug!( + "Follower got and will buffer {}: {:?}", + &consensus_hash, &burn_ops + ); + buffered_burn_ops.push_back((burn_ops, consensus_hash)); + if buffered_burn_ops.len() > 1 { + let (buffered_burn_ops, buffered_consensus_hash) = + buffered_burn_ops.pop_front().unwrap(); + debug!( + "Follower will process {}: {:?}", + &buffered_consensus_hash, &buffered_burn_ops + ); + let (_, _, follower_consensus_hash) = + follower.next_burnchain_block(buffered_burn_ops.clone()); + assert_eq!(follower_consensus_hash, buffered_consensus_hash); + } + } + Some(SeedData::Blocks(blocks)) => { + debug!("Follower got Nakamoto blocks {:?}", &blocks); + all_blocks.push(blocks.clone()); + + let mut sortdb = follower.sortdb.take().unwrap(); + let mut node = follower.stacks_node.take().unwrap(); + + // we will need to buffer this since the sortition for these blocks hasn't been + // processed yet + let buffer = follower + .network + .inner_handle_unsolicited_NakamotoBlocksData( + &sortdb, + &node.chainstate, + Some(peer_nk.clone()), + &NakamotoBlocksData { + blocks: blocks.clone(), + }, + ); + assert!(buffer); + + // we need these blocks, but we can't process them yet + for block in blocks.iter() { + assert!(follower.network.is_nakamoto_block_bufferable( + &sortdb, + &node.chainstate, + block + )); + } + + // try to process the blocks _as if_ they came from a network result. + // It should fail. + let mut unsolicited = HashMap::new(); + let msg = StacksMessage::from_chain_view( + follower.network.bound_neighbor_key().peer_version, + follower.network.bound_neighbor_key().network_id, + follower.network.get_chain_view(), + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: blocks.clone(), + }), + ); + unsolicited.insert(peer_nk.clone(), vec![msg]); + + if let Some(mut network_result) = network_result.take() { + network_result.consume_unsolicited(unsolicited); + follower_relayer.process_new_epoch3_blocks( + follower.network.get_local_peer(), + &mut network_result, + &mut sortdb, + &mut node.chainstate, + true, + None, + ); + } + + // have the peer network buffer them up + let mut unsolicited_msgs: HashMap> = HashMap::new(); + for (event_id, convo) in follower.network.peers.iter() { + for blks in all_blocks.iter() { + let msg = StacksMessage::from_chain_view( + follower.network.bound_neighbor_key().peer_version, + follower.network.bound_neighbor_key().network_id, + follower.network.get_chain_view(), + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: blocks.clone(), + }), + ); + + if let Some(msgs) = unsolicited_msgs.get_mut(event_id) { + msgs.push(msg); + } else { + unsolicited_msgs.insert(*event_id, vec![msg]); + } + } + } + follower.network.handle_unsolicited_messages( + &sortdb, + &node.chainstate, + unsolicited_msgs, + true, + true, + ); + + follower.stacks_node = Some(node); + follower.sortdb = Some(sortdb); + } + Some(SeedData::Exit(exited)) => { + debug!("Follower got seed exit"); + + // process the last burnchain sortitions + while let Some((buffered_burn_ops, buffered_consensus_hash)) = + buffered_burn_ops.pop_front() + { + debug!( + "Follower will process {}: {:?}", + &buffered_consensus_hash, &buffered_burn_ops + ); + let (_, _, follower_consensus_hash) = + follower.next_burnchain_block(buffered_burn_ops.clone()); + assert_eq!(follower_consensus_hash, buffered_consensus_hash); + } + + let mut network_result = follower + .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) + .ok(); + + // process the last buffered messages + let mut sortdb = follower.sortdb.take().unwrap(); + let mut node = follower.stacks_node.take().unwrap(); + + if let Some(mut network_result) = network_result.take() { + follower_relayer.process_new_epoch3_blocks( + follower.network.get_local_peer(), + &mut network_result, + &mut sortdb, + &mut node.chainstate, + true, + None, + ); + } + + follower.stacks_node = Some(node); + follower.sortdb = Some(sortdb); + + seed_exited = true; + exited_peer = Some(exited); + follower_comms.send_exit(); + } + } + + follower.coord.handle_new_burnchain_block().unwrap(); + follower.coord.handle_new_stacks_block().unwrap(); + follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + } + + // compare chain tips + let sortdb = follower.sortdb.take().unwrap(); + let stacks_node = follower.stacks_node.take().unwrap(); + let follower_burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let follower_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + follower.stacks_node = Some(stacks_node); + follower.sortdb = Some(sortdb); + + let mut exited_peer = exited_peer.unwrap(); + let sortdb = exited_peer.sortdb.take().unwrap(); + let stacks_node = exited_peer.stacks_node.take().unwrap(); + let exited_peer_burn_tip = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let exited_peer_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + exited_peer.stacks_node = Some(stacks_node); + exited_peer.sortdb = Some(sortdb); + + assert_eq!(exited_peer_burn_tip, follower_burn_tip); + assert_eq!(exited_peer_stacks_tip, follower_stacks_tip); + }); +} + +/// Boot a follower off of a seed node by having the seed node push its blocks to the follower via +/// the p2p stack. The follower will buffer up Nakamoto blocks and forward them to its relayer as +/// needed. +#[test] +fn test_nakamoto_boot_node_from_block_push() { + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full reward cycle + vec![true, true, true, true, true, true, true, true, true, true], + ]; + + let rc_len = 10u64; + let (peer, mut followers) = make_nakamoto_peers_from_invs( + function_name!(), + &observer, + rc_len as u32, + 5, + bitvecs.clone(), + 1, + ); + let peer_nk = peer.to_neighbor().addr; + let mut follower = followers.pop().unwrap(); + + let test_path = TestPeer::make_test_path(&follower.config); + let stackerdb_path = format!("{}/stacker_db.sqlite", &test_path); + let follower_stacker_dbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); + + // disable the follower's ability to download blocks from the seed peer + follower.network.connection_opts.disable_block_download = true; + follower.config.connection_opts.disable_block_download = true; + + let (seed_comms, mut follower_comms) = SeedNode::comms(); + + thread::scope(|s| { + s.spawn(|| { + SeedNode::main(peer, rc_len, seed_comms); + }); + + let mut seed_exited = false; + let mut exited_peer = None; + let (mut follower_dns_client, follower_dns_thread_handle) = dns_thread_start(100); + + while !seed_exited { + // follower will forward pushed data to its relayer + loop { + let network_result_res = + follower.run_with_ibd(true, Some(&mut follower_dns_client)); + if let Ok((network_result, _)) = network_result_res { + if network_result.num_connected_peers > 0 { + break; + } + } + } + + match follower_comms.try_recv() { + None => {} + Some(SeedData::BurnOps(burn_ops, consensus_hash)) => { + debug!("Follower will process {}: {:?}", &consensus_hash, &burn_ops); + let (_, _, follower_ch) = follower.next_burnchain_block(burn_ops.clone()); + assert_eq!(follower_ch, consensus_hash); + } + Some(SeedData::Blocks(blocks)) => { + debug!("Follower got Nakamoto blocks {:?}", &blocks); + } + Some(SeedData::Exit(exited)) => { + debug!("Follower got seed exit"); + + seed_exited = true; + exited_peer = Some(exited); + follower_comms.send_exit(); + } + } + + follower.coord.handle_new_burnchain_block().unwrap(); + follower.coord.handle_new_stacks_block().unwrap(); + follower.coord.handle_new_nakamoto_stacks_block().unwrap(); + } + + // recover exited peer and get its chain tips + let mut exited_peer = exited_peer.unwrap(); + let sortdb = exited_peer.sortdb.take().unwrap(); + let stacks_node = exited_peer.stacks_node.take().unwrap(); + let exited_peer_burn_tip = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let exited_peer_stacks_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap(); + exited_peer.stacks_node = Some(stacks_node); + exited_peer.sortdb = Some(sortdb); + + let mut synced = false; + for i in 0..100 { + // let the follower catch up to and keep talking to the exited peer + exited_peer.run_with_ibd(false, None).unwrap(); + follower + .run_with_ibd(true, Some(&mut follower_dns_client)) + .unwrap(); + + // compare chain tips + let sortdb = follower.sortdb.take().unwrap(); + let stacks_node = follower.stacks_node.take().unwrap(); + let follower_burn_tip = + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let follower_stacks_tip = NakamotoChainState::get_canonical_block_header( + stacks_node.chainstate.db(), + &sortdb, + ) + .unwrap(); + follower.stacks_node = Some(stacks_node); + follower.sortdb = Some(sortdb); + + debug!("{}: Follower sortition tip: {:?}", i, &follower_burn_tip); + debug!("{}: Seed sortition tip: {:?}", i, &exited_peer_burn_tip); + debug!("{}: Follower stacks tip: {:?}", i, &follower_stacks_tip); + debug!("{}: Seed stacks tip: {:?}", i, &exited_peer_stacks_tip); + + if exited_peer_burn_tip.consensus_hash == follower_burn_tip.consensus_hash + && exited_peer_stacks_tip == follower_stacks_tip + { + synced = true; + break; + } + } + + assert!(synced); + }); +} From a794490f252d106446d86e1e41f79688b3362cea Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:22:58 -0400 Subject: [PATCH 0264/1400] fix: log when we process a new nakamoto block --- stackslib/src/chainstate/nakamoto/coordinator/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index f399615c80..930d47d5fb 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -567,6 +567,7 @@ impl< /// with Some(pox-anchor-block-hash) until the reward cycle info is processed in the sortition /// DB. pub fn handle_new_nakamoto_stacks_block(&mut self) -> Result, Error> { + debug!("Handle new Nakamoto block"); let canonical_sortition_tip = self.canonical_sortition_tip.clone().expect( "FAIL: processing a new Stacks block, but don't have a canonical sortition tip", ); From b139a421401b7304d4fffe89852c7df5b99aa0e5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:23:11 -0400 Subject: [PATCH 0265/1400] chore: implement NakamotoBlocks push message for p2p stack --- stackslib/src/net/codec.rs | 48 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index c0496aa14c..c115a50d82 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -41,6 +41,7 @@ use stacks_common::util::secp256k1::{ use crate::burnchains::{BurnchainView, PrivateKey, PublicKey}; use crate::chainstate::burn::ConsensusHash; +use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::stacks::{ StacksBlock, StacksMicroblock, StacksPublicKey, StacksTransaction, MAX_BLOCK_LEN, }; @@ -353,6 +354,36 @@ impl NakamotoInvData { } } +impl StacksMessageCodec for NakamotoBlocksData { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { + write_next(fd, &self.blocks)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let blocks: Vec = { + // loose upper-bound + let mut bound_read = BoundReader::from_reader(fd, MAX_MESSAGE_LEN as u64); + read_next_at_most::<_, NakamotoBlock>(&mut bound_read, NAKAMOTO_BLOCKS_PUSHED_MAX) + }?; + + // only valid if there are no dups + let mut present = HashSet::new(); + for block in blocks.iter() { + if present.contains(&block.block_id()) { + // no dups allowed + return Err(codec_error::DeserializeError( + "Invalid NakamotoBlocksData: duplicate block".to_string(), + )); + } + + present.insert(block.block_id()); + } + + Ok(NakamotoBlocksData { blocks }) + } +} + impl StacksMessageCodec for GetPoxInv { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &self.consensus_hash)?; @@ -930,6 +961,7 @@ impl StacksMessageType { StacksMessageType::StackerDBPushChunk(ref _m) => StacksMessageID::StackerDBPushChunk, StacksMessageType::GetNakamotoInv(ref _m) => StacksMessageID::GetNakamotoInv, StacksMessageType::NakamotoInv(ref _m) => StacksMessageID::NakamotoInv, + StacksMessageType::NakamotoBlocks(ref _m) => StacksMessageID::NakamotoBlocks, } } @@ -964,6 +996,7 @@ impl StacksMessageType { StacksMessageType::StackerDBPushChunk(ref _m) => "StackerDBPushChunk", StacksMessageType::GetNakamotoInv(ref _m) => "GetNakamotoInv", StacksMessageType::NakamotoInv(ref _m) => "NakamotoInv", + StacksMessageType::NakamotoBlocks(ref _m) => "NakamotoBlocks", } } @@ -1071,6 +1104,15 @@ impl StacksMessageType { StacksMessageType::NakamotoInv(ref m) => { format!("NakamotoInv({:?})", &m.tenures) } + StacksMessageType::NakamotoBlocks(ref m) => { + format!( + "NakamotoBlocks({:?})", + m.blocks + .iter() + .map(|block| block.block_id()) + .collect::>() + ) + } } } } @@ -1122,6 +1164,7 @@ impl StacksMessageCodec for StacksMessageID { } x if x == StacksMessageID::GetNakamotoInv as u8 => StacksMessageID::GetNakamotoInv, x if x == StacksMessageID::NakamotoInv as u8 => StacksMessageID::NakamotoInv, + x if x == StacksMessageID::NakamotoBlocks as u8 => StacksMessageID::NakamotoBlocks, _ => { return Err(codec_error::DeserializeError( "Unknown message ID".to_string(), @@ -1166,6 +1209,7 @@ impl StacksMessageCodec for StacksMessageType { StacksMessageType::StackerDBPushChunk(ref m) => write_next(fd, m)?, StacksMessageType::GetNakamotoInv(ref m) => write_next(fd, m)?, StacksMessageType::NakamotoInv(ref m) => write_next(fd, m)?, + StacksMessageType::NakamotoBlocks(ref m) => write_next(fd, m)?, } Ok(()) } @@ -1276,6 +1320,10 @@ impl StacksMessageCodec for StacksMessageType { let m: NakamotoInvData = read_next(fd)?; StacksMessageType::NakamotoInv(m) } + StacksMessageID::NakamotoBlocks => { + let m: NakamotoBlocksData = read_next(fd)?; + StacksMessageType::NakamotoBlocks(m) + } StacksMessageID::Reserved => { return Err(codec_error::DeserializeError( "Unsupported message ID 'reserved'".to_string(), From 4e2f82c4c81102fcae0386139c7de260f89a2397 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:24:14 -0400 Subject: [PATCH 0266/1400] chore: document all fault-injection flags, and expand the maximum number of buffered messages for each kind of buffered data message we support (including Nakamoto blocks) --- stackslib/src/net/connection.rs | 45 ++++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 878ab04efb..06e3c54f85 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -381,6 +381,7 @@ pub struct ConnectionOptions { pub max_buffered_microblocks_available: u64, pub max_buffered_blocks: u64, pub max_buffered_microblocks: u64, + pub max_buffered_nakamoto_blocks: u64, /// how often to query a remote peer for its mempool, in seconds pub mempool_sync_interval: u64, /// how many transactions to ask for in a mempool query @@ -393,30 +394,55 @@ pub struct ConnectionOptions { pub socket_send_buffer_size: u32, /// whether or not to announce or accept neighbors that are behind private networks pub private_neighbors: bool, + /// maximum number of confirmations for a nakamoto block's sortition for which it will be + /// pushed + pub max_nakamoto_block_relay_age: u64, + /// The authorization token to enable the block proposal RPC endpoint + pub block_proposal_token: Option, // fault injection + /// Disable neighbor walk and discovery pub disable_neighbor_walk: bool, + /// Disable sharing neighbors to a remote requester pub disable_chat_neighbors: bool, + /// Disable block inventory sync state machine pub disable_inv_sync: bool, + /// Disable sending inventory messages to a remote requester pub disable_inv_chat: bool, + /// Disable block download state machine pub disable_block_download: bool, + /// Disable network pruning pub disable_network_prune: bool, + /// Disable banning misbehaving peers pub disable_network_bans: bool, + /// Disable block availability advertisement pub disable_block_advertisement: bool, + /// Disable block pushing pub disable_block_push: bool, + /// Disable microblock pushing pub disable_microblock_push: bool, + /// Disable walk pingbacks -- don't attempt to walk to a remote peer even if it contacted us + /// first pub disable_pingbacks: bool, + /// Disable walking to inbound neighbors pub disable_inbound_walks: bool, + /// Disable all attempts to learn our IP address pub disable_natpunch: bool, + /// Disable handshakes from inbound neighbors pub disable_inbound_handshakes: bool, + /// Disable getting chunks from StackerDB (e.g. to test push-only) pub disable_stackerdb_get_chunks: bool, + /// Unconditionally disconnect a peer after this amount of time pub force_disconnect_interval: Option, /// If set to true, this forces the p2p state machine to believe that it is running in /// the reward cycle in which Nakamoto activates, and thus needs to run both the epoch /// 2.x and Nakamoto state machines. pub force_nakamoto_epoch_transition: bool, - /// The authorization token to enable the block proposal RPC endpoint - pub block_proposal_token: Option, + + // test facilitation + /// Do not require that an unsolicited message originate from an authenticated, connected + /// neighbor + pub test_disable_unsolicited_message_authentication: bool, } impl std::default::Default for ConnectionOptions { @@ -481,16 +507,19 @@ impl std::default::Default for ConnectionOptions { max_microblock_push: 10, // maximum number of microblocks messages to push out via our anti-entropy protocol antientropy_retry: 60, // retry pushing data once every minute antientropy_public: true, // run antientropy even if we're NOT NAT'ed - max_buffered_blocks_available: 1, - max_buffered_microblocks_available: 1, - max_buffered_blocks: 1, - max_buffered_microblocks: 10, + max_buffered_blocks_available: 5, + max_buffered_microblocks_available: 5, + max_buffered_blocks: 5, + max_buffered_microblocks: 1024, + max_buffered_nakamoto_blocks: 1024, mempool_sync_interval: 30, // number of seconds in-between mempool sync mempool_max_tx_query: 128, // maximum number of transactions to visit per mempool query mempool_sync_timeout: 180, // how long a mempool sync can go for (3 minutes) socket_recv_buffer_size: 131072, // Linux default socket_send_buffer_size: 16384, // Linux default private_neighbors: true, + max_nakamoto_block_relay_age: 6, + block_proposal_token: None, // no faults on by default disable_neighbor_walk: false, @@ -510,7 +539,9 @@ impl std::default::Default for ConnectionOptions { disable_stackerdb_get_chunks: false, force_disconnect_interval: None, force_nakamoto_epoch_transition: false, - block_proposal_token: None, + + // no test facilitations on by default + test_disable_unsolicited_message_authentication: false, } } } From 58173a0ceaa30704126b622ccd2dd106f5925f28 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:24:45 -0400 Subject: [PATCH 0267/1400] chore: fault-injection for Nakamoto block download --- stackslib/src/net/download/nakamoto/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index ddef979681..b856afab44 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -217,6 +217,10 @@ impl PeerNetwork { chainstate: &StacksChainState, ibd: bool, ) -> Result>, NetError> { + if self.connection_opts.disable_block_download { + return Ok(HashMap::new()); + } + let res = self.sync_blocks_nakamoto(burnchain_height, sortdb, chainstate, ibd)?; let Some(mut block_downloader) = self.block_downloader_nakamoto.take() else { From 991487c15c3af5efc221e8eb9f74d2e5450715ae Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:25:00 -0400 Subject: [PATCH 0268/1400] chore: track pushed NakamotoBlocks in the NetworkResult struct, so they can be processed by the relayer --- stackslib/src/net/mod.rs | 53 +++++++++++++++++++++++++++++++++------- 1 file changed, 44 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index bd064774c5..d7d0b663fb 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -137,6 +137,7 @@ pub mod relay; pub mod rpc; pub mod server; pub mod stackerdb; +pub mod unsolicited; pub use crate::net::neighbors::{NeighborComms, PeerNetworkComms}; use crate::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBSyncResult, StackerDBs}; @@ -906,15 +907,24 @@ pub struct PoxInvData { pub pox_bitvec: Vec, // a bit will be '1' if the node knows for sure the status of its reward cycle's anchor block; 0 if not. } +/// Stacks epoch 2.x pushed block #[derive(Debug, Clone, PartialEq)] pub struct BlocksDatum(pub ConsensusHash, pub StacksBlock); -/// Blocks pushed +/// Stacks epoch 2.x blocks pushed #[derive(Debug, Clone, PartialEq)] pub struct BlocksData { pub blocks: Vec, } +/// Nakamoto epoch 3.x blocks pushed. +/// No need for a separate NakamotoBlocksDatum struct, because the consensus hashes that place this +/// block into the block stream are already embedded within the header +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoBlocksData { + pub blocks: Vec, +} + /// Microblocks pushed #[derive(Debug, Clone, PartialEq)] pub struct MicroblocksData { @@ -1138,6 +1148,7 @@ pub enum StacksMessageType { // Nakamoto-specific GetNakamotoInv(GetNakamotoInvData), NakamotoInv(NakamotoInvData), + NakamotoBlocks(NakamotoBlocksData), } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -1172,6 +1183,7 @@ pub enum StacksMessageID { // nakamoto GetNakamotoInv = 26, NakamotoInv = 27, + NakamotoBlocks = 28, // reserved Reserved = 255, } @@ -1263,11 +1275,16 @@ pub const GETPOXINV_MAX_BITLEN: u64 = 4096; #[cfg(test)] pub const GETPOXINV_MAX_BITLEN: u64 = 8; -// maximum number of blocks that can be pushed at once (even if the entire message is undersized). +// maximum number of Stacks epoch2.x blocks that can be pushed at once (even if the entire message is undersized). // This bound is needed since it bounds the amount of I/O a peer can be asked to do to validate the // message. pub const BLOCKS_PUSHED_MAX: u32 = 32; +// maximum number of Nakamoto blocks that can be pushed at once (even if the entire message is undersized). +// This bound is needed since it bounds the amount of I/O a peer can be asked to do to validate the +// message. +pub const NAKAMOTO_BLOCKS_PUSHED_MAX: u32 = 32; + /// neighbor identifier #[derive(Clone, Eq, PartialOrd, Ord)] pub struct NeighborKey { @@ -1423,6 +1440,7 @@ pub const DENY_BAN_DURATION: u64 = 86400; // seconds (1 day) pub const DENY_MIN_BAN_DURATION: u64 = 2; /// Result of doing network work +#[derive(Clone)] pub struct NetworkResult { /// PoX ID as it was when we begin downloading blocks (set if we have downloaded new blocks) pub download_pox_id: Option, @@ -1440,6 +1458,8 @@ pub struct NetworkResult { pub pushed_blocks: HashMap>, /// all Stacks 2.x microblocks pushed to us, and the relay hints from the message pub pushed_microblocks: HashMap, MicroblocksData)>>, + /// all Stacks 3.x blocks pushed to us + pub pushed_nakamoto_blocks: HashMap, NakamotoBlocksData)>>, /// transactions sent to us by the http server pub uploaded_transactions: Vec, /// blocks sent to us via the http server @@ -1460,9 +1480,11 @@ pub struct NetworkResult { pub num_inv_sync_passes: u64, /// Number of times the Stacks 2.x block downloader has completed one pass pub num_download_passes: u64, + /// Number of connected peers + pub num_connected_peers: usize, /// The observed burnchain height pub burn_height: u64, - /// The consensus hash of the start of this reward cycle + /// The consensus hash of the burnchain tip (prefixed `rc_` for historical reasons) pub rc_consensus_hash: ConsensusHash, /// The current StackerDB configs pub stacker_db_configs: HashMap, @@ -1473,6 +1495,7 @@ impl NetworkResult { num_state_machine_passes: u64, num_inv_sync_passes: u64, num_download_passes: u64, + num_connected_peers: usize, burn_height: u64, rc_consensus_hash: ConsensusHash, stacker_db_configs: HashMap, @@ -1486,6 +1509,7 @@ impl NetworkResult { pushed_transactions: HashMap::new(), pushed_blocks: HashMap::new(), pushed_microblocks: HashMap::new(), + pushed_nakamoto_blocks: HashMap::new(), uploaded_transactions: vec![], uploaded_blocks: vec![], uploaded_microblocks: vec![], @@ -1496,6 +1520,7 @@ impl NetworkResult { num_state_machine_passes: num_state_machine_passes, num_inv_sync_passes: num_inv_sync_passes, num_download_passes: num_download_passes, + num_connected_peers, burn_height, rc_consensus_hash, stacker_db_configs, @@ -1513,7 +1538,7 @@ impl NetworkResult { } pub fn has_nakamoto_blocks(&self) -> bool { - self.nakamoto_blocks.len() > 0 + self.nakamoto_blocks.len() > 0 || self.pushed_nakamoto_blocks.len() > 0 } pub fn has_transactions(&self) -> bool { @@ -1555,7 +1580,7 @@ impl NetworkResult { pub fn consume_unsolicited( &mut self, unhandled_messages: HashMap>, - ) -> () { + ) { for (neighbor_key, messages) in unhandled_messages.into_iter() { for message in messages.into_iter() { match message.payload { @@ -1585,6 +1610,16 @@ impl NetworkResult { .insert(neighbor_key.clone(), vec![(message.relayers, tx_data)]); } } + StacksMessageType::NakamotoBlocks(block_data) => { + if let Some(nakamoto_blocks_msgs) = + self.pushed_nakamoto_blocks.get_mut(&neighbor_key) + { + nakamoto_blocks_msgs.push((message.relayers, block_data)); + } else { + self.pushed_nakamoto_blocks + .insert(neighbor_key.clone(), vec![(message.relayers, block_data)]); + } + } _ => { // forward along if let Some(messages) = self.unhandled_messages.get_mut(&neighbor_key) { @@ -2745,8 +2780,8 @@ pub mod test { &mut self, ibd: bool, dns_client: Option<&mut DNSClient>, - ) -> Result { - let mut net_result = self.step_with_ibd_and_dns(ibd, dns_client)?; + ) -> Result<(NetworkResult, ProcessedNetReceipts), net_error> { + let net_result = self.step_with_ibd_and_dns(ibd, dns_client)?; let mut sortdb = self.sortdb.take().unwrap(); let mut stacks_node = self.stacks_node.take().unwrap(); let mut mempool = self.mempool.take().unwrap(); @@ -2754,7 +2789,7 @@ pub mod test { let receipts_res = self.relayer.process_network_result( self.network.get_local_peer(), - &mut net_result, + &mut net_result.clone(), &mut sortdb, &mut stacks_node.chainstate, &mut mempool, @@ -2772,7 +2807,7 @@ pub mod test { self.coord.handle_new_stacks_block().unwrap(); self.coord.handle_new_nakamoto_stacks_block().unwrap(); - receipts_res + receipts_res.and_then(|receipts| Ok((net_result, receipts))) } pub fn step_dns(&mut self, dns_client: &mut DNSClient) -> Result { From 175f5b80be45de5f1869109a4703d1f49c8e36dc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:25:20 -0400 Subject: [PATCH 0269/1400] chore: need Clone for StackerDBSyncResult (since we need it for NetworkResult) --- stackslib/src/net/stackerdb/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 24265410ee..b6c856a929 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -154,6 +154,7 @@ pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; pub const MINER_SLOT_COUNT: u32 = 1; /// Final result of synchronizing state with a remote set of DB replicas +#[derive(Clone)] pub struct StackerDBSyncResult { /// which contract this is a replica for pub contract_id: QualifiedContractIdentifier, From ad7f2555224f3493d746dbb77ea52d5936e2284e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 13:25:49 -0400 Subject: [PATCH 0270/1400] chore: relay test module --- stackslib/src/net/tests/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 82e1b8b814..57b58f2534 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -18,6 +18,7 @@ pub mod download; pub mod httpcore; pub mod inv; pub mod neighbors; +pub mod relay; use clarity::vm::clarity::ClarityConnection; use clarity::vm::types::PrincipalData; From f2e8b8841c2128c2cf9a47217428c8c218ea2ca0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 15:02:54 -0400 Subject: [PATCH 0271/1400] chore: fmt --- stackslib/src/net/relay.rs | 9 +++++++-- stackslib/src/net/tests/relay/epoch2x.rs | 6 ++++-- stackslib/src/net/tests/relay/nakamoto.rs | 7 ++----- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 75834cc9c8..a4b7389ad5 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -2500,7 +2500,13 @@ impl Relayer { coord_comms: Option<&CoordinatorChannels>, ) -> u64 { let mut num_new_nakamoto_blocks = 0; - match Self::process_new_nakamoto_blocks(network_result, burnchain, sortdb, chainstate, coord_comms) { + match Self::process_new_nakamoto_blocks( + network_result, + burnchain, + sortdb, + chainstate, + coord_comms, + ) { Ok((nakamoto_blocks_and_relayers, bad_neighbors)) => { num_new_nakamoto_blocks = nakamoto_blocks_and_relayers .iter() @@ -3109,4 +3115,3 @@ impl PeerNetwork { } } } - diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 817af95d72..1a383f7f87 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -2932,7 +2932,6 @@ fn process_new_blocks_rejects_problematic_asts() { ) .unwrap(); - // this tx would be problematic without our checks if let Err(ChainstateError::ProblematicTransaction(txid)) = StacksBlockBuilder::make_anchored_block_from_txs( @@ -2978,7 +2977,10 @@ fn process_new_blocks_rejects_problematic_asts() { bad_block.header.tx_merkle_root = merkle_tree.root(); chainstate - .reload_unconfirmed_state(&sortdb.index_handle(&tip.sortition_id), parent_index_hash.clone()) + .reload_unconfirmed_state( + &sortdb.index_handle(&tip.sortition_id), + parent_index_hash.clone(), + ) .unwrap(); // make a bad microblock diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index f99a2d4efa..bbabf6fc0d 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -37,13 +37,10 @@ use super::*; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::burnchains::tests::TestMiner; use crate::chainstate::burn::operations::BlockstackOperationType; -use crate::chainstate::nakamoto::coordinator::tests::{ - make_token_transfer, -}; +use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::stacks::boot::test::{ - key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, - with_sortdb, + key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, with_sortdb, }; use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; From a7c5a1f061a3abf9182f69b2e1d3070d15407405 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 6 Jun 2024 16:32:01 -0400 Subject: [PATCH 0272/1400] fix: verify on receipt of the blocks that it's signed by the signers -- both when we buffer it, and when we relay it --- stackslib/src/net/mod.rs | 4 ++ stackslib/src/net/relay.rs | 92 ++++++++++++++++++++++++++++---- stackslib/src/net/unsolicited.rs | 36 +++++++++++-- 3 files changed, 120 insertions(+), 12 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 026b9e026f..e8e52fd137 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -285,6 +285,8 @@ pub enum Error { InvalidState, /// Waiting for DNS resolution WaitingForDNS, + /// No reward set for given reward cycle + NoPoXRewardSet(u64), } impl From for Error { @@ -433,6 +435,7 @@ impl fmt::Display for Error { Error::Http(e) => fmt::Display::fmt(&e, f), Error::InvalidState => write!(f, "Invalid state-machine state reached"), Error::WaitingForDNS => write!(f, "Waiting for DNS resolution"), + Error::NoPoXRewardSet(rc) => write!(f, "No PoX reward set for cycle {}", rc), } } } @@ -506,6 +509,7 @@ impl error::Error for Error { Error::Http(ref e) => Some(e), Error::InvalidState => None, Error::WaitingForDNS => None, + Error::NoPoXRewardSet(..) => None, } } } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index a4b7389ad5..750fba7f6b 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -549,9 +549,15 @@ impl Relayer { /// Given Nakamoto blocks pushed to us, verify that they correspond to expected block data. pub fn validate_nakamoto_blocks_push( + burnchain: &Burnchain, conn: &SortitionDBConn, + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, nakamoto_blocks_data: &NakamotoBlocksData, ) -> Result<(), net_error> { + let mut loaded_reward_sets = HashMap::new(); + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(conn)?; + for nakamoto_block in nakamoto_blocks_data.blocks.iter() { // is this the right Stacks block for this sortition? let Some(sn) = SortitionDB::get_block_snapshot_consensus( @@ -578,6 +584,71 @@ impl Relayer { ); return Err(net_error::InvalidMessage); } + + // is the block signed by the active reward set? + let sn_rc = burnchain + .pox_reward_cycle(sn.block_height) + .expect("FATAL: sortition has no reward cycle"); + let reward_cycle_info = if let Some(rc_info) = loaded_reward_sets.get(&sn_rc) { + rc_info + } else { + let Some((reward_set_info, _)) = load_nakamoto_reward_set( + sn_rc, + &tip_sn.sortition_id, + burnchain, + chainstate, + sortdb, + &OnChainRewardSetProvider::new(), + ) + .map_err(|e| { + error!( + "Failed to load reward cycle info for cycle {}: {:?}", + sn_rc, &e + ); + match e { + CoordinatorError::ChainstateError(e) => { + error!( + "No RewardCycleInfo loaded for tip {}: {:?}", + &sn.consensus_hash, &e + ); + net_error::ChainstateError(format!("{:?}", &e)) + } + CoordinatorError::DBError(e) => { + error!( + "No RewardCycleInfo loaded for tip {}: {:?}", + &sn.consensus_hash, &e + ); + net_error::DBError(e) + } + _ => { + error!( + "Failed to load RewardCycleInfo for tip {}: {:?}", + &sn.consensus_hash, &e + ); + net_error::NoPoXRewardSet(sn_rc) + } + } + })? + else { + error!("No reward set for reward cycle {}", &sn_rc); + return Err(net_error::NoPoXRewardSet(sn_rc)); + }; + + loaded_reward_sets.insert(sn_rc, reward_set_info); + loaded_reward_sets.get(&sn_rc).expect("FATAL: infallible") + }; + + let Some(reward_set) = reward_cycle_info.known_selected_anchor_block() else { + error!("No reward set for reward cycle {}", &sn_rc); + return Err(net_error::NoPoXRewardSet(sn_rc)); + }; + + if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { + info!( + "Signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), sn_rc, &e + ); + return Err(net_error::InvalidMessage); + } } Ok(()) } @@ -1467,21 +1538,25 @@ impl Relayer { for (neighbor_key, relayers_and_block_data) in network_result.pushed_nakamoto_blocks.iter() { for (relayers, nakamoto_blocks_data) in relayers_and_block_data.iter() { - let mut good = true; let mut accepted_blocks = vec![]; - if let Err(_e) = Relayer::validate_nakamoto_blocks_push( + if let Err(e) = Relayer::validate_nakamoto_blocks_push( + burnchain, &sortdb.index_conn(), + sortdb, + chainstate, nakamoto_blocks_data, ) { + info!( + "Failed to validate Nakamoto blocks pushed from {:?}: {:?}", + neighbor_key, &e + ); + // punish this peer bad_neighbors.push((*neighbor_key).clone()); - good = false; + break; } for nakamoto_block in nakamoto_blocks_data.blocks.iter() { - if !good { - break; - } let block_id = nakamoto_block.block_id(); debug!( "Received pushed Nakamoto block {} from {}", @@ -1513,7 +1588,6 @@ impl Relayer { Err(chainstate_error::InvalidStacksBlock(msg)) => { warn!("Invalid pushed Nakamoto block {}: {}", &block_id, msg); bad_neighbors.push((*neighbor_key).clone()); - good = false; break; } Err(e) => { @@ -1521,12 +1595,12 @@ impl Relayer { "Could not process pushed Nakamoto block {}: {:?}", &block_id, &e ); - good = false; break; } } } - if good && accepted_blocks.len() > 0 { + + if accepted_blocks.len() > 0 { new_blocks_and_relayers.push((relayers.clone(), accepted_blocks)); } } diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 29d9009f6f..cf7ef67089 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -711,7 +711,9 @@ impl PeerNetwork { Err(e) => { info!( "{:?}: Failed to query block snapshot for {}: {:?}", - &self.local_peer, &nakamoto_block.header.consensus_hash, &e + self.get_local_peer(), + &nakamoto_block.header.consensus_hash, + &e ); return false; } @@ -720,13 +722,41 @@ impl PeerNetwork { if !sn.pox_valid { info!( "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", - &self.local_peer, &nakamoto_block.header.consensus_hash + self.get_local_peer(), + &nakamoto_block.header.consensus_hash ); return false; } // block must be signed by reward set signers - // TODO + let sn_rc = self + .burnchain + .pox_reward_cycle(sn.block_height) + .expect("FATAL: sortition has no reward cycle"); + let Some(rc_data) = self.current_reward_sets.get(&sn_rc) else { + info!( + "{:?}: Failed to validate Nakamoto block {}/{}: no reward set", + self.get_local_peer(), + &nakamoto_block.header.consensus_hash, + &nakamoto_block.header.block_hash() + ); + return false; + }; + let Some(reward_set) = rc_data.reward_set() else { + info!( + "{:?}: No reward set for reward cycle {}", + self.get_local_peer(), + sn_rc + ); + return false; + }; + + if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { + info!( + "{:?}: signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), sn_rc, &e + ); + return false; + } // the block is well-formed, but we'd buffer if we can't process it yet !can_process From e721bd53098b5088d97e2503c1dae7983f9f8dff Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 3 Jun 2024 16:24:45 -0500 Subject: [PATCH 0273/1400] feat: allow punishment in block commits, enforce via bitvec --- stacks-common/src/types/mod.rs | 16 + stackslib/src/burnchains/mod.rs | 7 + stackslib/src/burnchains/tests/burnchain.rs | 4 + stackslib/src/burnchains/tests/db.rs | 1 + .../src/chainstate/burn/db/processing.rs | 7 +- stackslib/src/chainstate/burn/db/sortdb.rs | 132 ++++--- stackslib/src/chainstate/burn/distribution.rs | 4 + .../burn/operations/leader_block_commit.rs | 333 ++++++++++++------ .../src/chainstate/burn/operations/mod.rs | 8 + stackslib/src/chainstate/burn/sortition.rs | 1 + stackslib/src/chainstate/coordinator/tests.rs | 4 + .../chainstate/nakamoto/coordinator/mod.rs | 14 + stackslib/src/chainstate/nakamoto/mod.rs | 130 ++++++- .../src/chainstate/nakamoto/tests/mod.rs | 1 + stackslib/src/chainstate/stacks/block.rs | 1 + .../src/burnchains/mocknet_controller.rs | 1 + testnet/stacks-node/src/chain_data.rs | 12 + .../stacks-node/src/nakamoto_node/miner.rs | 8 +- .../stacks-node/src/nakamoto_node/relayer.rs | 3 +- testnet/stacks-node/src/neon_node.rs | 1 + testnet/stacks-node/src/node.rs | 1 + testnet/stacks-node/src/tests/epoch_205.rs | 1 + testnet/stacks-node/src/tests/epoch_21.rs | 1 + .../src/tests/nakamoto_integrations.rs | 2 +- 24 files changed, 489 insertions(+), 204 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 9e0c8f40b9..05b17e05a5 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -121,6 +121,22 @@ impl StacksEpochId { } } + /// Whether or not this epoch supports the punishment of PoX reward + /// recipients using the bitvec scheme + pub fn allows_pox_punishment(&self) -> bool { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => false, + StacksEpochId::Epoch30 => true, + } + } + /// Does this epoch support unlocking PoX contributors that miss a slot? /// /// Epoch 2.0 - 2.05 didn't support this feature, but they weren't epoch-guarded on it. Instead, diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 23dc50f62c..6068e1c47d 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -693,6 +693,8 @@ pub enum Error { CoordinatorClosed, /// Graceful shutdown error ShutdownInitiated, + /// No epoch defined at that height + NoStacksEpoch, } impl fmt::Display for Error { @@ -718,6 +720,10 @@ impl fmt::Display for Error { ), Error::CoordinatorClosed => write!(f, "ChainsCoordinator channel hung up"), Error::ShutdownInitiated => write!(f, "Graceful shutdown was initiated"), + Error::NoStacksEpoch => write!( + f, + "No Stacks epoch is defined at the height being evaluated" + ), } } } @@ -741,6 +747,7 @@ impl error::Error for Error { Error::NonCanonicalPoxId(_, _) => None, Error::CoordinatorClosed => None, Error::ShutdownInitiated => None, + Error::NoStacksEpoch => None, } } } diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index e9a54bd041..a0f1bd8bac 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -153,6 +153,7 @@ fn test_process_block_ops() { let block_commit_1 = LeaderBlockCommitOp { sunset_burn: 0, + punished: vec![], commit_outs: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222").unwrap(), @@ -191,6 +192,7 @@ fn test_process_block_ops() { let block_commit_2 = LeaderBlockCommitOp { sunset_burn: 0, + punished: vec![], commit_outs: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222223").unwrap(), @@ -229,6 +231,7 @@ fn test_process_block_ops() { let block_commit_3 = LeaderBlockCommitOp { sunset_burn: 0, + punished: vec![], commit_outs: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222224").unwrap(), @@ -778,6 +781,7 @@ fn test_burn_snapshot_sequence() { if i > 0 { let next_block_commit = LeaderBlockCommitOp { sunset_burn: 0, + punished: vec![], commit_outs: vec![], block_header_hash: BlockHeaderHash::from_bytes(&vec![ i, i, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index 5a8d958f12..fb5d141430 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -515,6 +515,7 @@ pub fn make_simple_block_commit( let block_height = burn_header.block_height; let mut new_op = LeaderBlockCommitOp { sunset_burn: 0, + punished: vec![], block_header_hash: block_hash, new_seed: VRFSeed([1u8; 32]), parent_block_ptr: 0, diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index d6c33ab608..a3377f9d10 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -40,7 +40,7 @@ impl<'a> SortitionHandleTx<'a> { fn check_transaction( &mut self, burnchain: &Burnchain, - blockstack_op: &BlockstackOperationType, + blockstack_op: &mut BlockstackOperationType, reward_info: Option<&RewardSetInfo>, ) -> Result<(), BurnchainError> { match blockstack_op { @@ -53,7 +53,7 @@ impl<'a> SortitionHandleTx<'a> { BurnchainError::OpError(e) }) } - BlockstackOperationType::LeaderBlockCommit(ref op) => { + BlockstackOperationType::LeaderBlockCommit(ref mut op) => { op.check(burnchain, self, reward_info).map_err(|e| { warn!( "REJECTED({}) leader block commit {} at {},{} (parent {},{}): {:?}", @@ -259,7 +259,7 @@ impl<'a> SortitionHandleTx<'a> { let mut missed_block_commits = vec![]; // classify and check each transaction - blockstack_txs.retain(|blockstack_op| { + blockstack_txs.retain_mut(|blockstack_op| { match self.check_transaction(burnchain, blockstack_op, reward_set_info) { Ok(_) => true, Err(BurnchainError::OpError(OpError::MissedBlockCommit(missed_op))) => { @@ -404,6 +404,7 @@ mod tests { block_height: 102, burn_parent_modulus: (101 % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), + punished: vec![], }; let mut burnchain = Burnchain::default_unittest(100, &first_burn_hash); diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index a2a3c75c62..e4d2ac22f0 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -279,6 +279,14 @@ impl FromRow for LeaderBlockCommitOp { let burn_parent_modulus: u8 = row.get_unwrap("burn_parent_modulus"); + let punished_str: Option = row.get_unwrap("punished"); + let punished = punished_str + .as_deref() + .map(serde_json::from_str) + .transpose() + .map_err(|e| db_error::SerializationError(e))? + .unwrap_or_else(|| vec![]); + let block_commit = LeaderBlockCommitOp { block_header_hash, new_seed, @@ -298,6 +306,7 @@ impl FromRow for LeaderBlockCommitOp { vtxindex, block_height, burn_header_hash, + punished, }; Ok(block_commit) } @@ -505,7 +514,7 @@ impl FromRow for StacksEpoch { } } -pub const SORTITION_DB_VERSION: &'static str = "8"; +pub const SORTITION_DB_VERSION: &'static str = "9"; const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ r#" @@ -741,6 +750,9 @@ const SORTITION_DB_SCHEMA_8: &'static [&'static str] = &[ );"#, ]; +static SORTITION_DB_SCHEMA_9: &[&'static str] = + &[r#"ALTER TABLE block_commits ADD punished TEXT DEFAULT NULL;"#]; + const LAST_SORTITION_DB_INDEX: &'static str = "index_block_commits_by_sender"; const SORTITION_DB_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS snapshots_block_hashes ON snapshots(block_height,index_root,winning_stacks_block_hash);", @@ -1558,6 +1570,11 @@ impl<'a> SortitionHandleTx<'a> { reward_set_vrf_seed: &SortitionHash, next_pox_info: Option<&RewardCycleInfo>, ) -> Result, BurnchainError> { + let allow_nakamoto_punishment = SortitionDB::get_stacks_epoch(self.sqlite(), block_height)? + .ok_or_else(|| BurnchainError::NoStacksEpoch)? + .epoch_id + .allows_pox_punishment(); + if let Some(next_pox_info) = next_pox_info { if let PoxAnchorBlockStatus::SelectedAndKnown( ref anchor_block, @@ -1606,6 +1623,7 @@ impl<'a> SortitionHandleTx<'a> { (recipient, u16::try_from(ix).unwrap()) }) .collect(), + allow_nakamoto_punishment, })) } else { test_debug!( @@ -1640,6 +1658,7 @@ impl<'a> SortitionHandleTx<'a> { Ok(Some(RewardSetInfo { anchor_block, recipients, + allow_nakamoto_punishment, })) } } else { @@ -2795,6 +2814,7 @@ impl SortitionDB { SortitionDB::apply_schema_6(&db_tx, epochs_ref)?; SortitionDB::apply_schema_7(&db_tx, epochs_ref)?; SortitionDB::apply_schema_8_tables(&db_tx, epochs_ref)?; + SortitionDB::apply_schema_9(&db_tx, epochs_ref)?; db_tx.instantiate_index()?; @@ -3030,75 +3050,20 @@ impl SortitionDB { /// Is a particular database version supported by a given epoch? pub fn is_db_version_supported_in_epoch(epoch: StacksEpochId, version: &str) -> bool { + let version_u32: u32 = version.parse().unwrap_or_else(|e| { + error!("Failed to parse sortdb version as u32: {e}"); + 0 + }); match epoch { StacksEpochId::Epoch10 => true, - StacksEpochId::Epoch20 => { - version == "1" - || version == "2" - || version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch2_05 => { - version == "2" - || version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch21 => { - version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch22 => { - version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch23 => { - version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch24 => { - version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch25 => { - version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } - StacksEpochId::Epoch30 => { - version == "3" - || version == "4" - || version == "5" - || version == "6" - || version == "7" - || version == "8" - } + StacksEpochId::Epoch20 => version_u32 >= 1, + StacksEpochId::Epoch2_05 => version_u32 >= 2, + StacksEpochId::Epoch21 => version_u32 >= 3, + StacksEpochId::Epoch22 => version_u32 >= 3, + StacksEpochId::Epoch23 => version_u32 >= 3, + StacksEpochId::Epoch24 => version_u32 >= 3, + StacksEpochId::Epoch25 => version_u32 >= 3, + StacksEpochId::Epoch30 => version_u32 >= 3, } } @@ -3338,6 +3303,22 @@ impl SortitionDB { Ok(()) } + #[cfg_attr(test, mutants::skip)] + fn apply_schema_9(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + for sql_exec in SORTITION_DB_SCHEMA_9 { + tx.execute_batch(sql_exec)?; + } + + SortitionDB::validate_and_replace_epochs(&tx, epochs)?; + + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["9"], + )?; + + Ok(()) + } + fn check_schema_version_or_error(&mut self) -> Result<(), db_error> { match SortitionDB::get_schema_version(self.conn()) { Ok(Some(version)) => { @@ -3398,6 +3379,10 @@ impl SortitionDB { tx.commit()?; self.apply_schema_8_migration(migrator.take())?; + } else if version == "8" { + let tx = self.tx_begin()?; + SortitionDB::apply_schema_9(&tx.deref(), epochs)?; + tx.commit()?; } else if version == expected_version { let tx = self.tx_begin()?; SortitionDB::validate_and_replace_epochs(&tx, epochs)?; @@ -5756,10 +5741,11 @@ impl<'a> SortitionHandleTx<'a> { &block_commit.sunset_burn.to_string(), &apparent_sender_str, &block_commit.burn_parent_modulus, + &serde_json::to_string(&block_commit.punished).unwrap(), ]; - self.execute("INSERT INTO block_commits (txid, vtxindex, block_height, burn_header_hash, block_header_hash, new_seed, parent_block_ptr, parent_vtxindex, key_block_ptr, key_vtxindex, memo, burn_fee, input, sortition_id, commit_outs, sunset_burn, apparent_sender, burn_parent_modulus) \ - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18)", args)?; + self.execute("INSERT INTO block_commits (txid, vtxindex, block_height, burn_header_hash, block_header_hash, new_seed, parent_block_ptr, parent_vtxindex, key_block_ptr, key_vtxindex, memo, burn_fee, input, sortition_id, commit_outs, sunset_burn, apparent_sender, burn_parent_modulus, punished) \ + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19)", args)?; let parent_args: &[&dyn ToSql] = &[sort_id, &block_commit.txid, &parent_sortition_id]; @@ -7151,6 +7137,7 @@ pub mod tests { block_height: block_height + 2, burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), + punished: vec![], }; let mut db = SortitionDB::connect_test(block_height, &first_burn_hash).unwrap(); @@ -7869,6 +7856,7 @@ pub mod tests { block_height: block_height + 2, burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), + punished: vec![], }; let mut db = SortitionDB::connect_test(block_height, &first_burn_hash).unwrap(); @@ -10085,6 +10073,7 @@ pub mod tests { block_height: block_height + 2, burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), + punished: vec![], }; // descends from genesis @@ -10127,6 +10116,7 @@ pub mod tests { block_height: block_height + 3, burn_parent_modulus: ((block_height + 2) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x04; 32]), + punished: vec![], }; // descends from block_commit_1 @@ -10169,6 +10159,7 @@ pub mod tests { block_height: block_height + 4, burn_parent_modulus: ((block_height + 3) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x05; 32]), + punished: vec![], }; // descends from genesis_block_commit @@ -10211,6 +10202,7 @@ pub mod tests { block_height: block_height + 5, burn_parent_modulus: ((block_height + 4) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x06; 32]), + punished: vec![], }; let mut db = SortitionDB::connect_test(block_height, &first_burn_hash).unwrap(); diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index 8687de754d..c6e7831670 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -512,6 +512,7 @@ mod tests { let input_txid = Txid(input_txid); LeaderBlockCommitOp { + punished: vec![], block_header_hash: BlockHeaderHash(block_header_hash), new_seed: VRFSeed([0; 32]), parent_block_ptr: (block_id - 1) as u32, @@ -884,6 +885,7 @@ mod tests { }; let block_commit_1 = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222") @@ -929,6 +931,7 @@ mod tests { }; let block_commit_2 = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222223") @@ -974,6 +977,7 @@ mod tests { }; let block_commit_3 = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222224") diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 539181f9af..30e6e8ff97 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -59,6 +59,33 @@ struct ParsedData { memo: u8, } +/// This struct captures how a particular +/// PoxAddress was treated by a given block commit. +#[derive(Debug, PartialEq, Clone, Eq, Serialize, Deserialize)] +pub enum Treatment { + Reward(PoxAddress), + Punish(PoxAddress), +} + +impl Treatment { + pub fn is_reward(&self) -> bool { + matches!(self, Treatment::Reward(_)) + } + pub fn is_punish(&self) -> bool { + matches!(self, Treatment::Punish(_)) + } +} + +impl std::ops::Deref for Treatment { + type Target = PoxAddress; + + fn deref(&self) -> &Self::Target { + match self { + Treatment::Reward(ref a) | Treatment::Punish(ref a) => a, + } + } +} + pub static OUTPUTS_PER_COMMIT: usize = 2; pub static BURN_BLOCK_MINED_AT_MODULUS: u64 = 5; @@ -100,6 +127,7 @@ impl LeaderBlockCommitOp { txid: Txid([0u8; 32]), vtxindex: 0, burn_header_hash: BurnchainHeaderHash::zero(), + punished: vec![], } } @@ -138,6 +166,7 @@ impl LeaderBlockCommitOp { - 1, burn_header_hash: BurnchainHeaderHash::zero(), + punished: vec![], } } @@ -425,6 +454,7 @@ impl LeaderBlockCommitOp { input, apparent_sender, + punished: Vec::new(), txid: tx.txid(), vtxindex: tx.vtxindex(), block_height: block_height, @@ -488,6 +518,7 @@ impl StacksMessageCodec for LeaderBlockCommitOp { pub struct RewardSetInfo { pub anchor_block: BlockHeaderHash, pub recipients: Vec<(PoxAddress, u16)>, + pub allow_nakamoto_punishment: bool, } #[derive(Debug, Clone)] @@ -518,8 +549,18 @@ impl RewardSetInfo { .get_reward_set_payouts_at(&intended_sortition) .map_err(|_e| op_error::BlockCommitBadOutputs)? .0; + let block_height = SortitionDB::get_block_snapshot(tx.tx(), intended_sortition) + .map_err(|_e| op_error::BlockCommitBadOutputs)? + .ok_or_else(|| op_error::BlockCommitBadOutputs)? + .block_height; + let allow_nakamoto_punishment = SortitionDB::get_stacks_epoch(tx.sqlite(), block_height) + .map_err(|_e| op_error::BlockCommitBadOutputs)? + .ok_or_else(|| op_error::BlockCommitBadOutputs)? + .epoch_id + .allows_pox_punishment(); Ok(tx.get_last_anchor_block_hash()?.map(|bhh| RewardSetInfo { + allow_nakamoto_punishment, anchor_block: bhh, recipients: intended_recipients .into_iter() @@ -563,7 +604,7 @@ impl LeaderBlockCommitOp { burnchain: &Burnchain, tx: &mut SortitionHandleTx, reward_set_info: Option<&RewardSetInfo>, - ) -> Result<(), op_error> { + ) -> Result, op_error> { let parent_block_height = u64::from(self.parent_block_ptr); if PoxConstants::has_pox_sunset(epoch_id) { @@ -595,110 +636,155 @@ impl LeaderBlockCommitOp { // the commit outputs must = the expected set of commit outputs. // * otherwise, the commit outputs must be burn outputs. ///////////////////////////////////////////////////////////////////////////////////// - if let Some(reward_set_info) = reward_set_info { - // we do some check-inversion here so that we check the commit_outs _before_ - // we check whether or not the block is descended from the anchor. - // we do this because the descended_from check isn't particularly cheap, so - // we want to make sure that any TX that forces us to perform the check - // has either burned BTC or sent BTC to the PoX recipients - - // if we're in the prepare phase, then this block-commit _must_ burn. - // No PoX descent check needs to be performed -- prepare-phase block commits - // stand alone. - if burnchain.is_in_prepare_phase(self.block_height) { - if let Err(e) = self.check_prepare_commit_burn() { - warn!("Invalid block commit: in block {} which is in the prepare phase, but did not burn to a single output as expected ({:?})", self.block_height, &e); - return Err(op_error::BlockCommitBadOutputs); - } - } else { - // Not in prepare phase, so this can be either PoB or PoX (a descent check from the - // anchor block will be necessary if the block-commit is well-formed). - // - // first, handle a corner case: - // all of the commitment outputs are _burns_ - // _and_ the reward set chose two burn addresses as reward addresses. - // then, don't need to do a pox descendant check. - let recipient_set_all_burns = reward_set_info + let Some(reward_set_info) = reward_set_info else { + // no recipient info for this sortition, so expect all burns + if !self.all_outputs_burn() { + warn!("Invalid block commit: this transaction should only have burn outputs."); + return Err(op_error::BlockCommitBadOutputs); + } + return Ok(vec![]); + }; + + // we do some check-inversion here so that we check the commit_outs _before_ + // we check whether or not the block is descended from the anchor. + // we do this because the descended_from check isn't particularly cheap, so + // we want to make sure that any TX that forces us to perform the check + // has either burned BTC or sent BTC to the PoX recipients + + // if we're in the prepare phase, then this block-commit _must_ burn. + // No PoX descent check needs to be performed -- prepare-phase block commits + // stand alone. + if burnchain.is_in_prepare_phase(self.block_height) { + if let Err(e) = self.check_prepare_commit_burn() { + warn!("Invalid block commit: in block {} which is in the prepare phase, but did not burn to a single output as expected ({:?})", self.block_height, &e); + return Err(op_error::BlockCommitBadOutputs); + } + return Ok(vec![]); + } + + // Not in prepare phase, so this can be either PoB or PoX (a descent check from the + // anchor block will be necessary if the block-commit is well-formed). + // + // first, handle a corner case: + // all of the commitment outputs are _burns_ + // _and_ the reward set chose two burn addresses as reward addresses. + // then, don't need to do a pox descendant check. + let recipient_set_all_burns = reward_set_info + .recipients + .iter() + .fold(true, |prior_is_burn, (addr, ..)| { + prior_is_burn && addr.is_burn() + }); + + if recipient_set_all_burns { + if !self.all_outputs_burn() { + warn!("Invalid block commit: recipient set should be all burns"); + return Err(op_error::BlockCommitBadOutputs); + } + return Ok(vec![]); + } + + // Now, we are checking the reward sets match, and if they don't, + // whether or not pox descendant is necessary + + let descended_from_anchor = tx.descended_from(parent_block_height, &reward_set_info.anchor_block) + .map_err(|e| { + error!("Failed to check whether parent (height={}) is descendent of anchor block={}: {}", + parent_block_height, &reward_set_info.anchor_block, e); + op_error::BlockCommitAnchorCheck + })?; + + if self.all_outputs_burn() { + // If we're not descended from the anchor, then great, this is just a "normal" non-descendant burn commit + // But, if we are descended from the anchor and nakamoto pox punishments are allowed, this commit may have + // been a double punishment + if !descended_from_anchor { + return Ok(vec![]); + } + if reward_set_info.allow_nakamoto_punishment { + // all non-burn recipients were punished -- when we do the block processing + // enforcement check, "burn recipients" can be treated as 1 or a 0 in the + // bitvec interchangeably (whether they are punished or not doesn't matter). + let punished = reward_set_info .recipients .iter() - .fold(true, |prior_is_burn, (addr, ..)| { - prior_is_burn && addr.is_burn() - }); - - if recipient_set_all_burns { - if !self.all_outputs_burn() { - warn!("Invalid block commit: recipient set should be all burns"); - return Err(op_error::BlockCommitBadOutputs); - } - } else { - let expect_pox_descendant = if self.all_outputs_burn() { - false - } else { - let mut check_recipients: Vec<_> = reward_set_info - .recipients - .iter() - .map(|(addr, ..)| addr.clone()) - .collect(); - - if check_recipients.len() == 1 { - // If the number of recipients in the set was odd, we need to pad - // with a burn address. - // NOTE: this used the old burnchain.is_mainnet() code, which always - // returns false - check_recipients.push(PoxAddress::standard_burn_address(false)) - } + .map(|(addr, _)| Treatment::Punish(addr.clone())) + .collect(); + return Ok(punished); + } else { + warn!( + "Invalid block commit: descended from PoX anchor {}, but used burn outputs", + &reward_set_info.anchor_block + ); + return Err(op_error::BlockCommitBadOutputs); + } + } else { + let mut check_recipients: Vec<_> = reward_set_info + .recipients + .iter() + .map(|(addr, ix)| (addr.clone(), *ix)) + .collect(); - if self.commit_outs.len() != check_recipients.len() { - warn!( - "Invalid block commit: expected {} PoX transfers, but commit has {}", - reward_set_info.recipients.len(), - self.commit_outs.len() - ); - return Err(op_error::BlockCommitBadOutputs); - } + if check_recipients.len() == 1 { + // If the number of recipients in the set was odd, we need to pad + // with a burn address. + // NOTE: this used the old burnchain.is_mainnet() code, which always + // returns false + check_recipients.push((PoxAddress::standard_burn_address(false), 0)) + } - // sort check_recipients and commit_outs so that we can perform an - // iterative equality check - check_recipients.sort(); - let mut commit_outs = self.commit_outs.clone(); - commit_outs.sort(); - for (found_commit, expected_commit) in - commit_outs.iter().zip(check_recipients) - { - if expected_commit.to_burnchain_repr() - != found_commit.to_burnchain_repr() - { - warn!("Invalid block commit: committed output {} does not match expected {}", - found_commit.to_burnchain_repr(), expected_commit.to_burnchain_repr()); - return Err(op_error::BlockCommitBadOutputs); - } - } - true - }; + if self.commit_outs.len() != check_recipients.len() { + warn!( + "Invalid block commit: expected {} PoX transfers, but commit has {}", + reward_set_info.recipients.len(), + self.commit_outs.len() + ); + return Err(op_error::BlockCommitBadOutputs); + } - let descended_from_anchor = tx.descended_from(parent_block_height, &reward_set_info.anchor_block) - .map_err(|e| { - error!("Failed to check whether parent (height={}) is descendent of anchor block={}: {}", - parent_block_height, &reward_set_info.anchor_block, e); - op_error::BlockCommitAnchorCheck})?; - if descended_from_anchor != expect_pox_descendant { - if descended_from_anchor { - warn!("Invalid block commit: descended from PoX anchor {}, but used burn outputs", &reward_set_info.anchor_block); - } else { - warn!("Invalid block commit: not descended from PoX anchor {}, but used PoX outputs", &reward_set_info.anchor_block); - } + // we've checked length equality, so we can just iterate through + // self.commit_outs and check if each is in `check_recipients` + // *OR* if `allows_pox_punishment`, then it could be a burn. + // NOTE: we do a find and remove here so that the same recipient + // isn't found multiple times by different commit_outs. + let mut rewarded = vec![]; + for self_commit in self.commit_outs.iter() { + let search_predicate = self_commit.to_burnchain_repr(); + let found = check_recipients + .iter() + .enumerate() + .find(|(_, (check_commit, _))| { + search_predicate == check_commit.to_burnchain_repr() + }); + if let Some((index, _)) = found { + rewarded.push(Treatment::Reward(check_recipients.remove(index).0)); + } else { + // if we didn't find the pox output, then maybe its a pox punishment? + if reward_set_info.allow_nakamoto_punishment && self_commit.is_burn() { + continue; + } else { + warn!("Invalid block commit: committed output {} does not match expected recipient set: {:?}", + self_commit.to_burnchain_repr(), check_recipients); return Err(op_error::BlockCommitBadOutputs); } - } + }; } - } else { - // no recipient info for this sortition, so expect all burns - if !self.all_outputs_burn() { - warn!("Invalid block commit: this transaction should only have burn outputs."); + + if !descended_from_anchor { + warn!( + "Invalid block commit: not descended from PoX anchor {}, but used PoX outputs", + &reward_set_info.anchor_block + ); return Err(op_error::BlockCommitBadOutputs); } - }; - Ok(()) + + let mut punished_outputs: Vec<_> = check_recipients + .into_iter() + .map(|x| Treatment::Punish(x.0)) + .collect(); + punished_outputs.extend(rewarded); + return Ok(punished_outputs); + } } fn check_single_burn_output(&self) -> Result<(), op_error> { @@ -962,8 +1048,9 @@ impl LeaderBlockCommitOp { Ok(()) } + /// Returns Ok() and a vector of PoxAddresses which were punished by this op pub fn check( - &self, + &mut self, burnchain: &Burnchain, tx: &mut SortitionHandleTx, reward_set_info: Option<&RewardSetInfo>, @@ -1017,7 +1104,7 @@ impl LeaderBlockCommitOp { return Err(op_error::MissedBlockCommit(missed_data)); } - if burnchain + let punished = if burnchain .pox_constants .is_after_pox_sunset_end(self.block_height, epoch.epoch_id) { @@ -1027,6 +1114,7 @@ impl LeaderBlockCommitOp { "apparent_sender" => %apparent_sender_repr); e })?; + vec![] } else { // either in epoch 2.1, or the PoX sunset hasn't completed yet self.check_pox(epoch.epoch_id, burnchain, tx, reward_set_info) @@ -1034,11 +1122,15 @@ impl LeaderBlockCommitOp { warn!("Invalid block-commit: bad PoX: {:?}", &e; "apparent_sender" => %apparent_sender_repr); e - })?; - } + })? + }; self.check_common(epoch.epoch_id, tx)?; + if reward_set_info.is_some_and(|r| r.allow_nakamoto_punishment) { + self.punished = punished; + } + // good to go! Ok(()) } @@ -1662,7 +1754,7 @@ mod tests { block_height: block_height, burn_parent_modulus: ((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: burn_header_hash, - }) + punished: vec![], }) }, OpFixture { // invalid -- wrong opcode @@ -1896,6 +1988,7 @@ mod tests { commit_outs: vec![], burn_fee: 12345, + punished: vec![], input: (Txid([0; 32]), 0), apparent_sender: BurnchainSigner::mock_parts( AddressHashMode::SerializeP2PKH, @@ -2025,11 +2118,12 @@ mod tests { prev_snapshot.index_root.clone() }; - let fixtures = vec![ + let mut fixtures = vec![ CheckFixture { // accept -- consumes leader_key_2 op: LeaderBlockCommitOp { sunset_burn: 0, + punished: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( "2222222222222222222222222222222222222222222222222222222222222222", @@ -2079,6 +2173,7 @@ mod tests { CheckFixture { // accept -- builds directly off of genesis block and consumes leader_key_2 op: LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2129,6 +2224,7 @@ mod tests { CheckFixture { // accept -- also consumes leader_key_1 op: LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2179,6 +2275,7 @@ mod tests { CheckFixture { // reject -- bad burn parent modulus op: LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2241,6 +2338,7 @@ mod tests { CheckFixture { // reject -- bad burn parent modulus op: LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2292,7 +2390,7 @@ mod tests { }, ]; - for (ix, fixture) in fixtures.iter().enumerate() { + for (ix, fixture) in fixtures.iter_mut().enumerate() { eprintln!("Processing {}", ix); let header = BurnchainBlockHeader { block_height: fixture.op.block_height, @@ -2412,6 +2510,7 @@ mod tests { // consumes leader_key_1 let block_commit_1 = LeaderBlockCommitOp { sunset_burn: 0, + punished: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222") .unwrap(), @@ -2556,10 +2655,11 @@ mod tests { let block_height = 124; - let fixtures = vec![ + let mut fixtures = vec![ CheckFixture { // reject -- predates start block op: LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2611,6 +2711,7 @@ mod tests { // reject -- no such leader key op: LeaderBlockCommitOp { sunset_burn: 0, + punished: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( "2222222222222222222222222222222222222222222222222222222222222222", @@ -2660,6 +2761,7 @@ mod tests { CheckFixture { // reject -- previous block must exist op: LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2710,6 +2812,7 @@ mod tests { CheckFixture { // reject -- previous block must exist in a different block op: LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2763,6 +2866,7 @@ mod tests { // here) op: LeaderBlockCommitOp { sunset_burn: 0, + punished: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( "2222222222222222222222222222222222222222222222222222222222222222", @@ -2812,6 +2916,7 @@ mod tests { CheckFixture { // reject -- fee is 0 op: LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2862,6 +2967,7 @@ mod tests { CheckFixture { // accept -- consumes leader_key_2 op: LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2912,6 +3018,7 @@ mod tests { CheckFixture { // accept -- builds directly off of genesis block and consumes leader_key_2 op: LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2962,6 +3069,7 @@ mod tests { CheckFixture { // accept -- also consumes leader_key_1 op: LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -3011,7 +3119,7 @@ mod tests { }, ]; - for (ix, fixture) in fixtures.iter().enumerate() { + for (ix, fixture) in fixtures.iter_mut().enumerate() { eprintln!("Processing {}", ix); let header = BurnchainBlockHeader { block_height: fixture.op.block_height, @@ -3122,6 +3230,7 @@ mod tests { }; let block_commit_pre_2_05 = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x02; 32]), new_seed: VRFSeed([0x03; 32]), @@ -3151,6 +3260,7 @@ mod tests { }; let block_commit_post_2_05_valid = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x03; 32]), new_seed: VRFSeed([0x04; 32]), @@ -3180,6 +3290,7 @@ mod tests { }; let block_commit_post_2_05_valid_bigger_epoch = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x03; 32]), new_seed: VRFSeed([0x04; 32]), @@ -3209,6 +3320,7 @@ mod tests { }; let block_commit_post_2_05_invalid_bad_memo = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x04; 32]), new_seed: VRFSeed([0x05; 32]), @@ -3238,6 +3350,7 @@ mod tests { }; let block_commit_post_2_05_invalid_no_memo = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x05; 32]), new_seed: VRFSeed([0x06; 32]), @@ -3267,6 +3380,7 @@ mod tests { }; let block_commit_post_2_1_valid = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x03; 32]), new_seed: VRFSeed([0x04; 32]), @@ -3296,6 +3410,7 @@ mod tests { }; let block_commit_post_2_1_valid_bigger_epoch = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x03; 32]), new_seed: VRFSeed([0x04; 32]), @@ -3325,6 +3440,7 @@ mod tests { }; let block_commit_post_2_1_invalid_bad_memo = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x04; 32]), new_seed: VRFSeed([0x05; 32]), @@ -3354,6 +3470,7 @@ mod tests { }; let block_commit_post_2_1_invalid_no_memo = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x05; 32]), new_seed: VRFSeed([0x06; 32]), @@ -3384,7 +3501,7 @@ mod tests { let all_leader_key_ops = vec![leader_key]; - let all_block_commit_ops = vec![ + let mut all_block_commit_ops = vec![ (block_commit_pre_2_05, true), (block_commit_post_2_05_valid, true), (block_commit_post_2_05_valid_bigger_epoch, true), @@ -3417,12 +3534,12 @@ mod tests { eprintln!("Tip sortition is {}", &tip.sortition_id); let mut ic = SortitionHandleTx::begin(&mut db, &tip.sortition_id).unwrap(); - for (op, pass) in all_block_commit_ops.iter() { + for (op, pass) in all_block_commit_ops.iter_mut() { if op.block_height == i + 1 { match op.check(&burnchain, &mut ic, None) { Ok(_) => { assert!( - pass, + *pass, "Check succeeded when it should have failed: {:?}", &op ); @@ -3431,7 +3548,7 @@ mod tests { } Err(op_error::BlockCommitBadEpoch) => { assert!( - !pass, + !*pass, "Check failed when it should have succeeded: {:?}", &op ); diff --git a/stackslib/src/chainstate/burn/operations/mod.rs b/stackslib/src/chainstate/burn/operations/mod.rs index 5417a3a7c9..597db3de49 100644 --- a/stackslib/src/chainstate/burn/operations/mod.rs +++ b/stackslib/src/chainstate/burn/operations/mod.rs @@ -27,6 +27,7 @@ use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::VRFPublicKey; +use self::leader_block_commit::Treatment; use crate::burnchains::{ Address, Burnchain, BurnchainBlockHeader, BurnchainRecipient, BurnchainSigner, BurnchainTransaction, Error as BurnchainError, PublicKey, Txid, @@ -242,6 +243,13 @@ pub struct LeaderBlockCommitOp { /// PoX/Burn outputs pub commit_outs: Vec, + + /// If this block commit punished one or both of its PoX recipients, + /// they will be in this vector. + /// + /// This value is set by the check() call. + pub punished: Vec, + // PoX sunset burn pub sunset_burn: u64, diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs index 9f3bc5d5ea..67362fc3b9 100644 --- a/stackslib/src/chainstate/burn/sortition.rs +++ b/stackslib/src/chainstate/burn/sortition.rs @@ -1119,6 +1119,7 @@ mod test { block_height: header.block_height, burn_parent_modulus: (i % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: header.block_hash.clone(), + punished: vec![], }; let tip = SortitionDB::get_canonical_burn_chain_tip(db.conn()).unwrap(); diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index a76f047725..3479c82a23 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -698,6 +698,7 @@ fn make_genesis_block_with_recipients( let commit_op = LeaderBlockCommitOp { sunset_burn: 0, + punished: vec![], block_header_hash: block.block_hash(), burn_fee: my_burn, input: (Txid([0; 32]), 0), @@ -970,6 +971,7 @@ fn make_stacks_block_with_input( let commit_op = LeaderBlockCommitOp { sunset_burn, + punished: vec![], block_header_hash: block.block_hash(), burn_fee: my_burn, input, @@ -2408,6 +2410,7 @@ fn test_sortition_with_reward_set() { let bad_block_recipients = Some(RewardSetInfo { anchor_block: BlockHeaderHash([0; 32]), recipients, + allow_nakamoto_punishment: false, }); let (bad_outs_op, _) = make_stacks_block_with_recipients( &sort_db, @@ -2653,6 +2656,7 @@ fn test_sortition_with_burner_reward_set() { let bad_block_recipients = Some(RewardSetInfo { anchor_block: BlockHeaderHash([0; 32]), recipients, + allow_nakamoto_punishment: false, }); let (bad_outs_op, _) = make_stacks_block_with_recipients( &sort_db, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index efc7a7b590..ab324012c8 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -90,6 +90,20 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { let cycle = burnchain .block_height_to_reward_cycle(cycle_start_burn_height) .expect("FATAL: no reward cycle for burn height"); + self.read_reward_set_nakamoto_of_cycle(cycle, chainstate, sortdb, block_id, debug_log) + } + /// Read a reward_set written while updating .signers at a given cycle_id + /// `debug_log` should be set to true if the reward set loading should + /// log messages as `debug!` instead of `error!` or `info!`. This allows + /// RPC endpoints to expose this without flooding loggers. + pub fn read_reward_set_nakamoto_of_cycle( + &self, + cycle: u64, + chainstate: &mut StacksChainState, + sortdb: &SortitionDB, + block_id: &StacksBlockId, + debug_log: bool, + ) -> Result { // figure out the block ID let Some(coinbase_height_of_calculation) = chainstate .eval_boot_code_read_only( diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 50015dace4..74b571d0ba 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -77,11 +77,13 @@ use crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::operations::{LeaderBlockCommitOp, LeaderKeyRegisterOp}; use crate::chainstate::burn::{BlockSnapshot, SortitionHash}; -use crate::chainstate::coordinator::{BlockEventDispatcher, Error}; +use crate::chainstate::coordinator::{BlockEventDispatcher, Error, OnChainRewardSetProvider}; +use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; use crate::chainstate::nakamoto::tenure::{NAKAMOTO_TENURES_SCHEMA_1, NAKAMOTO_TENURES_SCHEMA_2}; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::{POX_4_NAME, SIGNERS_UPDATE_STATE}; +use crate::chainstate::stacks::db::blocks::DummyEventDispatcher; use crate::chainstate::stacks::db::{DBConfig as ChainstateConfig, StacksChainState}; use crate::chainstate::stacks::index::marf::MarfConnection; use crate::chainstate::stacks::{ @@ -1417,6 +1419,33 @@ impl NakamotoChainState { "burn_block_hash" => %next_ready_block_snapshot.burn_header_hash ); + let elected_height = sort_db + .get_consensus_hash_height(&next_ready_block.header.consensus_hash)? + .ok_or_else(|| ChainstateError::NoSuchBlockError)?; + let elected_in_cycle = sort_db + .pox_constants + .block_height_to_reward_cycle(sort_db.first_block_height, elected_height) + .ok_or_else(|| { + ChainstateError::InvalidStacksBlock( + "Elected in block height before first_block_height".into(), + ) + })?; + let active_reward_set = OnChainRewardSetProvider::(None).read_reward_set_nakamoto_of_cycle( + elected_in_cycle, + stacks_chain_state, + sort_db, + &next_ready_block.header.parent_block_id, + false, + ).map_err(|e| { + warn!( + "Cannot process Nakamoto block: could not load reward set that elected the block"; + "err" => ?e, + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "parent_block_id" => %next_ready_block.header.parent_block_id, + ); + ChainstateError::NoSuchBlockError + })?; let (mut chainstate_tx, clarity_instance) = stacks_chain_state.chainstate_tx_begin()?; // find parent header @@ -1583,6 +1612,7 @@ impl NakamotoChainState { block_size, commit_burn, sortition_burn, + &active_reward_set, ) { Ok(next_chain_tip_info) => (Some(next_chain_tip_info), None), Err(e) => (None, Some(e)), @@ -2899,6 +2929,7 @@ impl NakamotoChainState { block_size: u64, burnchain_commit_burn: u64, burnchain_sortition_burn: u64, + active_reward_set: &RewardSet, ) -> Result< ( StacksEpochReceipt, @@ -3034,24 +3065,95 @@ impl NakamotoChainState { )); } + // this block's bitvec header must match the miner's block commit punishments + let tenure_block_commit = SortitionDB::get_block_commit( + burn_dbconn.conn(), + &tenure_block_snapshot.winning_block_txid, + &tenure_block_snapshot.sortition_id, + )? + .ok_or_else(|| { + warn!("Invalid Nakamoto block: has no block-commit in its sortition"; + "block_id" => %block.header.block_id(), + "sortition_id" => %tenure_block_snapshot.sortition_id, + "block_commit_txid" => %tenure_block_snapshot.winning_block_txid); + ChainstateError::NoSuchBlockError + })?; + + if !tenure_block_commit.punished.is_empty() { + // our block commit issued a punishment, check the reward set and bitvector + // to ensure that this was valid. + for treated_addr in tenure_block_commit.punished.iter() { + if treated_addr.is_burn() { + // Don't need to assert anything about burn addresses. + // If they were in the reward set, "punishing" them is meaningless. + continue; + } + // otherwise, we need to find the indices in the rewarded_addresses + // corresponding to this address. + let address_indices = active_reward_set + .rewarded_addresses + .iter() + .enumerate() + .filter_map(|(ix, addr)| { + if addr == treated_addr.deref() { + Some(ix) + } else { + None + } + }); + // if any of them are 0, punishment is okay. + // if all of them are 1, punishment is not okay. + // if all of them are 0, *must* have punished + let bitvec_values: Result, ChainstateError> = address_indices + .map( + |ix| { + let ix = u16::try_from(ix) + .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?; + let bitvec_value = block.header.signer_bitvec.get(ix) + .unwrap_or_else(|| { + info!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1"); + true + }); + Ok(bitvec_value) + } + ) + .collect(); + let bitvec_values = bitvec_values?; + let all_1 = bitvec_values.iter().all(|x| *x); + let all_0 = bitvec_values.iter().all(|x| !x); + if all_1 { + if treated_addr.is_punish() { + warn!( + "Invalid Nakamoto block: punished PoX address when bitvec contained 1s for the address"; + "reward_address" => %treated_addr.deref(), + "bitvec_values" => ?bitvec_values, + "block_id" => %block.header.block_id(), + ); + return Err(ChainstateError::InvalidStacksBlock( + "Bitvec does not match the block commit's PoX handling".into(), + )); + } + } else if all_0 { + if treated_addr.is_reward() { + warn!( + "Invalid Nakamoto block: rewarded PoX address when bitvec contained 0s for the address"; + "reward_address" => %treated_addr.deref(), + "bitvec_values" => ?bitvec_values, + "block_id" => %block.header.block_id(), + ); + return Err(ChainstateError::InvalidStacksBlock( + "Bitvec does not match the block commit's PoX handling".into(), + )); + } + } + } + } + // this block's tenure's block-commit contains the hash of the parent tenure's tenure-start // block. // (note that we can't check this earlier, since we need the parent tenure to have been // processed) if new_tenure && parent_chain_tip.is_nakamoto_block() && !block.is_first_mined() { - let tenure_block_commit = SortitionDB::get_block_commit( - burn_dbconn.conn(), - &tenure_block_snapshot.winning_block_txid, - &tenure_block_snapshot.sortition_id, - )? - .ok_or_else(|| { - warn!("Invalid Nakamoto block: has no block-commit in its sortition"; - "block_id" => %block.header.block_id(), - "sortition_id" => %tenure_block_snapshot.sortition_id, - "block_commit_txid" => %tenure_block_snapshot.winning_block_txid); - ChainstateError::NoSuchBlockError - })?; - let parent_tenure_start_header = Self::get_nakamoto_tenure_start_block_header(chainstate_tx.tx(), &parent_ch)? .ok_or_else(|| { diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 0a2441d388..0e02ad1ac9 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -2046,6 +2046,7 @@ fn test_make_miners_stackerdb_config() { block_height: snapshot.block_height, burn_parent_modulus: ((snapshot.block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: snapshot.burn_header_hash.clone(), + punished: vec![], }; let winning_ops = if i == 0 { diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 9827d28e9c..19e8990cea 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -1311,6 +1311,7 @@ mod test { let mut block_commit = LeaderBlockCommitOp { sunset_burn: 0, + punished: vec![], block_header_hash: header.block_hash(), new_seed: VRFSeed::from_proof(&header.proof), parent_block_ptr: 0, diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index 3db583aa48..1e2dba0ef9 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -199,6 +199,7 @@ impl BurnchainController for MocknetController { } BlockstackOperationType::LeaderBlockCommit(payload) => { BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: payload.block_header_hash, new_seed: payload.new_seed, diff --git a/testnet/stacks-node/src/chain_data.rs b/testnet/stacks-node/src/chain_data.rs index 0fcc7ca863..c761c639e7 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/testnet/stacks-node/src/chain_data.rs @@ -277,6 +277,7 @@ impl MinerStats { // mocked commit let mocked_commit = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash(DEADBEEF.clone()), new_seed: VRFSeed(DEADBEEF.clone()), @@ -441,6 +442,7 @@ impl MinerStats { for (miner, last_commit) in active_miners_and_commits.iter() { if !commit_table.contains_key(miner) { let mocked_commit = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash(DEADBEEF.clone()), new_seed: VRFSeed(DEADBEEF.clone()), @@ -550,6 +552,7 @@ pub mod tests { #[test] fn test_burn_dist_to_prob_dist() { let block_commit_1 = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -582,6 +585,7 @@ pub mod tests { }; let block_commit_2 = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -617,6 +621,7 @@ pub mod tests { }; let block_commit_3 = LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -817,6 +822,7 @@ EOF ( "miner-1".to_string(), LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -848,6 +854,7 @@ EOF ( "miner-2".to_string(), LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -882,6 +889,7 @@ EOF ( "miner-3".to_string(), LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -916,6 +924,7 @@ EOF let unconfirmed_block_commits = vec![ LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -942,6 +951,7 @@ EOF burn_header_hash: BurnchainHeaderHash([0x01; 32]), }, LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -968,6 +978,7 @@ EOF burn_header_hash: BurnchainHeaderHash([0x01; 32]), }, LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -994,6 +1005,7 @@ EOF burn_header_hash: BurnchainHeaderHash([0x01; 32]), }, LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a810a4fbf0..743200b666 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -937,13 +937,7 @@ impl BlockMinerThread { "Current reward cycle did not select a reward set. Cannot mine!".into(), )); }; - let signer_bitvec_len = reward_set - .signers - .as_ref() - .map(|x| x.len()) - .unwrap_or(0) - .try_into() - .ok(); + let signer_bitvec_len = reward_set.rewarded_addresses.len().try_into().ok(); // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index a839374b13..9399e7da5d 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -44,7 +44,7 @@ use stacks::net::db::LocalPeer; use stacks::net::relay::Relayer; use stacks::net::NetworkResult; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, VRFSeed, + BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksPublicKey, VRFSeed, }; use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_ms; @@ -507,6 +507,7 @@ impl RelayerThread { .get_active() .ok_or_else(|| NakamotoNodeError::NoVRFKeyActive)?; let op = LeaderBlockCommitOp { + punished: vec![], sunset_burn, block_header_hash: BlockHeaderHash(parent_block_id.0), burn_fee: rest_commit, diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 41ac96b50f..d761d473a6 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1113,6 +1113,7 @@ impl BlockMinerThread { let burn_parent_modulus = (current_burn_height % BURN_BLOCK_MINED_AT_MODULUS) as u8; let sender = self.keychain.get_burnchain_signer(); BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { + punished: vec![], sunset_burn, block_header_hash, burn_fee, diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 4c1d8d39cb..88037af71f 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -1035,6 +1035,7 @@ impl Node { let txid = Txid(txid_bytes); BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash, burn_fee, diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 0b363081e0..231a10074d 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -602,6 +602,7 @@ fn transition_empty_blocks() { let burn_parent_modulus = (tip_info.burn_block_height % BURN_BLOCK_MINED_AT_MODULUS) as u8; let op = BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { + punished: vec![], sunset_burn, block_header_hash: BlockHeaderHash([0xff; 32]), burn_fee: rest_commit, diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index ea9fe27569..4f7fc0d059 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1928,6 +1928,7 @@ fn transition_empty_blocks() { let burn_parent_modulus = ((tip_info.burn_block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8; let op = BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { + punished: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0xff; 32]), burn_fee: burn_fee_cap, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index ac14a55f55..58f7b6a05a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4717,7 +4717,7 @@ fn signer_chainstate() { let timer = Instant::now(); while proposals_submitted.load(Ordering::SeqCst) <= before { thread::sleep(Duration::from_millis(5)); - if timer.elapsed() > Duration::from_secs(20) { + if timer.elapsed() > Duration::from_secs(30) { panic!("Timed out waiting for nakamoto miner to produce intermediate block"); } } From f91c26ab67ffd2e25bf34d156ff1f02778ec4863 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 7 Jun 2024 11:45:11 -0400 Subject: [PATCH 0274/1400] test: fix block proposal tests --- stackslib/src/chainstate/stacks/mod.rs | 111 ++---- stackslib/src/net/api/postblock_proposal.rs | 24 +- .../src/net/api/tests/postblock_proposal.rs | 318 ++++++++++++++---- 3 files changed, 312 insertions(+), 141 deletions(-) diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 6a10c15463..db43a63214 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1126,14 +1126,18 @@ pub const MAX_MICROBLOCK_SIZE: u32 = 65536; #[cfg(test)] pub mod test { + use boot::signers_voting_tests::make_dummy_tx; use clarity::util::get_epoch_time_secs; use clarity::vm::representations::{ClarityName, ContractName}; use clarity::vm::ClarityVersion; use stacks_common::bitvec::BitVec; use stacks_common::util::hash::*; use stacks_common::util::log; + use stacks_common::util::secp256k1::Secp256k1PrivateKey; use super::*; + use crate::chainstate::burn::BlockSnapshot; + use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use crate::chainstate::stacks::{StacksPublicKey as PubKey, *}; use crate::core::*; @@ -1661,8 +1665,8 @@ pub mod test { } pub fn make_codec_test_nakamoto_block( - num_txs: usize, epoch_id: StacksEpochId, + miner_privk: &StacksPrivateKey, ) -> NakamotoBlock { let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); @@ -1671,91 +1675,48 @@ pub mod test { "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", ) .unwrap(); - let origin_auth = TransactionAuth::Standard( - TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private( - &privk, - )) - .unwrap(), - ); - let mut tx_coinbase = StacksTransaction::new( - TransactionVersion::Mainnet, - origin_auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), - ); - let tx_coinbase_proof = StacksTransaction::new( - TransactionVersion::Mainnet, - origin_auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, Some(proof.clone())), - ); - tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; - - let tx_tenure_change = StacksTransaction::new( - TransactionVersion::Mainnet, - origin_auth.clone(), - TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: ConsensusHash([0x01; 20]), - prev_tenure_consensus_hash: ConsensusHash([0x02; 20]), - burn_view_consensus_hash: ConsensusHash([0x03; 20]), - previous_tenure_end: StacksBlockId([0x05; 32]), - previous_tenure_blocks: 0, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0x00; 20]), - }), - ); - - let mut all_txs = codec_all_transactions( - &TransactionVersion::Testnet, - 0x80000000, - &TransactionAnchorMode::OnChainOnly, - &TransactionPostConditionMode::Allow, - epoch_id, + let stx_address = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + let payload = TransactionPayload::TokenTransfer( + stx_address.into(), + 123, + TokenTransferMemo([0u8; 34]), ); - // remove all coinbases, except for an initial coinbase - let mut txs_anchored = vec![]; - - if epoch_id >= StacksEpochId::Epoch30 { - txs_anchored.push(tx_tenure_change); - txs_anchored.push(tx_coinbase_proof); - } else { - txs_anchored.push(tx_coinbase); - } - - for tx in all_txs.drain(..) { - match tx.payload { - TransactionPayload::Coinbase(..) => { - continue; - } - _ => {} - } - txs_anchored.push(tx); - if txs_anchored.len() >= num_txs { - break; - } - } - - let txid_vecs = txs_anchored - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - + let auth = TransactionAuth::from_p2pkh(miner_privk).unwrap(); + let addr = auth.origin().address_testnet(); + let mut tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); + tx.chain_id = 0x80000000; + tx.auth.set_origin_nonce(34); + tx.set_post_condition_mode(TransactionPostConditionMode::Allow); + tx.set_tx_fee(300); + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(miner_privk).unwrap(); + let tx = tx_signer.get_tx().unwrap(); + + let txid_vecs = vec![tx.txid().as_bytes().to_vec()]; + let txs_anchored = vec![tx]; let merkle_tree = MerkleTree::::new(&txid_vecs); let tx_merkle_root = merkle_tree.root(); - let tr = tx_merkle_root.as_bytes().to_vec(); let header = NakamotoBlockHeader { - version: 0x01, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([4u8; 20]), - parent_block_id: StacksBlockId([5u8; 32]), + version: 0x00, + chain_length: 107, + burn_spent: 25000, + consensus_hash: MINER_BLOCK_CONSENSUS_HASH.clone(), + parent_block_id: StacksBlockId::from_bytes(&[0x11; 32]).unwrap(), tx_merkle_root, - state_index_root: TrieHash([8u8; 32]), + state_index_root: TrieHash::from_hex( + "fb419c3d8f40ae154018f2abf3935e2275a14c091e071bacaf6cbf5579743a0f", + ) + .unwrap(), timestamp: get_epoch_time_secs(), miner_signature: MessageSignature::empty(), signer_signature: Vec::new(), - signer_bitvec: BitVec::zeros(8).unwrap(), + signer_bitvec: BitVec::ones(8).unwrap(), }; NakamotoBlock { diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 3965e6dfde..32b9fcc07c 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -108,6 +108,18 @@ pub struct BlockValidateRejectReason { pub reason_code: ValidateRejectCode, } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum BlockProposalResult { + Accepted, + Error, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct BlockProposalResponse { + pub result: BlockProposalResult, + pub message: String, +} + impl From for BlockValidateRejectReason where T: Into, @@ -321,12 +333,18 @@ impl NakamotoBlockProposal { let size = builder.get_bytes_so_far(); let cost = builder.tenure_finish(tenure_tx)?; + println!("block header: {:?}", block.header); + println!("expected: {:?}", self.block.header); + // Clone signatures from block proposal // These have already been validated by `validate_nakamoto_block_burnchain()`` block.header.miner_signature = self.block.header.miner_signature.clone(); block.header.signer_signature = self.block.header.signer_signature.clone(); - // Assuming `tx_nerkle_root` has been checked we don't need to hash the whole block + // Clone the timestamp from the block proposal, which has already been validated + block.header.timestamp = self.block.header.timestamp; + + // Assuming `tx_merkle_root` has been checked we don't need to hash the whole block let expected_block_header_hash = self.block.header.block_hash(); let computed_block_header_hash = block.header.block_hash(); @@ -540,7 +558,9 @@ impl HttpResponse for RPCBlockProposalRequestHandler { preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let response: BlockValidateResponse = parse_json(preamble, body)?; + let body_str = std::str::from_utf8(body) + .map_err(|e| Error::DecodeError(format!("Failed to parse body: {e}")))?; + let response: BlockProposalResponse = parse_json(preamble, body)?; HttpResponsePayload::try_from_json(response) } } diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index fedecfc689..30ccac4be0 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -14,25 +14,45 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::cell::RefCell; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::rc::Rc; +use std::sync::{Arc, Condvar, Mutex}; +use clarity::types::chainstate::{StacksPrivateKey, TrieHash}; +use clarity::util::secp256k1::MessageSignature; +use clarity::util::vrf::VRFProof; +use clarity::vm::ast::ASTRules; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; use clarity::vm::{ClarityName, ContractName, Value}; -use mempool::{MemPoolEventDispatcher, ProposalCallbackReceiver}; -use postblock_proposal::NakamotoBlockProposal; +use mempool::{MemPoolDB, MemPoolEventDispatcher, ProposalCallbackReceiver}; +use postblock_proposal::{NakamotoBlockProposal, ValidateRejectCode}; +use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; use stacks_common::types::net::PeerHost; use stacks_common::types::{Address, StacksEpochId}; +use stacks_common::util::hash::{hex_bytes, Hash160, MerkleTree, Sha512Trunc256Sum}; use super::TestRPC; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::miner::{BlockBuilder, BlockLimitFunction}; use crate::chainstate::stacks::test::{make_codec_test_block, make_codec_test_nakamoto_block}; -use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::{ + CoinbasePayload, StacksBlockHeader, StacksTransactionSigner, TenureChangeCause, + TenureChangePayload, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, + TransactionPayload, TransactionPostConditionMode, TransactionVersion, +}; use crate::core::BLOCK_LIMIT_MAINNET_21; use crate::net::api::*; use crate::net::connection::ConnectionOptions; use crate::net::httpcore::{ HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, }; +use crate::net::relay::Relayer; use crate::net::test::TestEventObserver; use crate::net::{ProtocolFamily, TipRequest}; @@ -41,7 +61,7 @@ fn test_try_parse_request() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); - let block = make_codec_test_nakamoto_block(3, StacksEpochId::Epoch30); + let block = make_codec_test_nakamoto_block(StacksEpochId::Epoch30, &StacksPrivateKey::new()); let proposal = NakamotoBlockProposal { block: block.clone(), chain_id: 0x80000000, @@ -106,10 +126,64 @@ fn test_try_parse_request() { assert!(handler.block_proposal.is_none()); } -struct NullObserver; -impl MemPoolEventDispatcher for NullObserver { +struct ProposalObserver { + results: Mutex< + Vec>, + >, + condvar: Condvar, +} + +impl ProposalObserver { + fn new() -> Self { + Self { + results: Mutex::new(vec![]), + condvar: Condvar::new(), + } + } +} + +impl ProposalCallbackReceiver for ProposalObserver { + fn notify_proposal_result( + &self, + result: Result< + postblock_proposal::BlockValidateOk, + postblock_proposal::BlockValidateReject, + >, + ) { + let mut results = self.results.lock().unwrap(); + results.push(result); + self.condvar.notify_one(); + } +} + +struct ProposalTestObserver { + pub proposal_observer: Arc>, +} + +impl ProposalTestObserver { + fn new() -> Self { + Self { + proposal_observer: Arc::new(Mutex::new(ProposalObserver::new())), + } + } +} + +impl ProposalCallbackReceiver for Arc> { + fn notify_proposal_result( + &self, + result: Result< + postblock_proposal::BlockValidateOk, + postblock_proposal::BlockValidateReject, + >, + ) { + let observer = self.lock().unwrap(); + observer.notify_proposal_result(result); + } +} + +impl MemPoolEventDispatcher for ProposalTestObserver { fn get_proposal_callback_receiver(&self) -> Option> { - Some(Box::new(NullObserver {})) + Some(Box::new(Arc::clone(&self.proposal_observer))) } fn mempool_txs_dropped(&self, txids: Vec, reason: mempool::MemPoolDropReason) {} @@ -145,41 +219,125 @@ impl MemPoolEventDispatcher for NullObserver { } } -impl ProposalCallbackReceiver for NullObserver { - fn notify_proposal_result( - &self, - result: Result< - postblock_proposal::BlockValidateOk, - postblock_proposal::BlockValidateReject, - >, - ) { - } -} - #[test] fn test_try_make_response() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); let test_observer = TestEventObserver::new(); - let rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); let mut requests = vec![]; - let block = make_codec_test_nakamoto_block(3, StacksEpochId::Epoch30); + let tip = + SortitionDB::get_canonical_burn_chain_tip(&rpc_test.peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); + + let mut block = { + let chainstate = rpc_test.peer_1.chainstate(); + let parent_stacks_header = NakamotoChainState::get_block_header( + chainstate.db(), + &tip.get_canonical_stacks_block_id(), + ) + .unwrap() + .unwrap(); + + // let mut block = make_codec_test_nakamoto_block(StacksEpochId::Epoch30, &tip, miner_privk, parent_stacks_header); + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + + let privk = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + + let stx_address = StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + }; + let payload = TransactionPayload::TokenTransfer( + stx_address.into(), + 123, + TokenTransferMemo([0u8; 34]), + ); - // post the block proposal + let auth = TransactionAuth::from_p2pkh(miner_privk).unwrap(); + let addr = auth.origin().address_testnet(); + let mut tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); + tx.chain_id = 0x80000000; + tx.auth.set_origin_nonce(34); + tx.set_post_condition_mode(TransactionPostConditionMode::Allow); + tx.set_tx_fee(300); + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(miner_privk).unwrap(); + let tx = tx_signer.get_tx().unwrap(); + + let mut builder = NakamotoBlockBuilder::new( + &parent_stacks_header, + &tip.consensus_hash, + 25000, + None, + None, + 8, + ) + .unwrap(); + + rpc_test + .peer_1 + .with_db_state( + |sort_db: &mut SortitionDB, + chainstate: &mut StacksChainState, + _: &mut Relayer, + _: &mut MemPoolDB| { + let burn_dbconn = sort_db.index_conn(); + let mut miner_tenure_info = builder + .load_tenure_info(chainstate, &burn_dbconn, None) + .unwrap(); + let mut tenure_tx = builder + .tenure_begin(&burn_dbconn, &mut miner_tenure_info) + .unwrap(); + builder.try_mine_tx_with_len( + &mut tenure_tx, + &tx, + tx.tx_len(), + &BlockLimitFunction::NO_LIMIT_HIT, + ASTRules::PrecheckSize, + ); + let block = builder.mine_nakamoto_block(&mut tenure_tx); + Ok(block) + }, + ) + .unwrap() + }; + + // Increment the timestamp by 1 to ensure it is different from the previous block + block.header.timestamp += 1; + rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + + // post the valid block proposal let proposal = NakamotoBlockProposal { block: block.clone(), chain_id: 0x80000000, }; - println!( - "Peer1 host: {:?} {}", + + let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), - rpc_test.peer_1.config.http_port - ); - println!( - "Peer2 host: {:?} {}", - rpc_test.peer_2.to_peer_host(), - rpc_test.peer_2.config.http_port - ); + "POST".into(), + "/v2/block_proposal".into(), + HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), + ) + .expect("failed to construct request"); + request.add_header("authorization".into(), "password".into()); + requests.push(request); + + // Set the timestamp to a value in the past + block.header.timestamp -= 10000; + rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + + // post the invalid block proposal + let proposal = NakamotoBlockProposal { + block: block.clone(), + chain_id: 0x80000000, + }; + let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), @@ -190,48 +348,80 @@ fn test_try_make_response() { request.add_header("authorization".into(), "password".into()); requests.push(request); - // // idempotent - // let request = - // StacksHttpRequest::new_post_block(addr.into(), next_block.0.clone(), next_block.1.clone()); - // requests.push(request); + // Set the timestamp to a value in the future + block.header.timestamp += 20000; + rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + + // post the invalid block proposal + let proposal = NakamotoBlockProposal { + block: block.clone(), + chain_id: 0x80000000, + }; + + let mut request = StacksHttpRequest::new_for_peer( + rpc_test.peer_1.to_peer_host(), + "POST".into(), + "/v2/block_proposal".into(), + HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), + ) + .expect("failed to construct request"); + request.add_header("authorization".into(), "password".into()); + requests.push(request); - // // fails if the consensus hash is not recognized - // let request = StacksHttpRequest::new_post_block( - // addr.into(), - // ConsensusHash([0x11; 20]), - // next_block.1.clone(), - // ); - // requests.push(request); + // execute the requests + let observer = ProposalTestObserver::new(); + let proposal_observer = Arc::clone(&observer.proposal_observer); - let observer = NullObserver {}; let mut responses = rpc_test.run_with_observer(requests, Some(&observer)); let response = responses.remove(0); - println!( - "Response:\n{}\n", - std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() - ); - // let resp = response.decode_stacks_block_accepted().unwrap(); - // assert_eq!(resp.accepted, true); - // assert_eq!(resp.stacks_block_id, stacks_block_id); + // Wait for the results to be non-empty + loop { + if proposal_observer + .lock() + .unwrap() + .results + .lock() + .unwrap() + .len() + < 3 + { + std::thread::sleep(std::time::Duration::from_secs(1)); + } else { + break; + } + } - // let response = responses.remove(0); - // debug!( - // "Response:\n{}\n", - // std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() - // ); + let observer = proposal_observer.lock().unwrap(); + let mut results = observer.results.lock().unwrap(); - // let resp = response.decode_stacks_block_accepted().unwrap(); - // assert_eq!(resp.accepted, false); - // assert_eq!(resp.stacks_block_id, stacks_block_id); + let result = results.remove(0); + assert!(result.is_ok()); - // let response = responses.remove(0); - // debug!( - // "Response:\n{}\n", - // std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() - // ); + let result = results.remove(0); + match result { + Ok(_) => panic!("expected error"), + Err(postblock_proposal::BlockValidateReject { + reason_code, + reason, + .. + }) => { + assert_eq!(reason_code, ValidateRejectCode::InvalidBlock); + assert_eq!(reason, "Block timestamp is not greater than parent block"); + } + } - // let (preamble, body) = response.destruct(); - // assert_eq!(preamble.status_code, 404); + let result = results.remove(0); + match result { + Ok(_) => panic!("expected error"), + Err(postblock_proposal::BlockValidateReject { + reason_code, + reason, + .. + }) => { + assert_eq!(reason_code, ValidateRejectCode::InvalidBlock); + assert_eq!(reason, "Block timestamp is too far into the future"); + } + } } From ce61eb1a884504b59d17c5ca4b177fa2a2eb4c0a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 7 Jun 2024 11:45:38 -0400 Subject: [PATCH 0275/1400] chore: change default testnet num_neighbors config to 32 Since the boot contracts use 32, when the config is set to 16, this causes many warnings. --- testnet/stacks-node/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index c101da090d..cb291a0f91 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -513,7 +513,7 @@ lazy_static! { heartbeat: 3600, // can't use u64::max, because sqlite stores as i64. private_key_lifetime: 9223372036854775807, - num_neighbors: 16, // number of neighbors whose inventories we track + num_neighbors: 32, // number of neighbors whose inventories we track num_clients: 750, // number of inbound p2p connections soft_num_neighbors: 16, // soft-limit on the number of neighbors whose inventories we track soft_num_clients: 750, // soft limit on the number of inbound p2p connections From 1f7c19f9beaeebc900db6ab7f89b241e20a1226a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 7 Jun 2024 13:46:30 -0500 Subject: [PATCH 0276/1400] unit test for punish/reward treatment in block commit --- .../burn/operations/leader_block_commit.rs | 297 +++++++++++++++++- 1 file changed, 293 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 30e6e8ff97..c66d00b755 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -514,7 +514,7 @@ impl StacksMessageCodec for LeaderBlockCommitOp { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct RewardSetInfo { pub anchor_block: BlockHeaderHash, pub recipients: Vec<(PoxAddress, u16)>, @@ -598,11 +598,11 @@ impl LeaderBlockCommitOp { /// If `reward_set_info` is not None, then *only* the addresses in .recipients are used. The u16 /// indexes are *ignored* (and *must be* ignored, since this method gets called by /// `check_intneded_sortition()`, which does not have this information). - fn check_pox( + fn check_pox( &self, epoch_id: StacksEpochId, burnchain: &Burnchain, - tx: &mut SortitionHandleTx, + tx: &mut SH, reward_set_info: Option<&RewardSetInfo>, ) -> Result, op_error> { let parent_block_height = u64::from(self.parent_block_ptr); @@ -1154,7 +1154,9 @@ mod tests { use crate::burnchains::bitcoin::keys::BitcoinPublicKey; use crate::burnchains::bitcoin::*; use crate::burnchains::*; - use crate::chainstate::burn::db::sortdb::tests::test_append_snapshot; + use crate::chainstate::burn::db::sortdb::tests::{ + test_append_snapshot, test_append_snapshot_with_winner, + }; use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::db::*; use crate::chainstate::burn::operations::*; @@ -3140,6 +3142,293 @@ mod tests { } } + pub enum DescendencyStubbedSortitionHandle { + Descended, + NotDescended, + } + + impl SortitionHandle for DescendencyStubbedSortitionHandle { + fn sqlite(&self) -> &Connection { + panic!("Cannot evaluate"); + } + + fn get_block_snapshot_by_height( + &mut self, + _block_height: u64, + ) -> Result, db_error> { + panic!("Cannot evaluate"); + } + + fn first_burn_block_height(&self) -> u64 { + panic!("Cannot evaluate"); + } + + fn pox_constants(&self) -> &PoxConstants { + panic!("Cannot evaluate"); + } + + fn tip(&self) -> SortitionId { + panic!("Cannot evaluate"); + } + + fn get_nakamoto_tip( + &self, + ) -> Result, db_error> { + panic!("Cannot evaluate"); + } + + fn descended_from( + &mut self, + _block_at_burn_height: u64, + _potential_ancestor: &BlockHeaderHash, + ) -> Result { + match self { + DescendencyStubbedSortitionHandle::Descended => Ok(true), + DescendencyStubbedSortitionHandle::NotDescended => Ok(false), + } + } + } + + #[test] + fn pox_reward_punish() { + let burnchain = Burnchain { + pox_constants: pox_constants(), + peer_version: 0x012345678, + network_id: 0x9abcdef0, + chain_name: "bitcoin".to_string(), + network_name: "testnet".to_string(), + working_dir: "/nope".to_string(), + consensus_hash_lifetime: 24, + stable_confirmations: 7, + initial_reward_start_block: 0, + first_block_height: 0, + first_block_timestamp: 0, + first_block_hash: BurnchainHeaderHash([0x05; 32]), + }; + + let default_block_commit = LeaderBlockCommitOp { + punished: vec![], + sunset_burn: 0, + block_header_hash: BlockHeaderHash([0x22; 32]), + new_seed: VRFSeed([0x33; 32]), + parent_block_ptr: 125, + parent_vtxindex: 0, + key_block_ptr: 124, + key_vtxindex: 456, + memo: vec![0x80], + commit_outs: vec![], + + burn_fee: 12345, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner::mock_parts( + AddressHashMode::SerializeP2PKH, + 1, + vec![StacksPublicKey::from_hex( + "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", + ) + .unwrap()], + ), + + txid: Txid([0xab; 32]), + vtxindex: 444, + block_height: 128, + burn_parent_modulus: (128 % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: BurnchainHeaderHash([0x11; 32]), + }; + + let anchor_block_hash = BlockHeaderHash([0xaa; 32]); + + fn reward_addrs(i: usize) -> PoxAddress { + let addr = StacksAddress::new(1, Hash160::from_data(&i.to_be_bytes())); + PoxAddress::Standard(addr, None) + } + let burn_addr_0 = PoxAddress::Standard(StacksAddress::burn_address(false), None); + let burn_addr_1 = PoxAddress::Standard(StacksAddress::burn_address(true), None); + let rs_pox_addrs = RewardSetInfo { + anchor_block: anchor_block_hash.clone(), + recipients: vec![(reward_addrs(0), 0), (reward_addrs(1), 1)], + allow_nakamoto_punishment: true, + }; + let rs_pox_addrs_0b = RewardSetInfo { + anchor_block: anchor_block_hash.clone(), + recipients: vec![(reward_addrs(0), 0), (burn_addr_0.clone(), 5)], + allow_nakamoto_punishment: true, + }; + let rs_pox_addrs_1b = RewardSetInfo { + anchor_block: anchor_block_hash.clone(), + recipients: vec![(reward_addrs(1), 1), (burn_addr_1.clone(), 5)], + allow_nakamoto_punishment: true, + }; + + fn rev(rs: &RewardSetInfo) -> RewardSetInfo { + let mut out = rs.clone(); + out.recipients.reverse(); + out + } + + fn no_punish(rs: &RewardSetInfo) -> RewardSetInfo { + let mut out = rs.clone(); + out.allow_nakamoto_punishment = false; + out + } + + let mut test_vectors = vec![ + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + None, + Ok(vec![]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Ok(vec![ + Treatment::Punish(reward_addrs(1)), + Treatment::Punish(reward_addrs(0)), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![reward_addrs(0), reward_addrs(1)], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Ok(vec![ + Treatment::Reward(reward_addrs(1)), + Treatment::Reward(reward_addrs(0)), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Ok(vec![ + Treatment::Punish(reward_addrs(1)), + Treatment::Punish(reward_addrs(0)), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs_1b.clone()), + // it doesn't matter if we call burn_addr_1 punished or rewarded! + Ok(vec![ + Treatment::Punish(reward_addrs(1)), + Treatment::Punish(burn_addr_1.clone()), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs_0b.clone()), + // it doesn't matter if we call burn_addr_1 punished or rewarded! + Ok(vec![ + Treatment::Punish(reward_addrs(0)), + Treatment::Punish(burn_addr_0.clone()), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(no_punish(&rs_pox_addrs)), + Err(op_error::BlockCommitBadOutputs), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(no_punish(&rs_pox_addrs_1b)), + Err(op_error::BlockCommitBadOutputs), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(no_punish(&rs_pox_addrs_0b)), + Err(op_error::BlockCommitBadOutputs), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![reward_addrs(0)], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Err(op_error::BlockCommitBadOutputs), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![reward_addrs(0), reward_addrs(3)], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Err(op_error::BlockCommitBadOutputs), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![reward_addrs(1), reward_addrs(3)], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Err(op_error::BlockCommitBadOutputs), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![burn_addr_0.clone(), reward_addrs(3)], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Err(op_error::BlockCommitBadOutputs), + ), + ]; + + for (ix, (op, reward_set_info, expected)) in test_vectors.iter_mut().enumerate() { + for should_reverse in [false, true] { + let reward_set_info = if should_reverse { + reward_set_info.as_ref().map(rev) + } else { + reward_set_info.clone() + }; + eprintln!("Processing {}", ix); + let mut ic = DescendencyStubbedSortitionHandle::Descended; + let output = op.check_pox( + StacksEpochId::Epoch30, + &burnchain, + &mut ic, + reward_set_info.as_ref(), + ); + eprintln!("{:?} <=?=> {:?}", expected, output); + match expected { + Err(e) => { + assert_eq!(format!("{e:?}"), format!("{:?}", &output.unwrap_err())); + } + Ok(expected_treatment) => { + assert!(output.is_ok()); + let actual_treatment = output.unwrap(); + assert_eq!(actual_treatment.len(), expected_treatment.len()); + for i in actual_treatment.iter() { + assert!(expected_treatment.contains(i)); + } + } + } + } + } + } + #[test] fn test_epoch_marker() { let first_block_height = 121; From d3ca182bd5ce456cb3414ed852f2e3fabd51ad91 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 7 Jun 2024 17:26:13 -0500 Subject: [PATCH 0277/1400] stashing the beginnings of integration tests --- .../chainstate/nakamoto/coordinator/tests.rs | 285 +++++++++++++++++- stackslib/src/chainstate/nakamoto/mod.rs | 152 +++++----- .../src/chainstate/nakamoto/tests/node.rs | 31 +- 3 files changed, 392 insertions(+), 76 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 015409a74c..cba7ab8624 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -28,7 +28,7 @@ use stacks_common::consts::{ use stacks_common::types::chainstate::{ StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, }; -use stacks_common::types::{Address, StacksEpoch}; +use stacks_common::types::{Address, StacksEpoch, StacksEpochId}; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::vrf::VRFProof; use wsts::curve::point::Point; @@ -88,10 +88,9 @@ fn advance_to_nakamoto( test_stackers .iter() .map(|test_stacker| { - let pox_addr = PoxAddress::from_legacy( - AddressHashMode::SerializeP2PKH, - addr.bytes.clone(), - ); + let pox_addr = test_stacker.pox_address.clone().unwrap_or_else(|| { + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()) + }); let signature = make_signer_key_signature( &pox_addr, &test_stacker.signer_private_key, @@ -137,6 +136,103 @@ fn advance_to_nakamoto( // peer is at the start of cycle 8 } +/// Bring a TestPeer into the Nakamoto Epoch +fn advance_to_nakamoto_long( + peer: &mut TestPeer, + test_signers: &mut TestSigners, + test_stackers: &[TestStacker], +) { + let mut peer_nonce = 0; + let private_key = peer.config.private_key.clone(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + //let pox_4_stacking_height = peer.config.epochs.as_ref().unwrap().iter().find(|e| e.epoch_id == StacksEpochId::Epoch25).unwrap().start_height; + let mut stacked_pox_4 = false; + let mut signer_voted = false; + let nakamoto_height = peer + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch30) + .unwrap() + .start_height; + let mut tip = None; + loop { + let current_burn_height = peer.get_burn_block_height(); + if current_burn_height >= nakamoto_height - 1 { + info!("Booted to nakamoto"); + break; + } + let txs = if tip.is_none() { + // don't mine stack-stx txs in first block, because they cannot pass the burn block height + // validation + vec![] + } else if !stacked_pox_4 { + // Make all the test Stackers stack + stacked_pox_4 = true; + test_stackers + .iter() + .map(|test_stacker| { + let pox_addr = test_stacker.pox_address.clone().unwrap_or_else(|| { + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()) + }); + let reward_cycle = peer + .config + .burnchain + .block_height_to_reward_cycle(current_burn_height) + .unwrap(); + let signature = make_signer_key_signature( + &pox_addr, + &test_stacker.signer_private_key, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + 12_u128, + u128::MAX, + 1, + ); + let signing_key = + StacksPublicKey::from_private(&test_stacker.signer_private_key); + make_pox_4_lockup( + &test_stacker.stacker_private_key, + 0, + test_stacker.amount, + &pox_addr, + 12, + &signing_key, + current_burn_height + 2, + Some(signature), + u128::MAX, + 1, + ) + }) + .collect() + } else if !signer_voted { + signer_voted = true; + with_sortdb(peer, |chainstate, sortdb| { + make_all_signers_vote_for_aggregate_key( + chainstate, + sortdb, + &tip.unwrap(), + test_signers, + test_stackers, + 7, + ) + }) + } else { + vec![] + }; + tip = Some(peer.tenure_with_txs(&txs, &mut peer_nonce)); + } +} + pub fn make_all_signers_vote_for_aggregate_key( chainstate: &mut StacksChainState, sortdb: &SortitionDB, @@ -293,6 +389,77 @@ pub fn boot_nakamoto<'a>( peer } +/// Make a peer and transition it into the Nakamoto epoch. +/// The node needs to be stacking and it needs to vote for an aggregate key; +/// otherwise, Nakamoto can't activate. +pub fn boot_nakamoto_long_reward_sets<'a>( + test_name: &str, + mut initial_balances: Vec<(PrincipalData, u64)>, + test_signers: &mut TestSigners, + test_stackers: &[TestStacker], + observer: Option<&'a TestEventObserver>, +) -> TestPeer<'a> { + let aggregate_public_key = test_signers.aggregate_public_key.clone(); + let mut peer_config = TestPeerConfig::new(test_name, 0, 0); + let private_key = peer_config.private_key.clone(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + // reward cycles are 5 blocks long + // first 25 blocks are boot-up + // reward cycle 6 instantiates pox-3 + // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation + peer_config.aggregate_public_key = Some(aggregate_public_key.clone()); + peer_config + .stacker_dbs + .push(boot_code_id(MINERS_NAME, false)); + peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(37)); + peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + + // Create some balances for test Stackers + let mut stacker_balances = test_stackers + .iter() + .map(|test_stacker| { + ( + PrincipalData::from(key_to_stacks_addr(&test_stacker.stacker_private_key)), + u64::try_from(test_stacker.amount + 10000).expect("Stacking amount too large"), + ) + }) + .collect(); + + // Create some balances for test Signers + let mut signer_balances = test_stackers + .iter() + .map(|stacker| { + ( + PrincipalData::from(p2pkh_from(&stacker.signer_private_key)), + 1000, + ) + }) + .collect(); + + peer_config.initial_balances.append(&mut stacker_balances); + peer_config.initial_balances.append(&mut signer_balances); + peer_config.initial_balances.append(&mut initial_balances); + peer_config.burnchain.pox_constants.reward_cycle_length = 10; + peer_config.burnchain.pox_constants.v2_unlock_height = 21; + peer_config.burnchain.pox_constants.pox_3_activation_height = 26; + peer_config.burnchain.pox_constants.v3_unlock_height = 27; + peer_config.burnchain.pox_constants.pox_4_activation_height = 28; + peer_config.test_stackers = Some(test_stackers.to_vec()); + peer_config.test_signers = Some(test_signers.clone()); + let mut peer = TestPeer::new_with_observer(peer_config, observer); + + advance_to_nakamoto_long(&mut peer, test_signers, test_stackers); + + peer +} + /// Make a replay peer, used for replaying the blockchain pub fn make_replay_peer<'a>(peer: &mut TestPeer<'a>) -> TestPeer<'a> { let mut replay_config = peer.config.clone(); @@ -596,6 +763,114 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { peer.check_nakamoto_migration(); } +#[test] +fn pox_treatment_1_tenure_10_blocks() { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + let (mut test_signers, test_stackers) = TestStacker::big_signing_set(); + let mut peer = boot_nakamoto_long_reward_sets( + function_name!(), + vec![(addr.into(), 100_000_000)], + &mut test_signers, + &test_stackers, + None, + ); + let mut blocks = vec![]; + let pox_constants = peer.sortdb().pox_constants.clone(); + let first_burn_height = peer.sortdb().first_block_height; + + // mine until we're at the start of the next reward phase (so we *know* + // that the reward set contains entries) + loop { + let (burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (burn_height, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + info!("Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", + pox_constants.is_in_prepare_phase(first_burn_height, burn_height), + pox_constants.is_reward_cycle_start(first_burn_height, burn_height) + ); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + // do a stx transfer in each block to a given recipient + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + + let blocks_and_sizes = peer.make_nakamoto_tenure( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() < 1 { + info!("Produce nakamoto block {}", blocks_so_far.len()); + + let account = get_account(chainstate, sortdb, &addr); + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![stx_transfer] + } else { + vec![] + } + }, + ); + blocks.extend(blocks_and_sizes.into_iter().map(|(block, _, _)| block)); + + if pox_constants.is_reward_cycle_start(first_burn_height, burn_height + 1) { + break; + } + } + + // The next block should be the start of a reward phase, so the PoX recipient should + // be chosen. + // + // First: perform a normal block commit, and then try to mine a block with a zero in the + // bitvector. + // + + let tip = { + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 16 + ); + assert_eq!( + tip.anchored_header.as_stacks_nakamoto().unwrap(), + &blocks.last().unwrap().header + ); +} + /// Test chainstate getters against an instantiated epoch2/Nakamoto chain. /// There are 11 epoch2 blocks and 2 nakamto tenure with 10 nakamoto blocks each /// Tests: diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 74b571d0ba..981b75543e 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1435,7 +1435,7 @@ impl NakamotoChainState { stacks_chain_state, sort_db, &next_ready_block.header.parent_block_id, - false, + true, ).map_err(|e| { warn!( "Cannot process Nakamoto block: could not load reward set that elected the block"; @@ -2913,6 +2913,84 @@ impl NakamotoChainState { Ok(lockup_events) } + fn check_pox_bitvector( + block: &NakamotoBlock, + tenure_block_commit: &LeaderBlockCommitOp, + active_reward_set: &RewardSet, + ) -> Result<(), ChainstateError> { + if !tenure_block_commit.punished.is_empty() { + // our block commit issued a punishment, check the reward set and bitvector + // to ensure that this was valid. + for treated_addr in tenure_block_commit.punished.iter() { + if treated_addr.is_burn() { + // Don't need to assert anything about burn addresses. + // If they were in the reward set, "punishing" them is meaningless. + continue; + } + // otherwise, we need to find the indices in the rewarded_addresses + // corresponding to this address. + let address_indices = active_reward_set + .rewarded_addresses + .iter() + .enumerate() + .filter_map(|(ix, addr)| { + if addr == treated_addr.deref() { + Some(ix) + } else { + None + } + }); + // if any of them are 0, punishment is okay. + // if all of them are 1, punishment is not okay. + // if all of them are 0, *must* have punished + let bitvec_values: Result, ChainstateError> = address_indices + .map( + |ix| { + let ix = u16::try_from(ix) + .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?; + let bitvec_value = block.header.signer_bitvec.get(ix) + .unwrap_or_else(|| { + info!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1"); + true + }); + Ok(bitvec_value) + } + ) + .collect(); + let bitvec_values = bitvec_values?; + let all_1 = bitvec_values.iter().all(|x| *x); + let all_0 = bitvec_values.iter().all(|x| !x); + if all_1 { + if treated_addr.is_punish() { + warn!( + "Invalid Nakamoto block: punished PoX address when bitvec contained 1s for the address"; + "reward_address" => %treated_addr.deref(), + "bitvec_values" => ?bitvec_values, + "block_id" => %block.header.block_id(), + ); + return Err(ChainstateError::InvalidStacksBlock( + "Bitvec does not match the block commit's PoX handling".into(), + )); + } + } else if all_0 { + if treated_addr.is_reward() { + warn!( + "Invalid Nakamoto block: rewarded PoX address when bitvec contained 0s for the address"; + "reward_address" => %treated_addr.deref(), + "bitvec_values" => ?bitvec_values, + "block_id" => %block.header.block_id(), + ); + return Err(ChainstateError::InvalidStacksBlock( + "Bitvec does not match the block commit's PoX handling".into(), + )); + } + } + } + } + + Ok(()) + } + /// Append a Nakamoto Stacks block to the Stacks chain state. /// NOTE: This does _not_ set the block as processed! The caller must do this. fn append_block<'a>( @@ -3079,75 +3157,9 @@ impl NakamotoChainState { ChainstateError::NoSuchBlockError })?; - if !tenure_block_commit.punished.is_empty() { - // our block commit issued a punishment, check the reward set and bitvector - // to ensure that this was valid. - for treated_addr in tenure_block_commit.punished.iter() { - if treated_addr.is_burn() { - // Don't need to assert anything about burn addresses. - // If they were in the reward set, "punishing" them is meaningless. - continue; - } - // otherwise, we need to find the indices in the rewarded_addresses - // corresponding to this address. - let address_indices = active_reward_set - .rewarded_addresses - .iter() - .enumerate() - .filter_map(|(ix, addr)| { - if addr == treated_addr.deref() { - Some(ix) - } else { - None - } - }); - // if any of them are 0, punishment is okay. - // if all of them are 1, punishment is not okay. - // if all of them are 0, *must* have punished - let bitvec_values: Result, ChainstateError> = address_indices - .map( - |ix| { - let ix = u16::try_from(ix) - .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?; - let bitvec_value = block.header.signer_bitvec.get(ix) - .unwrap_or_else(|| { - info!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1"); - true - }); - Ok(bitvec_value) - } - ) - .collect(); - let bitvec_values = bitvec_values?; - let all_1 = bitvec_values.iter().all(|x| *x); - let all_0 = bitvec_values.iter().all(|x| !x); - if all_1 { - if treated_addr.is_punish() { - warn!( - "Invalid Nakamoto block: punished PoX address when bitvec contained 1s for the address"; - "reward_address" => %treated_addr.deref(), - "bitvec_values" => ?bitvec_values, - "block_id" => %block.header.block_id(), - ); - return Err(ChainstateError::InvalidStacksBlock( - "Bitvec does not match the block commit's PoX handling".into(), - )); - } - } else if all_0 { - if treated_addr.is_reward() { - warn!( - "Invalid Nakamoto block: rewarded PoX address when bitvec contained 0s for the address"; - "reward_address" => %treated_addr.deref(), - "bitvec_values" => ?bitvec_values, - "block_id" => %block.header.block_id(), - ); - return Err(ChainstateError::InvalidStacksBlock( - "Bitvec does not match the block commit's PoX handling".into(), - )); - } - } - } - } + // TODO: this should be checked in the miner path as well... + // the easiest way to ensure this is via the setup_block function. + Self::check_pox_bitvector(&block, &tenure_block_commit, active_reward_set)?; // this block's tenure's block-commit contains the hash of the parent tenure's tenure-start // block. diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 5736258b11..8ab8d7995e 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -28,7 +28,9 @@ use rand::{CryptoRng, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; -use stacks_common::types::chainstate::{BlockHeaderHash, SortitionId, StacksBlockId, VRFSeed}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, +}; use stacks_common::util::hash::Hash160; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; @@ -75,6 +77,7 @@ pub struct TestStacker { pub stacker_private_key: StacksPrivateKey, pub signer_private_key: StacksPrivateKey, pub amount: u128, + pub pox_address: Option, } impl TestStacker { @@ -88,6 +91,7 @@ impl TestStacker { stacker_private_key, signer_private_key, amount: 1_000_000_000_000_000_000, + pox_address: None, } } @@ -107,6 +111,31 @@ impl TestStacker { signer_private_key: signing_key.clone(), stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), amount: Self::DEFAULT_STACKER_AMOUNT, + pox_address: None, + }) + .collect::>(); + + let test_signers = TestSigners::new(vec![signing_key]); + (test_signers, stackers) + } + + pub fn big_signing_set() -> (TestSigners, Vec) { + let num_keys: u32 = 4; + let mut signing_key_seed = num_keys.to_be_bytes().to_vec(); + signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); + let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); + let stackers = (0..num_keys) + .map(|index| TestStacker { + signer_private_key: signing_key.clone(), + stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), + amount: u64::MAX as u128 - 10000, + pox_address: Some(PoxAddress::Standard( + StacksAddress::new( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + Hash160::from_data(&index.to_be_bytes()), + ), + Some(AddressHashMode::SerializeP2PKH), + )), }) .collect::>(); From d1488d9d04826b4c315cfa279d92a2491adbbe6a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 7 Jun 2024 21:19:04 -0500 Subject: [PATCH 0278/1400] tests: working integration tests for reward/punish, still need more specificity in the coverage --- .../chainstate/nakamoto/coordinator/tests.rs | 268 ++++++++++++++++-- .../src/chainstate/nakamoto/tests/node.rs | 114 ++++++-- 2 files changed, 335 insertions(+), 47 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index cba7ab8624..6a995e720b 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -22,6 +22,7 @@ use clarity::vm::Value; use rand::prelude::SliceRandom; use rand::{thread_rng, Rng, RngCore}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; +use stacks_common::bitvec::BitVec; use stacks_common::consts::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, SIGNER_SLOTS_PER_USER, }; @@ -29,12 +30,13 @@ use stacks_common::types::chainstate::{ StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, }; use stacks_common::types::{Address, StacksEpoch, StacksEpochId}; +use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::vrf::VRFProof; use wsts::curve::point::Point; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; -use crate::chainstate::burn::operations::BlockstackOperationType; +use crate::chainstate::burn::operations::{BlockstackOperationType, LeaderBlockCommitOp}; use crate::chainstate::coordinator::tests::{p2pkh_from, pox_addr_from}; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; use crate::chainstate::nakamoto::test_signers::TestSigners; @@ -52,9 +54,9 @@ use crate::chainstate::stacks::boot::test::{ use crate::chainstate::stacks::boot::{MINERS_NAME, SIGNERS_NAME}; use crate::chainstate::stacks::db::{MinerPaymentTxFees, StacksAccount, StacksChainState}; use crate::chainstate::stacks::{ - CoinbasePayload, StacksTransaction, StacksTransactionSigner, TenureChangeCause, - TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionPayload, - TransactionVersion, + CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, + TenureChangeCause, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, + TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::StacksEpochExtension; @@ -764,7 +766,10 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { } #[test] -fn pox_treatment_1_tenure_10_blocks() { +// TODO: DRY this up. +// Cover the "find addr" by checking the invalid block cases +// with the specific addresses set incorrectly. +fn pox_treatment() { let private_key = StacksPrivateKey::from_seed(&[2]); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, @@ -774,7 +779,25 @@ fn pox_treatment_1_tenure_10_blocks() { ) .unwrap(); - let (mut test_signers, test_stackers) = TestStacker::big_signing_set(); + let num_stackers: u32 = 4; + let mut signing_key_seed = num_stackers.to_be_bytes().to_vec(); + signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); + let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); + let test_stackers = (0..num_stackers) + .map(|index| TestStacker { + signer_private_key: signing_key.clone(), + stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), + amount: u64::MAX as u128 - 10000, + pox_address: Some(PoxAddress::Standard( + StacksAddress::new( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + Hash160::from_data(&index.to_be_bytes()), + ), + Some(AddressHashMode::SerializeP2PKH), + )), + }) + .collect::>(); + let mut test_signers = TestSigners::new(vec![signing_key]); let mut peer = boot_nakamoto_long_reward_sets( function_name!(), vec![(addr.into(), 100_000_000)], @@ -785,6 +808,8 @@ fn pox_treatment_1_tenure_10_blocks() { let mut blocks = vec![]; let pox_constants = peer.sortdb().pox_constants.clone(); let first_burn_height = peer.sortdb().first_block_height; + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); // mine until we're at the start of the next reward phase (so we *know* // that the reward set contains entries) @@ -808,14 +833,11 @@ fn pox_treatment_1_tenure_10_blocks() { let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); // do a stx transfer in each block to a given recipient - let recipient_addr = - StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); - let blocks_and_sizes = peer.make_nakamoto_tenure( tenure_change_tx, coinbase_tx, &mut test_signers, - |miner, chainstate, sortdb, blocks_so_far| { + |_miner, chainstate, sortdb, blocks_so_far| { if blocks_so_far.len() < 1 { info!("Produce nakamoto block {}", blocks_so_far.len()); @@ -846,9 +868,224 @@ fn pox_treatment_1_tenure_10_blocks() { // The next block should be the start of a reward phase, so the PoX recipient should // be chosen. // - // First: perform a normal block commit, and then try to mine a block with a zero in the + // First: perform a normal block commit, and then try to mine a block with all zeros in the // bitvector. - // + let (burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (burn_height, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + info!( + "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", + pox_constants.is_in_prepare_phase(first_burn_height, burn_height), + pox_constants.is_reward_cycle_start(first_burn_height, burn_height) + ); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + // do a stx transfer in each block to a given recipient + let mut blocks_and_sizes = peer.make_nakamoto_tenure_and( + tenure_change_tx.clone(), + coinbase_tx.clone(), + &mut test_signers, + |_miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() < 1 { + info!("Produce nakamoto block {}", blocks_so_far.len()); + + let account = get_account(chainstate, sortdb, &addr); + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![stx_transfer] + } else { + vec![] + } + }, + |block| { + block.header.signer_bitvec = BitVec::zeros(12).unwrap(); + // don't try to process this block yet, just return it so that + // we can assert the block error. + false + }, + ); + assert_eq!(blocks_and_sizes.len(), 1); + let invalid_block = blocks_and_sizes.pop().unwrap().0; + let processing_result = peer.try_process_block(&invalid_block).unwrap_err(); + assert_eq!( + processing_result.to_string(), + "Bitvec does not match the block commit's PoX handling".to_string(), + ); + assert!(matches!( + processing_result, + ChainstateError::InvalidStacksBlock(_), + )); + + // set the bitvec to a heterogenous one: either punish or + // reward is acceptable, so this block should just process. + let blocks_and_sizes = peer.make_nakamoto_tenure_and( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |_miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() < 1 { + info!("Produce nakamoto block {}", blocks_so_far.len()); + + let account = get_account(chainstate, sortdb, &addr); + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![stx_transfer] + } else { + vec![] + } + }, + |block| { + // each stacker has 3 entries in the bitvec. + // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 + block.header.signer_bitvec = BitVec::try_from( + [ + false, false, true, false, false, true, false, false, true, false, false, true, + ] + .as_slice(), + ) + .unwrap(); + true + }, + ); + blocks.extend(blocks_and_sizes.into_iter().map(|(block, _, _)| { + info!("Processed block: {}", block.block_id()); + block + })); + + // now we need to test punishment! + let (mut burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + burn_ops.iter_mut().for_each(|op| { + if let BlockstackOperationType::LeaderBlockCommit(ref mut commit) = op { + commit.commit_outs[0] = PoxAddress::standard_burn_address(false); + } + }); + let (burn_height, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + + info!( + "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", + pox_constants.is_in_prepare_phase(first_burn_height, burn_height), + pox_constants.is_reward_cycle_start(first_burn_height, burn_height) + ); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + // do a stx transfer in each block to a given recipient + let mut blocks_and_sizes = peer.make_nakamoto_tenure_and( + tenure_change_tx.clone(), + coinbase_tx.clone(), + &mut test_signers, + |_miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() < 1 { + info!("Produce nakamoto block {}", blocks_so_far.len()); + + let account = get_account(chainstate, sortdb, &addr); + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![stx_transfer] + } else { + vec![] + } + }, + |block| { + block.header.signer_bitvec = BitVec::ones(12).unwrap(); + // don't try to process this block yet, just return it so that + // we can assert the block error. + false + }, + ); + assert_eq!(blocks_and_sizes.len(), 1); + let invalid_block = blocks_and_sizes.pop().unwrap().0; + let processing_result = peer.try_process_block(&invalid_block).unwrap_err(); + assert_eq!( + processing_result.to_string(), + "Bitvec does not match the block commit's PoX handling".to_string(), + ); + assert!(matches!( + processing_result, + ChainstateError::InvalidStacksBlock(_), + )); + + // set the bitvec to a heterogenous one: either punish or + // reward is acceptable, so this block should just process. + let blocks_and_sizes = peer.make_nakamoto_tenure_and( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |_miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() < 1 { + info!("Produce nakamoto block {}", blocks_so_far.len()); + + let account = get_account(chainstate, sortdb, &addr); + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![stx_transfer] + } else { + vec![] + } + }, + |block| { + // each stacker has 3 entries in the bitvec. + // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 + block.header.signer_bitvec = BitVec::try_from( + [ + false, false, true, false, false, true, false, false, true, false, false, true, + ] + .as_slice(), + ) + .unwrap(); + true + }, + ); + blocks.extend(blocks_and_sizes.into_iter().map(|(block, _, _)| block)); let tip = { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; @@ -858,13 +1095,6 @@ fn pox_treatment_1_tenure_10_blocks() { .unwrap() }; - assert_eq!( - tip.anchored_header - .as_stacks_nakamoto() - .unwrap() - .chain_length, - 16 - ); assert_eq!( tip.anchored_header.as_stacks_nakamoto().unwrap(), &blocks.last().unwrap().header diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 8ab8d7995e..f2b811affe 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -45,6 +45,7 @@ use crate::chainstate::burn::operations::{ BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, }; use crate::chainstate::burn::*; +use crate::chainstate::coordinator::tests::NullEventDispatcher; use crate::chainstate::coordinator::{ ChainsCoordinator, Error as CoordinatorError, OnChainRewardSetProvider, }; @@ -118,30 +119,6 @@ impl TestStacker { let test_signers = TestSigners::new(vec![signing_key]); (test_signers, stackers) } - - pub fn big_signing_set() -> (TestSigners, Vec) { - let num_keys: u32 = 4; - let mut signing_key_seed = num_keys.to_be_bytes().to_vec(); - signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); - let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); - let stackers = (0..num_keys) - .map(|index| TestStacker { - signer_private_key: signing_key.clone(), - stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), - amount: u64::MAX as u128 - 10000, - pox_address: Some(PoxAddress::Standard( - StacksAddress::new( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - Hash160::from_data(&index.to_be_bytes()), - ), - Some(AddressHashMode::SerializeP2PKH), - )), - }) - .collect::>(); - - let test_signers = TestSigners::new(vec![signing_key]); - (test_signers, stackers) - } } impl TestBurnchainBlock { @@ -307,10 +284,9 @@ impl TestStacksNode { parent_block_snapshot_opt: Option<&BlockSnapshot>, expect_success: bool, ) -> LeaderBlockCommitOp { - test_debug!( + info!( "Miner {}: Commit to Nakamoto tenure starting at {}", - miner.id, - &last_tenure_id, + miner.id, &last_tenure_id, ); let parent_block = @@ -368,6 +344,15 @@ impl TestStacksNode { /// Record the nakamoto blocks as a new tenure pub fn add_nakamoto_tenure_blocks(&mut self, tenure_blocks: Vec) { + if let Some(last_tenure) = self.nakamoto_blocks.last_mut() { + // this tenure is overwriting the last tenure + if last_tenure.first().unwrap().header.consensus_hash + == tenure_blocks.first().unwrap().header.consensus_hash + { + *last_tenure = tenure_blocks; + return; + } + } self.nakamoto_blocks.push(tenure_blocks); } @@ -513,9 +498,14 @@ impl TestStacksNode { } /// Construct or extend a full Nakamoto tenure with the given block builder. + /// After block assembly, invoke `after_block` before signing and then processing. + /// If `after_block` returns false, do not attempt to process the block, instead just + /// add it to the result Vec and exit the block building loop (the block builder cannot + /// build any subsequent blocks without processing the prior block) + /// /// The first block will contain a coinbase, if coinbase is Some(..) /// Process the blocks via the chains coordinator as we produce them. - pub fn make_nakamoto_tenure_blocks<'a, F>( + pub fn make_nakamoto_tenure_blocks<'a, F, G>( chainstate: &mut StacksChainState, sortdb: &SortitionDB, miner: &mut TestMiner, @@ -533,6 +523,7 @@ impl TestStacksNode { BitcoinIndexer, >, mut block_builder: F, + mut after_block: G, ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> where F: FnMut( @@ -541,6 +532,7 @@ impl TestStacksNode { &SortitionDB, &[(NakamotoBlock, u64, ExecutionCost)], ) -> Vec, + G: FnMut(&mut NakamotoBlock) -> bool, { let mut blocks = vec![]; let mut block_count = 0; @@ -604,6 +596,7 @@ impl TestStacksNode { txs, ) .unwrap(); + let try_to_process = after_block(&mut nakamoto_block); miner.sign_nakamoto_block(&mut nakamoto_block); let tenure_sn = @@ -644,6 +637,13 @@ impl TestStacksNode { signers.sign_block_with_reward_set(&mut nakamoto_block, &reward_set); let block_id = nakamoto_block.block_id(); + + if !try_to_process { + blocks.push((nakamoto_block, size, cost)); + block_count += 1; + break; + } + debug!( "Process Nakamoto block {} ({:?}", &block_id, &nakamoto_block.header @@ -1041,6 +1041,37 @@ impl<'a> TestPeer<'a> { proof } + pub fn try_process_block(&mut self, block: &NakamotoBlock) -> Result { + let mut sort_handle = self.sortdb.as_ref().unwrap().index_handle_at_tip(); + let accepted = Relayer::process_new_nakamoto_block( + &self.config.burnchain, + self.sortdb.as_ref().unwrap(), + &mut sort_handle, + &mut self.stacks_node.as_mut().unwrap().chainstate, + block.clone(), + None, + )?; + if !accepted { + return Ok(false); + } + let sort_tip = SortitionDB::get_canonical_sortition_tip(self.sortdb().conn()).unwrap(); + let Some(block_receipt) = + NakamotoChainState::process_next_nakamoto_block::( + &mut self.stacks_node.as_mut().unwrap().chainstate, + self.sortdb.as_mut().unwrap(), + &sort_tip, + None, + )? + else { + return Ok(false); + }; + if block_receipt.header.index_block_hash() == block.block_id() { + Ok(true) + } else { + Ok(false) + } + } + /// Produce and process a Nakamoto tenure, after processing the block-commit from /// begin_nakamoto_tenure(). You'd process the burnchain ops from begin_nakamoto_tenure(), /// take the consensus hash, and feed it in here. @@ -1060,6 +1091,31 @@ impl<'a> TestPeer<'a> { &SortitionDB, &[(NakamotoBlock, u64, ExecutionCost)], ) -> Vec, + { + self.make_nakamoto_tenure_and(tenure_change, coinbase, signers, block_builder, |_| true) + } + + /// Produce and process a Nakamoto tenure, after processing the block-commit from + /// begin_nakamoto_tenure(). You'd process the burnchain ops from begin_nakamoto_tenure(), + /// take the consensus hash, and feed it in here. + /// + /// Returns the blocks, their sizes, and runtime costs + pub fn make_nakamoto_tenure_and( + &mut self, + tenure_change: StacksTransaction, + coinbase: StacksTransaction, + signers: &mut TestSigners, + block_builder: F, + after_block: G, + ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> + where + F: FnMut( + &mut TestMiner, + &mut StacksChainState, + &SortitionDB, + &[(NakamotoBlock, u64, ExecutionCost)], + ) -> Vec, + G: FnMut(&mut NakamotoBlock) -> bool, { let cycle = self.get_reward_cycle(); let mut stacks_node = self.stacks_node.take().unwrap(); @@ -1082,6 +1138,7 @@ impl<'a> TestPeer<'a> { Some(coinbase), &mut self.coord, block_builder, + after_block, ); let just_blocks = blocks @@ -1154,6 +1211,7 @@ impl<'a> TestPeer<'a> { None, &mut self.coord, block_builder, + |_| true, ); let just_blocks = blocks From 995f5c143c069fbe8cc5bec9b69bab5d3d418af9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sat, 8 Jun 2024 10:42:40 -0400 Subject: [PATCH 0279/1400] feat: add `get-stacks-block-info?` and `get-tenure-info?` Fixes: #4716 --- .../src/vm/analysis/arithmetic_checker/mod.rs | 13 +- clarity/src/vm/analysis/errors.rs | 8 + .../src/vm/analysis/read_only_checker/mod.rs | 9 +- clarity/src/vm/analysis/tests/mod.rs | 80 +++++- .../type_checker/v2_05/natives/mod.rs | 4 +- .../analysis/type_checker/v2_1/natives/mod.rs | 49 +++- .../analysis/type_checker/v2_1/tests/mod.rs | 25 +- clarity/src/vm/docs/mod.rs | 107 ++++++-- clarity/src/vm/functions/database.rs | 221 ++++++++++++++++- clarity/src/vm/functions/mod.rs | 228 +++++++++--------- clarity/src/vm/types/mod.rs | 37 +++ clarity/src/vm/version.rs | 4 - stackslib/src/net/api/postblock_proposal.rs | 2 - 13 files changed, 621 insertions(+), 166 deletions(-) diff --git a/clarity/src/vm/analysis/arithmetic_checker/mod.rs b/clarity/src/vm/analysis/arithmetic_checker/mod.rs index d4c5bafe96..aa69f650f0 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/mod.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/mod.rs @@ -174,13 +174,12 @@ impl<'a> ArithmeticOnlyChecker<'a> { ) -> Result<(), Error> { use crate::vm::functions::NativeFunctions::*; match function { - FetchVar | GetBlockInfo | GetBurnBlockInfo | GetTokenBalance | GetAssetOwner - | FetchEntry | SetEntry | DeleteEntry | InsertEntry | SetVar | MintAsset - | MintToken | TransferAsset | TransferToken | ContractCall | StxTransfer - | StxTransferMemo | StxBurn | AtBlock | GetStxBalance | GetTokenSupply | BurnToken - | FromConsensusBuff | ToConsensusBuff | BurnAsset | StxGetAccount => { - Err(Error::FunctionNotPermitted(function)) - } + FetchVar | GetBlockInfo | GetBurnBlockInfo | GetStacksBlockInfo | GetTenureInfo + | GetTokenBalance | GetAssetOwner | FetchEntry | SetEntry | DeleteEntry + | InsertEntry | SetVar | MintAsset | MintToken | TransferAsset | TransferToken + | ContractCall | StxTransfer | StxTransferMemo | StxBurn | AtBlock | GetStxBalance + | GetTokenSupply | BurnToken | FromConsensusBuff | ToConsensusBuff | BurnAsset + | StxGetAccount => Err(Error::FunctionNotPermitted(function)), Append | Concat | AsMaxLen | ContractOf | PrincipalOf | ListCons | Print | AsContract | ElementAt | ElementAtAlias | IndexOf | IndexOfAlias | Map | Filter | Fold | Slice | ReplaceAt => Err(Error::FunctionNotPermitted(function)), diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index 257d2e5bbe..f86308f8d9 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -132,8 +132,12 @@ pub enum CheckErrors { // get-block-info? errors NoSuchBlockInfoProperty(String), NoSuchBurnBlockInfoProperty(String), + NoSuchStacksBlockInfoProperty(String), + NoSuchTenureInfoProperty(String), GetBlockInfoExpectPropertyName, GetBurnBlockInfoExpectPropertyName, + GetStacksBlockInfoExpectPropertyName, + GetTenureInfoExpectPropertyName, NameAlreadyUsed(String), ReservedWord(String), @@ -406,8 +410,12 @@ impl DiagnosableError for CheckErrors { CheckErrors::ExpectedCallableType(found_type) => format!("expected a callable contract, found {}", found_type), CheckErrors::NoSuchBlockInfoProperty(property_name) => format!("use of block unknown property '{}'", property_name), CheckErrors::NoSuchBurnBlockInfoProperty(property_name) => format!("use of burn block unknown property '{}'", property_name), + CheckErrors::NoSuchStacksBlockInfoProperty(property_name) => format!("use of unknown stacks block property '{}'", property_name), + CheckErrors::NoSuchTenureInfoProperty(property_name) => format!("use of unknown tenure property '{}'", property_name), CheckErrors::GetBlockInfoExpectPropertyName => "missing property name for block info introspection".into(), CheckErrors::GetBurnBlockInfoExpectPropertyName => "missing property name for burn block info introspection".into(), + CheckErrors::GetStacksBlockInfoExpectPropertyName => "missing property name for stacks block info introspection".into(), + CheckErrors::GetTenureInfoExpectPropertyName => "missing property name for tenure info introspection".into(), CheckErrors::NameAlreadyUsed(name) => format!("defining '{}' conflicts with previous value", name), CheckErrors::ReservedWord(name) => format!("{name} is a reserved word"), CheckErrors::NonFunctionApplication => "expecting expression of type function".into(), diff --git a/clarity/src/vm/analysis/read_only_checker/mod.rs b/clarity/src/vm/analysis/read_only_checker/mod.rs index b02923c1a1..006b4f0cfe 100644 --- a/clarity/src/vm/analysis/read_only_checker/mod.rs +++ b/clarity/src/vm/analysis/read_only_checker/mod.rs @@ -290,10 +290,11 @@ impl<'a, 'b> ReadOnlyChecker<'a, 'b> { | BuffToUIntBe | IntToAscii | IntToUtf8 | StringToInt | StringToUInt | IsStandard | ToConsensusBuff | PrincipalDestruct | PrincipalConstruct | Append | Concat | AsMaxLen | ContractOf | PrincipalOf | ListCons | GetBlockInfo | GetBurnBlockInfo - | TupleGet | TupleMerge | Len | Print | AsContract | Begin | FetchVar - | GetStxBalance | StxGetAccount | GetTokenBalance | GetAssetOwner | GetTokenSupply - | ElementAt | IndexOf | Slice | ReplaceAt | BitwiseAnd | BitwiseOr | BitwiseNot - | BitwiseLShift | BitwiseRShift | BitwiseXor2 | ElementAtAlias | IndexOfAlias => { + | GetStacksBlockInfo | GetTenureInfo | TupleGet | TupleMerge | Len | Print + | AsContract | Begin | FetchVar | GetStxBalance | StxGetAccount | GetTokenBalance + | GetAssetOwner | GetTokenSupply | ElementAt | IndexOf | Slice | ReplaceAt + | BitwiseAnd | BitwiseOr | BitwiseNot | BitwiseLShift | BitwiseRShift | BitwiseXor2 + | ElementAtAlias | IndexOfAlias => { // Check all arguments. self.check_each_expression_is_read_only(args) } diff --git a/clarity/src/vm/analysis/tests/mod.rs b/clarity/src/vm/analysis/tests/mod.rs index adb36b94fb..2484ee86cd 100644 --- a/clarity/src/vm/analysis/tests/mod.rs +++ b/clarity/src/vm/analysis/tests/mod.rs @@ -14,10 +14,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use stacks_common::types::StacksEpochId; + use crate::vm::analysis::errors::CheckErrors; use crate::vm::analysis::type_checker::v2_1::tests::mem_type_check; -use crate::vm::analysis::{type_check, AnalysisDatabase, ContractAnalysis}; +use crate::vm::analysis::{ + mem_type_check as mem_run_analysis, type_check, AnalysisDatabase, ContractAnalysis, +}; use crate::vm::ast::parse; +use crate::vm::ClarityVersion; #[test] fn test_list_types_must_match() { @@ -202,18 +207,87 @@ fn test_contract_call_expect_name() { #[test] fn test_no_such_block_info_property() { let snippet = "(get-block-info? unicorn 1)"; - let err = mem_type_check(snippet).unwrap_err(); + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity2, StacksEpochId::latest()).unwrap_err(); assert!(format!("{}", err.diagnostic).contains("use of block unknown property 'unicorn'")); } +#[test] +fn test_no_such_stacks_block_info_property() { + let snippet = "(get-stacks-block-info? unicorn 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity3, StacksEpochId::latest()).unwrap_err(); + assert!( + format!("{}", err.diagnostic).contains("use of unknown stacks block property 'unicorn'") + ); +} + +#[test] +fn test_no_such_tenure_info_property() { + let snippet = "(get-tenure-info? unicorn 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity3, StacksEpochId::latest()).unwrap_err(); + assert!(format!("{}", err.diagnostic).contains("use of unknown tenure property 'unicorn'")); +} + #[test] fn test_get_block_info_expect_property_name() { let snippet = "(get-block-info? 0 1)"; - let err = mem_type_check(snippet).unwrap_err(); + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity2, StacksEpochId::latest()).unwrap_err(); assert!(format!("{}", err.diagnostic) .contains("missing property name for block info introspection")); } +#[test] +fn test_get_stacks_block_info_expect_property_name() { + let snippet = "(get-stacks-block-info? 0 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity3, StacksEpochId::latest()).unwrap_err(); + assert!(format!("{}", err.diagnostic) + .contains("missing property name for stacks block info introspection")); +} + +#[test] +fn test_get_tenure_info_expect_property_name() { + let snippet = "(get-tenure-info? 0 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity3, StacksEpochId::latest()).unwrap_err(); + assert!(format!("{}", err.diagnostic) + .contains("missing property name for tenure info introspection")); +} + +#[test] +fn test_no_such_block_info_height() { + let snippet = "(get-block-info? time 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity2, StacksEpochId::latest()).unwrap_err(); + println!("{}", err.diagnostic); + assert!( + format!("{}", err.diagnostic).contains("expecting expression of type 'uint', found 'int'") + ); +} + +#[test] +fn test_no_such_stacks_block_info_height() { + let snippet = "(get-stacks-block-info? time 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity3, StacksEpochId::latest()).unwrap_err(); + assert!( + format!("{}", err.diagnostic).contains("expecting expression of type 'uint', found 'int'") + ); +} + +#[test] +fn test_no_such_tenure_info_height() { + let snippet = "(get-tenure-info? time 1)"; + let err = + mem_run_analysis(snippet, ClarityVersion::Clarity3, StacksEpochId::latest()).unwrap_err(); + assert!( + format!("{}", err.diagnostic).contains("expecting expression of type 'uint', found 'int'") + ); +} + #[test] fn test_name_already_used() { let snippet = "(define-constant var1 true) (define-constant var1 1)"; diff --git a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs index b38cfd0d11..201c307986 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/natives/mod.rs @@ -773,9 +773,9 @@ impl TypedNativeFunction { | StringToUInt | IntToAscii | IntToUtf8 | GetBurnBlockInfo | StxTransferMemo | StxGetAccount | BitwiseAnd | BitwiseOr | BitwiseNot | BitwiseLShift | BitwiseRShift | BitwiseXor2 | Slice | ToConsensusBuff | FromConsensusBuff - | ReplaceAt => { + | ReplaceAt | GetStacksBlockInfo | GetTenureInfo => { return Err(CheckErrors::Expects( - "Clarity 2 keywords should not show up in 2.05".into(), + "Clarity 2+ keywords should not show up in 2.05".into(), ) .into()) } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index c5aefb65ed..89a3bff24e 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -35,8 +35,9 @@ use crate::vm::types::signatures::{ use crate::vm::types::TypeSignature::SequenceType; use crate::vm::types::{ BlockInfoProperty, BufferLength, BurnBlockInfoProperty, FixedFunction, FunctionArg, - FunctionSignature, FunctionType, PrincipalData, TupleTypeSignature, TypeSignature, Value, - BUFF_1, BUFF_20, BUFF_32, BUFF_33, BUFF_64, BUFF_65, MAX_VALUE_SIZE, + FunctionSignature, FunctionType, PrincipalData, StacksBlockInfoProperty, TenureInfoProperty, + TupleTypeSignature, TypeSignature, Value, BUFF_1, BUFF_20, BUFF_32, BUFF_33, BUFF_64, BUFF_65, + MAX_VALUE_SIZE, }; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, SymbolicExpressionType}; @@ -699,6 +700,48 @@ fn check_get_burn_block_info( )?) } +fn check_get_stacks_block_info( + checker: &mut TypeChecker, + args: &[SymbolicExpression], + context: &TypingContext, +) -> TypeResult { + check_arguments_at_least(2, args)?; + + let block_info_prop_str = args[0].match_atom().ok_or(CheckError::new( + CheckErrors::GetStacksBlockInfoExpectPropertyName, + ))?; + + let block_info_prop = + StacksBlockInfoProperty::lookup_by_name(block_info_prop_str).ok_or(CheckError::new( + CheckErrors::NoSuchStacksBlockInfoProperty(block_info_prop_str.to_string()), + ))?; + + checker.type_check_expects(&args[1], context, &TypeSignature::UIntType)?; + + Ok(TypeSignature::new_option(block_info_prop.type_result())?) +} + +fn check_get_tenure_info( + checker: &mut TypeChecker, + args: &[SymbolicExpression], + context: &TypingContext, +) -> TypeResult { + check_arguments_at_least(2, args)?; + + let block_info_prop_str = args[0].match_atom().ok_or(CheckError::new( + CheckErrors::GetTenureInfoExpectPropertyName, + ))?; + + let block_info_prop = + TenureInfoProperty::lookup_by_name(block_info_prop_str).ok_or(CheckError::new( + CheckErrors::NoSuchTenureInfoProperty(block_info_prop_str.to_string()), + ))?; + + checker.type_check_expects(&args[1], context, &TypeSignature::UIntType)?; + + Ok(TypeSignature::new_option(block_info_prop.type_result())?) +} + impl TypedNativeFunction { pub fn type_check_application( &self, @@ -1034,6 +1077,8 @@ impl TypedNativeFunction { PrincipalOf => Special(SpecialNativeFunction(&check_principal_of)), GetBlockInfo => Special(SpecialNativeFunction(&check_get_block_info)), GetBurnBlockInfo => Special(SpecialNativeFunction(&check_get_burn_block_info)), + GetStacksBlockInfo => Special(SpecialNativeFunction(&check_get_stacks_block_info)), + GetTenureInfo => Special(SpecialNativeFunction(&check_get_tenure_info)), ConsSome => Special(SpecialNativeFunction(&options::check_special_some)), ConsOkay => Special(SpecialNativeFunction(&options::check_special_okay)), ConsError => Special(SpecialNativeFunction(&options::check_special_error)), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 0333045c5a..12597c88fa 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -60,11 +60,15 @@ fn type_check_helper(exp: &str) -> TypeResult { mem_type_check(exp).map(|(type_sig_opt, _)| type_sig_opt.unwrap()) } -fn type_check_helper_v1(exp: &str) -> TypeResult { - mem_run_analysis(exp, ClarityVersion::Clarity1, StacksEpochId::latest()) +fn type_check_helper_version(exp: &str, version: ClarityVersion) -> TypeResult { + mem_run_analysis(exp, version, StacksEpochId::latest()) .map(|(type_sig_opt, _)| type_sig_opt.unwrap()) } +fn type_check_helper_v1(exp: &str) -> TypeResult { + type_check_helper_version(exp, ClarityVersion::Clarity1) +} + fn buff_type(size: u32) -> TypeSignature { TypeSignature::SequenceType(BufferType(size.try_into().unwrap())) } @@ -269,18 +273,29 @@ fn test_get_block_info() { for (good_test, expected) in good.iter().zip(expected.iter()) { assert_eq!( expected, - &format!("{}", type_check_helper(good_test).unwrap()) + &format!( + "{}", + type_check_helper_version(good_test, ClarityVersion::Clarity2).unwrap() + ) ); } for (good_test_v210, expected_v210) in good_v210.iter().zip(expected_v210.iter()) { assert_eq!( expected_v210, - &format!("{}", type_check_helper(good_test_v210).unwrap()) + &format!( + "{}", + type_check_helper_version(good_test_v210, ClarityVersion::Clarity2).unwrap() + ) ); } for (bad_test, expected) in bad.iter().zip(bad_expected.iter()) { - assert_eq!(expected, &type_check_helper(bad_test).unwrap_err().err); + assert_eq!( + expected, + &type_check_helper_version(bad_test, ClarityVersion::Clarity2) + .unwrap_err() + .err + ); } for good_test in good_v210.iter() { diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 91ec6741f1..07edb331fa 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -1727,47 +1727,44 @@ const GET_BLOCK_INFO_API: SpecialAPI = SpecialAPI { snippet: "get-block-info? ${1:prop} ${2:block-height}", output_type: "(optional buff) | (optional uint)", signature: "(get-block-info? prop-name block-height)", - description: "The `get-block-info?` function fetches data for a block of the given *Stacks* block height. The + description: "In Clarity 3, `get-block-info?` is removed. In its place, `get-stacks-block-info?` can be used to retrieve +information about a Stacks block and `get-tenure-info?` can be used to get information pertaining to the tenure. + +The `get-block-info?` function fetches data for a block of the given *Stacks* block height. The value and type returned are determined by the specified `BlockInfoPropertyName`. If the provided `block-height` does not correspond to an existing block prior to the current block, the function returns `none`. The currently available property names are as follows: -`burnchain-header-hash`: This property returns a `(buff 32)` value containing the header hash of the burnchain (Bitcoin) block that selected the +- `burnchain-header-hash`: This property returns a `(buff 32)` value containing the header hash of the burnchain (Bitcoin) block that selected the Stacks block at the given Stacks chain height. -`id-header-hash`: This property returns a `(buff 32)` value containing the _index block hash_ of a Stacks block. This hash is globally unique, and is derived +- `id-header-hash`: This property returns a `(buff 32)` value containing the _index block hash_ of a Stacks block. This hash is globally unique, and is derived from the block hash and the history of accepted PoX operations. This is also the block hash value you would pass into `(at-block)`. -`header-hash`: This property returns a `(buff 32)` value containing the header hash of a Stacks block, given a Stacks chain height. **WARNING* this hash is +- `header-hash`: This property returns a `(buff 32)` value containing the header hash of a Stacks block, given a Stacks chain height. **WARNING* this hash is not guaranteed to be globally unique, since the same Stacks block can be mined in different PoX forks. If you need global uniqueness, you should use `id-header-hash`. -`miner-address`: This property returns a `principal` value corresponding to the miner of the given block. **WARNING** In Stacks 2.1, this is not guaranteed to +- `miner-address`: This property returns a `principal` value corresponding to the miner of the given block. **WARNING** In Stacks 2.1, this is not guaranteed to be the same `principal` that received the block reward, since Stacks 2.1 supports coinbase transactions that pay the reward to a contract address. This is merely the address of the `principal` that produced the block. -`time`: This property returns a `uint` value of the block header time field. This is a Unix epoch timestamp in seconds -which roughly corresponds to when the block was mined. - In Clarity 2, this timestamp comes from the burnchain block. **Note**: this does not increase monotonically with each block +- `time`: This property returns a `uint` value of the block header time field. This is a Unix epoch timestamp in seconds +which roughly corresponds to when the block was mined. This timestamp comes from the burnchain block. **Note**: this does not increase monotonically with each block and block times are accurate only to within two hours. See [BIP113](https://github.com/bitcoin/bips/blob/master/bip-0113.mediawiki) for more information. - In Clarity 3, which activates with epoch 3.0 (Nakamoto), this timestamp comes from the Stacks block itself. **Note**: this is the time, according to the miner, when -the block started to be mined, but is not guaranteed to be accurate. It will be validated by the signers to be: - - Greater than the timestamp of the previous block - - Less than 15 seconds into the future (according to their own local clocks) +For blocks mined after epoch 3.0, all Stacks blocks in one tenure will share the same timestamp. To get the Stacks block time for a block in epoch 3.0+, use `get-stacks-block-info?`. -`vrf-seed`: This property returns a `(buff 32)` value of the VRF seed for the corresponding block. +- `vrf-seed`: This property returns a `(buff 32)` value of the VRF seed for the corresponding block. -New in Stacks 2.1: - -`block-reward`: This property returns a `uint` value for the total block reward of the indicated Stacks block. This value is only available once the reward for +- `block-reward`: This property returns a `uint` value for the total block reward of the indicated Stacks block. This value is only available once the reward for the block matures. That is, the latest `block-reward` value available is at least 101 Stacks blocks in the past (on mainnet). The reward includes the coinbase, the anchored block's transaction fees, and the shares of the confirmed and produced microblock transaction fees earned by this block's miner. Note that this value may be smaller than the Stacks coinbase at this height, because the miner may have been punished with a valid `PoisonMicroblock` transaction in the event that the miner -published two or more microblock stream forks. +published two or more microblock stream forks. Added in Clarity 2. -`miner-spend-total`: This property returns a `uint` value for the total number of burnchain tokens (i.e. satoshis) spent by all miners trying to win this block. +- `miner-spend-total`: This property returns a `uint` value for the total number of burnchain tokens (i.e. satoshis) spent by all miners trying to win this block. Added in Clarity 2. -`miner-spend-winner`: This property returns a `uint` value for the number of burnchain tokens (i.e. satoshis) spent by the winning miner for this Stacks block. Note that -this value is less than or equal to the value for `miner-spend-total` at the same block height. +- `miner-spend-winner`: This property returns a `uint` value for the number of burnchain tokens (i.e. satoshis) spent by the winning miner for this Stacks block. Note that +this value is less than or equal to the value for `miner-spend-total` at the same block height. Added in Clarity 2. ", example: "(get-block-info? time u0) ;; Returns (some u1557860301) (get-block-info? header-hash u0) ;; Returns (some 0x374708fff7719dd5979ec875d56cd2286f6d3cf7ec317a3b25632aab28ec37bb) @@ -1812,6 +1809,74 @@ The `addrs` list contains the same PoX address values passed into the PoX smart " }; +const GET_STACKS_BLOCK_INFO_API: SpecialAPI = SpecialAPI { + input_type: "StacksBlockInfoPropertyName, uint", + snippet: "get-stacks-block-info? ${1:prop} ${2:block-height}", + output_type: "(optional buff) | (optional uint)", + signature: "(get-stacks-block-info? prop-name block-height)", + description: "The `get-stacks-block-info?` function fetches data for a block of the given *Stacks* block height. The +value and type returned are determined by the specified `StacksBlockInfoPropertyName`. If the provided `block-height` does +not correspond to an existing block prior to the current block, the function returns `none`. The currently available property names +are as follows: + +- `id-header-hash`: This property returns a `(buff 32)` value containing the _index block hash_ of a Stacks block. This hash is globally unique, and is derived +from the block hash and the history of accepted PoX operations. This is also the block hash value you would pass into `(at-block)`. + +- `header-hash`: This property returns a `(buff 32)` value containing the header hash of a Stacks block, given a Stacks chain height. **WARNING* this hash is +not guaranteed to be globally unique, since the same Stacks block can be mined in different PoX forks. If you need global uniqueness, you should use `id-header-hash`. + +- `time`: This property returns a `uint` value of the block header time field. This is a Unix epoch timestamp in seconds +which roughly corresponds to when the block was mined. For a block mined before epoch 3.0, this timestamp comes from the burnchain block. **Note**: this does not increase monotonically with each block +and block times are accurate only to within two hours. See [BIP113](https://github.com/bitcoin/bips/blob/master/bip-0113.mediawiki) for more information. +For a block mined after epoch 3.0, this timestamp comes from the Stacks block header. **Note**: this is the time, according to the miner, when +the mining of this block started, but is not guaranteed to be accurate. This time will be validated by the signers to be: + - Greater than the timestamp of the previous block + - Less than 15 seconds into the future (according to their own local clocks) +", + example: "(get-stacks-block-info? time u0) ;; Returns (some u1557860301) +(get-stacks-block-info? header-hash u0) ;; Returns (some 0x374708fff7719dd5979ec875d56cd2286f6d3cf7ec317a3b25632aab28ec37bb) +" +}; + +const GET_TENURE_INFO_API: SpecialAPI = SpecialAPI { + input_type: "TenureInfoPropertyName, uint", + snippet: "get-tenure-info? ${1:prop} ${2:tenure-height}", + output_type: "(optional buff) | (optional uint)", + signature: "(get-tenure-info? prop-name tenure-height)", + description: "The `get-tenure-info?` function fetches data for a tenure at the given height. The +value and type returned are determined by the specified `TenureInfoPropertyName`. If the provided `tenure-height` does +not correspond to an existing tenure prior to the current block, the function returns `none`. The currently available property names +are as follows: + +- `burnchain-header-hash`: This property returns a `(buff 32)` value containing the header hash of the burnchain (Bitcoin) block that selected the +tenure at the given height. + +- `miner-address`: This property returns a `principal` value corresponding to the miner of the given tenure. **WARNING** This is not guaranteed to +be the same `principal` that received the block reward, since Stacks 2.1+ supports coinbase transactions that pay the reward to a contract address. This is merely +the address of the `principal` that produced the tenure. + +- `time`: This property returns a `uint` Unix epoch timestamp in seconds which roughly corresponds to when the tenure was started. This timestamp comes +from the burnchain block. **Note**: this does not increase monotonically with each tenure and tenure times are accurate only to within two hours. See +[BIP113](https://github.com/bitcoin/bips/blob/master/bip-0113.mediawiki) for more information. + +- `vrf-seed`: This property returns a `(buff 32)` value of the VRF seed for the corresponding tenure. + +- `block-reward`: This property returns a `uint` value for the total block reward of the indicated tenure. This value is only available once the reward for +the tenure matures. That is, the latest `block-reward` value available is at least 101 Stacks blocks in the past (on mainnet). The reward includes the coinbase, +the anchored tenure's transaction fees, and the shares of the confirmed and produced microblock transaction fees earned by this block's miner. Note that this value may +be smaller than the Stacks coinbase at this height, because the miner may have been punished with a valid `PoisonMicroblock` transaction in the event that the miner +published two or more microblock stream forks. + +- `miner-spend-total`: This property returns a `uint` value for the total number of burnchain tokens (i.e. satoshis) spent by all miners trying to win this tenure. + +- `miner-spend-winner`: This property returns a `uint` value for the number of burnchain tokens (i.e. satoshis) spent by the winning miner for this tennure. Note that +this value is less than or equal to the value for `miner-spend-total` at the same tenure height. +", + example: "(get-tenure-info? time u0) ;; Returns (some u1557860301) +(get-tenure-info? vrf-seed u0) ;; Returns (some 0xf490de2920c8a35fabeb13208852aa28c76f9be9b03a4dd2b3c075f7a26923b4) +" +}; + const PRINCIPAL_CONSTRUCT_API: SpecialAPI = SpecialAPI { input_type: "(buff 1), (buff 20), [(string-ascii 40)]", output_type: "(response principal { error_code: uint, value: (optional principal) })", @@ -2528,6 +2593,8 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI { AsContract => make_for_special(&AS_CONTRACT_API, function), GetBlockInfo => make_for_special(&GET_BLOCK_INFO_API, function), GetBurnBlockInfo => make_for_special(&GET_BURN_BLOCK_INFO_API, function), + GetStacksBlockInfo => make_for_special(&GET_STACKS_BLOCK_INFO_API, function), + GetTenureInfo => make_for_special(&GET_TENURE_INFO_API, function), ConsOkay => make_for_special(&CONS_OK_API, function), ConsError => make_for_special(&CONS_ERR_API, function), ConsSome => make_for_special(&CONS_SOME_API, function), diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index c84d037aca..71cba8df96 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -32,7 +32,7 @@ use crate::vm::functions::tuples; use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType}; use crate::vm::types::{ BlockInfoProperty, BuffData, BurnBlockInfoProperty, OptionalData, PrincipalData, SequenceData, - TupleData, TypeSignature, Value, BUFF_32, + StacksBlockInfoProperty, TenureInfoProperty, TupleData, TypeSignature, Value, BUFF_32, }; use crate::vm::{eval, ClarityVersion, Environment, LocalContext}; @@ -717,6 +717,26 @@ pub fn special_delete_entry_v205( result.map(|data| data.value) } +/// Handles the `get-block-info?` special function. +/// Interprets `args` as variables `[property-name, block-height]`, and returns +/// a property value determined by `property-name`: +/// - `id-header-hash` returns the index block hash at `block-height` +/// - `header-hash` returns the header hash at `block-height` +/// - `time` returns the burn block time of the block at `block-height` +/// - `vrf-seed` returns the VRF seed of the block at `block-height` +/// - `burnchain-header-hash` returns header hash of the burnchain block corresponding to `block-height` +/// - `miner-address` returns the address of the principal that mined the block at `block-height` +/// - `miner-spend-winner` returns the number of satoshis spent by the winning miner for the block at `block-height` +/// - `miner-spend-total` returns the total number of satoshis spent by all miners for the block at `block-height` +/// - `block-reward` returns the block reward for the block at `block-height` + +/// +/// # Errors: +/// - CheckErrors::IncorrectArgumentCount if there aren't 2 arguments. +/// - CheckErrors::GetStacksBlockInfoExpectPropertyName if `args[0]` isn't a ClarityName. +/// - CheckErrors::NoSuchStacksBlockInfoProperty if `args[0]` isn't a StacksBlockInfoProperty. +/// - CheckErrors::TypeValueError if `args[1]` isn't a `uint`. + pub fn special_get_block_info( args: &[SymbolicExpression], env: &mut Environment, @@ -756,13 +776,10 @@ pub fn special_get_block_info( let result = match block_info_prop { BlockInfoProperty::Time => { - let block_time = if version.uses_nakamoto_block_timestamp() { - env.global_context.database.get_block_time(height_value)? - } else { - env.global_context - .database - .get_burn_block_time(height_value)? - }; + let block_time = env + .global_context + .database + .get_burn_block_time(height_value)?; Value::UInt(u128::from(block_time)) } BlockInfoProperty::VrfSeed => { @@ -835,6 +852,7 @@ pub fn special_get_block_info( Value::some(result) } +/// Handles the `get-burn-block-info?` special function. /// Interprets `args` as variables `[property_name, burn_block_height]`, and returns /// a property value determined by `property_name`: /// - `header_hash` returns the burn block header hash at `burn_block_height` @@ -929,3 +947,190 @@ pub fn special_get_burn_block_info( } } } + +/// Handles the `get-stacks-block-info?` special function. +/// Interprets `args` as variables `[property-name, block-height]`, and returns +/// a property value determined by `property-name`: +/// - `id-header-hash` returns the index block hash at `block-height` +/// - `header-hash` returns the header hash at `block-height` +/// - `time` returns the block time at `block-height` +/// +/// # Errors: +/// - CheckErrors::IncorrectArgumentCount if there aren't 2 arguments. +/// - CheckErrors::GetStacksBlockInfoExpectPropertyName if `args[0]` isn't a ClarityName. +/// - CheckErrors::NoSuchStacksBlockInfoProperty if `args[0]` isn't a StacksBlockInfoProperty. +/// - CheckErrors::TypeValueError if `args[1]` isn't a `uint`. +pub fn special_get_stacks_block_info( + args: &[SymbolicExpression], + env: &mut Environment, + context: &LocalContext, +) -> Result { + // (get-stacks-block-info? property-name block-height-uint) + runtime_cost(ClarityCostFunction::BlockInfo, env, 0)?; + + check_argument_count(2, args)?; + + // Handle the block property name input arg. + let property_name = args[0] + .match_atom() + .ok_or(CheckErrors::GetStacksBlockInfoExpectPropertyName)?; + + let block_info_prop = StacksBlockInfoProperty::lookup_by_name(property_name).ok_or( + CheckErrors::NoSuchStacksBlockInfoProperty(property_name.to_string()), + )?; + + // Handle the block-height input arg. + let height_eval = eval(&args[1], env, context)?; + let height_value = match height_eval { + Value::UInt(result) => Ok(result), + x => Err(CheckErrors::TypeValueError(TypeSignature::UIntType, x)), + }?; + + let height_value = match u32::try_from(height_value) { + Ok(result) => result, + _ => return Ok(Value::none()), + }; + + let current_block_height = env.global_context.database.get_current_block_height(); + if height_value >= current_block_height { + return Ok(Value::none()); + } + + let result = match block_info_prop { + StacksBlockInfoProperty::Time => { + let block_time = env.global_context.database.get_block_time(height_value)?; + Value::UInt(u128::from(block_time)) + } + StacksBlockInfoProperty::HeaderHash => { + let header_hash = env + .global_context + .database + .get_block_header_hash(height_value)?; + Value::Sequence(SequenceData::Buffer(BuffData { + data: header_hash.as_bytes().to_vec(), + })) + } + StacksBlockInfoProperty::IdentityHeaderHash => { + let id_header_hash = env + .global_context + .database + .get_index_block_header_hash(height_value)?; + Value::Sequence(SequenceData::Buffer(BuffData { + data: id_header_hash.as_bytes().to_vec(), + })) + } + }; + + Value::some(result) +} + +/// Handles the function `get-tenure-info?` special function. +/// Interprets `args` as variables `[property-name, tenure-height]`, and returns +/// a property value determined by `property-name`: +/// - `time` returns the burn block time of the tenure at `tenure-height` +/// - `vrf-seed` returns the VRF seed of the tenure at `tenure-height` +/// - `burnchain-header-hash` returns header hash of the burnchain block corresponding to the tenure at `tenure-height` +/// - `miner-address` returns the address of the principal that mined the tenure at `tenure-height` +/// - `miner-spend-winner` returns the number of satoshis spent by the winning miner for the tenure at `tenure-height` +/// - `miner-spend-total` returns the total number of satoshis spent by all miners for the tenure at `tenure-height` +/// - `block-reward` returns the block reward for the tenure at `tenure-height` +/// +/// # Errors: +/// - CheckErrors::IncorrectArgumentCount if there aren't 2 arguments. +/// - CheckErrors::GetTenureInfoExpectPropertyName if `args[0]` isn't a ClarityName. +/// - CheckErrors::NoSuchTenureInfoProperty if `args[0]` isn't a TenureInfoProperty. +/// - CheckErrors::TypeValueError if `args[1]` isn't a `uint`. +pub fn special_get_tenure_info( + args: &[SymbolicExpression], + env: &mut Environment, + context: &LocalContext, +) -> Result { + // (get-block-info? property-name block-height-int) + runtime_cost(ClarityCostFunction::BlockInfo, env, 0)?; + + check_argument_count(2, args)?; + + // Handle the block property name input arg. + let property_name = args[0] + .match_atom() + .ok_or(CheckErrors::GetTenureInfoExpectPropertyName)?; + + let block_info_prop = TenureInfoProperty::lookup_by_name(property_name) + .ok_or(CheckErrors::GetTenureInfoExpectPropertyName)?; + + // Handle the tenure-height input arg. + let height_eval = eval(&args[1], env, context)?; + let height_value = match height_eval { + Value::UInt(result) => Ok(result), + x => Err(CheckErrors::TypeValueError(TypeSignature::UIntType, x)), + }?; + + let height_value = match u32::try_from(height_value) { + Ok(result) => result, + _ => return Ok(Value::none()), + }; + + let current_block_height = env.global_context.database.get_current_block_height(); + if height_value >= current_block_height { + return Ok(Value::none()); + } + + let result = match block_info_prop { + TenureInfoProperty::Time => { + let block_time = env + .global_context + .database + .get_burn_block_time(height_value)?; + Value::UInt(u128::from(block_time)) + } + TenureInfoProperty::VrfSeed => { + let vrf_seed = env + .global_context + .database + .get_block_vrf_seed(height_value)?; + Value::Sequence(SequenceData::Buffer(BuffData { + data: vrf_seed.as_bytes().to_vec(), + })) + } + TenureInfoProperty::BurnchainHeaderHash => { + let burnchain_header_hash = env + .global_context + .database + .get_burnchain_block_header_hash(height_value)?; + Value::Sequence(SequenceData::Buffer(BuffData { + data: burnchain_header_hash.as_bytes().to_vec(), + })) + } + TenureInfoProperty::MinerAddress => { + let miner_address = env + .global_context + .database + .get_miner_address(height_value)?; + Value::from(miner_address) + } + TenureInfoProperty::MinerSpendWinner => { + let winner_spend = env + .global_context + .database + .get_miner_spend_winner(height_value)?; + Value::UInt(winner_spend) + } + TenureInfoProperty::MinerSpendTotal => { + let total_spend = env + .global_context + .database + .get_miner_spend_total(height_value)?; + Value::UInt(total_spend) + } + TenureInfoProperty::BlockReward => { + // this is already an optional + let block_reward_opt = env.global_context.database.get_block_reward(height_value)?; + return Ok(match block_reward_opt { + Some(x) => Value::some(Value::UInt(x))?, + None => Value::none(), + }); + } + }; + + Value::some(result) +} diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index 1a40c1fc51..833ed4baf8 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -84,115 +84,117 @@ pub mod principals; mod sequences; pub mod tuples; -define_versioned_named_enum!(NativeFunctions(ClarityVersion) { - Add("+", ClarityVersion::Clarity1), - Subtract("-", ClarityVersion::Clarity1), - Multiply("*", ClarityVersion::Clarity1), - Divide("/", ClarityVersion::Clarity1), - CmpGeq(">=", ClarityVersion::Clarity1), - CmpLeq("<=", ClarityVersion::Clarity1), - CmpLess("<", ClarityVersion::Clarity1), - CmpGreater(">", ClarityVersion::Clarity1), - ToInt("to-int", ClarityVersion::Clarity1), - ToUInt("to-uint", ClarityVersion::Clarity1), - Modulo("mod", ClarityVersion::Clarity1), - Power("pow", ClarityVersion::Clarity1), - Sqrti("sqrti", ClarityVersion::Clarity1), - Log2("log2", ClarityVersion::Clarity1), - BitwiseXor("xor", ClarityVersion::Clarity1), - And("and", ClarityVersion::Clarity1), - Or("or", ClarityVersion::Clarity1), - Not("not", ClarityVersion::Clarity1), - Equals("is-eq", ClarityVersion::Clarity1), - If("if", ClarityVersion::Clarity1), - Let("let", ClarityVersion::Clarity1), - Map("map", ClarityVersion::Clarity1), - Fold("fold", ClarityVersion::Clarity1), - Append("append", ClarityVersion::Clarity1), - Concat("concat", ClarityVersion::Clarity1), - AsMaxLen("as-max-len?", ClarityVersion::Clarity1), - Len("len", ClarityVersion::Clarity1), - ElementAt("element-at", ClarityVersion::Clarity1), - ElementAtAlias("element-at?", ClarityVersion::Clarity2), - IndexOf("index-of", ClarityVersion::Clarity1), - IndexOfAlias("index-of?", ClarityVersion::Clarity2), - BuffToIntLe("buff-to-int-le", ClarityVersion::Clarity2), - BuffToUIntLe("buff-to-uint-le", ClarityVersion::Clarity2), - BuffToIntBe("buff-to-int-be", ClarityVersion::Clarity2), - BuffToUIntBe("buff-to-uint-be", ClarityVersion::Clarity2), - IsStandard("is-standard", ClarityVersion::Clarity2), - PrincipalDestruct("principal-destruct?", ClarityVersion::Clarity2), - PrincipalConstruct("principal-construct?", ClarityVersion::Clarity2), - StringToInt("string-to-int?", ClarityVersion::Clarity2), - StringToUInt("string-to-uint?", ClarityVersion::Clarity2), - IntToAscii("int-to-ascii", ClarityVersion::Clarity2), - IntToUtf8("int-to-utf8", ClarityVersion::Clarity2), - ListCons("list", ClarityVersion::Clarity1), - FetchVar("var-get", ClarityVersion::Clarity1), - SetVar("var-set", ClarityVersion::Clarity1), - FetchEntry("map-get?", ClarityVersion::Clarity1), - SetEntry("map-set", ClarityVersion::Clarity1), - InsertEntry("map-insert", ClarityVersion::Clarity1), - DeleteEntry("map-delete", ClarityVersion::Clarity1), - TupleCons("tuple", ClarityVersion::Clarity1), - TupleGet("get", ClarityVersion::Clarity1), - TupleMerge("merge", ClarityVersion::Clarity1), - Begin("begin", ClarityVersion::Clarity1), - Hash160("hash160", ClarityVersion::Clarity1), - Sha256("sha256", ClarityVersion::Clarity1), - Sha512("sha512", ClarityVersion::Clarity1), - Sha512Trunc256("sha512/256", ClarityVersion::Clarity1), - Keccak256("keccak256", ClarityVersion::Clarity1), - Secp256k1Recover("secp256k1-recover?", ClarityVersion::Clarity1), - Secp256k1Verify("secp256k1-verify", ClarityVersion::Clarity1), - Print("print", ClarityVersion::Clarity1), - ContractCall("contract-call?", ClarityVersion::Clarity1), - AsContract("as-contract", ClarityVersion::Clarity1), - ContractOf("contract-of", ClarityVersion::Clarity1), - PrincipalOf("principal-of?", ClarityVersion::Clarity1), - AtBlock("at-block", ClarityVersion::Clarity1), - GetBlockInfo("get-block-info?", ClarityVersion::Clarity1), - GetBurnBlockInfo("get-burn-block-info?", ClarityVersion::Clarity2), - ConsError("err", ClarityVersion::Clarity1), - ConsOkay("ok", ClarityVersion::Clarity1), - ConsSome("some", ClarityVersion::Clarity1), - DefaultTo("default-to", ClarityVersion::Clarity1), - Asserts("asserts!", ClarityVersion::Clarity1), - UnwrapRet("unwrap!", ClarityVersion::Clarity1), - UnwrapErrRet("unwrap-err!", ClarityVersion::Clarity1), - Unwrap("unwrap-panic", ClarityVersion::Clarity1), - UnwrapErr("unwrap-err-panic", ClarityVersion::Clarity1), - Match("match", ClarityVersion::Clarity1), - TryRet("try!", ClarityVersion::Clarity1), - IsOkay("is-ok", ClarityVersion::Clarity1), - IsNone("is-none", ClarityVersion::Clarity1), - IsErr("is-err", ClarityVersion::Clarity1), - IsSome("is-some", ClarityVersion::Clarity1), - Filter("filter", ClarityVersion::Clarity1), - GetTokenBalance("ft-get-balance", ClarityVersion::Clarity1), - GetAssetOwner("nft-get-owner?", ClarityVersion::Clarity1), - TransferToken("ft-transfer?", ClarityVersion::Clarity1), - TransferAsset("nft-transfer?", ClarityVersion::Clarity1), - MintAsset("nft-mint?", ClarityVersion::Clarity1), - MintToken("ft-mint?", ClarityVersion::Clarity1), - GetTokenSupply("ft-get-supply", ClarityVersion::Clarity1), - BurnToken("ft-burn?", ClarityVersion::Clarity1), - BurnAsset("nft-burn?", ClarityVersion::Clarity1), - GetStxBalance("stx-get-balance", ClarityVersion::Clarity1), - StxTransfer("stx-transfer?", ClarityVersion::Clarity1), - StxTransferMemo("stx-transfer-memo?", ClarityVersion::Clarity2), - StxBurn("stx-burn?", ClarityVersion::Clarity1), - StxGetAccount("stx-account", ClarityVersion::Clarity2), - BitwiseAnd("bit-and", ClarityVersion::Clarity2), - BitwiseOr("bit-or", ClarityVersion::Clarity2), - BitwiseNot("bit-not", ClarityVersion::Clarity2), - BitwiseLShift("bit-shift-left", ClarityVersion::Clarity2), - BitwiseRShift("bit-shift-right", ClarityVersion::Clarity2), - BitwiseXor2("bit-xor", ClarityVersion::Clarity2), - Slice("slice?", ClarityVersion::Clarity2), - ToConsensusBuff("to-consensus-buff?", ClarityVersion::Clarity2), - FromConsensusBuff("from-consensus-buff?", ClarityVersion::Clarity2), - ReplaceAt("replace-at?", ClarityVersion::Clarity2), +define_versioned_named_enum_with_max!(NativeFunctions(ClarityVersion) { + Add("+", ClarityVersion::Clarity1, None), + Subtract("-", ClarityVersion::Clarity1, None), + Multiply("*", ClarityVersion::Clarity1, None), + Divide("/", ClarityVersion::Clarity1, None), + CmpGeq(">=", ClarityVersion::Clarity1, None), + CmpLeq("<=", ClarityVersion::Clarity1, None), + CmpLess("<", ClarityVersion::Clarity1, None), + CmpGreater(">", ClarityVersion::Clarity1, None), + ToInt("to-int", ClarityVersion::Clarity1, None), + ToUInt("to-uint", ClarityVersion::Clarity1, None), + Modulo("mod", ClarityVersion::Clarity1, None), + Power("pow", ClarityVersion::Clarity1, None), + Sqrti("sqrti", ClarityVersion::Clarity1, None), + Log2("log2", ClarityVersion::Clarity1, None), + BitwiseXor("xor", ClarityVersion::Clarity1, None), + And("and", ClarityVersion::Clarity1, None), + Or("or", ClarityVersion::Clarity1, None), + Not("not", ClarityVersion::Clarity1, None), + Equals("is-eq", ClarityVersion::Clarity1, None), + If("if", ClarityVersion::Clarity1, None), + Let("let", ClarityVersion::Clarity1, None), + Map("map", ClarityVersion::Clarity1, None), + Fold("fold", ClarityVersion::Clarity1, None), + Append("append", ClarityVersion::Clarity1, None), + Concat("concat", ClarityVersion::Clarity1, None), + AsMaxLen("as-max-len?", ClarityVersion::Clarity1, None), + Len("len", ClarityVersion::Clarity1, None), + ElementAt("element-at", ClarityVersion::Clarity1, None), + ElementAtAlias("element-at?", ClarityVersion::Clarity2, None), + IndexOf("index-of", ClarityVersion::Clarity1, None), + IndexOfAlias("index-of?", ClarityVersion::Clarity2, None), + BuffToIntLe("buff-to-int-le", ClarityVersion::Clarity2, None), + BuffToUIntLe("buff-to-uint-le", ClarityVersion::Clarity2, None), + BuffToIntBe("buff-to-int-be", ClarityVersion::Clarity2, None), + BuffToUIntBe("buff-to-uint-be", ClarityVersion::Clarity2, None), + IsStandard("is-standard", ClarityVersion::Clarity2, None), + PrincipalDestruct("principal-destruct?", ClarityVersion::Clarity2, None), + PrincipalConstruct("principal-construct?", ClarityVersion::Clarity2, None), + StringToInt("string-to-int?", ClarityVersion::Clarity2, None), + StringToUInt("string-to-uint?", ClarityVersion::Clarity2, None), + IntToAscii("int-to-ascii", ClarityVersion::Clarity2, None), + IntToUtf8("int-to-utf8", ClarityVersion::Clarity2, None), + ListCons("list", ClarityVersion::Clarity1, None), + FetchVar("var-get", ClarityVersion::Clarity1, None), + SetVar("var-set", ClarityVersion::Clarity1, None), + FetchEntry("map-get?", ClarityVersion::Clarity1, None), + SetEntry("map-set", ClarityVersion::Clarity1, None), + InsertEntry("map-insert", ClarityVersion::Clarity1, None), + DeleteEntry("map-delete", ClarityVersion::Clarity1, None), + TupleCons("tuple", ClarityVersion::Clarity1, None), + TupleGet("get", ClarityVersion::Clarity1, None), + TupleMerge("merge", ClarityVersion::Clarity1, None), + Begin("begin", ClarityVersion::Clarity1, None), + Hash160("hash160", ClarityVersion::Clarity1, None), + Sha256("sha256", ClarityVersion::Clarity1, None), + Sha512("sha512", ClarityVersion::Clarity1, None), + Sha512Trunc256("sha512/256", ClarityVersion::Clarity1, None), + Keccak256("keccak256", ClarityVersion::Clarity1, None), + Secp256k1Recover("secp256k1-recover?", ClarityVersion::Clarity1, None), + Secp256k1Verify("secp256k1-verify", ClarityVersion::Clarity1, None), + Print("print", ClarityVersion::Clarity1, None), + ContractCall("contract-call?", ClarityVersion::Clarity1, None), + AsContract("as-contract", ClarityVersion::Clarity1, None), + ContractOf("contract-of", ClarityVersion::Clarity1, None), + PrincipalOf("principal-of?", ClarityVersion::Clarity1, None), + AtBlock("at-block", ClarityVersion::Clarity1, None), + GetBlockInfo("get-block-info?", ClarityVersion::Clarity1, Some(ClarityVersion::Clarity2)), + GetBurnBlockInfo("get-burn-block-info?", ClarityVersion::Clarity2, None), + ConsError("err", ClarityVersion::Clarity1, None), + ConsOkay("ok", ClarityVersion::Clarity1, None), + ConsSome("some", ClarityVersion::Clarity1, None), + DefaultTo("default-to", ClarityVersion::Clarity1, None), + Asserts("asserts!", ClarityVersion::Clarity1, None), + UnwrapRet("unwrap!", ClarityVersion::Clarity1, None), + UnwrapErrRet("unwrap-err!", ClarityVersion::Clarity1, None), + Unwrap("unwrap-panic", ClarityVersion::Clarity1, None), + UnwrapErr("unwrap-err-panic", ClarityVersion::Clarity1, None), + Match("match", ClarityVersion::Clarity1, None), + TryRet("try!", ClarityVersion::Clarity1, None), + IsOkay("is-ok", ClarityVersion::Clarity1, None), + IsNone("is-none", ClarityVersion::Clarity1, None), + IsErr("is-err", ClarityVersion::Clarity1, None), + IsSome("is-some", ClarityVersion::Clarity1, None), + Filter("filter", ClarityVersion::Clarity1, None), + GetTokenBalance("ft-get-balance", ClarityVersion::Clarity1, None), + GetAssetOwner("nft-get-owner?", ClarityVersion::Clarity1, None), + TransferToken("ft-transfer?", ClarityVersion::Clarity1, None), + TransferAsset("nft-transfer?", ClarityVersion::Clarity1, None), + MintAsset("nft-mint?", ClarityVersion::Clarity1, None), + MintToken("ft-mint?", ClarityVersion::Clarity1, None), + GetTokenSupply("ft-get-supply", ClarityVersion::Clarity1, None), + BurnToken("ft-burn?", ClarityVersion::Clarity1, None), + BurnAsset("nft-burn?", ClarityVersion::Clarity1, None), + GetStxBalance("stx-get-balance", ClarityVersion::Clarity1, None), + StxTransfer("stx-transfer?", ClarityVersion::Clarity1, None), + StxTransferMemo("stx-transfer-memo?", ClarityVersion::Clarity2, None), + StxBurn("stx-burn?", ClarityVersion::Clarity1, None), + StxGetAccount("stx-account", ClarityVersion::Clarity2, None), + BitwiseAnd("bit-and", ClarityVersion::Clarity2, None), + BitwiseOr("bit-or", ClarityVersion::Clarity2, None), + BitwiseNot("bit-not", ClarityVersion::Clarity2, None), + BitwiseLShift("bit-shift-left", ClarityVersion::Clarity2, None), + BitwiseRShift("bit-shift-right", ClarityVersion::Clarity2, None), + BitwiseXor2("bit-xor", ClarityVersion::Clarity2, None), + Slice("slice?", ClarityVersion::Clarity2, None), + ToConsensusBuff("to-consensus-buff?", ClarityVersion::Clarity2, None), + FromConsensusBuff("from-consensus-buff?", ClarityVersion::Clarity2, None), + ReplaceAt("replace-at?", ClarityVersion::Clarity2, None), + GetStacksBlockInfo("get-stacks-block-info?", ClarityVersion::Clarity3, None), + GetTenureInfo("get-tenure-info?", ClarityVersion::Clarity3, None), }); /// @@ -421,6 +423,14 @@ pub fn lookup_reserved_functions(name: &str, version: &ClarityVersion) -> Option "special_get_burn_block_info", &database::special_get_burn_block_info, ), + GetStacksBlockInfo => SpecialFunction( + "special_get_stacks_block_info", + &database::special_get_stacks_block_info, + ), + GetTenureInfo => SpecialFunction( + "special_get_tenure_info", + &database::special_get_tenure_info, + ), ConsSome => NativeFunction( "native_some", NativeHandle::SingleArg(&options::native_some), diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 5662f2bb8a..ba11371d73 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -732,6 +732,22 @@ define_named_enum!(BurnBlockInfoProperty { PoxAddrs("pox-addrs"), }); +define_named_enum!(StacksBlockInfoProperty { + IdentityHeaderHash("id-header-hash"), + HeaderHash("header-hash"), + Time("time"), +}); + +define_named_enum!(TenureInfoProperty { + Time("time"), + VrfSeed("vrf-seed"), + BurnchainHeaderHash("burnchain-header-hash"), + MinerAddress("miner-address"), + MinerSpendWinner("miner-spend-winner"), + MinerSpendTotal("miner-spend-total"), + BlockReward("block-reward"), +}); + impl OptionalData { pub fn type_signature(&self) -> std::result::Result { let type_result = match self.data { @@ -806,6 +822,27 @@ impl BurnBlockInfoProperty { } } +impl StacksBlockInfoProperty { + pub fn type_result(&self) -> TypeSignature { + use self::StacksBlockInfoProperty::*; + match self { + Time => TypeSignature::UIntType, + IdentityHeaderHash | HeaderHash => BUFF_32.clone(), + } + } +} + +impl TenureInfoProperty { + pub fn type_result(&self) -> TypeSignature { + use self::TenureInfoProperty::*; + match self { + Time | MinerSpendWinner | MinerSpendTotal | BlockReward => TypeSignature::UIntType, + VrfSeed | BurnchainHeaderHash => BUFF_32.clone(), + MinerAddress => TypeSignature::PrincipalType, + } + } +} + impl PartialEq for ListData { fn eq(&self, other: &ListData) -> bool { self.data == other.data diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index d26ecd44ff..4c437d52cc 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -42,10 +42,6 @@ impl ClarityVersion { StacksEpochId::Epoch30 => ClarityVersion::Clarity3, } } - - pub fn uses_nakamoto_block_timestamp(&self) -> bool { - self >= &ClarityVersion::Clarity3 - } } impl FromStr for ClarityVersion { diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 32b9fcc07c..62b6a7d211 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -558,8 +558,6 @@ impl HttpResponse for RPCBlockProposalRequestHandler { preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let body_str = std::str::from_utf8(body) - .map_err(|e| Error::DecodeError(format!("Failed to parse body: {e}")))?; let response: BlockProposalResponse = parse_json(preamble, body)?; HttpResponsePayload::try_from_json(response) } From 7185f5788db72e9c295d6807d145971a3c8c0fb7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sun, 9 Jun 2024 08:18:26 -0500 Subject: [PATCH 0280/1400] feat: add specificity to the integration test --- .../chainstate/nakamoto/coordinator/tests.rs | 366 ++++++++---------- 1 file changed, 159 insertions(+), 207 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 6a995e720b..f3e4b45c4a 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . use std::collections::{HashMap, HashSet}; +use std::sync::Mutex; use clarity::vm::clarity::ClarityConnection; use clarity::vm::types::PrincipalData; @@ -690,7 +691,7 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { tenure_change_tx, coinbase_tx, &mut test_signers, - |miner, chainstate, sortdb, blocks_so_far| { + |_miner, chainstate, sortdb, blocks_so_far| { if blocks_so_far.len() < 10 { debug!("\n\nProduce block {}\n\n", blocks_so_far.len()); @@ -765,19 +766,99 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { peer.check_nakamoto_migration(); } +impl <'a> TestPeer <'a> { + pub fn mine_single_block_tenure( + &mut self, + sender_key: &StacksPrivateKey, + tenure_change_tx: &StacksTransaction, + coinbase_tx: &StacksTransaction, + after_block: G, + ) -> NakamotoBlock + where + G: FnMut(&mut NakamotoBlock) -> bool, + { + let sender_addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_key)); + let mut test_signers = self.config.test_signers.clone().unwrap(); + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + + // do a stx transfer in each block to a given recipient + let mut blocks_and_sizes = self.make_nakamoto_tenure_and( + tenure_change_tx.clone(), + coinbase_tx.clone(), + &mut test_signers, + |_miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() < 1 { + let account = get_account(chainstate, sortdb, &sender_addr); + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &sender_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![stx_transfer] + } else { + vec![] + } + }, + after_block, + ); + assert_eq!(blocks_and_sizes.len(), 1); + let block = blocks_and_sizes.pop().unwrap().0; + block + } + + pub fn single_block_tenure( + &mut self, + sender_key: &StacksPrivateKey, + mut after_burn_ops: F, + after_block: G, + ) -> (NakamotoBlock, u64, StacksTransaction, StacksTransaction) + where + F: FnMut(&mut Vec), + G: FnMut(&mut NakamotoBlock) -> bool, + { + let (mut burn_ops, mut tenure_change, miner_key) = + self.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + after_burn_ops(&mut burn_ops); + let (burn_height, _, consensus_hash) = self.next_burnchain_block(burn_ops.clone()); + let pox_constants = self.sortdb().pox_constants.clone(); + let first_burn_height = self.sortdb().first_block_height; + + info!( + "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", + pox_constants.is_in_prepare_phase(first_burn_height, burn_height), + pox_constants.is_reward_cycle_start(first_burn_height, burn_height) + ); + let vrf_proof = self.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = self + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = self.miner.make_nakamoto_coinbase(None, vrf_proof); + + let block = self.mine_single_block_tenure(sender_key, &tenure_change_tx, &coinbase_tx, after_block); + + (block, burn_height, tenure_change_tx, coinbase_tx) + } +} + + #[test] -// TODO: DRY this up. -// Cover the "find addr" by checking the invalid block cases -// with the specific addresses set incorrectly. +// Test PoX Reward and Punish treatment in nakamoto +// - create a 12 address PoX reward set +// - make a normal block commit, assert that the bitvec must contain 1s for those addresses +// - make a burn block commit, assert that the bitvec must contain 0s for those addresses fn pox_treatment() { let private_key = StacksPrivateKey::from_seed(&[2]); - let addr = StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(&private_key)], - ) - .unwrap(); + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key)); let num_stackers: u32 = 4; let mut signing_key_seed = num_stackers.to_be_bytes().to_vec(); @@ -808,121 +889,60 @@ fn pox_treatment() { let mut blocks = vec![]; let pox_constants = peer.sortdb().pox_constants.clone(); let first_burn_height = peer.sortdb().first_block_height; - let recipient_addr = - StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); // mine until we're at the start of the next reward phase (so we *know* // that the reward set contains entries) loop { - let (burn_ops, mut tenure_change, miner_key) = - peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); - let (burn_height, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - info!("Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", - pox_constants.is_in_prepare_phase(first_burn_height, burn_height), - pox_constants.is_reward_cycle_start(first_burn_height, burn_height) - ); - let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); - - tenure_change.tenure_consensus_hash = consensus_hash.clone(); - tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - - let tenure_change_tx = peer - .miner - .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); - - // do a stx transfer in each block to a given recipient - let blocks_and_sizes = peer.make_nakamoto_tenure( - tenure_change_tx, - coinbase_tx, - &mut test_signers, - |_miner, chainstate, sortdb, blocks_so_far| { - if blocks_so_far.len() < 1 { - info!("Produce nakamoto block {}", blocks_so_far.len()); - - let account = get_account(chainstate, sortdb, &addr); - let stx_transfer = make_token_transfer( - chainstate, - sortdb, - &private_key, - account.nonce, - 100, - 1, - &recipient_addr, - ); - - vec![stx_transfer] - } else { - vec![] - } - }, - ); - blocks.extend(blocks_and_sizes.into_iter().map(|(block, _, _)| block)); + let (block, burn_height, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| true); + blocks.push(block); if pox_constants.is_reward_cycle_start(first_burn_height, burn_height + 1) { break; } } + let mut expected_reward_set = vec![]; + for stacker in test_stackers.iter() { + let pox_addr = stacker.pox_address.as_ref().unwrap(); + (0..3).for_each(|_| expected_reward_set.push(pox_addr.clone())); + } + expected_reward_set.sort_by_key(|addr| addr.to_burnchain_repr()); + expected_reward_set.reverse(); + let pox_recipients = Mutex::new(vec![]); // The next block should be the start of a reward phase, so the PoX recipient should // be chosen. // // First: perform a normal block commit, and then try to mine a block with all zeros in the // bitvector. - let (burn_ops, mut tenure_change, miner_key) = - peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); - let (burn_height, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - info!( - "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", - pox_constants.is_in_prepare_phase(first_burn_height, burn_height), - pox_constants.is_reward_cycle_start(first_burn_height, burn_height) - ); - let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); - - tenure_change.tenure_consensus_hash = consensus_hash.clone(); - tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - - let tenure_change_tx = peer - .miner - .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); - - // do a stx transfer in each block to a given recipient - let mut blocks_and_sizes = peer.make_nakamoto_tenure_and( - tenure_change_tx.clone(), - coinbase_tx.clone(), - &mut test_signers, - |_miner, chainstate, sortdb, blocks_so_far| { - if blocks_so_far.len() < 1 { - info!("Produce nakamoto block {}", blocks_so_far.len()); - - let account = get_account(chainstate, sortdb, &addr); - let stx_transfer = make_token_transfer( - chainstate, - sortdb, - &private_key, - account.nonce, - 100, - 1, - &recipient_addr, - ); - - vec![stx_transfer] - } else { - vec![] - } + let (invalid_block, _, tenure_change_tx, coinbase_tx) = peer.single_block_tenure(&private_key, + |burn_ops| { + burn_ops.iter().for_each(|op| { + if let BlockstackOperationType::LeaderBlockCommit(ref commit) = op { + *pox_recipients.lock().unwrap() = commit.commit_outs.clone(); + } + }); }, |block| { - block.header.signer_bitvec = BitVec::zeros(12).unwrap(); + let pox_recipients = pox_recipients.lock().unwrap(); + assert_eq!(pox_recipients.len(), 2); + info!("Expected reward set: {:?}", expected_reward_set.iter().map(|x| x.to_burnchain_repr()).collect::>()); + let target_indexes = pox_recipients.iter().map( + |pox_addr| expected_reward_set.iter().enumerate().find_map(|(ix, rs_addr)| if rs_addr == pox_addr { Some(ix) } else { None }) + .unwrap() + ); + let mut bitvec = BitVec::ones(12).unwrap(); + target_indexes.for_each(|ix| { + let ix: u16 = ix.try_into().unwrap(); + bitvec.set(ix, false).unwrap(); + bitvec.set(1 + ix, false).unwrap(); + bitvec.set(2 + ix, false).unwrap(); + }); + block.header.signer_bitvec = bitvec; // don't try to process this block yet, just return it so that // we can assert the block error. false }, ); - assert_eq!(blocks_and_sizes.len(), 1); - let invalid_block = blocks_and_sizes.pop().unwrap().0; let processing_result = peer.try_process_block(&invalid_block).unwrap_err(); assert_eq!( processing_result.to_string(), @@ -935,30 +955,10 @@ fn pox_treatment() { // set the bitvec to a heterogenous one: either punish or // reward is acceptable, so this block should just process. - let blocks_and_sizes = peer.make_nakamoto_tenure_and( - tenure_change_tx, - coinbase_tx, - &mut test_signers, - |_miner, chainstate, sortdb, blocks_so_far| { - if blocks_so_far.len() < 1 { - info!("Produce nakamoto block {}", blocks_so_far.len()); - - let account = get_account(chainstate, sortdb, &addr); - let stx_transfer = make_token_transfer( - chainstate, - sortdb, - &private_key, - account.nonce, - 100, - 1, - &recipient_addr, - ); - - vec![stx_transfer] - } else { - vec![] - } - }, + let block = peer.mine_single_block_tenure( + &private_key, + &tenure_change_tx, + &coinbase_tx, |block| { // each stacker has 3 entries in the bitvec. // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 @@ -972,70 +972,42 @@ fn pox_treatment() { true }, ); - blocks.extend(blocks_and_sizes.into_iter().map(|(block, _, _)| { - info!("Processed block: {}", block.block_id()); - block - })); + blocks.push(block); // now we need to test punishment! - let (mut burn_ops, mut tenure_change, miner_key) = - peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); - burn_ops.iter_mut().for_each(|op| { - if let BlockstackOperationType::LeaderBlockCommit(ref mut commit) = op { - commit.commit_outs[0] = PoxAddress::standard_burn_address(false); - } - }); - let (burn_height, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - - info!( - "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", - pox_constants.is_in_prepare_phase(first_burn_height, burn_height), - pox_constants.is_reward_cycle_start(first_burn_height, burn_height) - ); - let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); - - tenure_change.tenure_consensus_hash = consensus_hash.clone(); - tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - - let tenure_change_tx = peer - .miner - .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); - - // do a stx transfer in each block to a given recipient - let mut blocks_and_sizes = peer.make_nakamoto_tenure_and( - tenure_change_tx.clone(), - coinbase_tx.clone(), - &mut test_signers, - |_miner, chainstate, sortdb, blocks_so_far| { - if blocks_so_far.len() < 1 { - info!("Produce nakamoto block {}", blocks_so_far.len()); - - let account = get_account(chainstate, sortdb, &addr); - let stx_transfer = make_token_transfer( - chainstate, - sortdb, - &private_key, - account.nonce, - 100, - 1, - &recipient_addr, - ); - - vec![stx_transfer] - } else { - vec![] + let pox_recipients = Mutex::new(vec![]); + let (invalid_block, _, tenure_change_tx, coinbase_tx) = peer.single_block_tenure( + &private_key, + |burn_ops| { + burn_ops.iter_mut().for_each(|op| { + if let BlockstackOperationType::LeaderBlockCommit(ref mut commit) = op { + *pox_recipients.lock().unwrap() = vec![commit.commit_outs[0].clone()]; + commit.commit_outs[0] = PoxAddress::standard_burn_address(false); } + }); }, |block| { - block.header.signer_bitvec = BitVec::ones(12).unwrap(); + let pox_recipients = pox_recipients.lock().unwrap(); + assert_eq!(pox_recipients.len(), 1); + info!("Expected reward set: {:?}", expected_reward_set.iter().map(|x| x.to_burnchain_repr()).collect::>()); + let target_indexes = pox_recipients.iter().map( + |pox_addr| expected_reward_set.iter().enumerate().find_map(|(ix, rs_addr)| if rs_addr == pox_addr { Some(ix) } else { None }) + .unwrap() + ); + let mut bitvec = BitVec::zeros(12).unwrap(); + target_indexes.for_each(|ix| { + let ix: u16 = ix.try_into().unwrap(); + bitvec.set(ix, true).unwrap(); + bitvec.set(1 + ix, true).unwrap(); + bitvec.set(2 + ix, true).unwrap(); + }); + + block.header.signer_bitvec = bitvec; // don't try to process this block yet, just return it so that // we can assert the block error. false }, ); - assert_eq!(blocks_and_sizes.len(), 1); - let invalid_block = blocks_and_sizes.pop().unwrap().0; let processing_result = peer.try_process_block(&invalid_block).unwrap_err(); assert_eq!( processing_result.to_string(), @@ -1048,30 +1020,10 @@ fn pox_treatment() { // set the bitvec to a heterogenous one: either punish or // reward is acceptable, so this block should just process. - let blocks_and_sizes = peer.make_nakamoto_tenure_and( - tenure_change_tx, - coinbase_tx, - &mut test_signers, - |_miner, chainstate, sortdb, blocks_so_far| { - if blocks_so_far.len() < 1 { - info!("Produce nakamoto block {}", blocks_so_far.len()); - - let account = get_account(chainstate, sortdb, &addr); - let stx_transfer = make_token_transfer( - chainstate, - sortdb, - &private_key, - account.nonce, - 100, - 1, - &recipient_addr, - ); - - vec![stx_transfer] - } else { - vec![] - } - }, + let block = peer.mine_single_block_tenure( + &private_key, + &tenure_change_tx, + &coinbase_tx, |block| { // each stacker has 3 entries in the bitvec. // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 @@ -1085,7 +1037,7 @@ fn pox_treatment() { true }, ); - blocks.extend(blocks_and_sizes.into_iter().map(|(block, _, _)| block)); + blocks.push(block); let tip = { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; From dc9e01966f386b2c0b982165c8b03fe8a4e45c79 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Mon, 10 Jun 2024 11:32:14 +0300 Subject: [PATCH 0281/1400] Fix PoX-4 stateful prop tests comments This commit brings typo fixes for all the comments inside the PoX-4 stateful property tests. --- .../tests/pox-4/pox_AllowContractCallerCommand.ts | 1 - .../tests/pox-4/pox_DelegateStackExtendCommand.ts | 5 +++-- .../tests/pox-4/pox_DelegateStackExtendCommand_Err.ts | 4 ++-- .../tests/pox-4/pox_DelegateStackIncreaseCommand.ts | 4 ++-- .../tests/pox-4/pox_DelegateStackIncreaseCommand_Err.ts | 2 +- .../tests/pox-4/pox_DelegateStackStxCommand.ts | 4 ++-- .../tests/pox-4/pox_DelegateStackStxCommand_Err.ts | 2 +- .../tests/pox-4/pox_DelegateStxCommand.ts | 2 -- .../tests/pox-4/pox_RevokeDelegateStxCommand.ts | 2 +- .../tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts | 2 +- .../tests/pox-4/pox_StackAggregationCommitAuthCommand.ts | 3 ++- .../pox-4/pox_StackAggregationCommitAuthCommand_Err.ts | 3 ++- .../pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts | 4 ++-- .../pox_StackAggregationCommitIndexedAuthCommand_Err.ts | 4 ++-- .../pox-4/pox_StackAggregationCommitIndexedSigCommand.ts | 4 ++-- .../pox_StackAggregationCommitIndexedSigCommand_Err.ts | 4 ++-- .../tests/pox-4/pox_StackAggregationCommitSigCommand.ts | 3 ++- .../tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts | 3 ++- .../tests/pox-4/pox_StackExtendAuthCommand.ts | 2 +- .../tests/pox-4/pox_StackExtendAuthCommand_Err.ts | 6 +++++- .../tests/pox-4/pox_StackExtendSigCommand.ts | 2 +- .../tests/pox-4/pox_StackExtendSigCommand_Err.ts | 2 +- .../tests/pox-4/pox_StackIncreaseAuthCommand.ts | 2 +- .../tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts | 2 +- .../tests/pox-4/pox_StackIncreaseSigCommand.ts | 2 +- .../tests/pox-4/pox_StackIncreaseSigCommand_Err.ts | 2 +- .../tests/pox-4/pox_StackStxSigCommand_Err.ts | 2 +- 27 files changed, 42 insertions(+), 36 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts index 141676cdae..931326ce1f 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_AllowContractCallerCommand.ts @@ -77,7 +77,6 @@ export class AllowContractCallerCommand implements PoxCommand { const callerToAllow = model.stackers.get(this.allowanceTo.stxAddress)!; // Update model so that we know this wallet has authorized a contract-caller. - // If the caller is already allowed, there's no need to add it again. const callerToAllowIndexInAllowedList = wallet.allowedContractCallers .indexOf(this.allowanceTo.stxAddress); diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts index 2875551342..0151967932 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts @@ -37,8 +37,9 @@ export class DelegateStackExtendCommand implements PoxCommand { * height as a Pool Operator on behalf of a Stacker. * * @param operator - Represents the Pool Operator's wallet. - * @param stacker - Represents the STacker's wallet. - * @param extendCount - Represents the cycles to be expended. + * @param stacker - Represents the Stacker's wallet. + * @param extendCount - Represents the number of cycles to extend + * the stack for. * @param currentCycle - Represents the current PoX reward cycle. */ constructor( diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts index 830fb7d182..680532bef6 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand_Err.ts @@ -27,8 +27,8 @@ export class DelegateStackExtendCommand_Err implements PoxCommand { * height as a Pool Operator on behalf of a Stacker. * * @param operator - Represents the Pool Operator's wallet. - * @param stacker - Represents the STacker's wallet. - * @param extendCount - Represents the cycles to be expended. + * @param stacker - Represents the Stacker's wallet. + * @param extendCount - Represents the number of cycles to extend the stack for. * @param currentCycle - Represents the current PoX reward cycle. * @param checkFunc - A function to check constraints for running this command. * @param errorCode - The expected error code when running this command. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts index b9ec4a837c..6a5837f48b 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts @@ -10,7 +10,7 @@ import { expect } from "vitest"; import { Cl } from "@stacks/transactions"; /** - * The DelegateStackIncreaseCommand allows a pool operator to + * The `DelegateStackIncreaseCommand` allows a pool operator to * increase an active stacking lock, issuing a "partial commitment" * for the increased cycles. * @@ -33,7 +33,7 @@ export class DelegateStackIncreaseCommand implements PoxCommand { readonly increaseBy: number; /** - * Constructs a DelegateStackIncreaseCommand to increase the uSTX amount + * Constructs a `DelegateStackIncreaseCommand` to increase the uSTX amount * previously locked on behalf of a Stacker. * * @param operator - Represents the Pool Operator's wallet. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand_Err.ts index d54853dcb6..fe33805264 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand_Err.ts @@ -22,7 +22,7 @@ export class DelegateStackIncreaseCommand_Err implements PoxCommand { readonly errorCode: number; /** - * Constructs a DelegateStackIncreaseCommand_Err to increase the uSTX amount + * Constructs a `DelegateStackIncreaseCommand_Err` to increase the uSTX amount * previously locked on behalf of a Stacker. * * @param operator - Represents the Pool Operator's wallet. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts index e3d9dd25c1..c284975ae0 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts @@ -26,7 +26,7 @@ import { currentCycle } from "./pox_Commands.ts"; * `get-stacking-minimum` function at the time of this call. * - The Stacker cannot currently be engaged in another stacking operation. * - The Stacker has to currently be delegating to the Operator. - * - The stacked STX amount should be less than or equal to the delegated + * - The stacked uSTX amount should be less than or equal to the delegated * amount. * - The stacked uSTX amount should be less than or equal to the Stacker's * balance. @@ -47,7 +47,7 @@ export class DelegateStackStxCommand implements PoxCommand { * on behalf of a Stacker. * * @param operator - Represents the Pool Operator's wallet. - * @param stacker - Represents the STacker's wallet. + * @param stacker - Represents the Stacker's wallet. * @param period - Number of reward cycles to lock uSTX. * @param amountUstx - The uSTX amount stacked by the Operator on behalf * of the Stacker. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts index b4e5a491dd..d064d8b4cd 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts @@ -28,7 +28,7 @@ export class DelegateStackStxCommand_Err implements PoxCommand { * on behalf of a Stacker. * * @param operator - Represents the Pool Operator's wallet. - * @param stacker - Represents the STacker's wallet. + * @param stacker - Represents the Stacker's wallet. * @param period - Number of reward cycles to lock uSTX. * @param amountUstx - The uSTX amount stacked by the Operator on behalf * of the Stacker. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts index e70d466c9d..836b7d5162 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts @@ -24,8 +24,6 @@ import { * * Constraints for running this command include: * - The Stacker cannot currently be a delegator in another delegation. - * - The PoX address provided should have a valid version (between 0 and 6 - * inclusive). */ export class DelegateStxCommand implements PoxCommand { readonly wallet: Wallet; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts index c39a1a5e42..2c3593f27d 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts @@ -21,7 +21,7 @@ export class RevokeDelegateStxCommand implements PoxCommand { readonly wallet: Wallet; /** - * Constructs a RevokeDelegateStxCommand to revoke delegate uSTX for stacking. + * Constructs a `RevokeDelegateStxCommand` to revoke a stacking delegation. * * @param wallet - Represents the Stacker's wallet. */ diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts index 60b3439e8e..a7a4cb0a6e 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand_Err.ts @@ -19,7 +19,7 @@ export class RevokeDelegateStxCommand_Err implements PoxCommand { readonly errorCode: number; /** - * Constructs a `RevokeDelegateStxCommand_Err` to revoke delegate uSTX for stacking. + * Constructs a `RevokeDelegateStxCommand_Err` to revoke a stacking delegation. * * @param wallet - Represents the Stacker's wallet. * @param checkFunc - A function to check constraints for running this command. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts index 62622f4bd3..999aa2f5b0 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts @@ -31,7 +31,8 @@ export class StackAggregationCommitAuthCommand implements PoxCommand { readonly authId: number; /** - * Constructs a `StackAggregationCommitAuthCommand` to lock uSTX for stacking. + * Constructs a `StackAggregationCommitAuthCommand` to commit partially + * locked uSTX. * * @param operator - Represents the `Operator`'s wallet. * @param authId - Unique `auth-id` for the authorization. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts index 3580061fae..ddc986f1a4 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand_Err.ts @@ -23,7 +23,8 @@ export class StackAggregationCommitAuthCommand_Err implements PoxCommand { readonly errorCode: number; /** - * Constructs a `StackAggregationCommitAuthCommand_Err` to lock uSTX for stacking. + * Constructs a `StackAggregationCommitAuthCommand_Err` to commit partially + * locked uSTX. * * @param operator - Represents the `Operator`'s wallet. * @param authId - Unique `auth-id` for the authorization. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts index cfafccc674..926c923135 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts @@ -33,8 +33,8 @@ export class StackAggregationCommitIndexedAuthCommand implements PoxCommand { readonly authId: number; /** - * Constructs a `StackAggregationCommitIndexedAuthCommand` to lock uSTX - * for stacking. + * Constructs a `StackAggregationCommitIndexedAuthCommand` to commit partially + * locked uSTX. * * @param operator - Represents the `Operator`'s wallet. * @param authId - Unique `auth-id` for the authorization. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts index 92ebfa0d19..1c891df270 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand_Err.ts @@ -24,8 +24,8 @@ export class StackAggregationCommitIndexedAuthCommand_Err readonly errorCode: number; /** - * Constructs a `StackAggregationCommitIndexedAuthCommand_Err` to lock uSTX - * for stacking. + * Constructs a `StackAggregationCommitIndexedAuthCommand_Err` to commit partially + * locked uSTX. * * @param operator - Represents the `Operator`'s wallet. * @param authId - Unique `auth-id` for the authorization. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts index 59707e21f4..712706d156 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts @@ -33,8 +33,8 @@ export class StackAggregationCommitIndexedSigCommand implements PoxCommand { readonly authId: number; /** - * Constructs a `StackAggregationCommitIndexedSigCommand` to lock uSTX - * for stacking. + * Constructs a `StackAggregationCommitIndexedSigCommand` to commit partially + * locked uSTX. * * @param operator - Represents the `Operator`'s wallet. * @param authId - Unique `auth-id` for the authorization. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts index 22b5a4f923..045142f3b4 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand_Err.ts @@ -23,8 +23,8 @@ export class StackAggregationCommitIndexedSigCommand_Err implements PoxCommand { readonly errorCode: number; /** - * Constructs a `StackAggregationCommitIndexedSigCommand_Err` to lock uSTX - * for stacking. + * Constructs a `StackAggregationCommitIndexedSigCommand_Err` to commit partially + * locked uSTX. * * @param operator - Represents the `Operator`'s wallet. * @param authId - Unique `auth-id` for the authorization. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts index 32fe552477..cda1d9cd96 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts @@ -31,7 +31,8 @@ export class StackAggregationCommitSigCommand implements PoxCommand { readonly authId: number; /** - * Constructs a `StackAggregationCommitSigCommand` to lock uSTX for stacking. + * Constructs a `StackAggregationCommitSigCommand` to commit partially + * locked uSTX. * * @param operator - Represents the `Operator`'s wallet. * @param authId - Unique `auth-id` for the authorization. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts index ca53b56d1c..1238a4f32b 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand_Err.ts @@ -23,7 +23,8 @@ export class StackAggregationCommitSigCommand_Err implements PoxCommand { readonly errorCode: number; /** - * Constructs a `StackAggregationCommitAuthCommand_Err` to lock uSTX for stacking. + * Constructs a `StackAggregationCommitAuthCommand_Err` to commit partially + * locked uSTX. * * @param operator - Represents the `Operator`'s wallet. * @param authId - Unique `auth-id` for the authorization. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts index fa796673ea..13a835347f 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts @@ -16,7 +16,7 @@ export class StackExtendAuthCommand implements PoxCommand { readonly currentCycle: number; /** - * Constructs a `StackExtendAuthCommand` to lock uSTX for stacking. + * Constructs a `StackExtendAuthCommand` to extend an active stacking lock. * * This command calls `stack-extend` using an `authorization`. * diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts index e3deed040c..46b8ce173e 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand_Err.ts @@ -19,7 +19,7 @@ export class StackExtendAuthCommand_Err implements PoxCommand { readonly errorCode: number; /** - * Constructs a `StackExtendAuthCommand` to lock uSTX for stacking. + * Constructs a `StackExtendAuthCommand_Err` to extend an active stacking lock. * * This command calls `stack-extend` using an `authorization`. * @@ -52,6 +52,10 @@ export class StackExtendAuthCommand_Err implements PoxCommand { const currentRewCycle = currentCycle(real.network); const stacker = model.stackers.get(this.wallet.stxAddress)!; + // Include the authorization and the `stack-extend` transactions in a single + // block. This way we ensure both the authorization and the stack-extend + // transactions are called during the same reward cycle, so the authorization + // currentRewCycle param is relevant for the upcoming stack-extend call. const block = real.network.mineBlock([ tx.callPublicFn( "ST000000000000000000002AMW42H.pox-4", diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts index 56848d9448..5b23d021f6 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts @@ -15,7 +15,7 @@ export class StackExtendSigCommand implements PoxCommand { readonly currentCycle: number; /** - * Constructs a `StackExtendSigCommand` to lock uSTX for stacking. + * Constructs a `StackExtendSigCommand` to extend an active stacking lock. * * This command calls `stack-extend` using a `signature`. * diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts index 185f2796d1..9c37b96a60 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand_Err.ts @@ -18,7 +18,7 @@ export class StackExtendSigCommand_Err implements PoxCommand { readonly errorCode: number; /** - * Constructs a `StackExtendSigCommand` to lock uSTX for stacking. + * Constructs a `StackExtendSigCommand_Err` to extend an active stacking lock. * * This command calls `stack-extend` using a `signature`. * diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts index 127ea1d984..cd802b1b88 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts @@ -25,7 +25,7 @@ export class StackIncreaseAuthCommand implements PoxCommand { readonly authId: number; /** - * Constructs a `StackIncreaseAuthCommand` to increase lock uSTX for stacking. + * Constructs a `StackIncreaseAuthCommand` to increase the locked uSTX amount. * * @param wallet - Represents the Stacker's wallet. * @param increaseBy - Represents the locked amount to be increased by. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts index a74aa3c211..5722b50236 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand_Err.ts @@ -18,7 +18,7 @@ export class StackIncreaseAuthCommand_Err implements PoxCommand { readonly errorCode: number; /** - * Constructs a `StackIncreaseAuthCommand` to increase lock uSTX for stacking. + * Constructs a `StackIncreaseAuthCommand_Err` to increase the locked uSTX amount. * * @param wallet - Represents the Stacker's wallet. * @param increaseBy - Represents the locked amount to be increased by. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts index ec51e3d7e4..1bd50691b6 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts @@ -30,7 +30,7 @@ export class StackIncreaseSigCommand implements PoxCommand { readonly authId: number; /** - * Constructs a `StackIncreaseSigCommand` to lock uSTX for stacking. + * Constructs a `StackIncreaseSigCommand` to increase the locked uSTX amount. * * @param wallet - Represents the Stacker's wallet. * @param increaseBy - Represents the locked amount to be increased by. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts index 4a122784b3..4d0297b624 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand_Err.ts @@ -23,7 +23,7 @@ export class StackIncreaseSigCommand_Err implements PoxCommand { readonly errorCode: number; /** - * Constructs a `StackIncreaseSigCommand_Err` to lock uSTX for stacking. + * Constructs a `StackIncreaseSigCommand_Err` to increase the locked uSTX amount. * * @param wallet - Represents the Stacker's wallet. * @param increaseBy - Represents the locked amount to be increased by. diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts index f9c2cdc8d4..919fa56c76 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand_Err.ts @@ -30,7 +30,7 @@ export class StackStxSigCommand_Err implements PoxCommand { readonly errorCode: number; /** - * Constructs a `StackStxSigCommand` to lock uSTX for stacking. + * Constructs a `StackStxSigCommand_Err` to lock uSTX for stacking. * * @param wallet - Represents the Stacker's wallet. * @param authId - Unique auth-id for the authorization. From 98d21eaa9941d55c02722b7faf01364ca9711b32 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 10 Jun 2024 08:19:57 -0400 Subject: [PATCH 0282/1400] test: fix doc example for `get-stacks-block-info?` --- clarity/src/vm/docs/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 07edb331fa..5ae1063f1b 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -1833,7 +1833,7 @@ the mining of this block started, but is not guaranteed to be accurate. This tim - Greater than the timestamp of the previous block - Less than 15 seconds into the future (according to their own local clocks) ", - example: "(get-stacks-block-info? time u0) ;; Returns (some u1557860301) + example: "(get-stacks-block-info? time u0) ;; Returns (some u1557860302) (get-stacks-block-info? header-hash u0) ;; Returns (some 0x374708fff7719dd5979ec875d56cd2286f6d3cf7ec317a3b25632aab28ec37bb) " }; From 89b69991b97e8979e8b18eda244478ce7ae4f3e1 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 10 Jun 2024 10:28:29 -0500 Subject: [PATCH 0283/1400] feat: move bitvec check to setup_block (now miner and processing paths both perform the check) --- .../chainstate/nakamoto/coordinator/mod.rs | 60 +++++++++++- .../chainstate/nakamoto/coordinator/tests.rs | 95 ++++++++++++++----- stackslib/src/chainstate/nakamoto/miner.rs | 67 ++++++++++++- stackslib/src/chainstate/nakamoto/mod.rs | 21 ++-- .../src/chainstate/nakamoto/tests/node.rs | 22 ++++- 5 files changed, 225 insertions(+), 40 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index ab324012c8..9ac63ce6e8 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -17,8 +17,10 @@ use std::collections::VecDeque; use std::sync::{Arc, Mutex}; +use clarity::boot_util::boot_code_id; +use clarity::vm::ast::ASTRules; use clarity::vm::clarity::ClarityConnection; -use clarity::vm::database::BurnStateDB; +use clarity::vm::database::{BurnStateDB, HeadersDB}; use clarity::vm::types::PrincipalData; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, @@ -28,7 +30,7 @@ use stacks_common::types::{StacksEpoch, StacksEpochId}; use crate::burnchains::db::{BurnchainBlockData, BurnchainDB, BurnchainHeaderReader}; use crate::burnchains::{Burnchain, BurnchainBlockHeader}; -use crate::chainstate::burn::db::sortdb::{get_ancestor_sort_id, SortitionDB}; +use crate::chainstate::burn::db::sortdb::{get_ancestor_sort_id, SortitionDB, SortitionHandleConn}; use crate::chainstate::burn::operations::leader_block_commit::RewardSetInfo; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::comm::{ @@ -43,8 +45,10 @@ use crate::chainstate::coordinator::{ use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::boot::{RewardSet, SIGNERS_NAME}; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; +use crate::chainstate::stacks::index::marf::MarfConnection; use crate::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use crate::chainstate::stacks::Error as ChainstateError; +use crate::clarity_vm::database::HeadersDBConn; use crate::cost_estimates::{CostEstimator, FeeEstimator}; use crate::monitoring::increment_stx_blocks_processed_counter; use crate::net::Error as NetError; @@ -92,6 +96,7 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { .expect("FATAL: no reward cycle for burn height"); self.read_reward_set_nakamoto_of_cycle(cycle, chainstate, sortdb, block_id, debug_log) } + /// Read a reward_set written while updating .signers at a given cycle_id /// `debug_log` should be set to true if the reward set loading should /// log messages as `debug!` instead of `error!` or `info!`. This allows @@ -129,6 +134,57 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { return Err(Error::PoXAnchorBlockRequired); }; + self.reward_reward_set_at_calculated_block( + coinbase_height_of_calculation, + chainstate, + block_id, + debug_log, + ) + } + + pub fn get_height_of_pox_calculation( + &self, + cycle: u64, + chainstate: &mut StacksChainState, + sort_handle: &SortitionHandleConn, + block_id: &StacksBlockId, + ) -> Result { + let Some(coinbase_height_of_calculation) = chainstate + .clarity_state + .eval_read_only( + block_id, + &HeadersDBConn(chainstate.state_index.sqlite_conn()), + sort_handle, + &boot_code_id(SIGNERS_NAME, chainstate.mainnet), + &format!("(map-get? cycle-set-height u{})", cycle), + ASTRules::PrecheckSize, + ) + .map_err(ChainstateError::ClarityError)? + .expect_optional() + .map_err(|e| Error::ChainstateError(e.into()))? + .map(|x| { + let as_u128 = x.expect_u128()?; + Ok(u64::try_from(as_u128).expect("FATAL: block height exceeded u64")) + }) + .transpose() + .map_err(|e| Error::ChainstateError(ChainstateError::ClarityError(e)))? + else { + error!( + "The reward set was not written to .signers before it was needed by Nakamoto"; + "cycle_number" => cycle, + ); + return Err(Error::PoXAnchorBlockRequired); + }; + Ok(coinbase_height_of_calculation) + } + + pub fn reward_reward_set_at_calculated_block( + &self, + coinbase_height_of_calculation: u64, + chainstate: &mut StacksChainState, + block_id: &StacksBlockId, + debug_log: bool, + ) -> Result { let Some(reward_set_block) = NakamotoChainState::get_header_by_coinbase_height( &mut chainstate.index_tx_begin()?, block_id, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index f3e4b45c4a..615b03721a 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -39,6 +39,7 @@ use wsts::curve::point::Point; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::operations::{BlockstackOperationType, LeaderBlockCommitOp}; use crate::chainstate::coordinator::tests::{p2pkh_from, pox_addr_from}; +use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; @@ -766,15 +767,17 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { peer.check_nakamoto_migration(); } -impl <'a> TestPeer <'a> { - pub fn mine_single_block_tenure( +impl<'a> TestPeer<'a> { + pub fn mine_single_block_tenure( &mut self, sender_key: &StacksPrivateKey, tenure_change_tx: &StacksTransaction, coinbase_tx: &StacksTransaction, + miner_setup: F, after_block: G, ) -> NakamotoBlock where + F: FnMut(&mut NakamotoBlockBuilder), G: FnMut(&mut NakamotoBlock) -> bool, { let sender_addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_key)); @@ -787,6 +790,7 @@ impl <'a> TestPeer <'a> { tenure_change_tx.clone(), coinbase_tx.clone(), &mut test_signers, + miner_setup, |_miner, chainstate, sortdb, blocks_so_far| { if blocks_so_far.len() < 1 { let account = get_account(chainstate, sortdb, &sender_addr); @@ -812,13 +816,15 @@ impl <'a> TestPeer <'a> { block } - pub fn single_block_tenure( + pub fn single_block_tenure( &mut self, sender_key: &StacksPrivateKey, + miner_setup: S, mut after_burn_ops: F, after_block: G, ) -> (NakamotoBlock, u64, StacksTransaction, StacksTransaction) where + S: FnMut(&mut NakamotoBlockBuilder), F: FnMut(&mut Vec), G: FnMut(&mut NakamotoBlock) -> bool, { @@ -844,13 +850,18 @@ impl <'a> TestPeer <'a> { .make_nakamoto_tenure_change(tenure_change.clone()); let coinbase_tx = self.miner.make_nakamoto_coinbase(None, vrf_proof); - let block = self.mine_single_block_tenure(sender_key, &tenure_change_tx, &coinbase_tx, after_block); + let block = self.mine_single_block_tenure( + sender_key, + &tenure_change_tx, + &coinbase_tx, + miner_setup, + after_block, + ); (block, burn_height, tenure_change_tx, coinbase_tx) } } - #[test] // Test PoX Reward and Punish treatment in nakamoto // - create a 12 address PoX reward set @@ -893,7 +904,8 @@ fn pox_treatment() { // mine until we're at the start of the next reward phase (so we *know* // that the reward set contains entries) loop { - let (block, burn_height, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| true); + let (block, burn_height, ..) = + peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); blocks.push(block); if pox_constants.is_reward_cycle_start(first_burn_height, burn_height + 1) { @@ -909,12 +921,15 @@ fn pox_treatment() { expected_reward_set.sort_by_key(|addr| addr.to_burnchain_repr()); expected_reward_set.reverse(); let pox_recipients = Mutex::new(vec![]); + info!("Starting the test... beginning with an reward commit"); // The next block should be the start of a reward phase, so the PoX recipient should // be chosen. // // First: perform a normal block commit, and then try to mine a block with all zeros in the // bitvector. - let (invalid_block, _, tenure_change_tx, coinbase_tx) = peer.single_block_tenure(&private_key, + let (invalid_block, _, tenure_change_tx, coinbase_tx) = peer.single_block_tenure( + &private_key, + |_| {}, |burn_ops| { burn_ops.iter().for_each(|op| { if let BlockstackOperationType::LeaderBlockCommit(ref commit) = op { @@ -925,11 +940,20 @@ fn pox_treatment() { |block| { let pox_recipients = pox_recipients.lock().unwrap(); assert_eq!(pox_recipients.len(), 2); - info!("Expected reward set: {:?}", expected_reward_set.iter().map(|x| x.to_burnchain_repr()).collect::>()); - let target_indexes = pox_recipients.iter().map( - |pox_addr| expected_reward_set.iter().enumerate().find_map(|(ix, rs_addr)| if rs_addr == pox_addr { Some(ix) } else { None }) - .unwrap() + info!( + "Expected reward set: {:?}", + expected_reward_set + .iter() + .map(|x| x.to_burnchain_repr()) + .collect::>() ); + let target_indexes = pox_recipients.iter().map(|pox_addr| { + expected_reward_set + .iter() + .enumerate() + .find_map(|(ix, rs_addr)| if rs_addr == pox_addr { Some(ix) } else { None }) + .unwrap() + }); let mut bitvec = BitVec::ones(12).unwrap(); target_indexes.for_each(|ix| { let ix: u16 = ix.try_into().unwrap(); @@ -959,6 +983,7 @@ fn pox_treatment() { &private_key, &tenure_change_tx, &coinbase_tx, + |_| {}, |block| { // each stacker has 3 entries in the bitvec. // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 @@ -975,25 +1000,47 @@ fn pox_treatment() { blocks.push(block); // now we need to test punishment! + info!("Testing a punish commit"); let pox_recipients = Mutex::new(vec![]); let (invalid_block, _, tenure_change_tx, coinbase_tx) = peer.single_block_tenure( &private_key, + |miner| { + // we want the miner to finish assembling the block, and then we'll + // alter the bitvec before it signs the block (in a subsequent closure). + // this way, we can test the block processing behavior. + miner.header.signer_bitvec = BitVec::try_from( + [ + false, false, true, false, false, true, false, false, true, false, false, true, + ] + .as_slice(), + ) + .unwrap(); + }, |burn_ops| { - burn_ops.iter_mut().for_each(|op| { - if let BlockstackOperationType::LeaderBlockCommit(ref mut commit) = op { - *pox_recipients.lock().unwrap() = vec![commit.commit_outs[0].clone()]; - commit.commit_outs[0] = PoxAddress::standard_burn_address(false); - } - }); + burn_ops.iter_mut().for_each(|op| { + if let BlockstackOperationType::LeaderBlockCommit(ref mut commit) = op { + *pox_recipients.lock().unwrap() = vec![commit.commit_outs[0].clone()]; + commit.commit_outs[0] = PoxAddress::standard_burn_address(false); + } + }); }, |block| { let pox_recipients = pox_recipients.lock().unwrap(); assert_eq!(pox_recipients.len(), 1); - info!("Expected reward set: {:?}", expected_reward_set.iter().map(|x| x.to_burnchain_repr()).collect::>()); - let target_indexes = pox_recipients.iter().map( - |pox_addr| expected_reward_set.iter().enumerate().find_map(|(ix, rs_addr)| if rs_addr == pox_addr { Some(ix) } else { None }) - .unwrap() + info!( + "Expected reward set: {:?}", + expected_reward_set + .iter() + .map(|x| x.to_burnchain_repr()) + .collect::>() ); + let target_indexes = pox_recipients.iter().map(|pox_addr| { + expected_reward_set + .iter() + .enumerate() + .find_map(|(ix, rs_addr)| if rs_addr == pox_addr { Some(ix) } else { None }) + .unwrap() + }); let mut bitvec = BitVec::zeros(12).unwrap(); target_indexes.for_each(|ix| { let ix: u16 = ix.try_into().unwrap(); @@ -1024,18 +1071,18 @@ fn pox_treatment() { &private_key, &tenure_change_tx, &coinbase_tx, - |block| { + |miner| { // each stacker has 3 entries in the bitvec. // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 - block.header.signer_bitvec = BitVec::try_from( + miner.header.signer_bitvec = BitVec::try_from( [ false, false, true, false, false, true, false, false, true, false, false, true, ] .as_slice(), ) .unwrap(); - true }, + |_block| true, ); blocks.push(block); diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index da1a7af570..5fc1724b3f 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -46,13 +46,14 @@ use crate::chainstate::burn::db::sortdb::{ }; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; +use crate::chainstate::coordinator::OnChainRewardSetProvider; use crate::chainstate::nakamoto::{ MaturedMinerRewards, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, SetupBlockResult, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::MINERS_NAME; use crate::chainstate::stacks::db::accounts::MinerReward; -use crate::chainstate::stacks::db::blocks::MemPoolRejection; +use crate::chainstate::stacks::db::blocks::{DummyEventDispatcher, MemPoolRejection}; use crate::chainstate::stacks::db::transactions::{ handle_clarity_runtime_error, ClarityRuntimeTxError, }; @@ -121,7 +122,7 @@ pub struct NakamotoBlockBuilder { /// transactions selected txs: Vec, /// header we're filling in - header: NakamotoBlockHeader, + pub header: NakamotoBlockHeader, } pub struct MinerTenureInfo<'a> { @@ -138,6 +139,8 @@ pub struct MinerTenureInfo<'a> { pub parent_burn_block_height: u32, pub coinbase_height: u64, pub cause: Option, + pub active_reward_set: boot::RewardSet, + pub tenure_block_commit: LeaderBlockCommitOp, } impl NakamotoBlockBuilder { @@ -228,6 +231,61 @@ impl NakamotoBlockBuilder { ) -> Result, Error> { debug!("Nakamoto miner tenure begin"); + let Some(tenure_election_sn) = + SortitionDB::get_block_snapshot_consensus(&burn_dbconn, &self.header.consensus_hash)? + else { + warn!("Could not find sortition snapshot for burn block that elected the miner"; + "consensus_hash" => %self.header.consensus_hash); + return Err(Error::NoSuchBlockError); + }; + let Some(tenure_block_commit) = SortitionDB::get_block_commit( + &burn_dbconn, + &tenure_election_sn.winning_block_txid, + &tenure_election_sn.sortition_id, + )? + else { + warn!("Could not find winning block commit for burn block that elected the miner"; + "consensus_hash" => %self.header.consensus_hash, + "winning_txid" => %tenure_election_sn.winning_block_txid); + return Err(Error::NoSuchBlockError); + }; + + let elected_height = tenure_election_sn.block_height; + let elected_in_cycle = burn_dbconn + .context + .pox_constants + .block_height_to_reward_cycle(burn_dbconn.context.first_block_height, elected_height) + .ok_or_else(|| { + Error::InvalidStacksBlock( + "Elected in block height before first_block_height".into(), + ) + })?; + let rs_provider = OnChainRewardSetProvider::(None); + let coinbase_height_of_calc = rs_provider.get_height_of_pox_calculation( + elected_in_cycle, + chainstate, + burn_dbconn, + &self.header.parent_block_id, + ).map_err(|e| { + warn!( + "Cannot process Nakamoto block: could not load reward set that elected the block"; + "err" => ?e, + ); + Error::NoSuchBlockError + })?; + let active_reward_set = rs_provider.reward_reward_set_at_calculated_block( + coinbase_height_of_calc, + chainstate, + &self.header.parent_block_id, + true, + ).map_err(|e| { + warn!( + "Cannot process Nakamoto block: could not load reward set that elected the block"; + "err" => ?e, + ); + Error::NoSuchBlockError + })?; + // must build off of the header's consensus hash as the burnchain view, not the canonical_tip_bhh: let burn_sn = SortitionDB::get_block_snapshot_consensus(burn_dbconn.conn(), &self.header.consensus_hash)? .ok_or_else(|| { @@ -289,6 +347,8 @@ impl NakamotoBlockBuilder { parent_burn_block_height: chain_tip.burn_header_height, cause, coinbase_height, + active_reward_set, + tenure_block_commit, }) } @@ -321,6 +381,9 @@ impl NakamotoBlockBuilder { info.cause == Some(TenureChangeCause::BlockFound), info.coinbase_height, info.cause == Some(TenureChangeCause::Extended), + &self.header.signer_bitvec, + &info.tenure_block_commit, + &info.active_reward_set, )?; self.matured_miner_rewards_opt = matured_miner_rewards_opt; Ok(clarity_tx) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 981b75543e..253b612fb9 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2660,6 +2660,9 @@ impl NakamotoChainState { /// * coinbase_height: the number of tenures that this block confirms (including epoch2 blocks) /// (this is equivalent to the number of coinbases) /// * tenure_extend: whether or not to reset the tenure's ongoing execution cost + /// * block_bitvec: the bitvec that will control PoX reward handling for this block + /// * tenure_block_commit: the block commit that elected this miner + /// * active_reward_set: the reward and signer set active during `tenure_block_commit` /// /// Returns clarity_tx, list of receipts, microblock execution cost, /// microblock fees, microblock burns, list of microblock tx receipts, @@ -2680,7 +2683,12 @@ impl NakamotoChainState { new_tenure: bool, coinbase_height: u64, tenure_extend: bool, + block_bitvec: &BitVec<4000>, + tenure_block_commit: &LeaderBlockCommitOp, + active_reward_set: &RewardSet, ) -> Result, ChainstateError> { + Self::check_pox_bitvector(block_bitvec, tenure_block_commit, active_reward_set)?; + let parent_index_hash = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash); let parent_sortition_id = sortition_dbconn .get_sortition_id_from_consensus_hash(&parent_consensus_hash) @@ -2914,7 +2922,7 @@ impl NakamotoChainState { } fn check_pox_bitvector( - block: &NakamotoBlock, + block_bitvec: &BitVec<4000>, tenure_block_commit: &LeaderBlockCommitOp, active_reward_set: &RewardSet, ) -> Result<(), ChainstateError> { @@ -2948,7 +2956,7 @@ impl NakamotoChainState { |ix| { let ix = u16::try_from(ix) .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?; - let bitvec_value = block.header.signer_bitvec.get(ix) + let bitvec_value = block_bitvec.get(ix) .unwrap_or_else(|| { info!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1"); true @@ -2966,7 +2974,6 @@ impl NakamotoChainState { "Invalid Nakamoto block: punished PoX address when bitvec contained 1s for the address"; "reward_address" => %treated_addr.deref(), "bitvec_values" => ?bitvec_values, - "block_id" => %block.header.block_id(), ); return Err(ChainstateError::InvalidStacksBlock( "Bitvec does not match the block commit's PoX handling".into(), @@ -2978,7 +2985,6 @@ impl NakamotoChainState { "Invalid Nakamoto block: rewarded PoX address when bitvec contained 0s for the address"; "reward_address" => %treated_addr.deref(), "bitvec_values" => ?bitvec_values, - "block_id" => %block.header.block_id(), ); return Err(ChainstateError::InvalidStacksBlock( "Bitvec does not match the block commit's PoX handling".into(), @@ -3157,10 +3163,6 @@ impl NakamotoChainState { ChainstateError::NoSuchBlockError })?; - // TODO: this should be checked in the miner path as well... - // the easiest way to ensure this is via the setup_block function. - Self::check_pox_bitvector(&block, &tenure_block_commit, active_reward_set)?; - // this block's tenure's block-commit contains the hash of the parent tenure's tenure-start // block. // (note that we can't check this earlier, since we need the parent tenure to have been @@ -3263,6 +3265,9 @@ impl NakamotoChainState { new_tenure, coinbase_height, tenure_extend, + &block.header.signer_bitvec, + &tenure_block_commit, + active_reward_set, )?; let starting_cost = clarity_tx.cost_so_far(); diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index f2b811affe..0299b33a74 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -505,7 +505,7 @@ impl TestStacksNode { /// /// The first block will contain a coinbase, if coinbase is Some(..) /// Process the blocks via the chains coordinator as we produce them. - pub fn make_nakamoto_tenure_blocks<'a, F, G>( + pub fn make_nakamoto_tenure_blocks<'a, S, F, G>( chainstate: &mut StacksChainState, sortdb: &SortitionDB, miner: &mut TestMiner, @@ -522,10 +522,12 @@ impl TestStacksNode { (), BitcoinIndexer, >, + mut miner_setup: S, mut block_builder: F, mut after_block: G, ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> where + S: FnMut(&mut NakamotoBlockBuilder), F: FnMut( &mut TestMiner, &mut StacksChainState, @@ -561,7 +563,7 @@ impl TestStacksNode { ); // make a block - let builder = if let Some(parent_tip) = parent_tip_opt { + let mut builder = if let Some(parent_tip) = parent_tip_opt { NakamotoBlockBuilder::new( &parent_tip, tenure_id_consensus_hash, @@ -585,6 +587,7 @@ impl TestStacksNode { &coinbase.clone().unwrap(), ) }; + miner_setup(&mut builder); tenure_change = None; coinbase = None; @@ -1092,7 +1095,14 @@ impl<'a> TestPeer<'a> { &[(NakamotoBlock, u64, ExecutionCost)], ) -> Vec, { - self.make_nakamoto_tenure_and(tenure_change, coinbase, signers, block_builder, |_| true) + self.make_nakamoto_tenure_and( + tenure_change, + coinbase, + signers, + |_| {}, + block_builder, + |_| true, + ) } /// Produce and process a Nakamoto tenure, after processing the block-commit from @@ -1100,15 +1110,17 @@ impl<'a> TestPeer<'a> { /// take the consensus hash, and feed it in here. /// /// Returns the blocks, their sizes, and runtime costs - pub fn make_nakamoto_tenure_and( + pub fn make_nakamoto_tenure_and( &mut self, tenure_change: StacksTransaction, coinbase: StacksTransaction, signers: &mut TestSigners, + miner_setup: S, block_builder: F, after_block: G, ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> where + S: FnMut(&mut NakamotoBlockBuilder), F: FnMut( &mut TestMiner, &mut StacksChainState, @@ -1137,6 +1149,7 @@ impl<'a> TestPeer<'a> { Some(tenure_change), Some(coinbase), &mut self.coord, + miner_setup, block_builder, after_block, ); @@ -1210,6 +1223,7 @@ impl<'a> TestPeer<'a> { Some(tenure_extend_tx), None, &mut self.coord, + |_| {}, block_builder, |_| true, ); From afdaa24257552e9cb0af0fcfa676df9157dff2ce Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 10 Jun 2024 11:29:27 -0500 Subject: [PATCH 0284/1400] feat: check bitvec is all 1s in signer chainstate checks --- stacks-common/src/bitvec.rs | 46 +++++++++++++++++++++++++++++++++ stacks-signer/src/chainstate.rs | 12 +++++++++ 2 files changed, 58 insertions(+) diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index 01a34d0e62..cd7209a801 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -125,6 +125,33 @@ impl ToSql for BitVec { } } +pub struct BitVecIter<'a, const MAX_SIZE: u16> { + index: u16, + byte: Option<&'a u8>, + bitvec: &'a BitVec, +} + +impl<'a, const MAX_SIZE: u16> Iterator for BitVecIter<'a, MAX_SIZE> { + type Item = bool; + + fn next(&mut self) -> Option { + if self.index >= self.bitvec.len { + return None; + } + let byte = self.byte?; + let next = (*byte & BitVec::::bit_index(self.index)) != 0; + self.index = self.index.saturating_add(1); + if self.index < self.bitvec.len { + // check if byte needs to be incremented + if self.index % 8 == 0 { + let vec_index = usize::from(self.index / 8); + self.byte = self.bitvec.data.get(vec_index); + } + } + Some(next) + } +} + impl BitVec { /// Construct a new BitVec with all entries set to `false` and total length `len` pub fn zeros(len: u16) -> Result, String> { @@ -146,6 +173,15 @@ impl BitVec { Ok(bitvec) } + pub fn iter(&self) -> BitVecIter { + let byte = self.data.get(0); + BitVecIter { + index: 0, + bitvec: self, + byte, + } + } + pub fn len(&self) -> u16 { self.len } @@ -252,6 +288,15 @@ mod test { assert!(input.set(input.len(), false).is_err()); } + fn check_iter(input: &BitVec<{ u16::MAX }>) { + let mut checked = 0; + for (ix, entry) in input.iter().enumerate() { + checked += 1; + assert_eq!(input.get(u16::try_from(ix).unwrap()).unwrap(), entry); + } + assert_eq!(checked, input.len()); + } + fn check_serialization(input: &BitVec<{ u16::MAX }>) { let byte_ser = input.serialize_to_vec(); let deserialized = BitVec::consensus_deserialize(&mut byte_ser.as_slice()).unwrap(); @@ -288,6 +333,7 @@ mod test { } check_serialization(&bitvec); + check_iter(&bitvec); check_set_get(bitvec); } diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index a79251f73f..bc48fbf46f 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -113,6 +113,18 @@ impl SortitionsView { block: &NakamotoBlock, block_pk: &StacksPublicKey, ) -> Result { + let bitvec_all_1s = block.header.signer_bitvec.iter().all(|entry| entry); + if !bitvec_all_1s { + warn!( + "Miner block proposal has bitvec field which punishes in disagreement with signer. Considering invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, + "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), + ); + return Ok(false); + } + let block_pkh = Hash160::from_data(&block_pk.to_bytes_compressed()); let Some(proposed_by) = (if block.header.consensus_hash == self.cur_sortition.consensus_hash { From 169c8d85026f71dbf9a265f3623ec3416839cf97 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 10 Jun 2024 12:42:02 -0400 Subject: [PATCH 0285/1400] test: add integration test for block info functions --- clarity/src/vm/database/clarity_db.rs | 9 + clarity/src/vm/functions/database.rs | 4 +- stacks-common/src/types/mod.rs | 6 + stackslib/src/clarity_vm/tests/contracts.rs | 37 +- stackslib/src/clarity_vm/tests/costs.rs | 2 + .../src/tests/nakamoto_integrations.rs | 437 ++++++++++++++++++ .../src/tests/neon_integrations.rs | 10 +- 7 files changed, 497 insertions(+), 8 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 0e23443cec..1d1429dd7d 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -1008,6 +1008,15 @@ impl<'a> ClarityDatabase<'a> { } pub fn get_block_time(&mut self, block_height: u32) -> Result { + let epoch = self.get_stacks_epoch(block_height).ok_or_else(|| { + InterpreterError::Expect( + format!("Failed to get epoch for block height {block_height}.)").into(), + ) + })?; + if !epoch.epoch_id.has_block_timestamps() { + return self.get_burn_block_time(block_height); + } + let id_bhh = self.get_index_block_header_hash(block_height)?; self.headers_db .get_block_time_for_block(&id_bhh) diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index 71cba8df96..46e7585fbc 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -1070,8 +1070,8 @@ pub fn special_get_tenure_info( _ => return Ok(Value::none()), }; - let current_block_height = env.global_context.database.get_current_block_height(); - if height_value >= current_block_height { + let current_tenure = env.global_context.database.get_tenure_height()?; + if height_value > current_tenure { return Ok(Value::none()); } diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index e6e5cf5f79..df239663fa 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -151,6 +151,12 @@ impl StacksEpochId { StacksEpochId::Epoch30 => MINING_COMMITMENT_FREQUENCY_NAKAMOTO, } } + + /// Is a timestamp saved for each Stacks block in this epoch? + /// If not, the timestamp is retrieved from the burn block. + pub fn has_block_timestamps(&self) -> bool { + self >= &StacksEpochId::Epoch30 + } } impl std::fmt::Display for StacksEpochId { diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index 116443c524..f70582722e 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -1605,9 +1605,10 @@ fn test_get_block_info_time() { let contract_identifier2 = QualifiedContractIdentifier::local("test-contract-2").unwrap(); let contract_identifier3 = QualifiedContractIdentifier::local("test-contract-3").unwrap(); + let contract_identifier3_3 = QualifiedContractIdentifier::local("test-contract-3-3").unwrap(); // Advance to epoch 3.0 - while sim.block_height <= 7 { + while sim.block_height <= 10 { sim.execute_next_block(|_env| {}); } @@ -1618,7 +1619,8 @@ fn test_get_block_info_time() { let contract2 = "(define-private (get-time) (get-block-info? time (- block-height u1)))"; let contract3 = - "(define-private (get-time) (get-block-info? time (- stacks-block-height u1)))"; + "(define-private (get-time) (get-stacks-block-info? time (- stacks-block-height u1)))"; + let contract3_3 = "(define-private (get-time) (get-stacks-block-info? time u1))"; conn.as_transaction(|clarity_db| { // Analyze the contract as Clarity 2 @@ -1664,19 +1666,46 @@ fn test_get_block_info_time() { |_, _| false, ) .unwrap(); + + // Analyze the contract as Clarity 3 + let (ast, analysis) = clarity_db + .analyze_smart_contract( + &contract_identifier3_3, + ClarityVersion::Clarity3, + &contract3_3, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // Publish the contract as Clarity 3 + clarity_db + .initialize_smart_contract( + &contract_identifier3_3, + ClarityVersion::Clarity3, + &ast, + contract3_3, + None, + |_, _| false, + ) + .unwrap(); }); // Call the contracts and validate the results let mut tx = conn.start_transaction_processing(); assert_eq!( - Value::some(Value::UInt(8)).unwrap(), + Value::some(Value::UInt(11)).unwrap(), tx.eval_read_only(&contract_identifier2, "(get-time)") .unwrap() ); assert_eq!( - Value::some(Value::UInt(1713799981)).unwrap(), + Value::some(Value::UInt(1713799984)).unwrap(), tx.eval_read_only(&contract_identifier3, "(get-time)") .unwrap() ); + assert_eq!( + Value::some(Value::UInt(1)).unwrap(), + tx.eval_read_only(&contract_identifier3_3, "(get-time)") + .unwrap() + ); }); } diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index 0dfaa630e2..37621ceb4e 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -168,6 +168,8 @@ pub fn get_simple_test(function: &NativeFunctions) -> &'static str { ToConsensusBuff => "(to-consensus-buff? u1)", FromConsensusBuff => "(from-consensus-buff? bool 0x03)", ReplaceAt => "(replace-at? list-bar u0 5)", + GetStacksBlockInfo => "(get-stacks-block-info? time u1)", + GetTenureInfo => "(get-tenure-info? time u1)", } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 7c57e8c14c..d1963563f1 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4225,3 +4225,440 @@ fn nakamoto_attempt_time() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// Verify the timestamps using `get-block-info?`, `get-stacks-block-info?`, and `get-tenure-info?`. +fn check_block_times() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let deploy_fee = 3000; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 3 * deploy_fee + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + let mut sender_nonce = 0; + + // Deploy this version with the Clarity 1 / 2 before epoch 3 + let contract0_name = "test-contract-0"; + let contract_clarity1 = + "(define-read-only (get-time (height uint)) (get-block-info? time height))"; + + let contract_tx0 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract0_name, + contract_clarity1, + ); + sender_nonce += 1; + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + let time0_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-time", + vec![&clarity::vm::Value::UInt(1)], + ); + let time0 = time0_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + info!("Time from pre-epoch 3.0: {}", time0); + + // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + // This version uses the Clarity 1 / 2 keywords + let contract1_name = "test-contract-1"; + let contract_tx1 = make_contract_publish_versioned( + &sender_sk, + sender_nonce, + deploy_fee, + contract1_name, + contract_clarity1, + Some(ClarityVersion::Clarity2), + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx1); + + // This version uses the Clarity 3 keywords + let contract3_name = "test-contract-3"; + let contract_clarity3 = + "(define-read-only (get-block-time (height uint)) (get-stacks-block-info? time height)) + (define-read-only (get-tenure-time (height uint)) (get-tenure-info? time height))"; + + let contract_tx3 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract3_name, + contract_clarity3, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx3); + + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let info = get_chain_info_result(&naka_conf).unwrap(); + println!("Chain info: {:?}", info); + let mut last_burn_block_height = info.burn_block_height as u128; + let mut last_stacks_block_height = info.stacks_tip_height as u128; + let mut last_tenure_height = last_stacks_block_height as u128; + + let time0_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let time0 = time0_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time1_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let time1 = time1_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert_eq!( + time0, time1, + "Time from pre- and post-epoch 3.0 contracts should match" + ); + + let time3_tenure_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-time", + vec![&clarity::vm::Value::UInt(last_tenure_height - 1)], + ); + let time3_tenure = time3_tenure_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert_eq!( + time0, time3_tenure, + "Tenure time should match Clarity 2 block time" + ); + + let time3_block_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let time3_block = time3_block_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + // Sleep to ensure the seconds have changed + thread::sleep(Duration::from_secs(1)); + + // Mine a Nakamoto block + info!("Mining Nakamoto block"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + println!("Chain info: {:?}", info); + let mut last_burn_block_height = info.burn_block_height as u128; + let mut last_stacks_block_height = info.stacks_tip_height as u128; + let mut last_tenure_height = last_stacks_block_height as u128; + + let time0a_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let time0a = time0a_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert!( + time0a - time0 >= 1, + "get-block-info? time should have changed" + ); + + let time1a_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let time1a = time1a_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert_eq!( + time0a, time1a, + "Time from pre- and post-epoch 3.0 contracts should match" + ); + + let time3a_tenure_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-time", + vec![&clarity::vm::Value::UInt(last_tenure_height)], + ); + let time3a_tenure = time3a_tenure_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert_eq!( + time0a, time3a_tenure, + "Tenure time should match Clarity 2 block time" + ); + + let time3a_block_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let time3a_block = time3a_block_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + // Sleep to ensure the seconds have changed + thread::sleep(Duration::from_secs(1)); + + // Mine a Nakamoto block + info!("Mining Nakamoto block"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let time0b_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height)], + ); + let time0b = time0b_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert_eq!( + time0a, time0b, + "get-block-info? time should not have changed" + ); + + let time1b_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height)], + ); + let time1b = time1b_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert_eq!( + time0b, time1b, + "Time from pre- and post-epoch 3.0 contracts should match" + ); + + let time3b_tenure_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-time", + vec![&clarity::vm::Value::UInt(last_tenure_height)], + ); + let time3b_tenure = time3b_tenure_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + assert_eq!( + time0b, time3b_tenure, + "Tenure time should match Clarity 2 block time" + ); + + let time3b_block_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-time", + vec![&clarity::vm::Value::UInt(last_stacks_block_height)], + ); + let time3b_block = time3b_block_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + assert!( + time3b_block - time3a_block >= 1, + "get-stacks-block-info? time should have changed" + ); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index ff3bc9b3d5..1dd46f2517 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -877,7 +877,7 @@ pub fn call_read_only( principal: &StacksAddress, contract: &str, function: &str, - args: Vec<&str>, + args: Vec<&Value>, ) -> Value { let http_origin = format!("http://{}", &conf.node.rpc_bind); let client = reqwest::blocking::Client::new(); @@ -886,8 +886,14 @@ pub fn call_read_only( "{http_origin}/v2/contracts/call-read/{}/{}/{}", principal, contract, function ); + + let serialized_args = args + .iter() + .map(|arg| arg.serialize_to_hex().unwrap()) + .collect::>(); + let body = json!({ - "arguments": args, + "arguments": serialized_args, "sender": principal.to_string(), }); let response: ReadOnlyResponse = client From 3fcd23f837d740125b5170583eed00c6a85b55db Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 10 Jun 2024 12:50:39 -0500 Subject: [PATCH 0286/1400] refactor: rename signer_bitvec, rename .punished field in block commit --- stacks-signer/src/chainstate.rs | 2 +- stacks-signer/src/tests/chainstate.rs | 2 +- stackslib/src/burnchains/tests/burnchain.rs | 8 +-- stackslib/src/burnchains/tests/db.rs | 2 +- .../src/chainstate/burn/db/processing.rs | 2 +- stackslib/src/chainstate/burn/db/sortdb.rs | 16 ++--- stackslib/src/chainstate/burn/distribution.rs | 8 +-- .../burn/operations/leader_block_commit.rs | 62 +++++++++---------- .../src/chainstate/burn/operations/mod.rs | 9 +-- stackslib/src/chainstate/burn/sortition.rs | 2 +- stackslib/src/chainstate/coordinator/tests.rs | 4 +- .../chainstate/nakamoto/coordinator/tests.rs | 10 +-- stackslib/src/chainstate/nakamoto/miner.rs | 2 +- stackslib/src/chainstate/nakamoto/mod.rs | 30 ++++----- .../src/chainstate/nakamoto/tests/mod.rs | 22 +++---- stackslib/src/chainstate/stacks/block.rs | 2 +- stackslib/src/net/api/postblock_proposal.rs | 2 +- stackslib/src/net/tests/download/nakamoto.rs | 6 +- .../src/burnchains/mocknet_controller.rs | 2 +- testnet/stacks-node/src/chain_data.rs | 24 +++---- testnet/stacks-node/src/event_dispatcher.rs | 2 +- .../stacks-node/src/nakamoto_node/relayer.rs | 2 +- testnet/stacks-node/src/neon_node.rs | 2 +- testnet/stacks-node/src/node.rs | 2 +- testnet/stacks-node/src/tests/epoch_205.rs | 2 +- testnet/stacks-node/src/tests/epoch_21.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 8 +-- testnet/stacks-node/src/tests/signer/v1.rs | 4 +- 28 files changed, 122 insertions(+), 119 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index bc48fbf46f..ef7ae1aa03 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -113,7 +113,7 @@ impl SortitionsView { block: &NakamotoBlock, block_pk: &StacksPublicKey, ) -> Result { - let bitvec_all_1s = block.header.signer_bitvec.iter().all(|entry| entry); + let bitvec_all_1s = block.header.pox_treatment.iter().all(|entry| entry); if !bitvec_all_1s { warn!( "Miner block proposal has bitvec field which punishes in disagreement with signer. Considering invalid."; diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index d12d941ecc..35b6f71a6f 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -95,7 +95,7 @@ fn setup_test_environment( state_index_root: TrieHash([0; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }, txs: vec![], }; diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index a0f1bd8bac..b08d7a097e 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -153,7 +153,7 @@ fn test_process_block_ops() { let block_commit_1 = LeaderBlockCommitOp { sunset_burn: 0, - punished: vec![], + treatment: vec![], commit_outs: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222").unwrap(), @@ -192,7 +192,7 @@ fn test_process_block_ops() { let block_commit_2 = LeaderBlockCommitOp { sunset_burn: 0, - punished: vec![], + treatment: vec![], commit_outs: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222223").unwrap(), @@ -231,7 +231,7 @@ fn test_process_block_ops() { let block_commit_3 = LeaderBlockCommitOp { sunset_burn: 0, - punished: vec![], + treatment: vec![], commit_outs: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222224").unwrap(), @@ -781,7 +781,7 @@ fn test_burn_snapshot_sequence() { if i > 0 { let next_block_commit = LeaderBlockCommitOp { sunset_burn: 0, - punished: vec![], + treatment: vec![], commit_outs: vec![], block_header_hash: BlockHeaderHash::from_bytes(&vec![ i, i, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index fb5d141430..df1c3ec1b0 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -515,7 +515,7 @@ pub fn make_simple_block_commit( let block_height = burn_header.block_height; let mut new_op = LeaderBlockCommitOp { sunset_burn: 0, - punished: vec![], + treatment: vec![], block_header_hash: block_hash, new_seed: VRFSeed([1u8; 32]), parent_block_ptr: 0, diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index a3377f9d10..b439a9512f 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -404,7 +404,7 @@ mod tests { block_height: 102, burn_parent_modulus: (101 % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), - punished: vec![], + treatment: vec![], }; let mut burnchain = Burnchain::default_unittest(100, &first_burn_hash); diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index e4d2ac22f0..efe2ce8265 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -306,7 +306,7 @@ impl FromRow for LeaderBlockCommitOp { vtxindex, block_height, burn_header_hash, - punished, + treatment: punished, }; Ok(block_commit) } @@ -5741,7 +5741,7 @@ impl<'a> SortitionHandleTx<'a> { &block_commit.sunset_burn.to_string(), &apparent_sender_str, &block_commit.burn_parent_modulus, - &serde_json::to_string(&block_commit.punished).unwrap(), + &serde_json::to_string(&block_commit.treatment).unwrap(), ]; self.execute("INSERT INTO block_commits (txid, vtxindex, block_height, burn_header_hash, block_header_hash, new_seed, parent_block_ptr, parent_vtxindex, key_block_ptr, key_vtxindex, memo, burn_fee, input, sortition_id, commit_outs, sunset_burn, apparent_sender, burn_parent_modulus, punished) \ @@ -7137,7 +7137,7 @@ pub mod tests { block_height: block_height + 2, burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), - punished: vec![], + treatment: vec![], }; let mut db = SortitionDB::connect_test(block_height, &first_burn_hash).unwrap(); @@ -7856,7 +7856,7 @@ pub mod tests { block_height: block_height + 2, burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), - punished: vec![], + treatment: vec![], }; let mut db = SortitionDB::connect_test(block_height, &first_burn_hash).unwrap(); @@ -10073,7 +10073,7 @@ pub mod tests { block_height: block_height + 2, burn_parent_modulus: ((block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x03; 32]), - punished: vec![], + treatment: vec![], }; // descends from genesis @@ -10116,7 +10116,7 @@ pub mod tests { block_height: block_height + 3, burn_parent_modulus: ((block_height + 2) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x04; 32]), - punished: vec![], + treatment: vec![], }; // descends from block_commit_1 @@ -10159,7 +10159,7 @@ pub mod tests { block_height: block_height + 4, burn_parent_modulus: ((block_height + 3) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x05; 32]), - punished: vec![], + treatment: vec![], }; // descends from genesis_block_commit @@ -10202,7 +10202,7 @@ pub mod tests { block_height: block_height + 5, burn_parent_modulus: ((block_height + 4) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: BurnchainHeaderHash([0x06; 32]), - punished: vec![], + treatment: vec![], }; let mut db = SortitionDB::connect_test(block_height, &first_burn_hash).unwrap(); diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index c6e7831670..ed01ae014b 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -512,7 +512,7 @@ mod tests { let input_txid = Txid(input_txid); LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], block_header_hash: BlockHeaderHash(block_header_hash), new_seed: VRFSeed([0; 32]), parent_block_ptr: (block_id - 1) as u32, @@ -885,7 +885,7 @@ mod tests { }; let block_commit_1 = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222") @@ -931,7 +931,7 @@ mod tests { }; let block_commit_2 = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222223") @@ -977,7 +977,7 @@ mod tests { }; let block_commit_3 = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222224") diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index c66d00b755..5278a86775 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -127,7 +127,7 @@ impl LeaderBlockCommitOp { txid: Txid([0u8; 32]), vtxindex: 0, burn_header_hash: BurnchainHeaderHash::zero(), - punished: vec![], + treatment: vec![], } } @@ -166,7 +166,7 @@ impl LeaderBlockCommitOp { - 1, burn_header_hash: BurnchainHeaderHash::zero(), - punished: vec![], + treatment: vec![], } } @@ -454,7 +454,7 @@ impl LeaderBlockCommitOp { input, apparent_sender, - punished: Vec::new(), + treatment: Vec::new(), txid: tx.txid(), vtxindex: tx.vtxindex(), block_height: block_height, @@ -1128,7 +1128,7 @@ impl LeaderBlockCommitOp { self.check_common(epoch.epoch_id, tx)?; if reward_set_info.is_some_and(|r| r.allow_nakamoto_punishment) { - self.punished = punished; + self.treatment = punished; } // good to go! @@ -1756,7 +1756,7 @@ mod tests { block_height: block_height, burn_parent_modulus: ((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: burn_header_hash, - punished: vec![], }) + treatment: vec![], }) }, OpFixture { // invalid -- wrong opcode @@ -1990,7 +1990,7 @@ mod tests { commit_outs: vec![], burn_fee: 12345, - punished: vec![], + treatment: vec![], input: (Txid([0; 32]), 0), apparent_sender: BurnchainSigner::mock_parts( AddressHashMode::SerializeP2PKH, @@ -2125,7 +2125,7 @@ mod tests { // accept -- consumes leader_key_2 op: LeaderBlockCommitOp { sunset_burn: 0, - punished: vec![], + treatment: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( "2222222222222222222222222222222222222222222222222222222222222222", @@ -2175,7 +2175,7 @@ mod tests { CheckFixture { // accept -- builds directly off of genesis block and consumes leader_key_2 op: LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2226,7 +2226,7 @@ mod tests { CheckFixture { // accept -- also consumes leader_key_1 op: LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2277,7 +2277,7 @@ mod tests { CheckFixture { // reject -- bad burn parent modulus op: LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2340,7 +2340,7 @@ mod tests { CheckFixture { // reject -- bad burn parent modulus op: LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2512,7 +2512,7 @@ mod tests { // consumes leader_key_1 let block_commit_1 = LeaderBlockCommitOp { sunset_burn: 0, - punished: vec![], + treatment: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes("2222222222222222222222222222222222222222222222222222222222222222") .unwrap(), @@ -2661,7 +2661,7 @@ mod tests { CheckFixture { // reject -- predates start block op: LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2713,7 +2713,7 @@ mod tests { // reject -- no such leader key op: LeaderBlockCommitOp { sunset_burn: 0, - punished: vec![], + treatment: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( "2222222222222222222222222222222222222222222222222222222222222222", @@ -2763,7 +2763,7 @@ mod tests { CheckFixture { // reject -- previous block must exist op: LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2814,7 +2814,7 @@ mod tests { CheckFixture { // reject -- previous block must exist in a different block op: LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2868,7 +2868,7 @@ mod tests { // here) op: LeaderBlockCommitOp { sunset_burn: 0, - punished: vec![], + treatment: vec![], block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( "2222222222222222222222222222222222222222222222222222222222222222", @@ -2918,7 +2918,7 @@ mod tests { CheckFixture { // reject -- fee is 0 op: LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -2969,7 +2969,7 @@ mod tests { CheckFixture { // accept -- consumes leader_key_2 op: LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -3020,7 +3020,7 @@ mod tests { CheckFixture { // accept -- builds directly off of genesis block and consumes leader_key_2 op: LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -3071,7 +3071,7 @@ mod tests { CheckFixture { // accept -- also consumes leader_key_1 op: LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash::from_bytes( &hex_bytes( @@ -3207,7 +3207,7 @@ mod tests { }; let default_block_commit = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -3519,7 +3519,7 @@ mod tests { }; let block_commit_pre_2_05 = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x02; 32]), new_seed: VRFSeed([0x03; 32]), @@ -3549,7 +3549,7 @@ mod tests { }; let block_commit_post_2_05_valid = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x03; 32]), new_seed: VRFSeed([0x04; 32]), @@ -3579,7 +3579,7 @@ mod tests { }; let block_commit_post_2_05_valid_bigger_epoch = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x03; 32]), new_seed: VRFSeed([0x04; 32]), @@ -3609,7 +3609,7 @@ mod tests { }; let block_commit_post_2_05_invalid_bad_memo = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x04; 32]), new_seed: VRFSeed([0x05; 32]), @@ -3639,7 +3639,7 @@ mod tests { }; let block_commit_post_2_05_invalid_no_memo = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x05; 32]), new_seed: VRFSeed([0x06; 32]), @@ -3669,7 +3669,7 @@ mod tests { }; let block_commit_post_2_1_valid = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x03; 32]), new_seed: VRFSeed([0x04; 32]), @@ -3699,7 +3699,7 @@ mod tests { }; let block_commit_post_2_1_valid_bigger_epoch = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x03; 32]), new_seed: VRFSeed([0x04; 32]), @@ -3729,7 +3729,7 @@ mod tests { }; let block_commit_post_2_1_invalid_bad_memo = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x04; 32]), new_seed: VRFSeed([0x05; 32]), @@ -3759,7 +3759,7 @@ mod tests { }; let block_commit_post_2_1_invalid_no_memo = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x05; 32]), new_seed: VRFSeed([0x06; 32]), diff --git a/stackslib/src/chainstate/burn/operations/mod.rs b/stackslib/src/chainstate/burn/operations/mod.rs index 597db3de49..41e9f8fc0c 100644 --- a/stackslib/src/chainstate/burn/operations/mod.rs +++ b/stackslib/src/chainstate/burn/operations/mod.rs @@ -244,11 +244,12 @@ pub struct LeaderBlockCommitOp { /// PoX/Burn outputs pub commit_outs: Vec, - /// If this block commit punished one or both of its PoX recipients, - /// they will be in this vector. + /// If the active epoch supports PoX reward/punishment + /// via burns, this vector will contain the treatment (rewarded or punished) + /// of the PoX addresses active during the block commit. /// - /// This value is set by the check() call. - pub punished: Vec, + /// This value is set by the check() call, not during parsing. + pub treatment: Vec, // PoX sunset burn pub sunset_burn: u64, diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs index 67362fc3b9..0c5e020a64 100644 --- a/stackslib/src/chainstate/burn/sortition.rs +++ b/stackslib/src/chainstate/burn/sortition.rs @@ -1119,7 +1119,7 @@ mod test { block_height: header.block_height, burn_parent_modulus: (i % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: header.block_hash.clone(), - punished: vec![], + treatment: vec![], }; let tip = SortitionDB::get_canonical_burn_chain_tip(db.conn()).unwrap(); diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 3479c82a23..7bd06aaaea 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -698,7 +698,7 @@ fn make_genesis_block_with_recipients( let commit_op = LeaderBlockCommitOp { sunset_burn: 0, - punished: vec![], + treatment: vec![], block_header_hash: block.block_hash(), burn_fee: my_burn, input: (Txid([0; 32]), 0), @@ -971,7 +971,7 @@ fn make_stacks_block_with_input( let commit_op = LeaderBlockCommitOp { sunset_burn, - punished: vec![], + treatment: vec![], block_header_hash: block.block_hash(), burn_fee: my_burn, input, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 615b03721a..0627422d45 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -961,7 +961,7 @@ fn pox_treatment() { bitvec.set(1 + ix, false).unwrap(); bitvec.set(2 + ix, false).unwrap(); }); - block.header.signer_bitvec = bitvec; + block.header.pox_treatment = bitvec; // don't try to process this block yet, just return it so that // we can assert the block error. false @@ -987,7 +987,7 @@ fn pox_treatment() { |block| { // each stacker has 3 entries in the bitvec. // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 - block.header.signer_bitvec = BitVec::try_from( + block.header.pox_treatment = BitVec::try_from( [ false, false, true, false, false, true, false, false, true, false, false, true, ] @@ -1008,7 +1008,7 @@ fn pox_treatment() { // we want the miner to finish assembling the block, and then we'll // alter the bitvec before it signs the block (in a subsequent closure). // this way, we can test the block processing behavior. - miner.header.signer_bitvec = BitVec::try_from( + miner.header.pox_treatment = BitVec::try_from( [ false, false, true, false, false, true, false, false, true, false, false, true, ] @@ -1049,7 +1049,7 @@ fn pox_treatment() { bitvec.set(2 + ix, true).unwrap(); }); - block.header.signer_bitvec = bitvec; + block.header.pox_treatment = bitvec; // don't try to process this block yet, just return it so that // we can assert the block error. false @@ -1074,7 +1074,7 @@ fn pox_treatment() { |miner| { // each stacker has 3 entries in the bitvec. // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 - miner.header.signer_bitvec = BitVec::try_from( + miner.header.pox_treatment = BitVec::try_from( [ false, false, true, false, false, true, false, false, true, false, false, true, ] diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 5fc1724b3f..6f402a6d11 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -381,7 +381,7 @@ impl NakamotoBlockBuilder { info.cause == Some(TenureChangeCause::BlockFound), info.coinbase_height, info.cause == Some(TenureChangeCause::Extended), - &self.header.signer_bitvec, + &self.header.pox_treatment, &info.tenure_block_commit, &info.active_reward_set, )?; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 253b612fb9..bb6ca882a5 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -328,9 +328,11 @@ pub struct NakamotoBlockHeader { /// the block header from the signer set active during the tenure. /// (ordered by reward set order) pub signer_signature: Vec, - /// A bitvec which represents the signers that participated in this block signature. + /// A bitvec which conveys whether reward addresses should be punished (by burning their PoX rewards) + /// or not in this block. + /// /// The maximum number of entries in the bitvec is 4000. - pub signer_bitvec: BitVec<4000>, + pub pox_treatment: BitVec<4000>, } impl FromRow for NakamotoBlockHeader { @@ -362,7 +364,7 @@ impl FromRow for NakamotoBlockHeader { state_index_root, signer_signature, miner_signature, - signer_bitvec, + pox_treatment: signer_bitvec, }) } } @@ -413,7 +415,7 @@ impl StacksMessageCodec for NakamotoBlockHeader { write_next(fd, &self.state_index_root)?; write_next(fd, &self.miner_signature)?; write_next(fd, &self.signer_signature)?; - write_next(fd, &self.signer_bitvec)?; + write_next(fd, &self.pox_treatment)?; Ok(()) } @@ -429,7 +431,7 @@ impl StacksMessageCodec for NakamotoBlockHeader { state_index_root: read_next(fd)?, miner_signature: read_next(fd)?, signer_signature: read_next(fd)?, - signer_bitvec: read_next(fd)?, + pox_treatment: read_next(fd)?, }) } } @@ -477,7 +479,7 @@ impl NakamotoBlockHeader { write_next(fd, &self.tx_merkle_root)?; write_next(fd, &self.state_index_root)?; write_next(fd, &self.miner_signature)?; - write_next(fd, &self.signer_bitvec)?; + write_next(fd, &self.pox_treatment)?; Ok(Sha512Trunc256Sum::from_hasher(hasher)) } @@ -626,7 +628,7 @@ impl NakamotoBlockHeader { state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::ones(bitvec_len) + pox_treatment: BitVec::ones(bitvec_len) .expect("BUG: bitvec of length-1 failed to construct"), } } @@ -643,7 +645,7 @@ impl NakamotoBlockHeader { state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), + pox_treatment: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), } } @@ -659,7 +661,7 @@ impl NakamotoBlockHeader { state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), + pox_treatment: BitVec::zeros(1).expect("BUG: bitvec of length-1 failed to construct"), } } } @@ -1676,7 +1678,7 @@ impl NakamotoChainState { // succeeds, since *we have already processed* the block. Self::infallible_set_block_processed(stacks_chain_state, &block_id); - let signer_bitvec = (&next_ready_block).header.signer_bitvec.clone(); + let signer_bitvec = (&next_ready_block).header.pox_treatment.clone(); // announce the block, if we're connected to an event dispatcher if let Some(dispatcher) = dispatcher_opt { @@ -2417,7 +2419,7 @@ impl NakamotoChainState { &header.parent_block_id, if tenure_changed { &1i64 } else { &0i64 }, &vrf_proof_bytes.as_ref(), - &header.signer_bitvec, + &header.pox_treatment, tip_info.burn_view.as_ref().ok_or_else(|| { error!( "Attempted to store nakamoto block header information without burnchain view"; @@ -2926,10 +2928,10 @@ impl NakamotoChainState { tenure_block_commit: &LeaderBlockCommitOp, active_reward_set: &RewardSet, ) -> Result<(), ChainstateError> { - if !tenure_block_commit.punished.is_empty() { + if !tenure_block_commit.treatment.is_empty() { // our block commit issued a punishment, check the reward set and bitvector // to ensure that this was valid. - for treated_addr in tenure_block_commit.punished.iter() { + for treated_addr in tenure_block_commit.treatment.iter() { if treated_addr.is_burn() { // Don't need to assert anything about burn addresses. // If they were in the reward set, "punishing" them is meaningless. @@ -3265,7 +3267,7 @@ impl NakamotoChainState { new_tenure, coinbase_height, tenure_extend, - &block.header.signer_bitvec, + &block.header.pox_treatment, &tenure_block_commit, active_reward_set, )?; diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 0e02ad1ac9..e7fb533f82 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -240,7 +240,7 @@ fn codec_nakamoto_header() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![MessageSignature::from_bytes(&[0x01; 65]).unwrap()], - signer_bitvec: BitVec::zeros(8).unwrap(), + pox_treatment: BitVec::zeros(8).unwrap(), }; let mut bytes = vec![ @@ -291,7 +291,7 @@ pub fn test_nakamoto_first_tenure_block_syntactic_validation() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; // sortition-inducing tenure change @@ -854,7 +854,7 @@ pub fn test_load_store_update_nakamoto_blocks() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: header_signatures.clone(), - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; let nakamoto_header_info = StacksHeaderInfo { @@ -899,7 +899,7 @@ pub fn test_load_store_update_nakamoto_blocks() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; let nakamoto_header_info_2 = StacksHeaderInfo { @@ -939,7 +939,7 @@ pub fn test_load_store_update_nakamoto_blocks() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; let nakamoto_header_info_3 = StacksHeaderInfo { @@ -1618,7 +1618,7 @@ fn test_nakamoto_block_static_verification() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; nakamoto_header.sign_miner(&private_key).unwrap(); @@ -1637,7 +1637,7 @@ fn test_nakamoto_block_static_verification() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; nakamoto_header_bad_ch.sign_miner(&private_key).unwrap(); @@ -1656,7 +1656,7 @@ fn test_nakamoto_block_static_verification() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; nakamoto_header_bad_miner_sig .sign_miner(&private_key) @@ -1809,7 +1809,7 @@ pub fn test_get_highest_nakamoto_tenure() { state_index_root: TrieHash([0x00; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; let tenure_change = TenureChangePayload { tenure_consensus_hash: sn.consensus_hash.clone(), @@ -2046,7 +2046,7 @@ fn test_make_miners_stackerdb_config() { block_height: snapshot.block_height, burn_parent_modulus: ((snapshot.block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8, burn_header_hash: snapshot.burn_header_hash.clone(), - punished: vec![], + treatment: vec![], }; let winning_ops = if i == 0 { @@ -2114,7 +2114,7 @@ fn test_make_miners_stackerdb_config() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; let block = NakamotoBlock { header, diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 19e8990cea..6ede2bc8e6 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -1311,7 +1311,7 @@ mod test { let mut block_commit = LeaderBlockCommitOp { sunset_burn: 0, - punished: vec![], + treatment: vec![], block_header_hash: header.block_hash(), new_seed: VRFSeed::from_proof(&header.proof), parent_block_ptr: 0, diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 43189b6847..906492f907 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -257,7 +257,7 @@ impl NakamotoBlockProposal { self.block.header.burn_spent, tenure_change, coinbase, - self.block.header.signer_bitvec.len(), + self.block.header.pox_treatment.len(), )?; let mut miner_tenure_info = diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 719e901076..5f94363ca1 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -104,7 +104,7 @@ fn test_nakamoto_tenure_downloader() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; let tenure_change_payload = TenureChangePayload { @@ -171,7 +171,7 @@ fn test_nakamoto_tenure_downloader() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; let mut block = NakamotoBlock { @@ -192,7 +192,7 @@ fn test_nakamoto_tenure_downloader() { state_index_root: TrieHash([0x08; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; let next_tenure_change_payload = TenureChangePayload { diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index 1e2dba0ef9..6bb958e070 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -199,7 +199,7 @@ impl BurnchainController for MocknetController { } BlockstackOperationType::LeaderBlockCommit(payload) => { BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: payload.block_header_hash, new_seed: payload.new_seed, diff --git a/testnet/stacks-node/src/chain_data.rs b/testnet/stacks-node/src/chain_data.rs index c761c639e7..b1e32c15ea 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/testnet/stacks-node/src/chain_data.rs @@ -277,7 +277,7 @@ impl MinerStats { // mocked commit let mocked_commit = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash(DEADBEEF.clone()), new_seed: VRFSeed(DEADBEEF.clone()), @@ -442,7 +442,7 @@ impl MinerStats { for (miner, last_commit) in active_miners_and_commits.iter() { if !commit_table.contains_key(miner) { let mocked_commit = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash(DEADBEEF.clone()), new_seed: VRFSeed(DEADBEEF.clone()), @@ -552,7 +552,7 @@ pub mod tests { #[test] fn test_burn_dist_to_prob_dist() { let block_commit_1 = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -585,7 +585,7 @@ pub mod tests { }; let block_commit_2 = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -621,7 +621,7 @@ pub mod tests { }; let block_commit_3 = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -822,7 +822,7 @@ EOF ( "miner-1".to_string(), LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -854,7 +854,7 @@ EOF ( "miner-2".to_string(), LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -889,7 +889,7 @@ EOF ( "miner-3".to_string(), LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -924,7 +924,7 @@ EOF let unconfirmed_block_commits = vec![ LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -951,7 +951,7 @@ EOF burn_header_hash: BurnchainHeaderHash([0x01; 32]), }, LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -978,7 +978,7 @@ EOF burn_header_hash: BurnchainHeaderHash([0x01; 32]), }, LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), @@ -1005,7 +1005,7 @@ EOF burn_header_hash: BurnchainHeaderHash([0x01; 32]), }, LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0x22; 32]), new_seed: VRFSeed([0x33; 32]), diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 43258fc048..9badfda567 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -1241,7 +1241,7 @@ impl EventDispatcher { return; } - let signer_bitvec = serde_json::to_value(block.header.signer_bitvec.clone()) + let signer_bitvec = serde_json::to_value(block.header.pox_treatment.clone()) .unwrap_or_default() .as_str() .unwrap_or_default() diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 9399e7da5d..5e8492a8a2 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -507,7 +507,7 @@ impl RelayerThread { .get_active() .ok_or_else(|| NakamotoNodeError::NoVRFKeyActive)?; let op = LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn, block_header_hash: BlockHeaderHash(parent_block_id.0), burn_fee: rest_commit, diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index d761d473a6..6873588e64 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1113,7 +1113,7 @@ impl BlockMinerThread { let burn_parent_modulus = (current_burn_height % BURN_BLOCK_MINED_AT_MODULUS) as u8; let sender = self.keychain.get_burnchain_signer(); BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn, block_header_hash, burn_fee, diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 88037af71f..3108485c00 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -1035,7 +1035,7 @@ impl Node { let txid = Txid(txid_bytes); BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash, burn_fee, diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 231a10074d..68f37b4fb8 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -602,7 +602,7 @@ fn transition_empty_blocks() { let burn_parent_modulus = (tip_info.burn_block_height % BURN_BLOCK_MINED_AT_MODULUS) as u8; let op = BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn, block_header_hash: BlockHeaderHash([0xff; 32]), burn_fee: rest_commit, diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 4f7fc0d059..6696467930 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1928,7 +1928,7 @@ fn transition_empty_blocks() { let burn_parent_modulus = ((tip_info.burn_block_height + 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8; let op = BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { - punished: vec![], + treatment: vec![], sunset_burn: 0, block_header_hash: BlockHeaderHash([0xff; 32]), burn_fee: burn_fee_cap, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 58f7b6a05a..c630776d5c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4789,7 +4789,7 @@ fn signer_chainstate() { state_index_root: TrieHash([0; 32]), miner_signature: MessageSignature([0; 65]), signer_signature: Vec::new(), - signer_bitvec: BitVec::ones(1).unwrap(), + pox_treatment: BitVec::ones(1).unwrap(), }; sibling_block_header.sign_miner(&miner_sk).unwrap(); @@ -4819,7 +4819,7 @@ fn signer_chainstate() { state_index_root: TrieHash([0; 32]), miner_signature: MessageSignature([0; 65]), signer_signature: Vec::new(), - signer_bitvec: BitVec::ones(1).unwrap(), + pox_treatment: BitVec::ones(1).unwrap(), }; sibling_block_header.sign_miner(&miner_sk).unwrap(); @@ -4869,7 +4869,7 @@ fn signer_chainstate() { state_index_root: TrieHash([0; 32]), miner_signature: MessageSignature([0; 65]), signer_signature: Vec::new(), - signer_bitvec: BitVec::ones(1).unwrap(), + pox_treatment: BitVec::ones(1).unwrap(), }; sibling_block_header.sign_miner(&miner_sk).unwrap(); @@ -4927,7 +4927,7 @@ fn signer_chainstate() { state_index_root: TrieHash([0; 32]), miner_signature: MessageSignature([0; 65]), signer_signature: Vec::new(), - signer_bitvec: BitVec::ones(1).unwrap(), + pox_treatment: BitVec::ones(1).unwrap(), }; sibling_block_header.sign_miner(&miner_sk).unwrap(); diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 30f499caae..52437acf97 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -534,7 +534,7 @@ fn sign_request_rejected() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; let mut block1 = NakamotoBlock { header: header1, @@ -561,7 +561,7 @@ fn sign_request_rejected() { state_index_root: TrieHash([0x08; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }; let mut block2 = NakamotoBlock { header: header2, From 590e6818b41d5b81b6991243ed557d81f6d298f9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 10 Jun 2024 14:02:19 -0400 Subject: [PATCH 0287/1400] fix: merge errors --- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 34 +++++++++++++------ 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index a783ea2461..ad0e3fee50 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2441,7 +2441,7 @@ impl NakamotoChainState { signer_bitvec, burn_view ) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24, ?25)", + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24, ?25, ?26)", args )?; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 65c9f7eb96..f764bd0d98 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4368,9 +4368,13 @@ fn clarity_burn_state() { // Don't submit this tx on the first iteration, because the contract is not published yet. if tenure_ix > 0 { // Call the read-only function and see if we see the correct burn block height - let expected_height = Value::UInt(burn_block_height); - let arg = expected_height.serialize_to_hex().unwrap(); - let result = call_read_only(&naka_conf, &sender_addr, contract_name, "foo", vec![&arg]); + let result = call_read_only( + &naka_conf, + &sender_addr, + contract_name, + "foo", + vec![&Value::UInt(burn_block_height)], + ); result.expect_result_ok().expect("Read-only call failed"); // Submit a tx for the next block (the next block will be a new tenure, so the burn block height will increment) @@ -4427,8 +4431,13 @@ fn clarity_burn_state() { // Call the read-only function and see if we see the correct burn block height let expected_height = Value::UInt(burn_block_height); - let arg = expected_height.serialize_to_hex().unwrap(); - let result = call_read_only(&naka_conf, &sender_addr, contract_name, "foo", vec![&arg]); + let result = call_read_only( + &naka_conf, + &sender_addr, + contract_name, + "foo", + vec![&expected_height], + ); info!("Read-only result: {:?}", result); result.expect_result_ok().expect("Read-only call failed"); @@ -4787,6 +4796,7 @@ fn signer_chainstate() { parent_block_id: last_tenure_header.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), + timestamp: last_tenure_header.timestamp + 1, miner_signature: MessageSignature([0; 65]), signer_signature: Vec::new(), signer_bitvec: BitVec::ones(1).unwrap(), @@ -4817,6 +4827,7 @@ fn signer_chainstate() { parent_block_id: last_tenure_header.parent_block_id.clone(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), + timestamp: last_tenure_header.timestamp + 1, miner_signature: MessageSignature([0; 65]), signer_signature: Vec::new(), signer_bitvec: BitVec::ones(1).unwrap(), @@ -4867,6 +4878,7 @@ fn signer_chainstate() { parent_block_id: reorg_to_block.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), + timestamp: last_tenure_header.timestamp + 1, miner_signature: MessageSignature([0; 65]), signer_signature: Vec::new(), signer_bitvec: BitVec::ones(1).unwrap(), @@ -4925,6 +4937,7 @@ fn signer_chainstate() { parent_block_id: reorg_to_block.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), + timestamp: reorg_to_block.header.timestamp + 1, miner_signature: MessageSignature([0; 65]), signer_signature: Vec::new(), signer_bitvec: BitVec::ones(1).unwrap(), @@ -5085,6 +5098,7 @@ fn check_block_times() { contract_clarity1, ); sender_nonce += 1; + submit_tx(&http_origin, &contract_tx0); boot_to_epoch_3( &naka_conf, @@ -5163,9 +5177,8 @@ fn check_block_times() { let info = get_chain_info_result(&naka_conf).unwrap(); println!("Chain info: {:?}", info); - let mut last_burn_block_height = info.burn_block_height as u128; - let mut last_stacks_block_height = info.stacks_tip_height as u128; - let mut last_tenure_height = last_stacks_block_height as u128; + let last_stacks_block_height = info.stacks_tip_height as u128; + let last_tenure_height = last_stacks_block_height as u128; let time0_value = call_read_only( &naka_conf, @@ -5260,9 +5273,8 @@ fn check_block_times() { let info = get_chain_info_result(&naka_conf).unwrap(); println!("Chain info: {:?}", info); - let mut last_burn_block_height = info.burn_block_height as u128; - let mut last_stacks_block_height = info.stacks_tip_height as u128; - let mut last_tenure_height = last_stacks_block_height as u128; + let last_stacks_block_height = info.stacks_tip_height as u128; + let last_tenure_height = last_stacks_block_height as u128; let time0a_value = call_read_only( &naka_conf, From 916b19026a5ca4fddd76c3a95ac4f50e71c73f4b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 10 Jun 2024 16:07:28 -0400 Subject: [PATCH 0288/1400] test: resolve remaining issues with integration test --- clarity/src/vm/database/clarity_db.rs | 7 +- stackslib/src/clarity_vm/database/mod.rs | 71 +++++++++++----- .../src/tests/nakamoto_integrations.rs | 81 ++++++++++--------- 3 files changed, 95 insertions(+), 64 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 504d6fda76..8507dec80e 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -915,7 +915,11 @@ impl<'a> ClarityDatabase<'a> { } pub fn get_block_time(&mut self, block_height: u32) -> Result { - let epoch = self.get_stacks_epoch(block_height).ok_or_else(|| { + let id_bhh = self.get_index_block_header_hash(block_height)?; + let burn_block_height = self + .get_burnchain_block_height(&id_bhh) + .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()))?; + let epoch = self.get_stacks_epoch(burn_block_height).ok_or_else(|| { InterpreterError::Expect( format!("Failed to get epoch for block height {block_height}.)").into(), ) @@ -924,7 +928,6 @@ impl<'a> ClarityDatabase<'a> { return self.get_burn_block_time(block_height); } - let id_bhh = self.get_index_block_header_hash(block_height)?; self.headers_db .get_block_time_for_block(&id_bhh) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index d063154cdf..4fe4a9dcd9 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -73,7 +73,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { } fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.0, id_bhh, "timestamp", |r| { + get_stacks_header_column_nakamoto(self.0, id_bhh, "timestamp", |r| { u64::from_row(r).expect("FATAL: malformed timestamp") }) } @@ -153,7 +153,7 @@ impl<'a> HeadersDB for ChainstateTx<'a> { } fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.deref().deref(), id_bhh, "timestamp", |r| { + get_stacks_header_column_nakamoto(self.deref().deref(), id_bhh, "timestamp", |r| { u64::from_row(r).expect("FATAL: malformed timestamp") }) } @@ -236,7 +236,7 @@ impl HeadersDB for MARF { } fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.sqlite_conn(), id_bhh, "timestamp", |r| { + get_stacks_header_column_nakamoto(self.sqlite_conn(), id_bhh, "timestamp", |r| { u64::from_row(r).expect("FATAL: malformed timestamp") }) } @@ -287,36 +287,39 @@ impl HeadersDB for MARF { } } -fn get_stacks_header_column( +fn get_stacks_header_column_internal( conn: &DBConn, id_bhh: &StacksBlockId, column_name: &str, loader: F, + nakamoto_only: bool, ) -> Option where F: Fn(&Row) -> R, { let args: &[&dyn ToSql] = &[id_bhh]; - if let Some(result) = conn - .query_row( - &format!( - "SELECT {} FROM block_headers WHERE index_block_hash = ?", - column_name - ), - args, - |x| Ok(loader(x)), - ) - .optional() - .unwrap_or_else(|_| { - panic!( - "Unexpected SQL failure querying block header table for '{}'", - column_name + if !nakamoto_only { + if let Some(result) = conn + .query_row( + &format!( + "SELECT {} FROM block_headers WHERE index_block_hash = ?", + column_name + ), + args, + |x| Ok(loader(x)), ) - }) - { - return Some(result); + .optional() + .unwrap_or_else(|_| { + panic!( + "Unexpected SQL failure querying block header table for '{}'", + column_name + ) + }) + { + return Some(result); + } } - // if nothing was found in `block_headers`, try `nakamoto_block_headers` + // if `nakamoto_only` or nothing was found in `block_headers`, try `nakamoto_block_headers` conn.query_row( &format!( "SELECT {} FROM nakamoto_block_headers WHERE index_block_hash = ?", @@ -334,6 +337,30 @@ where }) } +fn get_stacks_header_column( + conn: &DBConn, + id_bhh: &StacksBlockId, + column_name: &str, + loader: F, +) -> Option +where + F: Fn(&Row) -> R, +{ + get_stacks_header_column_internal(conn, id_bhh, column_name, loader, false) +} + +fn get_stacks_header_column_nakamoto( + conn: &DBConn, + id_bhh: &StacksBlockId, + column_name: &str, + loader: F, +) -> Option +where + F: Fn(&Row) -> R, +{ + get_stacks_header_column_internal(conn, id_bhh, column_name, loader, true) +} + fn get_miner_column( conn: &DBConn, id_bhh: &StacksBlockId, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f764bd0d98..18106c97bf 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3663,7 +3663,7 @@ fn check_block_heights() { .unwrap(); let info = get_chain_info_result(&naka_conf).unwrap(); - println!("Chain info: {:?}", info); + info!("Chain info: {:?}", info); let mut last_burn_block_height; let mut last_stacks_block_height = info.stacks_tip_height as u128; let mut last_tenure_height = last_stacks_block_height as u128; @@ -5143,7 +5143,7 @@ fn check_block_times() { }) .unwrap(); - // This version uses the Clarity 1 / 2 keywords + // This version uses the Clarity 1 / 2 function let contract1_name = "test-contract-1"; let contract_tx1 = make_contract_publish_versioned( &sender_sk, @@ -5156,7 +5156,7 @@ fn check_block_times() { sender_nonce += 1; submit_tx(&http_origin, &contract_tx1); - // This version uses the Clarity 3 keywords + // This version uses the Clarity 3 functions let contract3_name = "test-contract-3"; let contract_clarity3 = "(define-read-only (get-block-time (height uint)) (get-stacks-block-info? time height)) @@ -5176,7 +5176,7 @@ fn check_block_times() { .unwrap(); let info = get_chain_info_result(&naka_conf).unwrap(); - println!("Chain info: {:?}", info); + info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; let last_tenure_height = last_stacks_block_height as u128; @@ -5272,9 +5272,8 @@ fn check_block_times() { } let info = get_chain_info_result(&naka_conf).unwrap(); - println!("Chain info: {:?}", info); + info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; - let last_tenure_height = last_stacks_block_height as u128; let time0a_value = call_read_only( &naka_conf, @@ -5312,23 +5311,24 @@ fn check_block_times() { "Time from pre- and post-epoch 3.0 contracts should match" ); - let time3a_tenure_value = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-tenure-time", - vec![&clarity::vm::Value::UInt(last_tenure_height)], - ); - let time3a_tenure = time3a_tenure_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert_eq!( - time0a, time3a_tenure, - "Tenure time should match Clarity 2 block time" - ); + // TODO: enable access to current tenure. + // let time3a_tenure_value = call_read_only( + // &naka_conf, + // &sender_addr, + // contract3_name, + // "get-tenure-time", + // vec![&clarity::vm::Value::UInt(last_tenure_height)], + // ); + // let time3a_tenure = time3a_tenure_value + // .expect_optional() + // .unwrap() + // .unwrap() + // .expect_u128() + // .unwrap(); + // assert_eq!( + // time0a, time3a_tenure, + // "Tenure time should match Clarity 2 block time" + // ); let time3a_block_value = call_read_only( &naka_conf, @@ -5407,23 +5407,24 @@ fn check_block_times() { "Time from pre- and post-epoch 3.0 contracts should match" ); - let time3b_tenure_value = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-tenure-time", - vec![&clarity::vm::Value::UInt(last_tenure_height)], - ); - let time3b_tenure = time3b_tenure_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert_eq!( - time0b, time3b_tenure, - "Tenure time should match Clarity 2 block time" - ); + // TODO: Enable access to current tenure. + // let time3b_tenure_value = call_read_only( + // &naka_conf, + // &sender_addr, + // contract3_name, + // "get-tenure-time", + // vec![&clarity::vm::Value::UInt(last_tenure_height)], + // ); + // let time3b_tenure = time3b_tenure_value + // .expect_optional() + // .unwrap() + // .unwrap() + // .expect_u128() + // .unwrap(); + // assert_eq!( + // time0b, time3b_tenure, + // "Tenure time should match Clarity 2 block time" + // ); let time3b_block_value = call_read_only( &naka_conf, From 3d84bf8e3bbff6b76acb17820912f906b51ab603 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 10 Jun 2024 16:08:39 -0400 Subject: [PATCH 0289/1400] test: add new bitcoin test to yaml file --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 04ad0160d1..ea86772d8d 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -97,6 +97,7 @@ jobs: - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state + - tests::nakamoto_integrations::check_block_times # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower From 52a0c6fe9f60a9369b00efd80a444d685f4f01ab Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 10 Jun 2024 16:09:55 -0400 Subject: [PATCH 0290/1400] fix: update doc example to use burn chain time (pre-epoch-3) --- clarity/src/vm/docs/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 93842be647..eb08b856d9 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -1833,7 +1833,7 @@ the mining of this block started, but is not guaranteed to be accurate. This tim - Greater than the timestamp of the previous block - Less than 15 seconds into the future (according to their own local clocks) ", - example: "(get-stacks-block-info? time u0) ;; Returns (some u1557860302) + example: "(get-stacks-block-info? time u0) ;; Returns (some u1557860301) (get-stacks-block-info? header-hash u0) ;; Returns (some 0x374708fff7719dd5979ec875d56cd2286f6d3cf7ec317a3b25632aab28ec37bb) " }; From d3afb28f3c2616bdca0704b9cdf4cac0bc78141b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 10 Jun 2024 16:13:12 -0400 Subject: [PATCH 0291/1400] docs: update changelog with block info updates --- CHANGELOG.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0068086840..56f9b43e10 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,15 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added -- Added support for Clarity 3, with new keywords, `tenure-height` and `stacks-block-height`, and removal of `block-height` +- Added support for Clarity 3 + - Keywords / variable + - `tenure-height` added + - `stacks-block-height` added + - `block-height` removed + - Functions + - `get-stacks-block-info?` added + - `get-tenure-info?` added + - `get-block-info?` removed ### Changed From 6781e0bce4dae5b33dcd429a221aff692876f9a3 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 10 Jun 2024 16:56:18 -0400 Subject: [PATCH 0292/1400] fix: merge error --- stackslib/src/net/api/tests/postblock_proposal.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 30ccac4be0..a9defc6e3e 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -287,7 +287,7 @@ fn test_try_make_response() { chainstate: &mut StacksChainState, _: &mut Relayer, _: &mut MemPoolDB| { - let burn_dbconn = sort_db.index_conn(); + let burn_dbconn = sort_db.index_handle_at_tip(); let mut miner_tenure_info = builder .load_tenure_info(chainstate, &burn_dbconn, None) .unwrap(); From 71f61a2762ce00287eb190fbddbe77a6e78b0b2b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 10 Jun 2024 16:09:07 -0500 Subject: [PATCH 0293/1400] address PR feedback --- .../chainstate/burn/operations/leader_block_commit.rs | 11 ++++++++--- stackslib/src/chainstate/nakamoto/coordinator/mod.rs | 4 ++-- .../src/chainstate/nakamoto/coordinator/tests.rs | 4 +--- stackslib/src/chainstate/nakamoto/miner.rs | 2 +- 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 5278a86775..98d52efd5e 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -595,6 +595,11 @@ impl RewardSetInfo { impl LeaderBlockCommitOp { /// Perform PoX checks on this block-commit, given the reward set info (which may be None if /// PoX is not active). + /// + /// If PoX was active (i.e., `reward_set_info` is `Some`), this method will return how the + /// PoX addresses were treated by the block commit. Prior to Epoch 3.0, these will be all + /// treated with rewards (attempting to punish pre-nakamoto will result in a op_error). + /// /// If `reward_set_info` is not None, then *only* the addresses in .recipients are used. The u16 /// indexes are *ignored* (and *must be* ignored, since this method gets called by /// `check_intneded_sortition()`, which does not have this information). @@ -778,12 +783,12 @@ impl LeaderBlockCommitOp { return Err(op_error::BlockCommitBadOutputs); } - let mut punished_outputs: Vec<_> = check_recipients + let mut treated_outputs: Vec<_> = check_recipients .into_iter() .map(|x| Treatment::Punish(x.0)) .collect(); - punished_outputs.extend(rewarded); - return Ok(punished_outputs); + treated_outputs.extend(rewarded); + return Ok(treated_outputs); } } diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 9ac63ce6e8..da88d2f608 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -134,7 +134,7 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { return Err(Error::PoXAnchorBlockRequired); }; - self.reward_reward_set_at_calculated_block( + self.read_reward_set_at_calculated_block( coinbase_height_of_calculation, chainstate, block_id, @@ -178,7 +178,7 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { Ok(coinbase_height_of_calculation) } - pub fn reward_reward_set_at_calculated_block( + pub fn read_reward_set_at_calculated_block( &self, coinbase_height_of_calculation: u64, chainstate: &mut StacksChainState, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 0627422d45..bd4d4f664c 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -156,7 +156,6 @@ fn advance_to_nakamoto_long( ) .unwrap(); - //let pox_4_stacking_height = peer.config.epochs.as_ref().unwrap().iter().find(|e| e.epoch_id == StacksEpochId::Epoch25).unwrap().start_height; let mut stacked_pox_4 = false; let mut signer_voted = false; let nakamoto_height = peer @@ -394,8 +393,7 @@ pub fn boot_nakamoto<'a>( } /// Make a peer and transition it into the Nakamoto epoch. -/// The node needs to be stacking and it needs to vote for an aggregate key; -/// otherwise, Nakamoto can't activate. +/// The node needs to be stacking otherwise, Nakamoto can't activate. pub fn boot_nakamoto_long_reward_sets<'a>( test_name: &str, mut initial_balances: Vec<(PrincipalData, u64)>, diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 6f402a6d11..75168266d2 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -273,7 +273,7 @@ impl NakamotoBlockBuilder { ); Error::NoSuchBlockError })?; - let active_reward_set = rs_provider.reward_reward_set_at_calculated_block( + let active_reward_set = rs_provider.read_reward_set_at_calculated_block( coinbase_height_of_calc, chainstate, &self.header.parent_block_id, From 809c89f0cd7d6bbe51e87c2970b6eca0436984f0 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 10 Jun 2024 17:15:27 -0400 Subject: [PATCH 0294/1400] test: update test --- stacks-signer/src/tests/chainstate.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index d12d941ecc..177cfa3b94 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -93,6 +93,7 @@ fn setup_test_environment( parent_block_id: StacksBlockId([0; 32]), tx_merkle_root: Sha512Trunc256Sum([0; 32]), state_index_root: TrieHash([0; 32]), + timestamp: 11, miner_signature: MessageSignature::empty(), signer_signature: vec![], signer_bitvec: BitVec::zeros(1).unwrap(), From 47557b471b88be472078eaf6a91a1559367d704f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 10 Jun 2024 19:49:10 -0400 Subject: [PATCH 0295/1400] fix: use `get-block-info?` costs for new block info functions --- stackslib/src/clarity_vm/tests/costs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index 37621ceb4e..29c57b2e92 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -168,8 +168,8 @@ pub fn get_simple_test(function: &NativeFunctions) -> &'static str { ToConsensusBuff => "(to-consensus-buff? u1)", FromConsensusBuff => "(from-consensus-buff? bool 0x03)", ReplaceAt => "(replace-at? list-bar u0 5)", - GetStacksBlockInfo => "(get-stacks-block-info? time u1)", - GetTenureInfo => "(get-tenure-info? time u1)", + GetStacksBlockInfo => "(get-block-info? time u1)", + GetTenureInfo => "(get-block-info? time u1)", } } From 418da10e1208cae735d5197cd04a83956e994834 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 10 Jun 2024 19:52:42 -0400 Subject: [PATCH 0296/1400] test: update `test_block_heights_at_block` --- stackslib/src/clarity_vm/tests/contracts.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index 5ef3bc4309..c7de36aa1c 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -1609,8 +1609,8 @@ fn test_block_heights_at_block() { assert_eq!(epoch, StacksEpochId::Epoch30); let contract =r#" - (define-private (test-tenure) (at-block (unwrap-panic (get-block-info? id-header-hash u0)) tenure-height)) - (define-private (test-stacks) (at-block (unwrap-panic (get-block-info? id-header-hash u1)) stacks-block-height)) + (define-private (test-tenure) (at-block (unwrap-panic (get-stacks-block-info? id-header-hash u0)) tenure-height)) + (define-private (test-stacks) (at-block (unwrap-panic (get-stacks-block-info? id-header-hash u1)) stacks-block-height)) "#; conn.as_transaction(|clarity_db| { From ff8cfe71499f2708f55413683cd4531e4c58e677 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 10 Jun 2024 22:39:53 -0400 Subject: [PATCH 0297/1400] chore: fix failing tests and expand test coverage --- stackslib/src/net/p2p.rs | 5 +- stackslib/src/net/relay.rs | 13 +- stackslib/src/net/tests/relay/nakamoto.rs | 149 +++++++++++++++++----- stackslib/src/net/unsolicited.rs | 105 +++++++++------ 4 files changed, 192 insertions(+), 80 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 34f82b2720..e92200832c 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -409,8 +409,9 @@ pub struct PeerNetwork { antientropy_start_reward_cycle: u64, pub antientropy_last_push_ts: u64, - // pending messages (BlocksAvailable, MicroblocksAvailable, BlocksData, Microblocks) that we - // can't process yet, but might be able to process on the next chain view update + /// Pending messages (BlocksAvailable, MicroblocksAvailable, BlocksData, Microblocks, + /// NakamotoBlocks) that we can't process yet, but might be able to process on a subsequent + /// chain view update. pub pending_messages: HashMap>, // fault injection -- force disconnects diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 750fba7f6b..6aaff981b2 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -645,7 +645,7 @@ impl Relayer { if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { info!( - "Signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), sn_rc, &e + "Signature verification failure for Nakamoto block {}/{} in reward cycle {}: {:?}", &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), sn_rc, &e ); return Err(net_error::InvalidMessage); } @@ -1574,14 +1574,14 @@ impl Relayer { Ok(accepted) => { if accepted { debug!( - "Accepted Nakamoto block {} from {}", - &block_id, neighbor_key + "Accepted Nakamoto block {} ({}) from {}", + &block_id, &nakamoto_block.header.consensus_hash, neighbor_key ); accepted_blocks.push(nakamoto_block.clone()); } else { debug!( - "Rejected Nakamoto block {} from {}", - &block_id, &neighbor_key, + "Rejected Nakamoto block {} ({}) from {}", + &block_id, &nakamoto_block.header.consensus_hash, &neighbor_key, ); } } @@ -1595,7 +1595,6 @@ impl Relayer { "Could not process pushed Nakamoto block {}: {:?}", &block_id, &e ); - break; } } } @@ -1943,7 +1942,7 @@ impl Relayer { ) { Ok(x) => x, Err(e) => { - warn!("Failed to process pushed Nakamoot blocks: {:?}", &e); + warn!("Failed to process pushed Nakamoto blocks: {:?}", &e); (vec![], vec![]) } }; diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index bbabf6fc0d..2d286e157f 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -434,6 +434,8 @@ fn test_no_buffer_ready_nakamoto_blocks() { let mut sortdb = follower.sortdb.take().unwrap(); let mut node = follower.stacks_node.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // no need to buffer this because we can process it right away let buffer = follower .network @@ -454,6 +456,87 @@ fn test_no_buffer_ready_nakamoto_blocks() { &node.chainstate, block )); + + // suppose these blocks were invalid -- they would not be bufferable. + // bad signature? not bufferable + let mut bad_block = block.clone(); + let block_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &bad_block.header.consensus_hash, + ) + .unwrap() + .unwrap(); + bad_block + .header + .signer_signature + .push(bad_block.header.signer_signature.last().cloned().unwrap()); + assert_eq!( + follower + .network + .find_nakamoto_block_reward_cycle(&sortdb, &bad_block), + ( + Some( + follower + .network + .burnchain + .pox_reward_cycle(block_sn.block_height) + .unwrap() + ), + true + ) + ); + assert!(!follower.network.is_nakamoto_block_bufferable( + &sortdb, + &node.chainstate, + &bad_block + )); + + // unrecognized consensus hash + let mut bad_block = block.clone(); + bad_block.header.consensus_hash = ConsensusHash([0xde; 20]); + assert_eq!( + follower + .network + .find_nakamoto_block_reward_cycle(&sortdb, &bad_block), + ( + Some( + follower + .network + .burnchain + .pox_reward_cycle( + follower.network.burnchain_tip.block_height + ) + .unwrap() + ), + false + ) + ); + + // stale consensus hash + let mut bad_block = block.clone(); + let ancestor_sn = SortitionDB::get_ancestor_snapshot( + &sortdb.index_conn(), + 1, + &tip.sortition_id, + ) + .unwrap() + .unwrap(); + bad_block.header.consensus_hash = ancestor_sn.consensus_hash; + assert_eq!( + follower + .network + .find_nakamoto_block_reward_cycle(&sortdb, &bad_block), + ( + Some( + follower + .network + .burnchain + .pox_reward_cycle(ancestor_sn.block_height) + .unwrap() + ), + true + ) + ); } // go process the blocks _as if_ they came from a network result @@ -631,8 +714,8 @@ fn test_buffer_nonready_nakamoto_blocks() { debug!("Follower got Nakamoto blocks {:?}", &blocks); all_blocks.push(blocks.clone()); - let mut sortdb = follower.sortdb.take().unwrap(); - let mut node = follower.stacks_node.take().unwrap(); + let sortdb = follower.sortdb.take().unwrap(); + let node = follower.stacks_node.take().unwrap(); // we will need to buffer this since the sortition for these blocks hasn't been // processed yet @@ -657,33 +740,8 @@ fn test_buffer_nonready_nakamoto_blocks() { )); } - // try to process the blocks _as if_ they came from a network result. - // It should fail. - let mut unsolicited = HashMap::new(); - let msg = StacksMessage::from_chain_view( - follower.network.bound_neighbor_key().peer_version, - follower.network.bound_neighbor_key().network_id, - follower.network.get_chain_view(), - StacksMessageType::NakamotoBlocks(NakamotoBlocksData { - blocks: blocks.clone(), - }), - ); - unsolicited.insert(peer_nk.clone(), vec![msg]); - - if let Some(mut network_result) = network_result.take() { - network_result.consume_unsolicited(unsolicited); - follower_relayer.process_new_epoch3_blocks( - follower.network.get_local_peer(), - &mut network_result, - &follower.network.burnchain, - &mut sortdb, - &mut node.chainstate, - true, - None, - ); - } - - // have the peer network buffer them up + // pass this and other blocks to the p2p network's unsolicited message handler, + // so they can be buffered up and processed. let mut unsolicited_msgs: HashMap> = HashMap::new(); for (event_id, convo) in follower.network.peers.iter() { for blks in all_blocks.iter() { @@ -692,7 +750,7 @@ fn test_buffer_nonready_nakamoto_blocks() { follower.network.bound_neighbor_key().network_id, follower.network.get_chain_view(), StacksMessageType::NakamotoBlocks(NakamotoBlocksData { - blocks: blocks.clone(), + blocks: blks.clone(), }), ); @@ -703,6 +761,7 @@ fn test_buffer_nonready_nakamoto_blocks() { } } } + follower.network.handle_unsolicited_messages( &sortdb, &node.chainstate, @@ -730,10 +789,6 @@ fn test_buffer_nonready_nakamoto_blocks() { assert_eq!(follower_consensus_hash, buffered_consensus_hash); } - let mut network_result = follower - .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) - .ok(); - // process the last buffered messages let mut sortdb = follower.sortdb.take().unwrap(); let mut node = follower.stacks_node.take().unwrap(); @@ -753,12 +808,33 @@ fn test_buffer_nonready_nakamoto_blocks() { follower.stacks_node = Some(node); follower.sortdb = Some(sortdb); + network_result = follower + .step_with_ibd_and_dns(true, Some(&mut follower_dns_client)) + .ok(); + seed_exited = true; exited_peer = Some(exited); follower_comms.send_exit(); } } + if let Some(mut network_result) = network_result.take() { + let mut sortdb = follower.sortdb.take().unwrap(); + let mut node = follower.stacks_node.take().unwrap(); + let num_processed = follower_relayer.process_new_epoch3_blocks( + follower.network.get_local_peer(), + &mut network_result, + &follower.network.burnchain, + &mut sortdb, + &mut node.chainstate, + true, + None, + ); + info!("Processed {} unsolicited Nakamoto blocks", num_processed); + follower.stacks_node = Some(node); + follower.sortdb = Some(sortdb); + } + follower.coord.handle_new_burnchain_block().unwrap(); follower.coord.handle_new_stacks_block().unwrap(); follower.coord.handle_new_nakamoto_stacks_block().unwrap(); @@ -785,7 +861,10 @@ fn test_buffer_nonready_nakamoto_blocks() { exited_peer.stacks_node = Some(stacks_node); exited_peer.sortdb = Some(sortdb); - assert_eq!(exited_peer_burn_tip, follower_burn_tip); + assert_eq!( + exited_peer_burn_tip.sortition_id, + follower_burn_tip.sortition_id + ); assert_eq!(exited_peer_stacks_tip, follower_stacks_tip); }); } diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index cf7ef67089..8bed8e5312 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -179,8 +179,8 @@ impl PeerNetwork { let mut blocks_data = 0; let mut microblocks_data = 0; let mut nakamoto_blocks_data = 0; - for msg in msgs.iter() { - match &msg.payload { + for stored_msg in msgs.iter() { + match &stored_msg.payload { StacksMessageType::BlocksAvailable(_) => { blocks_available += 1; if blocks_available >= self.connection_opts.max_buffered_blocks_available { @@ -669,27 +669,49 @@ impl PeerNetwork { } } - /// Determine if an unsolicited NakamotoBlockData message contains data we can potentially - /// buffer - pub(crate) fn is_nakamoto_block_bufferable( + /// Check the signature of a NakamotoBlock against its sortition's reward cycle. + /// The reward cycle must be recent. + pub(crate) fn check_nakamoto_block_signer_signature( &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, + reward_cycle: u64, nakamoto_block: &NakamotoBlock, ) -> bool { - if chainstate - .nakamoto_blocks_db() - .has_nakamoto_block(&nakamoto_block.block_id()) - .unwrap_or(false) - { - debug!( - "{:?}: Aleady have Nakamoto block {}", - &self.local_peer, - &nakamoto_block.block_id() + let Some(rc_data) = self.current_reward_sets.get(&reward_cycle) else { + info!( + "{:?}: Failed to validate Nakamoto block {}/{}: no reward set", + self.get_local_peer(), + &nakamoto_block.header.consensus_hash, + &nakamoto_block.header.block_hash() + ); + return false; + }; + let Some(reward_set) = rc_data.reward_set() else { + info!( + "{:?}: No reward set for reward cycle {}", + self.get_local_peer(), + reward_cycle + ); + return false; + }; + + if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { + info!( + "{:?}: signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), reward_cycle, &e ); return false; } + true + } + /// Find the reward cycle in which to validate the signature for this block. + /// This may not actually correspond to the sortition for this block's tenure -- for example, + /// it may be for a block whose sortition is about to be processed. As such, return both the + /// reward cycle, and whether or not it corresponds to the sortition. + pub(crate) fn find_nakamoto_block_reward_cycle( + &self, + sortdb: &SortitionDB, + nakamoto_block: &NakamotoBlock, + ) -> (Option, bool) { let mut can_process = true; let sn = match SortitionDB::get_block_snapshot_consensus( &sortdb.conn(), @@ -715,7 +737,7 @@ impl PeerNetwork { &nakamoto_block.header.consensus_hash, &e ); - return false; + return (None, false); } }; @@ -725,36 +747,45 @@ impl PeerNetwork { self.get_local_peer(), &nakamoto_block.header.consensus_hash ); - return false; + return (None, false); } - // block must be signed by reward set signers let sn_rc = self .burnchain .pox_reward_cycle(sn.block_height) .expect("FATAL: sortition has no reward cycle"); - let Some(rc_data) = self.current_reward_sets.get(&sn_rc) else { - info!( - "{:?}: Failed to validate Nakamoto block {}/{}: no reward set", - self.get_local_peer(), - &nakamoto_block.header.consensus_hash, - &nakamoto_block.header.block_hash() + + return (Some(sn_rc), can_process); + } + + /// Determine if an unsolicited NakamotoBlockData message contains data we can potentially + /// buffer. Returns whether or not the block can be buffered. + pub(crate) fn is_nakamoto_block_bufferable( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + nakamoto_block: &NakamotoBlock, + ) -> bool { + if chainstate + .nakamoto_blocks_db() + .has_nakamoto_block(&nakamoto_block.block_id()) + .unwrap_or(false) + { + debug!( + "{:?}: Aleady have Nakamoto block {}", + &self.local_peer, + &nakamoto_block.block_id() ); return false; - }; - let Some(reward_set) = rc_data.reward_set() else { - info!( - "{:?}: No reward set for reward cycle {}", - self.get_local_peer(), - sn_rc - ); + } + + let (sn_rc_opt, can_process) = + self.find_nakamoto_block_reward_cycle(sortdb, nakamoto_block); + let Some(sn_rc) = sn_rc_opt else { return false; }; - if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { - info!( - "{:?}: signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), sn_rc, &e - ); + if !self.check_nakamoto_block_signer_signature(sn_rc, nakamoto_block) { return false; } @@ -942,6 +973,8 @@ impl PeerNetwork { /// /// If `buffer` is false, then if the message handler deems the message valid, it will be /// forwraded to the relayer. + /// + /// Returns the messages to be forward to the relayer, keyed by sender. pub fn handle_unsolicited_messages( &mut self, sortdb: &SortitionDB, From 3df384ba8ced53c585e970b4cf885d3ebb85d0ad Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 11 Jun 2024 08:33:50 -0400 Subject: [PATCH 0298/1400] fix: keep `height_value >= current_tenure` check Change back to `height_value > current_tenure` after the current tenure support is fixed. --- clarity/src/vm/functions/database.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index 46e7585fbc..cea154aa12 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -1071,7 +1071,7 @@ pub fn special_get_tenure_info( }; let current_tenure = env.global_context.database.get_tenure_height()?; - if height_value > current_tenure { + if height_value >= current_tenure { return Ok(Value::none()); } From e777f4f159ae5d19beb4310051eb3dc2b85fbb10 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 11 Jun 2024 16:14:45 +0300 Subject: [PATCH 0299/1400] add is_fully_synced feature to v2/info --- stacks-signer/src/client/mod.rs | 1 + stackslib/src/net/api/getinfo.rs | 6 ++++++ stackslib/src/net/api/tests/mod.rs | 3 +++ stackslib/src/net/mod.rs | 3 +++ stackslib/src/net/p2p.rs | 2 +- 5 files changed, 14 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 26ce5f05f5..0bab72b287 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -382,6 +382,7 @@ pub(crate) mod tests { unanchored_tip: None, unanchored_seq: Some(0), exit_at_block_height: None, + is_fully_synced: false, genesis_chainstate_hash: Sha256Sum::zero(), node_public_key: Some(public_key_buf), node_public_key_hash: Some(public_key_hash), diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index 38c802f966..344f9f8286 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -82,6 +82,7 @@ pub struct RPCPeerInfoData { pub unanchored_tip: Option, pub unanchored_seq: Option, pub exit_at_block_height: Option, + pub is_fully_synced: bool, #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub node_public_key: Option, @@ -105,6 +106,7 @@ impl RPCPeerInfoData { chainstate: &StacksChainState, exit_at_block_height: Option, genesis_chainstate_hash: &Sha256Sum, + ibd: bool, ) -> RPCPeerInfoData { let server_version = version_string( "stacks-node", @@ -130,6 +132,7 @@ impl RPCPeerInfoData { let public_key_buf = StacksPublicKeyBuffer::from_public_key(&public_key); let public_key_hash = Hash160::from_node_public_key(&public_key); let stackerdb_contract_ids = network.get_local_peer().stacker_dbs.clone(); + let is_fully_synced = !ibd; RPCPeerInfoData { peer_version: network.burnchain.peer_version, @@ -146,6 +149,7 @@ impl RPCPeerInfoData { unanchored_tip: unconfirmed_tip, unanchored_seq: unconfirmed_seq, exit_at_block_height: exit_at_block_height, + is_fully_synced, genesis_chainstate_hash: genesis_chainstate_hash.clone(), node_public_key: Some(public_key_buf), node_public_key_hash: Some(public_key_hash), @@ -212,6 +216,7 @@ impl RPCRequestHandler for RPCPeerInfoRequestHandler { _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let ibd = node.ibd; let rpc_peer_info = node.with_node_state(|network, _sortdb, chainstate, _mempool, rpc_args| { RPCPeerInfoData::from_network( @@ -219,6 +224,7 @@ impl RPCRequestHandler for RPCPeerInfoRequestHandler { chainstate, rpc_args.exit_at_block_height.clone(), &rpc_args.genesis_chainstate_hash, + ibd, ) }); let mut preamble = HttpResponsePreamble::ok_json(&preamble); diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 1ef9a4fe66..acf4d929de 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -954,6 +954,7 @@ impl<'a> TestRPC<'a> { &mut peer_1_stacks_node.chainstate, &mut peer_1_mempool, &rpc_args, + false, ); convo_1.chat(&mut node_state).unwrap(); } @@ -996,6 +997,7 @@ impl<'a> TestRPC<'a> { &mut peer_2_stacks_node.chainstate, &mut peer_2_mempool, &rpc_args, + false, ); convo_2.chat(&mut node_state).unwrap(); } @@ -1042,6 +1044,7 @@ impl<'a> TestRPC<'a> { &mut peer_1_stacks_node.chainstate, &mut peer_1_mempool, &rpc_args, + false, ); convo_1.chat(&mut node_state).unwrap(); } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index fd424f441e..c22ccce1fa 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -647,6 +647,7 @@ pub struct StacksNodeState<'a> { inner_mempool: Option<&'a mut MemPoolDB>, inner_rpc_args: Option<&'a RPCHandlerArgs<'a>>, relay_message: Option, + ibd: bool, } impl<'a> StacksNodeState<'a> { @@ -656,6 +657,7 @@ impl<'a> StacksNodeState<'a> { inner_chainstate: &'a mut StacksChainState, inner_mempool: &'a mut MemPoolDB, inner_rpc_args: &'a RPCHandlerArgs<'a>, + ibd: bool, ) -> StacksNodeState<'a> { StacksNodeState { inner_network: Some(inner_network), @@ -664,6 +666,7 @@ impl<'a> StacksNodeState<'a> { inner_mempool: Some(inner_mempool), inner_rpc_args: Some(inner_rpc_args), relay_message: None, + ibd, } } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 4e999e1abf..407f775847 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -6188,7 +6188,7 @@ impl PeerNetwork { PeerNetwork::with_network_state(self, |ref mut network, ref mut network_state| { let http_stacks_msgs = PeerNetwork::with_http(network, |ref mut net, ref mut http| { let mut node_state = - StacksNodeState::new(net, sortdb, chainstate, mempool, handler_args); + StacksNodeState::new(net, sortdb, chainstate, mempool, handler_args, ibd); http.run(network_state, &mut node_state, http_poll_state) }); network_result.consume_http_uploads(http_stacks_msgs); From 442c6fec44df3d73e065eb420813b15ea062ea64 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 11 Jun 2024 17:22:07 -0400 Subject: [PATCH 0300/1400] chore: resource accounting for pushed nakamoto blocks --- stackslib/src/net/chat.rs | 124 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 3037ac60d1..b129fab50a 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -128,6 +128,8 @@ pub struct NeighborStats { pub transaction_push_rx_counts: VecDeque<(u64, u64)>, /// (timestamp, num bytes) pub stackerdb_push_rx_counts: VecDeque<(u64, u64)>, + /// (timestamp, num bytes) + pub nakamoto_block_push_rx_counts: VecDeque<(u64, u64)>, pub relayed_messages: HashMap, } @@ -152,6 +154,7 @@ impl NeighborStats { microblocks_push_rx_counts: VecDeque::new(), transaction_push_rx_counts: VecDeque::new(), stackerdb_push_rx_counts: VecDeque::new(), + nakamoto_block_push_rx_counts: VecDeque::new(), relayed_messages: HashMap::new(), } } @@ -214,6 +217,17 @@ impl NeighborStats { } } + /// Record that we recently received a Nakamoto blcok push of the given size. + /// Keeps track of the last `NUM_BANDWIDTH_POINTS` such events, so we can estimate the current + /// bandwidth consumed by Nakamoto block pushes + pub fn add_nakamoto_block_push(&mut self, message_size: u64) -> () { + self.nakamoto_block_push_rx_counts + .push_back((get_epoch_time_secs(), message_size)); + while self.nakamoto_block_push_rx_counts.len() > NUM_BANDWIDTH_POINTS { + self.nakamoto_block_push_rx_counts.pop_front(); + } + } + pub fn add_relayer(&mut self, addr: &NeighborAddress, num_bytes: u64) -> () { if let Some(stats) = self.relayed_messages.get_mut(addr) { stats.num_messages += 1; @@ -298,6 +312,14 @@ impl NeighborStats { NeighborStats::get_bandwidth(&self.stackerdb_push_rx_counts, BANDWIDTH_POINT_LIFETIME) } + /// Get a peer's total nakamoto block bandwidth usage + pub fn get_nakamoto_block_push_bandwidth(&self) -> f64 { + NeighborStats::get_bandwidth( + &self.nakamoto_block_push_rx_counts, + BANDWIDTH_POINT_LIFETIME, + ) + } + /// Determine how many of a particular message this peer has received pub fn get_message_recv_count(&self, msg_id: StacksMessageID) -> u64 { *(self.msg_rx_counts.get(&msg_id).unwrap_or(&0)) @@ -2217,6 +2239,45 @@ impl ConversationP2P { Ok(None) } + /// Validate a pushed Nakamoto block list. + /// Update bandwidth accounting, but forward the blocks along if we can accept them. + /// Possibly return a reply handle for a NACK if we throttle the remote sender + fn validate_nakamoto_block_push( + &mut self, + network: &PeerNetwork, + preamble: &Preamble, + relayers: Vec, + ) -> Result, net_error> { + assert!(preamble.payload_len > 1); // don't count 1-byte type prefix + + let local_peer = network.get_local_peer(); + let chain_view = network.get_chain_view(); + + if !self.process_relayers(local_peer, preamble, &relayers) { + warn!( + "Drop pushed Nakamoto blocks -- invalid relayers {:?}", + &relayers + ); + self.stats.msgs_err += 1; + return Err(net_error::InvalidMessage); + } + + self.stats + .add_nakamoto_block_push((preamble.payload_len as u64) - 1); + + if self.connection.options.max_nakamoto_block_push_bandwidth > 0 + && self.stats.get_nakamoto_block_push_bandwidth() + > (self.connection.options.max_nakamoto_block_push_bandwidth as f64) + { + debug!("Neighbor {:?} exceeded max Nakamoto block push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_nakamoto_block_push_bandwidth, self.stats.get_nakamoto_block_push_bandwidth()); + return self + .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) + .and_then(|handle| Ok(Some(handle))); + } + + Ok(None) + } + /// Handle an inbound authenticated p2p data-plane message. /// Return the message if not handled fn handle_data_message( @@ -2305,6 +2366,21 @@ impl ConversationP2P { } } } + StacksMessageType::NakamotoBlocks(_) => { + // not handled here, but do some accounting -- we can't receive too many + // Nakamoto blocks per second + match self.validate_nakamoto_block_push( + network, + &msg.preamble, + msg.relayers.clone(), + )? { + Some(handle) => Ok(handle), + None => { + // will forward upstream + return Ok(Some(msg)); + } + } + } _ => { // all else will forward upstream return Ok(Some(msg)); @@ -6603,6 +6679,54 @@ mod test { assert_eq!(bw_stats.get_stackerdb_push_bandwidth(), 110.0); } + #[test] + fn test_neighbor_stats_nakamoto_block_push_bandwidth() { + let mut stats = NeighborStats::new(false); + + assert_eq!(stats.get_nakamoto_block_push_bandwidth(), 0.0); + + stats.add_nakamoto_block_push(100); + assert_eq!(stats.get_nakamoto_block_push_bandwidth(), 0.0); + + // this should all happen in one second + let bw_stats = loop { + let mut bw_stats = stats.clone(); + let start = get_epoch_time_secs(); + + for _ in 0..(NUM_BANDWIDTH_POINTS - 1) { + bw_stats.add_nakamoto_block_push(100); + } + + let end = get_epoch_time_secs(); + if end == start { + break bw_stats; + } + }; + + assert_eq!( + bw_stats.get_nakamoto_block_push_bandwidth(), + (NUM_BANDWIDTH_POINTS as f64) * 100.0 + ); + + // space some out; make sure it takes 11 seconds + let bw_stats = loop { + let mut bw_stats = NeighborStats::new(false); + let start = get_epoch_time_secs(); + for _ in 0..11 { + bw_stats.add_nakamoto_block_push(100); + sleep_ms(1001); + } + + let end = get_epoch_time_secs(); + if end == start + 11 { + break bw_stats; + } + }; + + // 100 bytes/sec + assert_eq!(bw_stats.get_nakamoto_block_push_bandwidth(), 110.0); + } + #[test] fn test_sign_relay_forward_message() { let conn_opts = ConnectionOptions::default(); From c80b2c968075c05ad8c03edee97f30314c704323 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 11 Jun 2024 17:22:23 -0400 Subject: [PATCH 0301/1400] chore: option for maximum nakamoto block push bandwidth --- stackslib/src/net/connection.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 06e3c54f85..6fca681a77 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -368,6 +368,7 @@ pub struct ConnectionOptions { pub max_microblocks_push_bandwidth: u64, pub max_transaction_push_bandwidth: u64, pub max_stackerdb_push_bandwidth: u64, + pub max_nakamoto_block_push_bandwidth: u64, pub max_sockets: usize, pub public_ip_address: Option<(PeerAddress, u16)>, pub public_ip_request_timeout: u64, @@ -498,6 +499,7 @@ impl std::default::Default for ConnectionOptions { max_microblocks_push_bandwidth: 0, // infinite upload bandwidth allowed max_transaction_push_bandwidth: 0, // infinite upload bandwidth allowed max_stackerdb_push_bandwidth: 0, // infinite upload bandwidth allowed + max_nakamoto_block_push_bandwidth: 0, // infinite upload bandwidth allowed max_sockets: 800, // maximum number of client sockets we'll ever register public_ip_address: None, // resolve it at runtime by default public_ip_request_timeout: 60, // how often we can attempt to look up our public IP address From 868be4c544337b15879e70089525e7a812d52273 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 11 Jun 2024 17:22:39 -0400 Subject: [PATCH 0302/1400] chore: take &NakamotoBlock instead of NakamotoBlock --- .../src/chainstate/nakamoto/coordinator/tests.rs | 2 +- stackslib/src/chainstate/nakamoto/mod.rs | 11 +++++------ stackslib/src/chainstate/nakamoto/tests/mod.rs | 6 +++--- stackslib/src/chainstate/nakamoto/tests/node.rs | 4 ++-- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 015409a74c..b1a79810ee 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -404,7 +404,7 @@ fn replay_reward_cycle( &sortdb, &mut sort_handle, &mut node.chainstate, - block.clone(), + &block, None, ) .unwrap_or(false); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 50015dace4..4310c01aa4 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1835,7 +1835,7 @@ impl NakamotoChainState { /// Insert a Nakamoto block into the staging blocks DB pub(crate) fn store_block( staging_db_tx: &NakamotoStagingBlocksTx, - block: NakamotoBlock, + block: &NakamotoBlock, burn_attachable: bool, ) -> Result<(), ChainstateError> { let block_id = block.block_id(); @@ -1894,7 +1894,7 @@ impl NakamotoChainState { /// Returns true if we stored the block; false if not. pub fn accept_block( config: &ChainstateConfig, - block: NakamotoBlock, + block: &NakamotoBlock, db_handle: &mut SortitionHandleConn, staging_db_tx: &NakamotoStagingBlocksTx, headers_conn: &Connection, @@ -1927,14 +1927,14 @@ impl NakamotoChainState { // it's okay if this fails because we might not have the parent block yet. It will be // checked on `::append_block()` - let expected_burn_opt = Self::get_expected_burns(db_handle, headers_conn, &block)?; + let expected_burn_opt = Self::get_expected_burns(db_handle, headers_conn, block)?; // this block must be consistent with its miner's leader-key and block-commit, and must // contain only transactions that are valid in this epoch. if let Err(e) = Self::validate_nakamoto_block_burnchain( db_handle, expected_burn_opt, - &block, + block, config.mainnet, config.chain_id, ) { @@ -1958,9 +1958,8 @@ impl NakamotoChainState { // same sortition history as `db_handle` (and thus it must be burn_attachable) let burn_attachable = true; - let _block_id = block.block_id(); Self::store_block(staging_db_tx, block, burn_attachable)?; - test_debug!("Stored Nakamoto block {}", &_block_id); + test_debug!("Stored Nakamoto block {}", &block.block_id()); Ok(true) } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 0a2441d388..3fd8b7744e 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -1069,7 +1069,7 @@ pub fn test_load_store_update_nakamoto_blocks() { 300, ) .unwrap(); - NakamotoChainState::store_block(&staging_tx, nakamoto_block.clone(), false).unwrap(); + NakamotoChainState::store_block(&staging_tx, &nakamoto_block, false).unwrap(); // tenure has one block assert_eq!( @@ -1102,7 +1102,7 @@ pub fn test_load_store_update_nakamoto_blocks() { ) .unwrap(); - NakamotoChainState::store_block(&staging_tx, nakamoto_block_2.clone(), false).unwrap(); + NakamotoChainState::store_block(&staging_tx, &nakamoto_block_2, false).unwrap(); // tenure has two blocks assert_eq!( @@ -1123,7 +1123,7 @@ pub fn test_load_store_update_nakamoto_blocks() { ); // store, but do not process, a block - NakamotoChainState::store_block(&staging_tx, nakamoto_block_3.clone(), false).unwrap(); + NakamotoChainState::store_block(&staging_tx, &nakamoto_block_3, false).unwrap(); staging_tx.commit().unwrap(); tx.commit().unwrap(); diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 5736258b11..215eed3cbf 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -632,7 +632,7 @@ impl TestStacksNode { sortdb, &mut sort_handle, chainstate, - nakamoto_block.clone(), + &nakamoto_block, None, ) { Ok(accepted) => accepted, @@ -1159,7 +1159,7 @@ impl<'a> TestPeer<'a> { &sortdb, &mut sort_handle, &mut node.chainstate, - block, + &block, None, ) .unwrap(); From afdbaadb210d88279a3206b2eacca528c9d7317c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 11 Jun 2024 17:23:02 -0400 Subject: [PATCH 0303/1400] chore: address PR feedback --- stackslib/src/net/p2p.rs | 12 +- stackslib/src/net/relay.rs | 187 ++++++++++++---------- stackslib/src/net/tests/mod.rs | 2 +- stackslib/src/net/tests/relay/epoch2x.rs | 3 +- stackslib/src/net/tests/relay/mod.rs | 3 +- stackslib/src/net/tests/relay/nakamoto.rs | 150 ++++++++++++++++- stackslib/src/net/unsolicited.rs | 131 +++++++++------ 7 files changed, 339 insertions(+), 149 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index e92200832c..d5988a27ac 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -209,6 +209,7 @@ pub enum MempoolSyncState { } pub type PeerMap = HashMap; +pub type PendingMessages = HashMap>; pub struct ConnectingPeer { socket: mio_net::TcpStream, @@ -412,7 +413,7 @@ pub struct PeerNetwork { /// Pending messages (BlocksAvailable, MicroblocksAvailable, BlocksData, Microblocks, /// NakamotoBlocks) that we can't process yet, but might be able to process on a subsequent /// chain view update. - pub pending_messages: HashMap>, + pub pending_messages: PendingMessages, // fault injection -- force disconnects fault_last_disconnect: u64, @@ -574,7 +575,7 @@ impl PeerNetwork { antientropy_last_push_ts: 0, antientropy_start_reward_cycle: 0, - pending_messages: HashMap::new(), + pending_messages: PendingMessages::new(), fault_last_disconnect: 0, @@ -1408,11 +1409,10 @@ impl PeerNetwork { // send to each neighbor that needs one let mut all_neighbors = HashSet::new(); for nakamoto_block in data.blocks.iter() { - let mut neighbors = + let neighbors = self.sample_broadcast_peers(&relay_hints, nakamoto_block)?; - for nk in neighbors.drain(..) { - all_neighbors.insert(nk); - } + + all_neighbors.extend(neighbors); } Ok(all_neighbors.into_iter().collect()) } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 6aaff981b2..5183d8c794 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -479,7 +479,7 @@ impl RelayerStats { sampled += 1; // sample without replacement - norm -= rankings_vec[i].1; + norm = norm.saturating_sub(rankings_vec[i].1); rankings_vec[i].1 = 0; break; } @@ -492,6 +492,12 @@ impl RelayerStats { } } +/// Processed result of pushed Nakamoto blocks +pub struct AcceptedNakamotoBlocks { + pub relayers: Vec, + pub blocks: Vec, +} + impl Relayer { pub fn new( handle: NetworkHandle, @@ -644,8 +650,12 @@ impl Relayer { }; if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { - info!( - "Signature verification failure for Nakamoto block {}/{} in reward cycle {}: {:?}", &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), sn_rc, &e + warn!( + "Signature verification failure for Nakamoto block"; + "consensus_hash" => %nakamoto_block.header.consensus_hash, + "block_hash" => %nakamoto_block.header.block_hash(), + "reward_cycle" => sn_rc, + "error" => %e.to_string() ); return Err(net_error::InvalidMessage); } @@ -782,7 +792,7 @@ impl Relayer { sortdb: &SortitionDB, sort_handle: &mut SortitionHandleConn, chainstate: &mut StacksChainState, - block: NakamotoBlock, + block: &NakamotoBlock, coord_comms: Option<&CoordinatorChannels>, ) -> Result { debug!( @@ -796,7 +806,7 @@ impl Relayer { .nakamoto_blocks_db() .has_nakamoto_block(&block.header.block_id()) .map_err(|e| { - debug!( + warn!( "Failed to determine if we have Nakamoto block {}/{}: {:?}", &block.header.consensus_hash, &block.header.block_hash(), @@ -947,7 +957,7 @@ impl Relayer { sortdb, &mut sort_handle, chainstate, - block.clone(), + &block, coord_comms, ) { warn!("Failed to process Nakamoto block {}: {:?}", &block_id, &e); @@ -1528,16 +1538,17 @@ impl Relayer { sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, coord_comms: Option<&CoordinatorChannels>, - ) -> Result<(Vec<(Vec, Vec)>, Vec), net_error> { - let mut new_blocks_and_relayers = vec![]; + ) -> Result<(Vec, Vec), net_error> { + let mut pushed_blocks = vec![]; let mut bad_neighbors = vec![]; let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; // process Nakamoto blocks pushed to us. // If a neighbor sends us an invalid Nakamoto block, then ban them. - for (neighbor_key, relayers_and_block_data) in network_result.pushed_nakamoto_blocks.iter() + for (neighbor_key, relayers_and_block_data) in + network_result.pushed_nakamoto_blocks.iter_mut() { - for (relayers, nakamoto_blocks_data) in relayers_and_block_data.iter() { + for (relayers, nakamoto_blocks_data) in relayers_and_block_data.iter_mut() { let mut accepted_blocks = vec![]; if let Err(e) = Relayer::validate_nakamoto_blocks_push( burnchain, @@ -1556,7 +1567,7 @@ impl Relayer { break; } - for nakamoto_block in nakamoto_blocks_data.blocks.iter() { + for nakamoto_block in nakamoto_blocks_data.blocks.drain(..) { let block_id = nakamoto_block.block_id(); debug!( "Received pushed Nakamoto block {} from {}", @@ -1568,7 +1579,7 @@ impl Relayer { sortdb, &mut sort_handle, chainstate, - nakamoto_block.clone(), + &nakamoto_block, coord_comms, ) { Ok(accepted) => { @@ -1577,9 +1588,9 @@ impl Relayer { "Accepted Nakamoto block {} ({}) from {}", &block_id, &nakamoto_block.header.consensus_hash, neighbor_key ); - accepted_blocks.push(nakamoto_block.clone()); + accepted_blocks.push(nakamoto_block); } else { - debug!( + warn!( "Rejected Nakamoto block {} ({}) from {}", &block_id, &nakamoto_block.header.consensus_hash, &neighbor_key, ); @@ -1600,12 +1611,15 @@ impl Relayer { } if accepted_blocks.len() > 0 { - new_blocks_and_relayers.push((relayers.clone(), accepted_blocks)); + pushed_blocks.push(AcceptedNakamotoBlocks { + relayers: relayers.clone(), + blocks: accepted_blocks, + }); } } } - Ok((new_blocks_and_relayers, bad_neighbors)) + Ok((pushed_blocks, bad_neighbors)) } /// Verify that a relayed transaction is not problematic. This is a static check -- we only @@ -1908,7 +1922,7 @@ impl Relayer { sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, coord_comms: Option<&CoordinatorChannels>, - ) -> Result<(Vec<(Vec, Vec)>, Vec), net_error> { + ) -> Result<(Vec, Vec), net_error> { // process downloaded Nakamoto blocks. // We treat them as singleton blocks fetched via zero relayers let nakamoto_blocks = @@ -1921,10 +1935,10 @@ impl Relayer { nakamoto_blocks.into_values(), coord_comms, ) { - Ok(accepted) => accepted - .into_iter() - .map(|block| (vec![], vec![block])) - .collect(), + Ok(accepted) => vec![AcceptedNakamotoBlocks { + relayers: vec![], + blocks: accepted, + }], Err(e) => { warn!("Failed to process downloaded Nakamoto blocks: {:?}", &e); vec![] @@ -2475,13 +2489,13 @@ impl Relayer { _local_peer: &LocalPeer, sortdb: &SortitionDB, chainstate: &StacksChainState, - nakamoto_blocks_and_relayers: Vec<(Vec, Vec)>, + accepted_blocks: Vec, force_send: bool, ) { debug!( "{:?}: relay {} sets of Nakamoto blocks", _local_peer, - nakamoto_blocks_and_relayers.len() + accepted_blocks.len() ); // the relay strategy is to only send blocks that are within @@ -2502,7 +2516,9 @@ impl Relayer { .map(|sn| sn.consensus_hash) .collect(); - for (relayers, blocks) in nakamoto_blocks_and_relayers.into_iter() { + for blocks_and_relayers in accepted_blocks.into_iter() { + let AcceptedNakamotoBlocks { relayers, blocks } = blocks_and_relayers; + let relay_blocks: Vec<_> = blocks .into_iter() .filter(|blk| { @@ -2561,10 +2577,10 @@ impl Relayer { /// Process epoch3 data /// Relay new nakamoto blocks if not in ibd - /// Returns number of new nakamoto blocks + /// Returns number of new nakamoto blocks, up to u64::MAX pub fn process_new_epoch3_blocks( &mut self, - _local_peer: &LocalPeer, + local_peer: &LocalPeer, network_result: &mut NetworkResult, burnchain: &Burnchain, sortdb: &mut SortitionDB, @@ -2572,43 +2588,38 @@ impl Relayer { ibd: bool, coord_comms: Option<&CoordinatorChannels>, ) -> u64 { - let mut num_new_nakamoto_blocks = 0; - match Self::process_new_nakamoto_blocks( + let (accepted_blocks, bad_neighbors) = match Self::process_new_nakamoto_blocks( network_result, burnchain, sortdb, chainstate, coord_comms, ) { - Ok((nakamoto_blocks_and_relayers, bad_neighbors)) => { - num_new_nakamoto_blocks = nakamoto_blocks_and_relayers - .iter() - .fold(0, |acc, (_relayers, blocks)| acc + blocks.len()) - as u64; - - // punish bad peers - if bad_neighbors.len() > 0 { - debug!("{:?}: Ban {} peers", &_local_peer, bad_neighbors.len()); - if let Err(e) = self.p2p.ban_peers(bad_neighbors) { - warn!("Failed to ban bad-block peers: {:?}", &e); - } - } - - // relay if not IBD - if !ibd && nakamoto_blocks_and_relayers.len() > 0 { - self.relay_epoch3_blocks( - _local_peer, - sortdb, - chainstate, - nakamoto_blocks_and_relayers, - false, - ); - } - } + Ok(x) => x, Err(e) => { warn!("Failed to process new Nakamoto blocks: {:?}", &e); + return 0; + } + }; + + let num_new_nakamoto_blocks = accepted_blocks + .iter() + .fold(0, |acc, accepted| acc + accepted.blocks.len()) + .try_into() + .unwrap_or(u64::MAX); // don't panic if we somehow receive more than u64::MAX blocks + + // punish bad peers + if bad_neighbors.len() > 0 { + debug!("{:?}: Ban {} peers", &local_peer, bad_neighbors.len()); + if let Err(e) = self.p2p.ban_peers(bad_neighbors) { + warn!("Failed to ban bad-block peers: {:?}", &e); } } + + // relay if not IBD + if !ibd && accepted_blocks.len() > 0 { + self.relay_epoch3_blocks(local_peer, sortdb, chainstate, accepted_blocks, false); + } num_new_nakamoto_blocks } @@ -2624,40 +2635,42 @@ impl Relayer { ibd: bool, event_observer: Option<&dyn RelayEventDispatcher>, ) -> Vec { - // process new transactions + if ibd { + // don't do anything + return vec![]; + } + + // only care about transaction forwarding if not IBD. + // store all transactions, and forward the novel ones to neighbors let mut mempool_txs_added = vec![]; - if !ibd { - // only care about transaction forwarding if not IBD. - // store all transactions, and forward the novel ones to neighbors - test_debug!( - "{:?}: Process {} transaction(s)", + test_debug!( + "{:?}: Process {} transaction(s)", + &_local_peer, + network_result.pushed_transactions.len() + ); + let new_txs = Relayer::process_transactions( + network_result, + sortdb, + chainstate, + mempool, + event_observer.map(|obs| obs.as_mempool_event_dispatcher()), + ) + .unwrap_or(vec![]); + + if new_txs.len() > 0 { + debug!( + "{:?}: Send {} transactions to neighbors", &_local_peer, - network_result.pushed_transactions.len() + new_txs.len() ); - let new_txs = Relayer::process_transactions( - network_result, - sortdb, - chainstate, - mempool, - event_observer.map(|obs| obs.as_mempool_event_dispatcher()), - ) - .unwrap_or(vec![]); - - if new_txs.len() > 0 { - debug!( - "{:?}: Send {} transactions to neighbors", - &_local_peer, - new_txs.len() - ); - } + } - for (relayers, tx) in new_txs.into_iter() { - debug!("{:?}: Broadcast tx {}", &_local_peer, &tx.txid()); - mempool_txs_added.push(tx.clone()); - let msg = StacksMessageType::Transaction(tx); - if let Err(e) = self.p2p.broadcast_message(relayers, msg) { - warn!("Failed to broadcast transaction: {:?}", &e); - } + for (relayers, tx) in new_txs.into_iter() { + debug!("{:?}: Broadcast tx {}", &_local_peer, &tx.txid()); + mempool_txs_added.push(tx.clone()); + let msg = StacksMessageType::Transaction(tx); + if let Err(e) = self.p2p.broadcast_message(relayers, msg) { + warn!("Failed to broadcast transaction: {:?}", &e); } } mempool_txs_added @@ -2675,7 +2688,7 @@ impl Relayer { /// turned into peer bans. pub fn process_network_result( &mut self, - _local_peer: &LocalPeer, + local_peer: &LocalPeer, network_result: &mut NetworkResult, burnchain: &Burnchain, sortdb: &mut SortitionDB, @@ -2688,7 +2701,7 @@ impl Relayer { // process epoch2 data let (num_new_blocks, num_new_confirmed_microblocks, num_new_unconfirmed_microblocks) = self .process_new_epoch2_blocks( - _local_peer, + local_peer, network_result, sortdb, chainstate, @@ -2698,7 +2711,7 @@ impl Relayer { // process epoch3 data let num_new_nakamoto_blocks = self.process_new_epoch3_blocks( - _local_peer, + local_peer, network_result, burnchain, sortdb, @@ -2709,7 +2722,7 @@ impl Relayer { // process transactions let mempool_txs_added = self.process_new_transactions( - _local_peer, + local_peer, network_result, sortdb, chainstate, diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 1cb28e76fb..bd2c674bf1 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -235,7 +235,7 @@ impl NakamotoBootPlan { &sortdb, &mut sort_handle, &mut node.chainstate, - block.clone(), + &block, None, ) .unwrap(); diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 1a383f7f87..fe69b6895a 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by diff --git a/stackslib/src/net/tests/relay/mod.rs b/stackslib/src/net/tests/relay/mod.rs index 04e8e0fd4f..c408e9ee60 100644 --- a/stackslib/src/net/tests/relay/mod.rs +++ b/stackslib/src/net/tests/relay/mod.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 2d286e157f..dc0c144e5c 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// Copyright (C) 2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -29,7 +28,7 @@ use rand::Rng; use stacks_common::address::AddressHashMode; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, StacksWorkScore, TrieHash}; use stacks_common::types::Address; -use stacks_common::util::hash::MerkleTree; +use stacks_common::util::hash::{MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::VRFProof; @@ -39,12 +38,15 @@ use crate::burnchains::tests::TestMiner; use crate::chainstate::burn::operations::BlockstackOperationType; use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; use crate::chainstate::nakamoto::tests::get_account; +use crate::chainstate::nakamoto::NakamotoBlockHeader; use crate::chainstate::stacks::boot::test::{ key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, with_sortdb, }; use crate::chainstate::stacks::db::blocks::{MINIMUM_TX_FEE, MINIMUM_TX_FEE_RATE_PER_BYTE}; use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; -use crate::chainstate::stacks::test::codec_all_transactions; +use crate::chainstate::stacks::test::{ + codec_all_transactions, make_codec_test_block, make_codec_test_microblock, +}; use crate::chainstate::stacks::tests::{ make_coinbase, make_coinbase_with_nonce, make_smart_contract_with_version, make_user_stacks_transfer, TestStacksNode, @@ -60,7 +62,7 @@ use crate::net::download::*; use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; use crate::net::httpcore::StacksHttpMessage; use crate::net::inv::inv2x::*; -use crate::net::relay::{ProcessedNetReceipts, Relayer}; +use crate::net::relay::{AcceptedNakamotoBlocks, ProcessedNetReceipts, Relayer}; use crate::net::test::*; use crate::net::tests::download::epoch2x::run_get_blocks_and_microblocks; use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs; @@ -301,7 +303,10 @@ impl SeedNode { &local_peer, &sortdb, &stacks_node.chainstate, - vec![(vec![], blocks.clone())], + vec![AcceptedNakamotoBlocks { + relayers: vec![], + blocks: blocks.clone(), + }], true, ); @@ -374,6 +379,139 @@ impl SeedNode { } } +/// Test buffering limits +#[test] +fn test_buffer_data_message() { + let observer = TestEventObserver::new(); + let bitvecs = vec![vec![ + true, true, true, true, true, true, true, true, true, true, + ]]; + + let (mut peer, _followers) = + make_nakamoto_peers_from_invs(function_name!(), &observer, 10, 5, bitvecs.clone(), 1); + + let nakamoto_block = NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 457, + burn_spent: 126, + consensus_hash: ConsensusHash([0x55; 20]), + parent_block_id: StacksBlockId([0x03; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x05; 32]), + state_index_root: TrieHash([0x07; 32]), + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + signer_bitvec: BitVec::zeros(1).unwrap(), + }, + txs: vec![], + }; + + let blocks_available = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::BlocksAvailable(BlocksAvailableData { + available: vec![ + (ConsensusHash([0x11; 20]), BurnchainHeaderHash([0x22; 32])), + (ConsensusHash([0x33; 20]), BurnchainHeaderHash([0x44; 32])), + ], + }), + ); + + let microblocks_available = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::MicroblocksAvailable(BlocksAvailableData { + available: vec![ + (ConsensusHash([0x11; 20]), BurnchainHeaderHash([0x22; 32])), + (ConsensusHash([0x33; 20]), BurnchainHeaderHash([0x44; 32])), + ], + }), + ); + + let block = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::Blocks(BlocksData { + blocks: vec![BlocksDatum( + ConsensusHash([0x11; 20]), + make_codec_test_block(10, StacksEpochId::Epoch25), + )], + }), + ); + let microblocks = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::Microblocks(MicroblocksData { + index_anchor_block: StacksBlockId([0x55; 32]), + microblocks: vec![make_codec_test_microblock(10)], + }), + ); + let nakamoto_block = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: vec![nakamoto_block], + }), + ); + + for _ in 0..peer.network.connection_opts.max_buffered_blocks_available { + assert!(peer + .network + .buffer_data_message(0, blocks_available.clone())); + } + assert!(!peer + .network + .buffer_data_message(0, blocks_available.clone())); + + for _ in 0..peer + .network + .connection_opts + .max_buffered_microblocks_available + { + assert!(peer + .network + .buffer_data_message(0, microblocks_available.clone())); + } + assert!(!peer + .network + .buffer_data_message(0, microblocks_available.clone())); + + for _ in 0..peer.network.connection_opts.max_buffered_blocks { + assert!(peer.network.buffer_data_message(0, block.clone())); + } + assert!(!peer.network.buffer_data_message(0, block.clone())); + + for _ in 0..peer.network.connection_opts.max_buffered_microblocks { + assert!(peer.network.buffer_data_message(0, microblocks.clone())); + } + assert!(!peer.network.buffer_data_message(0, microblocks.clone())); + + for _ in 0..peer.network.connection_opts.max_buffered_nakamoto_blocks { + assert!(peer.network.buffer_data_message(0, nakamoto_block.clone())); + } + assert!(!peer.network.buffer_data_message(0, nakamoto_block.clone())); +} + /// Verify that Nakmaoto blocks whose sortitions are known will *not* be buffered, but instead /// forwarded to the relayer for processing. #[test] diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 8bed8e5312..88f6b5efc3 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -160,18 +160,15 @@ impl PeerNetwork { Ok(Some(block_sortition_height)) } - /// Buffer a message for re-processing once the burnchain view updates. - /// If there is no space for the message, then silently drop it. - fn buffer_data_message(&mut self, event_id: usize, msg: StacksMessage) { - let Some(msgs) = self.pending_messages.get_mut(&event_id) else { - self.pending_messages.insert(event_id, vec![msg]); - debug!( - "{:?}: Event {} has 1 messages buffered", - &self.local_peer, event_id - ); - return; - }; - + /// Determine whether or not the system can buffer up this message, based on site-local + /// configuration options. + /// Return true if so, false if not + pub(crate) fn can_buffer_data_message( + &self, + event_id: usize, + msgs: &[StacksMessage], + msg: &StacksMessage, + ) -> bool { // check limits against connection opts, and if the limit is not met, then buffer up the // message. let mut blocks_available = 0; @@ -183,67 +180,103 @@ impl PeerNetwork { match &stored_msg.payload { StacksMessageType::BlocksAvailable(_) => { blocks_available += 1; - if blocks_available >= self.connection_opts.max_buffered_blocks_available { + if matches!(&msg.payload, StacksMessageType::BlocksAvailable(..)) + && blocks_available >= self.connection_opts.max_buffered_blocks_available + { debug!( - "{:?}: Drop BlocksAvailable from event {} -- already have {} buffered", + "{:?}: Cannot buffer BlocksAvailable from event {} -- already have {} buffered", &self.local_peer, event_id, blocks_available ); - return; + return false; } } StacksMessageType::MicroblocksAvailable(_) => { microblocks_available += 1; - if microblocks_available - >= self.connection_opts.max_buffered_microblocks_available + if matches!(&msg.payload, StacksMessageType::MicroblocksAvailable(..)) + && microblocks_available + >= self.connection_opts.max_buffered_microblocks_available { debug!( - "{:?}: Drop MicroblocksAvailable from event {} -- already have {} buffered", + "{:?}: Cannot buffer MicroblocksAvailable from event {} -- already have {} buffered", &self.local_peer, event_id, microblocks_available ); - return; + return false; } } StacksMessageType::Blocks(_) => { blocks_data += 1; - if blocks_data >= self.connection_opts.max_buffered_blocks { + if matches!(&msg.payload, StacksMessageType::Blocks(..)) + && blocks_data >= self.connection_opts.max_buffered_blocks + { debug!( - "{:?}: Drop BlocksData from event {} -- already have {} buffered", + "{:?}: Cannot buffer BlocksData from event {} -- already have {} buffered", &self.local_peer, event_id, blocks_data ); - return; + return false; } } StacksMessageType::Microblocks(_) => { microblocks_data += 1; - if microblocks_data >= self.connection_opts.max_buffered_microblocks { + if matches!(&msg.payload, StacksMessageType::Microblocks(..)) + && microblocks_data >= self.connection_opts.max_buffered_microblocks + { debug!( - "{:?}: Drop MicroblocksData from event {} -- already have {} buffered", + "{:?}: Cannot buffer MicroblocksData from event {} -- already have {} buffered", &self.local_peer, event_id, microblocks_data ); - return; + return false; } } StacksMessageType::NakamotoBlocks(_) => { nakamoto_blocks_data += 1; - if nakamoto_blocks_data >= self.connection_opts.max_buffered_nakamoto_blocks { + if matches!(&msg.payload, StacksMessageType::NakamotoBlocks(..)) + && nakamoto_blocks_data >= self.connection_opts.max_buffered_nakamoto_blocks + { debug!( - "{:?}: Drop NakamotoBlocksData from event {} -- already have {} buffered", + "{:?}: Cannot buffer NakamotoBlocksData from event {} -- already have {} buffered", &self.local_peer, event_id, nakamoto_blocks_data ); - return; + return false; } } _ => {} } } - msgs.push(msg); - debug!( - "{:?}: Event {} has {} messages buffered", - &self.local_peer, - event_id, - msgs.len() - ); + true + } + + /// Buffer a message for re-processing once the burnchain view updates. + /// If there is no space for the message, then silently drop it. + /// Returns true if buffered. + /// Returns false if not. + pub(crate) fn buffer_data_message(&mut self, event_id: usize, msg: StacksMessage) -> bool { + let Some(msgs) = self.pending_messages.get(&event_id) else { + self.pending_messages.insert(event_id, vec![msg]); + debug!( + "{:?}: Event {} has 1 messages buffered", + &self.local_peer, event_id + ); + return true; + }; + + // check limits against connection opts, and if the limit is not met, then buffer up the + // message. + if !self.can_buffer_data_message(event_id, msgs, &msg) { + return false; + } + + if let Some(msgs) = self.pending_messages.get_mut(&event_id) { + // should always be reachable + msgs.push(msg); + debug!( + "{:?}: Event {} has {} messages buffered", + &self.local_peer, + event_id, + msgs.len() + ); + } + true } /// Do we need a block or microblock stream, given its sortition's consensus hash? @@ -712,12 +745,11 @@ impl PeerNetwork { sortdb: &SortitionDB, nakamoto_block: &NakamotoBlock, ) -> (Option, bool) { - let mut can_process = true; - let sn = match SortitionDB::get_block_snapshot_consensus( + let (reward_set_sn, can_process) = match SortitionDB::get_block_snapshot_consensus( &sortdb.conn(), &nakamoto_block.header.consensus_hash, ) { - Ok(Some(sn)) => sn, + Ok(Some(sn)) => (sn, true), Ok(None) => { debug!( "No sortition {} for block {}", @@ -726,9 +758,8 @@ impl PeerNetwork { ); // we don't have the sortition for this, so we can't process it yet (i.e. we need // to buffer) - can_process = false; // load the tip so we can load the current reward set data - self.burnchain_tip.clone() + (self.burnchain_tip.clone(), false) } Err(e) => { info!( @@ -741,7 +772,7 @@ impl PeerNetwork { } }; - if !sn.pox_valid { + if !reward_set_sn.pox_valid { info!( "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", self.get_local_peer(), @@ -750,12 +781,12 @@ impl PeerNetwork { return (None, false); } - let sn_rc = self + let reward_set_sn_rc = self .burnchain - .pox_reward_cycle(sn.block_height) + .pox_reward_cycle(reward_set_sn.block_height) .expect("FATAL: sortition has no reward cycle"); - return (Some(sn_rc), can_process); + return (Some(reward_set_sn_rc), can_process); } /// Determine if an unsolicited NakamotoBlockData message contains data we can potentially @@ -1007,7 +1038,6 @@ impl PeerNetwork { continue; } }; - let neighbor_key = if let Some(convo) = self.peers.get(&event_id) { convo.to_neighbor_key() } else { @@ -1023,6 +1053,17 @@ impl PeerNetwork { debug!("{:?}: Process {} unsolicited messages from {:?}", &self.local_peer, messages.len(), &neighbor_key; "buffer" => %buffer); for message in messages.into_iter() { + if buffer + && !self.can_buffer_data_message( + event_id, + self.pending_messages.get(&event_id).unwrap_or(&vec![]), + &message, + ) + { + // asked to buffer, but we don't have space + continue; + } + if !buffer { debug!( "{:?}: Re-try handling buffered message {} from {:?}", From 93b53dc3949d240304abd348ff53e7bf66805364 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 11 Jun 2024 22:08:15 -0400 Subject: [PATCH 0304/1400] fix: build error --- testnet/stacks-node/src/nakamoto_node/miner.rs | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 9d20faaead..ffffb60f77 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -308,19 +308,6 @@ impl BlockMinerThread { )) })?; - let reward_cycle = self - .burnchain - .pox_constants - .block_height_to_reward_cycle( - self.burnchain.first_block_height, - self.burn_block.block_height, - ) - .ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Building on a burn block that is before the first burn block".into(), - ) - })?; - let reward_info = match load_nakamoto_reward_set( self.burnchain .pox_reward_cycle(tip.block_height.saturating_add(1)) @@ -633,7 +620,7 @@ impl BlockMinerThread { let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; NakamotoChainState::accept_block( &chainstate_config, - block, + &block, &mut sortition_handle, &staging_tx, headers_conn, From c543231b87158a1f9079caf632220b4609b72bed Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 12 Jun 2024 09:37:50 -0500 Subject: [PATCH 0305/1400] chore: add backtrace to db busy handler --- stackslib/src/util_lib/db.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index aa947046f4..22509a8ac4 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::backtrace::Backtrace; use std::io::Error as IOError; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; @@ -684,7 +685,14 @@ pub fn tx_busy_handler(run_count: i32) -> bool { debug!( "Database is locked; sleeping {}ms and trying again", - &sleep_count + &sleep_count; + "backtrace" => ?{ + if run_count > 10 && run_count % 10 == 0 { + Some(Backtrace::capture()) + } else { + None + } + }, ); sleep_ms(sleep_count); From 04a9f8d9509a43222f3f162a01f2603c5247d63f Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 12 Jun 2024 18:42:16 +0300 Subject: [PATCH 0306/1400] skip timeout network mutants stackslib --- stackslib/src/net/codec.rs | 1 + stackslib/src/net/p2p.rs | 1 + stackslib/src/net/relay.rs | 8 ++++++++ stackslib/src/net/unsolicited.rs | 5 +++++ 4 files changed, 15 insertions(+) diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index c115a50d82..bd8154e414 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -355,6 +355,7 @@ impl NakamotoInvData { } impl StacksMessageCodec for NakamotoBlocksData { + #[cfg_attr(test, mutants::skip)] fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &self.blocks)?; Ok(()) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index d5988a27ac..9aaffcb8de 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -1348,6 +1348,7 @@ impl PeerNetwork { Ok(ret) } + #[cfg_attr(test, mutants::skip)] /// Dispatch a single request from another thread. pub fn dispatch_request(&mut self, request: NetworkRequest) -> Result<(), net_error> { match request { diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 5183d8c794..6b34914bbb 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -937,6 +937,7 @@ impl Relayer { Ok(accepted) } + #[cfg_attr(test, mutants::skip)] /// Process nakamoto blocks that we downloaded. /// Log errors but do not return them. /// Returns the list of blocks we accepted. @@ -1529,6 +1530,7 @@ impl Relayer { Ok((mblock_datas, bad_neighbors)) } + #[cfg_attr(test, mutants::skip)] /// Preprocess all pushed Nakamoto blocks /// Return the Nakamoto blocks we can accept (and who relayed them), as well as the /// list of peers that served us invalid data. @@ -1913,6 +1915,7 @@ impl Relayer { )) } + #[cfg_attr(test, mutants::skip)] /// Process new Nakamoto blocks, both pushed and downloaded. /// Returns the list of Nakamoto blocks we stored, as well as the list of bad neighbors that /// sent us invalid blocks. @@ -2401,6 +2404,7 @@ impl Relayer { } } + #[cfg_attr(test, mutants::skip)] /// Process epoch2 block data. /// Relays blocks and microblocks as needed /// Returns (num new blocks, num new confirmed microblocks, num new unconfirmed microblocks) @@ -2460,6 +2464,7 @@ impl Relayer { ) } + #[cfg_attr(test, mutants::skip)] /// Get the last N sortitions, in order from the sortition tip to the n-1st ancestor pub fn get_last_n_sortitions( sortdb: &SortitionDB, @@ -2481,6 +2486,7 @@ impl Relayer { Ok(ret) } + #[cfg_attr(test, mutants::skip)] /// Relay Nakamoto blocks. /// By default, only sends them if we don't have them yet. /// This can be overridden by setting `force_send` to true. @@ -2575,6 +2581,7 @@ impl Relayer { } } + #[cfg_attr(test, mutants::skip)] /// Process epoch3 data /// Relay new nakamoto blocks if not in ibd /// Returns number of new nakamoto blocks, up to u64::MAX @@ -2623,6 +2630,7 @@ impl Relayer { num_new_nakamoto_blocks } + #[cfg_attr(test, mutants::skip)] /// Process new transactions /// Returns the list of accepted txs pub fn process_new_transactions( diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 88f6b5efc3..e444a4f633 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -50,6 +50,7 @@ use crate::net::{ /// synchronization state machine. impl PeerNetwork { + #[cfg_attr(test, mutants::skip)] /// Check that the sender is authenticated. /// Returns Some(remote sender address) if so /// Returns None otherwise @@ -160,6 +161,7 @@ impl PeerNetwork { Ok(Some(block_sortition_height)) } + #[cfg_attr(test, mutants::skip)] /// Determine whether or not the system can buffer up this message, based on site-local /// configuration options. /// Return true if so, false if not @@ -246,6 +248,7 @@ impl PeerNetwork { true } + #[cfg_attr(test, mutants::skip)] /// Buffer a message for re-processing once the burnchain view updates. /// If there is no space for the message, then silently drop it. /// Returns true if buffered. @@ -736,6 +739,7 @@ impl PeerNetwork { true } + #[cfg_attr(test, mutants::skip)] /// Find the reward cycle in which to validate the signature for this block. /// This may not actually correspond to the sortition for this block's tenure -- for example, /// it may be for a block whose sortition is about to be processed. As such, return both the @@ -824,6 +828,7 @@ impl PeerNetwork { !can_process } + #[cfg_attr(test, mutants::skip)] /// Handle an unsolicited NakamotoBlocksData message. /// /// Unlike Stacks epoch 2.x blocks, no change to the remote peer's inventory will take place. From ae38a8622c46b914e483dff1834691b98dc8c022 Mon Sep 17 00:00:00 2001 From: ASuciuX <151519329+ASuciuX@users.noreply.github.com> Date: Wed, 12 Jun 2024 18:51:05 +0300 Subject: [PATCH 0307/1400] Add doc comment for ibd Co-authored-by: Jeff Bencin --- stackslib/src/net/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index c22ccce1fa..acda0cfc43 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -647,6 +647,7 @@ pub struct StacksNodeState<'a> { inner_mempool: Option<&'a mut MemPoolDB>, inner_rpc_args: Option<&'a RPCHandlerArgs<'a>>, relay_message: Option, + /// Are we in Initial Block Download (IBD) phase? ibd: bool, } From 5d56dd18b5891e6ae0fc640e21ce63cc0e15f5b9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 12 Jun 2024 12:57:11 -0400 Subject: [PATCH 0308/1400] feat: pass stacks block height to `get-tenure-info?` --- clarity/src/vm/docs/mod.rs | 10 +++++----- clarity/src/vm/functions/database.rs | 26 +++++++++++++------------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index eb08b856d9..7bd2592fff 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -1840,12 +1840,12 @@ the mining of this block started, but is not guaranteed to be accurate. This tim const GET_TENURE_INFO_API: SpecialAPI = SpecialAPI { input_type: "TenureInfoPropertyName, uint", - snippet: "get-tenure-info? ${1:prop} ${2:tenure-height}", + snippet: "get-tenure-info? ${1:prop} ${2:block-height}", output_type: "(optional buff) | (optional uint)", - signature: "(get-tenure-info? prop-name tenure-height)", - description: "The `get-tenure-info?` function fetches data for a tenure at the given height. The -value and type returned are determined by the specified `TenureInfoPropertyName`. If the provided `tenure-height` does -not correspond to an existing tenure prior to the current block, the function returns `none`. The currently available property names + signature: "(get-tenure-info? prop-name block-height)", + description: "The `get-tenure-info?` function fetches data for the tenure at the given block height. The +value and type returned are determined by the specified `TenureInfoPropertyName`. If the provided `block-height` does +not correspond to an existing block prior to the current block, the function returns `none`. The currently available property names are as follows: - `burnchain-header-hash`: This property returns a `(buff 32)` value containing the header hash of the burnchain (Bitcoin) block that selected the diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index cea154aa12..2c38bcad42 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -742,7 +742,7 @@ pub fn special_get_block_info( env: &mut Environment, context: &LocalContext, ) -> Result { - // (get-block-info? property-name block-height-int) + // (get-block-info? property-name block-height-uint) runtime_cost(ClarityCostFunction::BlockInfo, env, 0)?; check_argument_count(2, args)?; @@ -1025,15 +1025,15 @@ pub fn special_get_stacks_block_info( } /// Handles the function `get-tenure-info?` special function. -/// Interprets `args` as variables `[property-name, tenure-height]`, and returns +/// Interprets `args` as variables `[property-name, block-height]`, and returns /// a property value determined by `property-name`: -/// - `time` returns the burn block time of the tenure at `tenure-height` -/// - `vrf-seed` returns the VRF seed of the tenure at `tenure-height` -/// - `burnchain-header-hash` returns header hash of the burnchain block corresponding to the tenure at `tenure-height` -/// - `miner-address` returns the address of the principal that mined the tenure at `tenure-height` -/// - `miner-spend-winner` returns the number of satoshis spent by the winning miner for the tenure at `tenure-height` -/// - `miner-spend-total` returns the total number of satoshis spent by all miners for the tenure at `tenure-height` -/// - `block-reward` returns the block reward for the tenure at `tenure-height` +/// - `time` returns the burn block time for the tenure of which `block-height` is a part +/// - `vrf-seed` returns the VRF seed for the tenure of which `block-height` is a part +/// - `burnchain-header-hash` returns header hash of the burnchain block corresponding to the tenure of which `block-height` is a part +/// - `miner-address` returns the address of the principal that mined the tenure of which `block-height` is a part +/// - `miner-spend-winner` returns the number of satoshis spent by the winning miner for the tenure of which `block-height` is a part +/// - `miner-spend-total` returns the total number of satoshis spent by all miners for the tenure of which `block-height` is a part +/// - `block-reward` returns the block reward for the tenure of which `block-height` is a part /// /// # Errors: /// - CheckErrors::IncorrectArgumentCount if there aren't 2 arguments. @@ -1045,7 +1045,7 @@ pub fn special_get_tenure_info( env: &mut Environment, context: &LocalContext, ) -> Result { - // (get-block-info? property-name block-height-int) + // (get-tenure-info? property-name block-height-uint) runtime_cost(ClarityCostFunction::BlockInfo, env, 0)?; check_argument_count(2, args)?; @@ -1058,7 +1058,7 @@ pub fn special_get_tenure_info( let block_info_prop = TenureInfoProperty::lookup_by_name(property_name) .ok_or(CheckErrors::GetTenureInfoExpectPropertyName)?; - // Handle the tenure-height input arg. + // Handle the block-height input arg. let height_eval = eval(&args[1], env, context)?; let height_value = match height_eval { Value::UInt(result) => Ok(result), @@ -1070,8 +1070,8 @@ pub fn special_get_tenure_info( _ => return Ok(Value::none()), }; - let current_tenure = env.global_context.database.get_tenure_height()?; - if height_value >= current_tenure { + let current_height = env.global_context.database.get_current_block_height(); + if height_value >= current_height { return Ok(Value::none()); } From 41a723c563fcacdd6cbdf7bc2af3acc218767614 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 12 Jun 2024 12:02:26 -0500 Subject: [PATCH 0309/1400] refactor: use net::tests::nakamoto boot functions --- .../chainstate/nakamoto/coordinator/tests.rs | 191 ++---------------- stackslib/src/net/tests/mod.rs | 97 +++++---- 2 files changed, 71 insertions(+), 217 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index bd4d4f664c..a0987b698c 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -36,6 +36,7 @@ use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::vrf::VRFProof; use wsts::curve::point::Point; +use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::operations::{BlockstackOperationType, LeaderBlockCommitOp}; use crate::chainstate::coordinator::tests::{p2pkh_from, pox_addr_from}; @@ -65,6 +66,7 @@ use crate::core::StacksEpochExtension; use crate::net::relay::Relayer; use crate::net::stackerdb::StackerDBConfig; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; +use crate::net::tests::NakamotoBootPlan; use crate::util_lib::boot::boot_code_id; use crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; @@ -140,102 +142,6 @@ fn advance_to_nakamoto( // peer is at the start of cycle 8 } -/// Bring a TestPeer into the Nakamoto Epoch -fn advance_to_nakamoto_long( - peer: &mut TestPeer, - test_signers: &mut TestSigners, - test_stackers: &[TestStacker], -) { - let mut peer_nonce = 0; - let private_key = peer.config.private_key.clone(); - let addr = StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(&private_key)], - ) - .unwrap(); - - let mut stacked_pox_4 = false; - let mut signer_voted = false; - let nakamoto_height = peer - .config - .epochs - .as_ref() - .unwrap() - .iter() - .find(|e| e.epoch_id == StacksEpochId::Epoch30) - .unwrap() - .start_height; - let mut tip = None; - loop { - let current_burn_height = peer.get_burn_block_height(); - if current_burn_height >= nakamoto_height - 1 { - info!("Booted to nakamoto"); - break; - } - let txs = if tip.is_none() { - // don't mine stack-stx txs in first block, because they cannot pass the burn block height - // validation - vec![] - } else if !stacked_pox_4 { - // Make all the test Stackers stack - stacked_pox_4 = true; - test_stackers - .iter() - .map(|test_stacker| { - let pox_addr = test_stacker.pox_address.clone().unwrap_or_else(|| { - PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()) - }); - let reward_cycle = peer - .config - .burnchain - .block_height_to_reward_cycle(current_burn_height) - .unwrap(); - let signature = make_signer_key_signature( - &pox_addr, - &test_stacker.signer_private_key, - reward_cycle.into(), - &Pox4SignatureTopic::StackStx, - 12_u128, - u128::MAX, - 1, - ); - let signing_key = - StacksPublicKey::from_private(&test_stacker.signer_private_key); - make_pox_4_lockup( - &test_stacker.stacker_private_key, - 0, - test_stacker.amount, - &pox_addr, - 12, - &signing_key, - current_burn_height + 2, - Some(signature), - u128::MAX, - 1, - ) - }) - .collect() - } else if !signer_voted { - signer_voted = true; - with_sortdb(peer, |chainstate, sortdb| { - make_all_signers_vote_for_aggregate_key( - chainstate, - sortdb, - &tip.unwrap(), - test_signers, - test_stackers, - 7, - ) - }) - } else { - vec![] - }; - tip = Some(peer.tenure_with_txs(&txs, &mut peer_nonce)); - } -} - pub fn make_all_signers_vote_for_aggregate_key( chainstate: &mut StacksChainState, sortdb: &SortitionDB, @@ -392,76 +298,6 @@ pub fn boot_nakamoto<'a>( peer } -/// Make a peer and transition it into the Nakamoto epoch. -/// The node needs to be stacking otherwise, Nakamoto can't activate. -pub fn boot_nakamoto_long_reward_sets<'a>( - test_name: &str, - mut initial_balances: Vec<(PrincipalData, u64)>, - test_signers: &mut TestSigners, - test_stackers: &[TestStacker], - observer: Option<&'a TestEventObserver>, -) -> TestPeer<'a> { - let aggregate_public_key = test_signers.aggregate_public_key.clone(); - let mut peer_config = TestPeerConfig::new(test_name, 0, 0); - let private_key = peer_config.private_key.clone(); - let addr = StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(&private_key)], - ) - .unwrap(); - - // reward cycles are 5 blocks long - // first 25 blocks are boot-up - // reward cycle 6 instantiates pox-3 - // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation - peer_config.aggregate_public_key = Some(aggregate_public_key.clone()); - peer_config - .stacker_dbs - .push(boot_code_id(MINERS_NAME, false)); - peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(37)); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; - - // Create some balances for test Stackers - let mut stacker_balances = test_stackers - .iter() - .map(|test_stacker| { - ( - PrincipalData::from(key_to_stacks_addr(&test_stacker.stacker_private_key)), - u64::try_from(test_stacker.amount + 10000).expect("Stacking amount too large"), - ) - }) - .collect(); - - // Create some balances for test Signers - let mut signer_balances = test_stackers - .iter() - .map(|stacker| { - ( - PrincipalData::from(p2pkh_from(&stacker.signer_private_key)), - 1000, - ) - }) - .collect(); - - peer_config.initial_balances.append(&mut stacker_balances); - peer_config.initial_balances.append(&mut signer_balances); - peer_config.initial_balances.append(&mut initial_balances); - peer_config.burnchain.pox_constants.reward_cycle_length = 10; - peer_config.burnchain.pox_constants.v2_unlock_height = 21; - peer_config.burnchain.pox_constants.pox_3_activation_height = 26; - peer_config.burnchain.pox_constants.v3_unlock_height = 27; - peer_config.burnchain.pox_constants.pox_4_activation_height = 28; - peer_config.test_stackers = Some(test_stackers.to_vec()); - peer_config.test_signers = Some(test_signers.clone()); - let mut peer = TestPeer::new_with_observer(peer_config, observer); - - advance_to_nakamoto_long(&mut peer, test_signers, test_stackers); - - peer -} - /// Make a replay peer, used for replaying the blockchain pub fn make_replay_peer<'a>(peer: &mut TestPeer<'a>) -> TestPeer<'a> { let mut replay_config = peer.config.clone(); @@ -887,14 +723,21 @@ fn pox_treatment() { )), }) .collect::>(); - let mut test_signers = TestSigners::new(vec![signing_key]); - let mut peer = boot_nakamoto_long_reward_sets( - function_name!(), - vec![(addr.into(), 100_000_000)], - &mut test_signers, - &test_stackers, - None, - ); + let test_signers = TestSigners::new(vec![signing_key]); + let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 28; + + let mut boot_plan = NakamotoBootPlan::new(function_name!()) + .with_test_stackers(test_stackers.clone()) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key); + boot_plan.pox_constants = pox_constants; + + let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); let mut blocks = vec![]; let pox_constants = peer.sortdb().pox_constants.clone(); let first_burn_height = peer.sortdb().first_block_height; diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 9e225d8f0d..9df1bcf564 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -28,7 +28,7 @@ use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_H use stacks_common::types::chainstate::{ StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, }; -use stacks_common::types::Address; +use stacks_common::types::{Address, StacksEpochId}; use stacks_common::util::vrf::VRFProof; use wsts::curve::point::Point; @@ -338,27 +338,35 @@ impl NakamotoBootPlan { let mut other_peer_nonces = vec![0; other_peers.len()]; let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&self.private_key)); - let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - tip - }; - + let mut sortition_height = peer.get_burn_block_height(); debug!("\n\n======================"); debug!("PoxConstants = {:#?}", &peer.config.burnchain.pox_constants); - debug!("tip = {}", tip.block_height); + debug!("tip = {}", sortition_height); debug!("========================\n\n"); - // advance to just past pox-3 unlock - let mut sortition_height = tip.block_height; - while sortition_height - <= peer - .config - .burnchain - .pox_constants - .pox_4_activation_height - .into() - { + let epoch_25_height = peer + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch25) + .unwrap() + .start_height; + + let epoch_30_height = peer + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch30) + .unwrap() + .start_height; + + // advance to just past pox-4 instantiation + let mut blocks_produced = false; + while sortition_height <= epoch_25_height { peer.tenure_with_txs(&vec![], &mut peer_nonce); for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) @@ -366,12 +374,23 @@ impl NakamotoBootPlan { other_peer.tenure_with_txs(&vec![], other_peer_nonce); } - let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - tip - }; - sortition_height = tip.block_height; + sortition_height = peer.get_burn_block_height(); + blocks_produced = true; + } + + // need to produce at least 1 block before making pox-4 lockups: + // the way `burn-block-height` constant works in Epoch 2.5 is such + // that if its the first block produced, this will be 0 which will + // prevent the lockups from being valid. + if !blocks_produced { + peer.tenure_with_txs(&vec![], &mut peer_nonce); + for (other_peer, other_peer_nonce) in + other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) + { + other_peer.tenure_with_txs(&vec![], other_peer_nonce); + } + + sortition_height = peer.get_burn_block_height(); } debug!("\n\n======================"); @@ -392,8 +411,9 @@ impl NakamotoBootPlan { .unwrap_or(vec![]) .iter() .map(|test_stacker| { - let pox_addr = - PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); + let pox_addr = test_stacker.pox_address.clone().unwrap_or_else(|| { + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()) + }); let signature = make_signer_key_signature( &pox_addr, &test_stacker.signer_private_key, @@ -410,7 +430,7 @@ impl NakamotoBootPlan { &pox_addr, 12, &StacksPublicKey::from_private(&test_stacker.signer_private_key), - 34, + sortition_height + 1, Some(signature), u128::MAX, 1, @@ -440,12 +460,7 @@ impl NakamotoBootPlan { .for_each(|(peer, nonce)| { peer.tenure_with_txs(&[], nonce); }); - let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - tip - }; - sortition_height = tip.block_height; + sortition_height = peer.get_burn_block_height(); } debug!("\n\n======================"); @@ -481,21 +496,14 @@ impl NakamotoBootPlan { debug!("========================\n\n"); // advance to the start of epoch 3.0 - while sortition_height - < Self::nakamoto_start_burn_height(&peer.config.burnchain.pox_constants) - { + while sortition_height < epoch_30_height - 1 { peer.tenure_with_txs(&vec![], &mut peer_nonce); for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { other_peer.tenure_with_txs(&vec![], other_peer_nonce); } - let tip = { - let sort_db = peer.sortdb.as_mut().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - tip - }; - sortition_height = tip.block_height; + sortition_height = peer.get_burn_block_height(); } debug!("\n\n======================"); @@ -514,12 +522,15 @@ impl NakamotoBootPlan { let (mut peer, mut other_peers) = self.boot_nakamoto(test_signers.aggregate_public_key.clone(), observer); + if boot_plan.is_empty() { + debug!("No boot plan steps supplied -- returning once nakamoto epoch has been reached"); + return (peer, other_peers); + } let mut all_blocks = vec![]; let mut consensus_hashes = vec![]; let mut last_tenure_change: Option = None; let mut blocks_since_last_tenure = 0; - let stx_miner_key = peer.miner.nakamoto_miner_key(); debug!("\n\nProcess plan with {} steps", boot_plan.len()); From 232274d9f2da1bf023f2de934fc39c9396a5f4f8 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 12 Jun 2024 13:20:42 -0500 Subject: [PATCH 0310/1400] fix stacks-signer unit tests --- stacks-signer/src/tests/chainstate.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 35b6f71a6f..e496248f7f 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -95,7 +95,7 @@ fn setup_test_environment( state_index_root: TrieHash([0; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - pox_treatment: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::ones(1).unwrap(), }, txs: vec![], }; From 7b4578b1427972a81936862a33e9c76c6f7272e2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 12 Jun 2024 20:37:01 +0000 Subject: [PATCH 0311/1400] fix: typo Co-authored-by: Brice Dobry --- stackslib/src/net/tests/relay/nakamoto.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index dc0c144e5c..7fe68b8975 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -70,7 +70,7 @@ use crate::net::tests::relay::epoch2x::broadcast_message; use crate::net::{Error as NetError, *}; use crate::util_lib::test::*; -/// Everything in a TestPeer, except the coordinator (which is encombered by the lifetime of its +/// Everything in a TestPeer, except the coordinator (which is encumbered by the lifetime of its /// chains coordinator's event observer) struct ExitedPeer { pub config: TestPeerConfig, From 1fad8d226e8081b4204c6a6ecc44c5ff8038013b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 12 Jun 2024 17:46:01 -0400 Subject: [PATCH 0312/1400] chore: minor edits from PR review and fix warnings --- .../analysis/type_checker/v2_1/natives/mod.rs | 4 +- clarity/src/vm/functions/database.rs | 10 ++-- stackslib/src/chainstate/nakamoto/mod.rs | 3 +- stackslib/src/net/api/postblock_proposal.rs | 3 -- .../stacks-node/src/nakamoto_node/miner.rs | 13 ----- .../src/tests/nakamoto_integrations.rs | 48 +++---------------- 6 files changed, 14 insertions(+), 67 deletions(-) diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index 89a3bff24e..39915dca6b 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -705,7 +705,7 @@ fn check_get_stacks_block_info( args: &[SymbolicExpression], context: &TypingContext, ) -> TypeResult { - check_arguments_at_least(2, args)?; + check_argument_count(2, args)?; let block_info_prop_str = args[0].match_atom().ok_or(CheckError::new( CheckErrors::GetStacksBlockInfoExpectPropertyName, @@ -726,7 +726,7 @@ fn check_get_tenure_info( args: &[SymbolicExpression], context: &TypingContext, ) -> TypeResult { - check_arguments_at_least(2, args)?; + check_argument_count(2, args)?; let block_info_prop_str = args[0].match_atom().ok_or(CheckError::new( CheckErrors::GetTenureInfoExpectPropertyName, diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index 2c38bcad42..f479b69ef1 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -986,9 +986,8 @@ pub fn special_get_stacks_block_info( x => Err(CheckErrors::TypeValueError(TypeSignature::UIntType, x)), }?; - let height_value = match u32::try_from(height_value) { - Ok(result) => result, - _ => return Ok(Value::none()), + let Ok(height_value) = u32::try_from(height_value) else { + return Ok(Value::none()); }; let current_block_height = env.global_context.database.get_current_block_height(); @@ -1065,9 +1064,8 @@ pub fn special_get_tenure_info( x => Err(CheckErrors::TypeValueError(TypeSignature::UIntType, x)), }?; - let height_value = match u32::try_from(height_value) { - Ok(result) => result, - _ => return Ok(Value::none()), + let Ok(height_value) = u32::try_from(height_value) else { + return Ok(Value::none()); }; let current_height = env.global_context.database.get_current_block_height(); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index ad0e3fee50..0317a9103c 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -622,7 +622,8 @@ impl NakamotoBlockHeader { } /// Make an "empty" header whose block data needs to be filled in. - /// This is used by the miner code. + /// This is used by the miner code. The block's timestamp is set here, at + /// the time of creation. pub fn from_parent_empty( chain_length: u64, burn_spent: u64, diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index a1a1d6c85e..fea63eaed4 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -333,9 +333,6 @@ impl NakamotoBlockProposal { let size = builder.get_bytes_so_far(); let cost = builder.tenure_finish(tenure_tx)?; - println!("block header: {:?}", block.header); - println!("expected: {:?}", self.block.header); - // Clone signatures from block proposal // These have already been validated by `validate_nakamoto_block_burnchain()`` block.header.miner_signature = self.block.header.miner_signature.clone(); diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index c100ab63cb..fc2b1c4537 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -308,19 +308,6 @@ impl BlockMinerThread { )) })?; - let reward_cycle = self - .burnchain - .pox_constants - .block_height_to_reward_cycle( - self.burnchain.first_block_height, - self.burn_block.block_height, - ) - .ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Building on a burn block that is before the first burn block".into(), - ) - })?; - let reward_info = match load_nakamoto_reward_set( self.burnchain .pox_reward_cycle(tip.block_height.saturating_add(1)) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 18106c97bf..8bc16be722 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5032,8 +5032,7 @@ fn check_block_times() { let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let tenure_count = 5; - let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers // these are necessary for the interim blocks to get mined at all let sender_addr = tests::to_addr(&sender_sk); @@ -5042,7 +5041,7 @@ fn check_block_times() { let deploy_fee = 3000; naka_conf.add_initial_balance( PrincipalData::from(sender_addr.clone()).to_string(), - 3 * deploy_fee + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + 3 * deploy_fee + (send_amt + send_fee) * 2, ); naka_conf.add_initial_balance( PrincipalData::from(sender_signer_addr.clone()).to_string(), @@ -5311,25 +5310,6 @@ fn check_block_times() { "Time from pre- and post-epoch 3.0 contracts should match" ); - // TODO: enable access to current tenure. - // let time3a_tenure_value = call_read_only( - // &naka_conf, - // &sender_addr, - // contract3_name, - // "get-tenure-time", - // vec![&clarity::vm::Value::UInt(last_tenure_height)], - // ); - // let time3a_tenure = time3a_tenure_value - // .expect_optional() - // .unwrap() - // .unwrap() - // .expect_u128() - // .unwrap(); - // assert_eq!( - // time0a, time3a_tenure, - // "Tenure time should match Clarity 2 block time" - // ); - let time3a_block_value = call_read_only( &naka_conf, &sender_addr, @@ -5343,6 +5323,10 @@ fn check_block_times() { .unwrap() .expect_u128() .unwrap(); + assert!( + time3a_block - time3_block >= 1, + "get-stacks-block-info? time should have changed" + ); // Sleep to ensure the seconds have changed thread::sleep(Duration::from_secs(1)); @@ -5357,7 +5341,6 @@ fn check_block_times() { // submit a tx so that the miner will mine an extra block let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); loop { @@ -5407,25 +5390,6 @@ fn check_block_times() { "Time from pre- and post-epoch 3.0 contracts should match" ); - // TODO: Enable access to current tenure. - // let time3b_tenure_value = call_read_only( - // &naka_conf, - // &sender_addr, - // contract3_name, - // "get-tenure-time", - // vec![&clarity::vm::Value::UInt(last_tenure_height)], - // ); - // let time3b_tenure = time3b_tenure_value - // .expect_optional() - // .unwrap() - // .unwrap() - // .expect_u128() - // .unwrap(); - // assert_eq!( - // time0b, time3b_tenure, - // "Tenure time should match Clarity 2 block time" - // ); - let time3b_block_value = call_read_only( &naka_conf, &sender_addr, From e724d6cf5401c0a6eb5d62779b42252ba3ad9307 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 13 Jun 2024 11:38:03 -0500 Subject: [PATCH 0313/1400] fix merge artifact, add test coverage --- .../burn/operations/leader_block_commit.rs | 22 +++++++++++++++++++ .../src/chainstate/nakamoto/tests/node.rs | 2 +- stackslib/src/net/tests/relay/nakamoto.rs | 2 +- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 98d52efd5e..753d2c51ad 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -3297,6 +3297,28 @@ mod tests { Treatment::Punish(reward_addrs(0)), ]), ), + ( + LeaderBlockCommitOp { + commit_outs: vec![reward_addrs(0), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Ok(vec![ + Treatment::Punish(reward_addrs(1)), + Treatment::Reward(reward_addrs(0)), + ]), + ), + ( + LeaderBlockCommitOp { + commit_outs: vec![reward_addrs(1), burn_addr_1.clone()], + ..default_block_commit.clone() + }, + Some(rs_pox_addrs.clone()), + Ok(vec![ + Treatment::Punish(reward_addrs(0)), + Treatment::Reward(reward_addrs(1)), + ]), + ), ( LeaderBlockCommitOp { commit_outs: vec![reward_addrs(0), reward_addrs(1)], diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index f2beff5076..1995bb2d0c 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -1051,7 +1051,7 @@ impl<'a> TestPeer<'a> { self.sortdb.as_ref().unwrap(), &mut sort_handle, &mut self.stacks_node.as_mut().unwrap().chainstate, - block.clone(), + block, None, )?; if !accepted { diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 7fe68b8975..1c456e539e 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -401,7 +401,7 @@ fn test_buffer_data_message() { state_index_root: TrieHash([0x07; 32]), miner_signature: MessageSignature::empty(), signer_signature: vec![], - signer_bitvec: BitVec::zeros(1).unwrap(), + pox_treatment: BitVec::zeros(1).unwrap(), }, txs: vec![], }; From b13bd4898fa156a373532448aee656db329acb6d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 13 Jun 2024 14:13:50 -0500 Subject: [PATCH 0314/1400] feat: in nakamoto, gc mempool by accept_time --- stacks-common/src/types/mod.rs | 20 ++++++++++ stackslib/src/core/mempool.rs | 70 ++++++++++++++++++++++++++++----- stackslib/src/core/tests/mod.rs | 21 ++++++++-- stackslib/src/main.rs | 13 +++--- stackslib/src/net/relay.rs | 18 +++------ 5 files changed, 109 insertions(+), 33 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 05b17e05a5..cfbc2dbb55 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -87,11 +87,31 @@ pub enum StacksEpochId { Epoch30 = 0x03000, } +pub enum MempoolCollectionBehavior { + ByStacksHeight, + ByReceiveTime, +} + impl StacksEpochId { pub fn latest() -> StacksEpochId { StacksEpochId::Epoch30 } + /// In this epoch, how should the mempool perform garbage collection? + pub fn mempool_garbage_behavior(&self) -> MempoolCollectionBehavior { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => MempoolCollectionBehavior::ByStacksHeight, + StacksEpochId::Epoch30 => MempoolCollectionBehavior::ByReceiveTime, + } + } + /// Returns whether or not this Epoch should perform /// memory checks during analysis pub fn analysis_memory(&self) -> bool { diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index dc67539573..9eb9b7cf80 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -21,7 +21,7 @@ use std::io::{Read, Write}; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use std::str::FromStr; -use std::time::Instant; +use std::time::{Duration, Instant, SystemTime}; use std::{fs, io}; use clarity::vm::types::PrincipalData; @@ -37,6 +37,7 @@ use stacks_common::codec::{ read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, }; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockId}; +use stacks_common::types::MempoolCollectionBehavior; use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; use stacks_common::util::retry::{BoundReader, RetryReader}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; @@ -71,8 +72,10 @@ use crate::util_lib::db::{ use crate::{cost_estimates, monitoring}; // maximum number of confirmations a transaction can have before it's garbage-collected -pub const MEMPOOL_MAX_TRANSACTION_AGE: u64 = 256; -pub const MAXIMUM_MEMPOOL_TX_CHAINING: u64 = 25; +pub static MEMPOOL_MAX_TRANSACTION_AGE: u64 = 256; +pub static MAXIMUM_MEMPOOL_TX_CHAINING: u64 = 25; +pub static MEMPOOL_NAKAMOTO_MAX_TRANSACTION_AGE: Duration = + Duration::from_secs(MEMPOOL_MAX_TRANSACTION_AGE * 10 * 60); // name of table for storing the counting bloom filter pub const BLOOM_COUNTER_TABLE: &'static str = "txid_bloom_counter"; @@ -2206,10 +2209,58 @@ impl MemPoolDB { Ok(()) } - /// Garbage-collect the mempool. Remove transactions that have a given number of - /// confirmations. + /// Garbage-collect the mempool according to the behavior specified in `behavior`. pub fn garbage_collect( - tx: &mut MemPoolTx, + &mut self, + chain_height: u64, + behavior: &MempoolCollectionBehavior, + event_observer: Option<&dyn MemPoolEventDispatcher>, + ) -> Result<(), db_error> { + let tx = self.tx_begin()?; + match behavior { + MempoolCollectionBehavior::ByStacksHeight => { + let Some(min_height) = chain_height.checked_sub(MEMPOOL_MAX_TRANSACTION_AGE) else { + return Ok(()); + }; + Self::garbage_collect_by_height(&tx, min_height, event_observer)?; + } + MempoolCollectionBehavior::ByReceiveTime => { + Self::garbage_collect_by_time( + &tx, + &MEMPOOL_NAKAMOTO_MAX_TRANSACTION_AGE, + event_observer, + )?; + } + }; + tx.commit() + } + + /// Garbage-collect the mempool. Remove transactions that were accepted more than `age` ago. + /// The granularity of this check is in seconds. + pub fn garbage_collect_by_time( + tx: &MemPoolTx, + age: &Duration, + event_observer: Option<&dyn MemPoolEventDispatcher>, + ) -> Result<(), db_error> { + let threshold_time = get_epoch_time_secs().saturating_sub(age.as_secs()); + let args: &[&dyn ToSql] = &[&u64_to_sql(threshold_time)?]; + if let Some(event_observer) = event_observer { + let sql = "SELECT txid FROM mempool WHERE accept_time < ?1"; + let txids = query_rows(tx, sql, args)?; + event_observer.mempool_txs_dropped(txids, MemPoolDropReason::STALE_COLLECT); + } + + let sql = "DELETE FROM mempool WHERE accept_time < ?1"; + + tx.execute(sql, args)?; + increment_stx_mempool_gc(); + Ok(()) + } + + /// Garbage-collect the mempool. Remove transactions that were received `min_height` + /// blocks ago. + pub fn garbage_collect_by_height( + tx: &MemPoolTx, min_height: u64, event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result<(), db_error> { @@ -2230,10 +2281,9 @@ impl MemPoolDB { #[cfg(test)] pub fn clear_before_height(&mut self, min_height: u64) -> Result<(), db_error> { - let mut tx = self.tx_begin()?; - MemPoolDB::garbage_collect(&mut tx, min_height, None)?; - tx.commit()?; - Ok(()) + let tx = self.tx_begin()?; + MemPoolDB::garbage_collect_by_height(&tx, min_height, None)?; + tx.commit() } /// Scan the chain tip for all available transactions (but do not remove them!) diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 6a3b700186..d77724756d 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . use std::collections::{HashMap, HashSet}; +use std::time::Duration; use std::{cmp, io}; use clarity::vm::costs::ExecutionCost; @@ -31,7 +32,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, StacksWorkScore, TrieHash, VRFSeed, }; -use stacks_common::types::StacksEpochId; +use stacks_common::types::{MempoolCollectionBehavior, StacksEpochId}; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, *}; use stacks_common::util::secp256k1::{MessageSignature, *}; use stacks_common::util::vrf::VRFProof; @@ -1388,8 +1389,10 @@ fn mempool_do_not_replace_tx() { assert!(!MemPoolDB::db_has_tx(&mempool_tx, &txid).unwrap()); } -#[test] -fn mempool_db_load_store_replace_tx() { +#[rstest] +#[case(MempoolCollectionBehavior::ByStacksHeight)] +#[case(MempoolCollectionBehavior::ByReceiveTime)] +fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) { let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); let chainstate_path = chainstate_path(function_name!()); let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); @@ -1616,7 +1619,17 @@ fn mempool_db_load_store_replace_tx() { eprintln!("garbage-collect"); let mut mempool_tx = mempool.tx_begin().unwrap(); - MemPoolDB::garbage_collect(&mut mempool_tx, 101, None).unwrap(); + match behavior { + MempoolCollectionBehavior::ByStacksHeight => { + MemPoolDB::garbage_collect_by_height(&mut mempool_tx, 101, None) + } + MempoolCollectionBehavior::ByReceiveTime => { + let test_max_age = Duration::from_secs(1); + std::thread::sleep(2 * test_max_age); + MemPoolDB::garbage_collect_by_time(&mut mempool_tx, &test_max_age, None) + } + } + .unwrap(); mempool_tx.commit().unwrap(); let txs = MemPoolDB::get_txs_after( diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 2bb78a6282..54c14f3b31 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -26,6 +26,7 @@ extern crate stacks_common; #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; +use stacks_common::types::MempoolCollectionBehavior; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; @@ -1373,13 +1374,11 @@ simulating a miner. let mut mempool_db = MemPoolDB::open(true, chain_id, &chain_state_path, estimator, metric) .expect("Failed to open mempool db"); - { - info!("Clearing mempool"); - let mut tx = mempool_db.tx_begin().unwrap(); - let min_height = u32::MAX as u64; - MemPoolDB::garbage_collect(&mut tx, min_height, None).unwrap(); - tx.commit().unwrap(); - } + info!("Clearing mempool"); + let min_height = u32::MAX as u64; + mempool_db + .garbage_collect(min_height, &MempoolCollectionBehavior::ByStacksHeight, None) + .unwrap(); let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) .unwrap() diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 6b34914bbb..11fa5f6364 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -28,7 +28,7 @@ use rand::{thread_rng, Rng}; use stacks_common::address::public_keys_to_address_hash; use stacks_common::codec::MAX_PAYLOAD_LEN; use stacks_common::types::chainstate::{BurnchainHeaderHash, PoxId, SortitionId, StacksBlockId}; -use stacks_common::types::StacksEpochId; +use stacks_common::types::{MempoolCollectionBehavior, StacksEpochId}; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -2101,18 +2101,12 @@ impl Relayer { ret.push((vec![], tx.clone())); } - // garbage-collect - if chain_height > MEMPOOL_MAX_TRANSACTION_AGE { - let min_height = chain_height.saturating_sub(MEMPOOL_MAX_TRANSACTION_AGE); - let mut mempool_tx = mempool.tx_begin()?; + mempool.garbage_collect( + chain_height, + &epoch_id.mempool_garbage_behavior(), + event_observer, + )?; - debug!( - "Remove all transactions beneath block height {}", - min_height - ); - MemPoolDB::garbage_collect(&mut mempool_tx, min_height, event_observer)?; - mempool_tx.commit()?; - } update_stacks_tip_height(chain_height as i64); Ok(ret) From e496acd2e4068216881c824dc087dab83dee632c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 13 Jun 2024 15:17:47 -0400 Subject: [PATCH 0315/1400] chore: rename `IdentityHeaderHash` to `IndexHeaderHash` --- clarity/src/vm/functions/database.rs | 2 +- clarity/src/vm/types/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index f479b69ef1..d779b1d4c9 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -1009,7 +1009,7 @@ pub fn special_get_stacks_block_info( data: header_hash.as_bytes().to_vec(), })) } - StacksBlockInfoProperty::IdentityHeaderHash => { + StacksBlockInfoProperty::IndexHeaderHash => { let id_header_hash = env .global_context .database diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index 8684bf531c..e5914e6fb8 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -733,7 +733,7 @@ define_named_enum!(BurnBlockInfoProperty { }); define_named_enum!(StacksBlockInfoProperty { - IdentityHeaderHash("id-header-hash"), + IndexHeaderHash("id-header-hash"), HeaderHash("header-hash"), Time("time"), }); From 9f7f951f64d7056ae3f740c28fcdee727048358d Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 13 Jun 2024 14:41:06 -0400 Subject: [PATCH 0316/1400] fix: 2 blocks dead time between 2.5 -> 3.0 transition --- stackslib/src/chainstate/stacks/miner.rs | 8 ++--- testnet/stacks-node/src/globals.rs | 7 ++-- testnet/stacks-node/src/neon_node.rs | 3 +- .../stacks-node/src/run_loop/boot_nakamoto.rs | 33 +++++++++++++++++-- testnet/stacks-node/src/run_loop/nakamoto.rs | 14 ++++++-- testnet/stacks-node/src/run_loop/neon.rs | 17 +++++++--- 6 files changed, 64 insertions(+), 18 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index a0e52c8e39..93ffdda2b5 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -731,11 +731,11 @@ impl<'a> StacksMicroblockBuilder<'a> { anchor_block, anchor_block_consensus_hash, anchor_block_height, - runtime: runtime, + runtime, clarity_tx: Some(clarity_tx), header_reader, unconfirmed: false, - settings: settings, + settings, ast_rules, }) } @@ -809,11 +809,11 @@ impl<'a> StacksMicroblockBuilder<'a> { anchor_block: anchored_block_hash, anchor_block_consensus_hash: anchored_consensus_hash, anchor_block_height: anchored_block_height, - runtime: runtime, + runtime, clarity_tx: Some(clarity_tx), header_reader, unconfirmed: true, - settings: settings, + settings, ast_rules, }) } diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index a6a2fdad3c..de15126a96 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -59,7 +59,7 @@ pub struct Globals { /// Global flag to see if we should keep running pub should_keep_running: Arc, /// Status of our VRF key registration state (shared between the main thread and the relayer) - leader_key_registration_state: Arc>, + pub leader_key_registration_state: Arc>, /// Last miner config loaded last_miner_config: Arc>>, /// burnchain height at which we start mining @@ -103,6 +103,7 @@ impl Globals { sync_comms: PoxSyncWatchdogComms, should_keep_running: Arc, start_mining_height: u64, + leader_key_registration_state: LeaderKeyRegistrationState, ) -> Globals { Globals { last_sortition: Arc::new(Mutex::new(None)), @@ -113,9 +114,7 @@ impl Globals { counters, sync_comms, should_keep_running, - leader_key_registration_state: Arc::new(Mutex::new( - LeaderKeyRegistrationState::Inactive, - )), + leader_key_registration_state: Arc::new(Mutex::new(leader_key_registration_state)), last_miner_config: Arc::new(Mutex::new(None)), start_mining_height: Arc::new(Mutex::new(start_mining_height)), estimated_winning_probs: Arc::new(Mutex::new(HashMap::new())), diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 282fc5daa8..e21e6dccc5 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -401,9 +401,10 @@ struct ParentStacksBlockInfo { coinbase_nonce: u64, } -#[derive(Clone)] +#[derive(Clone, Default)] pub enum LeaderKeyRegistrationState { /// Not started yet + #[default] Inactive, /// Waiting for burnchain confirmation /// `u64` is the target block height in which we intend this key to land diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 087e1424ee..97bf8dd4e0 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -23,13 +23,42 @@ use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::core::StacksEpochExtension; +use stacks::net::p2p::PeerNetwork; use stacks_common::types::{StacksEpoch, StacksEpochId}; +use crate::globals::NeonGlobals; use crate::neon::Counters; +use crate::neon_node::LeaderKeyRegistrationState; use crate::run_loop::nakamoto::RunLoop as NakaRunLoop; use crate::run_loop::neon::RunLoop as NeonRunLoop; use crate::Config; +/// Data which should persist through transition from Neon => Nakamoto run loop +#[derive(Default)] +pub struct Neon2NakaData { + pub leader_key_registration_state: LeaderKeyRegistrationState, + pub peer_network: Option, +} + +impl Neon2NakaData { + /// Take needed values from `NeonGlobals` and optionally `PeerNetwork`, consuming them + pub fn new(globals: NeonGlobals, peer_network: Option) -> Self { + let key_state = globals + .leader_key_registration_state + .lock() + .unwrap_or_else(|e| { + // can only happen due to a thread panic in the relayer + error!("FATAL: leader key registration mutex is poisoned: {e:?}"); + panic!(); + }); + + Self { + leader_key_registration_state: (*key_state).clone(), + peer_network, + } + } +} + /// This runloop handles booting to Nakamoto: /// During epochs [1.0, 2.5], it runs a neon run_loop. /// Once epoch 3.0 is reached, it stops the neon run_loop @@ -120,7 +149,7 @@ impl BootRunLoop { let boot_thread = Self::spawn_stopper(&self.config, neon_loop) .expect("FATAL: failed to spawn epoch-2/3-boot thread"); - let peer_network = neon_loop.start(burnchain_opt.clone(), mine_start); + let data_to_naka = neon_loop.start(burnchain_opt.clone(), mine_start); let monitoring_thread = neon_loop.take_monitoring_thread(); // did we exit because of the epoch-3.0 transition, or some other reason? @@ -150,7 +179,7 @@ impl BootRunLoop { let InnerLoops::Epoch3(ref mut naka_loop) = self.active_loop else { panic!("FATAL: unexpectedly found epoch2 loop after setting epoch3 active"); }; - naka_loop.start(burnchain_opt, mine_start, peer_network) + naka_loop.start(burnchain_opt, mine_start, data_to_naka) } fn spawn_stopper( diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 997327287d..112912ef37 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -31,7 +31,6 @@ use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; -use stacks::net::p2p::PeerNetwork; use stacks_common::types::PublicKey; use stacks_common::util::hash::Hash160; use stx_genesis::GenesisData; @@ -44,6 +43,7 @@ use crate::node::{ get_account_balances, get_account_lockups, get_names, get_namespaces, use_test_genesis_chainstate, }; +use crate::run_loop::boot_nakamoto::Neon2NakaData; use crate::run_loop::neon; use crate::run_loop::neon::Counters; use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms}; @@ -397,7 +397,7 @@ impl RunLoop { &mut self, burnchain_opt: Option, mut mine_start: u64, - peer_network: Option, + data_from_neon: Option, ) { let (coordinator_receivers, coordinator_senders) = self .coordinator_channels @@ -437,6 +437,8 @@ impl RunLoop { // relayer linkup let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER); + let data_from_neon = data_from_neon.unwrap_or_default(); + // set up globals so other subsystems can instantiate off of the runloop state. let globals = Globals::new( coordinator_senders, @@ -446,6 +448,7 @@ impl RunLoop { self.pox_watchdog_comms.clone(), self.should_keep_running.clone(), mine_start, + data_from_neon.leader_key_registration_state, ); self.set_globals(globals.clone()); @@ -481,7 +484,12 @@ impl RunLoop { // Boot up the p2p network and relayer, and figure out how many sortitions we have so far // (it could be non-zero if the node is resuming from chainstate) - let mut node = StacksNode::spawn(self, globals.clone(), relay_recv, peer_network); + let mut node = StacksNode::spawn( + self, + globals.clone(), + relay_recv, + data_from_neon.peer_network, + ); // Wait for all pending sortitions to process let burnchain_db = burnchain_config diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 157fa71cd7..d4aea34f0e 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -21,7 +21,6 @@ use stacks::chainstate::stacks::db::{ChainStateBootData, StacksChainState}; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; -use stacks::net::p2p::PeerNetwork; use stacks::util_lib::db::Error as db_error; use stacks_common::deps_common::ctrlc as termination; use stacks_common::deps_common::ctrlc::SignalId; @@ -34,11 +33,14 @@ use super::RunLoopCallbacks; use crate::burnchains::{make_bitcoin_indexer, Error}; use crate::globals::NeonGlobals as Globals; use crate::monitoring::{start_serving_monitoring_metrics, MonitoringError}; -use crate::neon_node::{StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; +use crate::neon_node::{ + LeaderKeyRegistrationState, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER, +}; use crate::node::{ get_account_balances, get_account_lockups, get_names, get_namespaces, use_test_genesis_chainstate, }; +use crate::run_loop::boot_nakamoto::Neon2NakaData; use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms}; use crate::{ run_loop, BitcoinRegtestController, BurnchainController, Config, EventDispatcher, Keychain, @@ -1000,11 +1002,13 @@ impl RunLoop { /// It will start the burnchain (separate thread), set-up a channel in /// charge of coordinating the new blocks coming from the burnchain and /// the nodes, taking turns on tenures. + /// + /// Returns `Option` so that data can be passed to `NakamotoNode` pub fn start( &mut self, burnchain_opt: Option, mut mine_start: u64, - ) -> Option { + ) -> Option { let (coordinator_receivers, coordinator_senders) = self .coordinator_channels .take() @@ -1051,6 +1055,7 @@ impl RunLoop { self.pox_watchdog_comms.clone(), self.should_keep_running.clone(), mine_start, + LeaderKeyRegistrationState::default(), ); self.set_globals(globals.clone()); @@ -1150,8 +1155,12 @@ impl RunLoop { let peer_network = node.join(); liveness_thread.join().unwrap(); + // Data that will be passed to Nakamoto run loop + // Only gets transfered on clean shutdown of neon run loop + let data_to_naka = Neon2NakaData::new(globals, peer_network); + info!("Exiting stacks-node"); - break peer_network; + break Some(data_to_naka); } let remote_chain_height = burnchain.get_headers_height() - 1; From 5b1191cf3927dabbc7516507915037f052425170 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 13 Jun 2024 15:25:54 -0400 Subject: [PATCH 0317/1400] refactor: add optional `id_bhh` to `get_burn_block_time` --- clarity/src/vm/database/clarity_db.rs | 11 ++++++++--- clarity/src/vm/functions/database.rs | 4 ++-- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 8507dec80e..6fc829a8f0 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -907,8 +907,13 @@ impl<'a> ClarityDatabase<'a> { .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } - pub fn get_burn_block_time(&mut self, block_height: u32) -> Result { - let id_bhh = self.get_index_block_header_hash(block_height)?; + pub fn get_burn_block_time( + &mut self, + block_height: u32, + id_bhh_opt: Option, + ) -> Result { + let id_bhh = + id_bhh_opt.unwrap_or_else(|| self.get_index_block_header_hash(block_height)?); self.headers_db .get_burn_block_time_for_block(&id_bhh) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) @@ -925,7 +930,7 @@ impl<'a> ClarityDatabase<'a> { ) })?; if !epoch.epoch_id.has_block_timestamps() { - return self.get_burn_block_time(block_height); + return self.get_burn_block_time(block_height, Some(id_bhh)); } self.headers_db diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index d779b1d4c9..f048a59536 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -779,7 +779,7 @@ pub fn special_get_block_info( let block_time = env .global_context .database - .get_burn_block_time(height_value)?; + .get_burn_block_time(height_value, None)?; Value::UInt(u128::from(block_time)) } BlockInfoProperty::VrfSeed => { @@ -1078,7 +1078,7 @@ pub fn special_get_tenure_info( let block_time = env .global_context .database - .get_burn_block_time(height_value)?; + .get_burn_block_time(height_value, None)?; Value::UInt(u128::from(block_time)) } TenureInfoProperty::VrfSeed => { From b09c4ac455363293ad6bd7416fe4bad6a75ed65b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 13 Jun 2024 15:29:15 -0400 Subject: [PATCH 0318/1400] fix: bugs in previous quick fixes --- clarity/src/vm/database/clarity_db.rs | 6 ++++-- clarity/src/vm/types/mod.rs | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 6fc829a8f0..519f1b3cfb 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -912,8 +912,10 @@ impl<'a> ClarityDatabase<'a> { block_height: u32, id_bhh_opt: Option, ) -> Result { - let id_bhh = - id_bhh_opt.unwrap_or_else(|| self.get_index_block_header_hash(block_height)?); + let id_bhh = match id_bhh_opt { + Some(x) => x, + None => self.get_index_block_header_hash(block_height)?, + }; self.headers_db .get_burn_block_time_for_block(&id_bhh) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index e5914e6fb8..e1837ee034 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -827,7 +827,7 @@ impl StacksBlockInfoProperty { use self::StacksBlockInfoProperty::*; match self { Time => TypeSignature::UIntType, - IdentityHeaderHash | HeaderHash => BUFF_32.clone(), + IndexHeaderHash | HeaderHash => BUFF_32.clone(), } } } From d5dc115ebab9307b8955ee8656def6bdb6012287 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 13 Jun 2024 16:48:12 -0400 Subject: [PATCH 0319/1400] chore: remove commented code --- stackslib/src/net/api/tests/postblock_proposal.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index a9defc6e3e..6ab465a683 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -240,7 +240,6 @@ fn test_try_make_response() { .unwrap() .unwrap(); - // let mut block = make_codec_test_nakamoto_block(StacksEpochId::Epoch30, &tip, miner_privk, parent_stacks_header); let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); From 67f0ae2ab34b398bba4d879926fd24e771b1f487 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 13 Jun 2024 20:37:38 -0400 Subject: [PATCH 0320/1400] refactor: rename `has_stacks_block_timestamps` --- clarity/src/vm/database/clarity_db.rs | 2 +- stacks-common/src/types/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 519f1b3cfb..53eae28652 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -931,7 +931,7 @@ impl<'a> ClarityDatabase<'a> { format!("Failed to get epoch for block height {block_height}.)").into(), ) })?; - if !epoch.epoch_id.has_block_timestamps() { + if !epoch.epoch_id.has_stacks_block_timestamps() { return self.get_burn_block_time(block_height, Some(id_bhh)); } diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index a4bd608fb7..abd1992b5d 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -154,7 +154,7 @@ impl StacksEpochId { /// Is a timestamp saved for each Stacks block in this epoch? /// If not, the timestamp is retrieved from the burn block. - pub fn has_block_timestamps(&self) -> bool { + pub fn has_stacks_block_timestamps(&self) -> bool { self >= &StacksEpochId::Epoch30 } From 0811895a9bbe19ceeb7d95eef4586a22172bf6dc Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 13 Jun 2024 20:46:39 -0400 Subject: [PATCH 0321/1400] refactor: rename to `uses_nakamoto_blocks` This allows this method to be more generic, and can also be used to decide which database table to use when retrieving headers. --- clarity/src/vm/database/clarity_db.rs | 2 +- stacks-common/src/types/mod.rs | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 53eae28652..e2bb7b0f5b 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -931,7 +931,7 @@ impl<'a> ClarityDatabase<'a> { format!("Failed to get epoch for block height {block_height}.)").into(), ) })?; - if !epoch.epoch_id.has_stacks_block_timestamps() { + if !epoch.epoch_id.uses_nakamoto_blocks() { return self.get_burn_block_time(block_height, Some(id_bhh)); } diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index abd1992b5d..b9ff998c6a 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -152,9 +152,10 @@ impl StacksEpochId { } } - /// Is a timestamp saved for each Stacks block in this epoch? - /// If not, the timestamp is retrieved from the burn block. - pub fn has_stacks_block_timestamps(&self) -> bool { + /// Returns true for epochs which use Nakamoto blocks. These blocks use a + /// different header format than the previous Stacks blocks, which among + /// other changes includes a Stacks-specific timestamp. + pub fn uses_nakamoto_blocks(&self) -> bool { self >= &StacksEpochId::Epoch30 } From 3c744db72e094cc4305089c32472bd6c852a2fa9 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 14 Jun 2024 17:13:09 +0300 Subject: [PATCH 0322/1400] update API schema --- docs/rpc/api/core-node/get-info.example.json | 4 +++- docs/rpc/api/core-node/get-info.schema.json | 17 +++++++++++++++-- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/docs/rpc/api/core-node/get-info.example.json b/docs/rpc/api/core-node/get-info.example.json index 19bb6d20f4..4876841497 100644 --- a/docs/rpc/api/core-node/get-info.example.json +++ b/docs/rpc/api/core-node/get-info.example.json @@ -11,5 +11,7 @@ "stacks_tip": "b1807a2d3f7f8c7922f7c1d60d7c34145ade05d789640dc7dc9ec1021e07bb54", "stacks_tip_consensus_hash": "17f76e597bab45646956f38dd39573085d72cbc0", "unanchored_tip": "0000000000000000000000000000000000000000000000000000000000000000", - "exit_at_block_height": null + "exit_at_block_height": null, + "is_fully_synced": false, + "max_peer_height": 15 } diff --git a/docs/rpc/api/core-node/get-info.schema.json b/docs/rpc/api/core-node/get-info.schema.json index f37cd0893f..4190c67120 100644 --- a/docs/rpc/api/core-node/get-info.schema.json +++ b/docs/rpc/api/core-node/get-info.schema.json @@ -17,7 +17,9 @@ "stacks_tip", "stacks_tip_consensus_hash", "unanchored_tip", - "exit_at_block_height" + "exit_at_block_height", + "is_fully_synced", + "max_peer_height" ], "properties": { "peer_version": { @@ -71,6 +73,17 @@ "exit_at_block_height": { "type": "integer", "description": "the block height at which the testnet network will be reset. not applicable for mainnet" - } + }, + + "is_fully_synced": { + "type": "boolean", + "description": "indicates whether the node has fully synchronized with the network" + }, + "max_peer_height": { + "type": "integer", + "description": "the highest block height observed among all connected peers, indicating the most advanced state of the blockchain network known to the node" + } + + } } From 609156e9b295495be2bf53c3bb51c80e6629dbca Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 14 Jun 2024 11:16:24 -0400 Subject: [PATCH 0323/1400] chore: Add `memo` field to `RegisteredKey` struct --- testnet/stacks-node/src/globals.rs | 1 + testnet/stacks-node/src/nakamoto_node.rs | 2 ++ testnet/stacks-node/src/nakamoto_node/relayer.rs | 2 +- testnet/stacks-node/src/neon_node.rs | 1 + testnet/stacks-node/src/node.rs | 1 + testnet/stacks-node/src/run_loop/mod.rs | 3 +++ 6 files changed, 9 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index de15126a96..b0f338032a 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -286,6 +286,7 @@ impl Globals { vrf_public_key: op.public_key, block_height: op.block_height as u64, op_vtxindex: op.vtxindex as u32, + memo: op.memo, }; **leader_key_registration_state = diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 8a1d80de32..a21e36a9a0 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -28,6 +28,7 @@ use stacks::net::atlas::AtlasConfig; use stacks::net::p2p::PeerNetwork; use stacks::net::relay::Relayer; use stacks::net::stackerdb::StackerDBs; +use stacks::util::hash::Hash160; use stacks_common::types::chainstate::SortitionId; use stacks_common::types::StacksEpochId; @@ -178,6 +179,7 @@ impl StacksNode { block_height: 1, op_vtxindex: 1, vrf_public_key, + memo: keychain.get_nakamoto_pkh().as_bytes().to_vec(), }) } else { LeaderKeyRegistrationState::Inactive diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 5e8492a8a2..751649c627 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -44,7 +44,7 @@ use stacks::net::db::LocalPeer; use stacks::net::relay::Relayer; use stacks::net::NetworkResult; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksPublicKey, VRFSeed, + BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, VRFSeed, }; use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_ms; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index e21e6dccc5..fb6dfc89c2 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -4827,6 +4827,7 @@ impl StacksNode { block_height: 1, op_vtxindex: 1, vrf_public_key, + memo: vec![], }) } else { LeaderKeyRegistrationState::Inactive diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 3108485c00..2c78b4c187 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -580,6 +580,7 @@ impl Node { block_height: op.block_height as u64, op_vtxindex: op.vtxindex as u32, target_block_height: (op.block_height as u64) - 1, + memo: op.memo.clone(), }); } } diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index 01f848c2e6..b824793e17 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -158,6 +158,9 @@ pub struct RegisteredKey { pub op_vtxindex: u32, /// the public key itself pub vrf_public_key: VRFPublicKey, + /// `memo` field that was used to register key + /// Could be `Hash160(miner_pubkey)`, or empty + pub memo: Vec, } pub fn announce_boot_receipts( From 24fe4e30f962f759c184059e760af2d38153ae17 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 14 Jun 2024 12:07:43 -0500 Subject: [PATCH 0324/1400] test: add `index-range` option to `stacks-inspect replay-blocks` --- stackslib/src/main.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 2bb78a6282..dfd153a068 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -37,6 +37,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::{File, OpenOptions}; use std::io::prelude::*; use std::io::BufReader; +use std::time::Instant; use std::{env, fs, io, process, thread}; use blockstack_lib::burnchains::bitcoin::indexer::{ @@ -65,7 +66,6 @@ use blockstack_lib::chainstate::stacks::{StacksBlockHeader, *}; use blockstack_lib::clarity::vm::costs::ExecutionCost; use blockstack_lib::clarity::vm::types::StacksAddressExtensions; use blockstack_lib::clarity::vm::ClarityVersion; -use blockstack_lib::clarity_cli; use blockstack_lib::clarity_cli::vm_execute; use blockstack_lib::core::{MemPoolDB, *}; use blockstack_lib::cost_estimates::metrics::UnitMetric; @@ -76,6 +76,7 @@ use blockstack_lib::net::relay::Relayer; use blockstack_lib::net::StacksMessage; use blockstack_lib::util_lib::db::sqlite_open; use blockstack_lib::util_lib::strings::UrlString; +use blockstack_lib::{clarity_cli, util_lib}; use libstackerdb::StackerDBChunkData; use rusqlite::types::ToSql; use rusqlite::{Connection, OpenFlags}; @@ -879,6 +880,7 @@ simulating a miner. eprintln!("Usage:"); eprintln!(" {n} "); eprintln!(" {n} prefix "); + eprintln!(" {n} index-range "); eprintln!(" {n} range "); eprintln!(" {n} "); process::exit(1); @@ -886,6 +888,7 @@ simulating a miner. if argv.len() < 2 { print_help_and_exit(); } + let start = Instant::now(); let stacks_path = &argv[2]; let mode = argv.get(3).map(String::as_str); let staging_blocks_db_path = format!("{stacks_path}/mainnet/chainstate/vm/index.sqlite"); @@ -911,6 +914,14 @@ simulating a miner. let blocks = arg5.saturating_sub(arg4); format!("SELECT index_block_hash FROM staging_blocks ORDER BY height ASC LIMIT {start}, {blocks}") } + Some("index-range") => { + let start = argv[4] + .parse::() + .expect(" not a valid u64"); + let end = argv[5].parse::().expect(" not a valid u64"); + let blocks = end.saturating_sub(start); + format!("SELECT index_block_hash FROM staging_blocks ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") + } Some("last") => format!( "SELECT index_block_hash FROM staging_blocks ORDER BY height DESC LIMIT {}", argv[4] @@ -936,7 +947,7 @@ simulating a miner. } replay_block(stacks_path, index_block_hash); } - println!("Finished!"); + println!("Finished. run_time_seconds = {}", start.elapsed().as_secs()); process::exit(0); } From 9d261eb6ad2dd8b820d687ec877123ae3c51e35c Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 14 Jun 2024 13:13:01 -0400 Subject: [PATCH 0325/1400] fix: Check if correct pubkey hash was used in `RegisteredKey` before using it in Nakamoto --- testnet/stacks-node/src/nakamoto_node.rs | 23 +++++++++++++++----- testnet/stacks-node/src/run_loop/nakamoto.rs | 12 +++------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index a21e36a9a0..a556a41093 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -25,16 +25,15 @@ use stacks::chainstate::stacks::Error as ChainstateError; use stacks::monitoring; use stacks::monitoring::update_active_miners_count_gauge; use stacks::net::atlas::AtlasConfig; -use stacks::net::p2p::PeerNetwork; use stacks::net::relay::Relayer; use stacks::net::stackerdb::StackerDBs; -use stacks::util::hash::Hash160; use stacks_common::types::chainstate::SortitionId; use stacks_common::types::StacksEpochId; use super::{Config, EventDispatcher, Keychain}; use crate::burnchains::bitcoin_regtest_controller::addr2str; use crate::neon_node::{LeaderKeyRegistrationState, StacksNode as NeonNode}; +use crate::run_loop::boot_nakamoto::Neon2NakaData; use crate::run_loop::nakamoto::{Globals, RunLoop}; use crate::run_loop::RegisteredKey; @@ -134,7 +133,7 @@ impl StacksNode { globals: Globals, // relay receiver endpoint for the p2p thread, so the relayer can feed it data to push relay_recv: Receiver, - peer_network: Option, + data_from_neon: Option, ) -> StacksNode { let config = runloop.config().clone(); let is_miner = runloop.is_miner(); @@ -160,7 +159,10 @@ impl StacksNode { .connect_mempool_db() .expect("FATAL: database failure opening mempool"); - let mut p2p_net = peer_network + let data_from_neon = data_from_neon.unwrap_or_default(); + + let mut p2p_net = data_from_neon + .peer_network .unwrap_or_else(|| NeonNode::setup_peer_network(&config, &atlas_config, burnchain)); let stackerdbs = StackerDBs::connect(&config.get_stacker_db_file_path(), true) @@ -182,8 +184,19 @@ impl StacksNode { memo: keychain.get_nakamoto_pkh().as_bytes().to_vec(), }) } else { - LeaderKeyRegistrationState::Inactive + match &data_from_neon.leader_key_registration_state { + LeaderKeyRegistrationState::Active(registered_key) => { + let pubkey_hash = keychain.get_nakamoto_pkh(); + if pubkey_hash.as_ref() == ®istered_key.memo { + data_from_neon.leader_key_registration_state + } else { + LeaderKeyRegistrationState::Inactive + } + } + _ => LeaderKeyRegistrationState::Inactive, + } }; + globals.set_initial_leader_key_registration_state(leader_key_registration_state); let relayer_thread = diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 112912ef37..65c36e926c 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -39,6 +39,7 @@ use crate::burnchains::make_bitcoin_indexer; use crate::globals::Globals as GenericGlobals; use crate::monitoring::{start_serving_monitoring_metrics, MonitoringError}; use crate::nakamoto_node::{self, StacksNode, BLOCK_PROCESSOR_STACK_SIZE, RELAYER_MAX_BUFFER}; +use crate::neon_node::LeaderKeyRegistrationState; use crate::node::{ get_account_balances, get_account_lockups, get_names, get_namespaces, use_test_genesis_chainstate, @@ -437,8 +438,6 @@ impl RunLoop { // relayer linkup let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER); - let data_from_neon = data_from_neon.unwrap_or_default(); - // set up globals so other subsystems can instantiate off of the runloop state. let globals = Globals::new( coordinator_senders, @@ -448,7 +447,7 @@ impl RunLoop { self.pox_watchdog_comms.clone(), self.should_keep_running.clone(), mine_start, - data_from_neon.leader_key_registration_state, + LeaderKeyRegistrationState::default(), ); self.set_globals(globals.clone()); @@ -484,12 +483,7 @@ impl RunLoop { // Boot up the p2p network and relayer, and figure out how many sortitions we have so far // (it could be non-zero if the node is resuming from chainstate) - let mut node = StacksNode::spawn( - self, - globals.clone(), - relay_recv, - data_from_neon.peer_network, - ); + let mut node = StacksNode::spawn(self, globals.clone(), relay_recv, data_from_neon); // Wait for all pending sortitions to process let burnchain_db = burnchain_config From d572b65b87262d29597c38f60a2ccaefd9a15949 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 14 Jun 2024 13:50:53 -0400 Subject: [PATCH 0326/1400] fix: always, always, always filter out stacks blocks on non-canonical burnchain forks --- testnet/stacks-node/src/neon_node.rs | 121 ++++++++++++++++----------- 1 file changed, 73 insertions(+), 48 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 282fc5daa8..e0a870a4e6 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -153,7 +153,7 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::{Burnchain, BurnchainSigner, PoxConstants, Txid}; -use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleConn}; use stacks::chainstate::burn::operations::leader_block_commit::{ RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS, }; @@ -1148,6 +1148,50 @@ impl BlockMinerThread { ret } + /// Is a given Stacks staging block on the canonical burnchain fork? + pub(crate) fn is_on_canonical_burnchain_fork( + candidate: &StagingBlock, + sortdb_tip_handle: &SortitionHandleConn, + ) -> bool { + let candidate_ch = &candidate.consensus_hash; + let candidate_burn_ht = match SortitionDB::get_block_snapshot_consensus( + sortdb_tip_handle.conn(), + candidate_ch, + ) { + Ok(Some(x)) => x.block_height, + Ok(None) => { + warn!("Tried to evaluate potential chain tip with an unknown consensus hash"; + "consensus_hash" => %candidate_ch, + "stacks_block_hash" => %candidate.anchored_block_hash); + return false; + } + Err(e) => { + warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash"; + "consensus_hash" => %candidate_ch, + "stacks_block_hash" => %candidate.anchored_block_hash, + "err" => ?e); + return false; + } + }; + let tip_ch = match sortdb_tip_handle.get_consensus_at(candidate_burn_ht) { + Ok(Some(x)) => x, + Ok(None) => { + warn!("Tried to evaluate potential chain tip with a consensus hash ahead of canonical tip"; + "consensus_hash" => %candidate_ch, + "stacks_block_hash" => %candidate.anchored_block_hash); + return false; + } + Err(e) => { + warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash"; + "consensus_hash" => %candidate_ch, + "stacks_block_hash" => %candidate.anchored_block_hash, + "err" => ?e); + return false; + } + }; + &tip_ch == candidate_ch + } + /// Load all candidate tips upon which to build. This is all Stacks blocks whose heights are /// less than or equal to at `at_stacks_height` (or the canonical chain tip height, if not given), /// but greater than or equal to this end height minus `max_depth`. @@ -1177,61 +1221,42 @@ impl BlockMinerThread { let stacks_tips: Vec<_> = stacks_tips .into_iter() - .filter(|candidate| { - let candidate_ch = &candidate.consensus_hash; - let candidate_burn_ht = match SortitionDB::get_block_snapshot_consensus( - sortdb_tip_handle.conn(), - candidate_ch - ) { - Ok(Some(x)) => x.block_height, - Ok(None) => { - warn!("Tried to evaluate potential chain tip with an unknown consensus hash"; - "consensus_hash" => %candidate_ch, - "stacks_block_hash" => %candidate.anchored_block_hash); - return false; - }, - Err(e) => { - warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash"; - "consensus_hash" => %candidate_ch, - "stacks_block_hash" => %candidate.anchored_block_hash, - "err" => ?e); - return false; - }, - }; - let tip_ch = match sortdb_tip_handle.get_consensus_at(candidate_burn_ht) { - Ok(Some(x)) => x, - Ok(None) => { - warn!("Tried to evaluate potential chain tip with a consensus hash ahead of canonical tip"; - "consensus_hash" => %candidate_ch, - "stacks_block_hash" => %candidate.anchored_block_hash); - return false; - }, - Err(e) => { - warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash"; - "consensus_hash" => %candidate_ch, - "stacks_block_hash" => %candidate.anchored_block_hash, - "err" => ?e); - return false; - }, - }; - if &tip_ch != candidate_ch { - false - } else { - true - } - }) + .filter(|candidate| Self::is_on_canonical_burnchain_fork(candidate, &sortdb_tip_handle)) .collect(); + if stacks_tips.len() == 0 { + return vec![]; + } + let mut considered = HashSet::new(); let mut candidates = vec![]; let end_height = stacks_tips[0].height; - for cur_height in end_height.saturating_sub(max_depth)..=end_height { - let stacks_tips = chain_state + // process these tips + for tip in stacks_tips.into_iter() { + let index_block_hash = + StacksBlockId::new(&tip.consensus_hash, &tip.anchored_block_hash); + let burn_height = burn_db + .get_consensus_hash_height(&tip.consensus_hash) + .expect("FATAL: could not query burnchain block height") + .expect("FATAL: no burnchain block height for Stacks tip"); + let candidate = TipCandidate::new(tip, burn_height); + candidates.push(candidate); + considered.insert(index_block_hash); + } + + // process earlier tips, back to max_depth + for cur_height in end_height.saturating_sub(max_depth)..end_height { + let stacks_tips: Vec<_> = chain_state .get_stacks_chain_tips_at_height(cur_height) - .expect("FATAL: could not query chain tips at height"); + .expect("FATAL: could not query chain tips at height") + .into_iter() + .filter(|candidate| { + Self::is_on_canonical_burnchain_fork(candidate, &sortdb_tip_handle) + }) + .collect(); - for tip in stacks_tips { + for tip in stacks_tips.into_iter() { let index_block_hash = StacksBlockId::new(&tip.consensus_hash, &tip.anchored_block_hash); From 376ed5aacebc680da4c4b99b4fb4383f05ff0903 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 14 Jun 2024 17:06:16 -0400 Subject: [PATCH 0327/1400] chore: Add warning if mining and no `mining_key` is set --- testnet/stacks-node/src/neon_node.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index fb6dfc89c2..a27564a556 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -208,6 +208,7 @@ use crate::burnchains::bitcoin_regtest_controller::{ }; use crate::burnchains::make_bitcoin_indexer; use crate::chain_data::MinerStats; +use crate::config::NodeConfig; use crate::globals::{NeonGlobals as Globals, RelayerDirective}; use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; @@ -664,7 +665,7 @@ impl MicroblockMinerThread { frequency, last_mined: 0, quantity: 0, - cost_so_far: cost_so_far, + cost_so_far, settings, }) } @@ -4818,8 +4819,12 @@ impl StacksNode { let local_peer = p2p_net.local_peer.clone(); + let NodeConfig { + mock_mining, miner, .. + } = config.get_node_config(false); + // setup initial key registration - let leader_key_registration_state = if config.get_node_config(false).mock_mining { + let leader_key_registration_state = if mock_mining { // mock mining, pretend to have a registered key let (vrf_public_key, _) = keychain.make_vrf_keypair(VRF_MOCK_MINER_KEY); LeaderKeyRegistrationState::Active(RegisteredKey { @@ -4834,6 +4839,11 @@ impl StacksNode { }; globals.set_initial_leader_key_registration_state(leader_key_registration_state); + // Warn the user that they need to set up a miner key + if miner && !mock_mining && config.miner.mining_key.is_none() { + warn!("`[miner.mining_key]` not set in config file. This will be required to mine in Epoch 3.0!") + } + let relayer_thread = RelayerThread::new(runloop, local_peer.clone(), relayer); StacksNode::set_monitoring_miner_address(&keychain, &relayer_thread); From 81ef601a2b29a2bdd978f00710d5b6e074e48d88 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sat, 15 Jun 2024 19:55:49 -0400 Subject: [PATCH 0328/1400] fix: fix tenure info properties and add integration test --- .github/workflows/bitcoin-tests.yml | 1 + clarity/src/vm/database/clarity_db.rs | 85 ++- clarity/src/vm/docs/mod.rs | 25 +- clarity/src/vm/test_util/mod.rs | 30 +- stacks-common/src/types/chainstate.rs | 7 + .../chainstate/stacks/boot/contract_tests.rs | 30 +- .../src/chainstate/stacks/db/accounts.rs | 19 +- .../src/chainstate/stacks/tests/accounting.rs | 8 +- stackslib/src/clarity_cli.rs | 30 +- stackslib/src/clarity_vm/database/mod.rs | 344 +++++++---- .../src/tests/nakamoto_integrations.rs | 546 +++++++++++++++++- 11 files changed, 977 insertions(+), 148 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index ea86772d8d..006190be0d 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -98,6 +98,7 @@ jobs: - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state - tests::nakamoto_integrations::check_block_times + - tests::nakamoto_integrations::check_block_info # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index e2bb7b0f5b..4bd6a9d5df 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -92,14 +92,34 @@ pub trait HeadersDB { fn get_burn_header_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option; fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option; - fn get_vrf_seed_for_block(&self, id_bhh: &StacksBlockId) -> Option; + fn get_vrf_seed_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option; fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option; fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option; fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option; - fn get_miner_address(&self, id_bhh: &StacksBlockId) -> Option; - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option; - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option; - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option; + fn get_miner_address( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option; + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option; + fn get_burnchain_tokens_spent_for_winning_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option; + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option; } pub trait BurnStateDB { @@ -181,7 +201,11 @@ impl HeadersDB for NullHeadersDB { None } } - fn get_vrf_seed_for_block(&self, _bhh: &StacksBlockId) -> Option { + fn get_vrf_seed_for_block( + &self, + _bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } fn get_stacks_block_header_hash_for_block( @@ -217,19 +241,32 @@ impl HeadersDB for NullHeadersDB { Some(1) } } - fn get_miner_address(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_miner_address( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } - fn get_burnchain_tokens_spent_for_block(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_block( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } fn get_burnchain_tokens_spent_for_winning_block( &self, _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, ) -> Option { None } - fn get_tokens_earned_for_block(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_tokens_earned_for_block( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } } @@ -1037,22 +1074,24 @@ impl<'a> ClarityDatabase<'a> { .get_pox_payout_addrs(burnchain_block_height, &sortition_id)) } - pub fn get_burnchain_block_height(&mut self, id_bhh: &StacksBlockId) -> Option { + pub fn get_burnchain_block_height(&self, id_bhh: &StacksBlockId) -> Option { self.headers_db.get_burn_block_height_for_block(id_bhh) } pub fn get_block_vrf_seed(&mut self, block_height: u32) -> Result { let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; self.headers_db - .get_vrf_seed_for_block(&id_bhh) + .get_vrf_seed_for_block(&id_bhh, &epoch) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } pub fn get_miner_address(&mut self, block_height: u32) -> Result { let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; Ok(self .headers_db - .get_miner_address(&id_bhh) + .get_miner_address(&id_bhh, &epoch) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()))? .into()) } @@ -1063,9 +1102,10 @@ impl<'a> ClarityDatabase<'a> { } let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; Ok(self .headers_db - .get_burnchain_tokens_spent_for_winning_block(&id_bhh) + .get_burnchain_tokens_spent_for_winning_block(&id_bhh, &epoch) .ok_or_else(|| { InterpreterError::Expect( "FATAL: no winning burnchain token spend record for block".into(), @@ -1080,9 +1120,10 @@ impl<'a> ClarityDatabase<'a> { } let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; Ok(self .headers_db - .get_burnchain_tokens_spent_for_block(&id_bhh) + .get_burnchain_tokens_spent_for_block(&id_bhh, &epoch) .ok_or_else(|| { InterpreterError::Expect( "FATAL: no total burnchain token spend record for block".into(), @@ -1105,9 +1146,10 @@ impl<'a> ClarityDatabase<'a> { } let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; let reward: u128 = self .headers_db - .get_tokens_earned_for_block(&id_bhh) + .get_tokens_earned_for_block(&id_bhh, &epoch) .map(|x| x.into()) .ok_or_else(|| { InterpreterError::Expect("FATAL: matured block has no recorded reward".into()) @@ -2118,4 +2160,17 @@ impl<'a> ClarityDatabase<'a> { pub fn get_stacks_epoch(&self, height: u32) -> Option { self.burn_state_db.get_stacks_epoch(height) } + + pub fn get_stacks_epoch_for_block(&self, id_bhh: &StacksBlockId) -> Result { + let burn_block = self.get_burnchain_block_height(&id_bhh).ok_or_else(|| { + InterpreterError::Expect(format!( + "FATAL: no burnchain block height found for Stacks block {}", + id_bhh + )) + })?; + let epoch = self + .get_stacks_epoch(burn_block) + .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()))?; + Ok(epoch.epoch_id) + } } diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 7bd2592fff..6026d1c2b7 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2792,7 +2792,11 @@ mod test { fn get_consensus_hash_for_block(&self, _bhh: &StacksBlockId) -> Option { Some(ConsensusHash([0; 20])) } - fn get_vrf_seed_for_block(&self, _bhh: &StacksBlockId) -> Option { + fn get_vrf_seed_for_block( + &self, + _bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { Some( VRFSeed::from_hex( "f490de2920c8a35fabeb13208852aa28c76f9be9b03a4dd2b3c075f7a26923b4", @@ -2820,21 +2824,34 @@ mod test { fn get_burn_block_height_for_block(&self, _id_bhh: &StacksBlockId) -> Option { Some(567890) } - fn get_miner_address(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_miner_address( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { Some(12345) } fn get_burnchain_tokens_spent_for_winning_block( &self, id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, ) -> Option { Some(2345) } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { Some(12000) } } diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index 3c449ef984..49f7d76c41 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -136,7 +136,11 @@ impl HeadersDB for UnitTestHeaderDB { None } } - fn get_vrf_seed_for_block(&self, _bhh: &StacksBlockId) -> Option { + fn get_vrf_seed_for_block( + &self, + _bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } fn get_stacks_block_header_hash_for_block( @@ -172,7 +176,11 @@ impl HeadersDB for UnitTestHeaderDB { Some(1 + id_bhh.as_bytes()[0] as u32) } } - fn get_miner_address(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_miner_address( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } @@ -185,17 +193,29 @@ impl HeadersDB for UnitTestHeaderDB { } } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant self.get_burn_block_height_for_block(id_bhh).map(|_| 2000) } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_winning_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant self.get_burn_block_height_for_block(id_bhh).map(|_| 1000) } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant self.get_burn_block_height_for_block(id_bhh).map(|_| 3000) } diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index bc82c20c1c..d61f78581f 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -247,6 +247,13 @@ impl_array_hexstring_fmt!(StacksBlockId); impl_byte_array_newtype!(StacksBlockId, u8, 32); impl_byte_array_serde!(StacksBlockId); +pub struct TenureBlockId(pub StacksBlockId); +impl From for TenureBlockId { + fn from(id: StacksBlockId) -> TenureBlockId { + TenureBlockId(id) + } +} + pub struct ConsensusHash(pub [u8; 20]); impl_array_newtype!(ConsensusHash, u8, 20); impl_array_hexstring_fmt!(ConsensusHash); diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 851c80b322..233fb3f23a 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -541,7 +541,11 @@ impl HeadersDB for TestSimHeadersDB { } } - fn get_vrf_seed_for_block(&self, _bhh: &StacksBlockId) -> Option { + fn get_vrf_seed_for_block( + &self, + _bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } @@ -602,21 +606,37 @@ impl HeadersDB for TestSimHeadersDB { } } - fn get_miner_address(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_miner_address( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { Some(MINER_ADDR.clone()) } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant self.get_burn_block_height_for_block(id_bhh).map(|_| 2000) } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_winning_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant self.get_burn_block_height_for_block(id_bhh).map(|_| 1000) } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant self.get_burn_block_height_for_block(id_bhh).map(|_| 3000) } diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 7a10503b87..affc0bb9d8 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -16,6 +16,7 @@ use std::collections::HashMap; +use clarity::types::chainstate::TenureBlockId; use clarity::vm::database::clarity_store::*; use clarity::vm::database::*; use clarity::vm::types::*; @@ -472,8 +473,8 @@ impl StacksChainState { // trying to store the same matured rewards for a common ancestor block. let cur_rewards = StacksChainState::inner_get_matured_miner_payments( tx, - parent_block_id, - child_block_id, + &(*parent_block_id).into(), + &(*child_block_id).into(), )?; if cur_rewards.len() > 0 { let mut present = false; @@ -608,11 +609,11 @@ impl StacksChainState { fn inner_get_matured_miner_payments( conn: &DBConn, - parent_block_id: &StacksBlockId, - child_block_id: &StacksBlockId, + parent_block_id: &TenureBlockId, + child_block_id: &TenureBlockId, ) -> Result, Error> { let sql = "SELECT * FROM matured_rewards WHERE parent_index_block_hash = ?1 AND child_index_block_hash = ?2 AND vtxindex = 0"; - let args: &[&dyn ToSql] = &[parent_block_id, child_block_id]; + let args: &[&dyn ToSql] = &[&parent_block_id.0, &child_block_id.0]; let ret: Vec = query_rows(conn, sql, args).map_err(|e| Error::DBError(e))?; Ok(ret) } @@ -621,8 +622,8 @@ impl StacksChainState { /// You'd be querying for the `child_block_id`'s reward. pub fn get_matured_miner_payment( conn: &DBConn, - parent_block_id: &StacksBlockId, - child_block_id: &StacksBlockId, + parent_block_id: &TenureBlockId, + child_block_id: &TenureBlockId, ) -> Result, Error> { let config = StacksChainState::load_db_config(conn)?; let ret = StacksChainState::inner_get_matured_miner_payments( @@ -643,8 +644,8 @@ impl StacksChainState { panic!("FATAL: got two parent rewards"); }; Ok(Some(reward)) - } else if child_block_id - == &StacksBlockHeader::make_index_block_hash( + } else if child_block_id.0 + == StacksBlockHeader::make_index_block_hash( &FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH, ) diff --git a/stackslib/src/chainstate/stacks/tests/accounting.rs b/stackslib/src/chainstate/stacks/tests/accounting.rs index 69dddd742c..9033803325 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -357,8 +357,8 @@ fn test_bad_microblock_fees_pre_v210() { let matured_reward_opt = StacksChainState::get_matured_miner_payment( peer.chainstate().db(), - &parent_block_id, - &block_id, + &parent_block_id.into(), + &block_id.into(), ) .unwrap(); @@ -680,8 +680,8 @@ fn test_bad_microblock_fees_fix_transition() { let matured_reward_opt = StacksChainState::get_matured_miner_payment( peer.chainstate().db(), - &parent_block_id, - &block_id, + &parent_block_id.into(), + &block_id.into(), ) .unwrap(); diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 7e508602e3..89c507390c 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -666,7 +666,11 @@ impl HeadersDB for CLIHeadersDB { } } - fn get_vrf_seed_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_vrf_seed_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { let conn = self.conn(); if let Some(_) = get_cli_block_height(&conn, id_bhh) { // mock it, but make it unique @@ -721,21 +725,37 @@ impl HeadersDB for CLIHeadersDB { } } - fn get_miner_address(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_miner_address( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant get_cli_block_height(&self.conn(), id_bhh).map(|_| 2000) } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_winning_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant get_cli_block_height(&self.conn(), id_bhh).map(|_| 1000) } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // if the block is defined at all, then return a constant get_cli_block_height(&self.conn(), id_bhh).map(|_| 3000) } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 4fe4a9dcd9..4720539229 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -15,7 +15,7 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, TupleData}; use rusqlite::{Connection, OptionalExtension, Row, ToSql}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, - VRFSeed, + TenureBlockId, VRFSeed, }; use stacks_common::types::Address; use stacks_common::util::vrf::VRFProof; @@ -73,9 +73,13 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { } fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column_nakamoto(self.0, id_bhh, "timestamp", |r| { - u64::from_row(r).expect("FATAL: malformed timestamp") - }) + get_stacks_header_column_from_table( + self.0, + id_bhh, + "timestamp", + &|r| u64::from_row(r).expect("FATAL: malformed timestamp"), + true, + ) } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { @@ -87,37 +91,73 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { }) } - fn get_vrf_seed_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.0, id_bhh, "proof", |r| { - let proof = VRFProof::from_column(r, "proof").expect("FATAL: malformed proof"); - VRFSeed::from_proof(&proof) - }) + fn get_vrf_seed_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, epoch); + let (column_name, nakamoto) = if epoch.uses_nakamoto_blocks() { + ("vrf_proof", true) + } else { + ("proof", false) + }; + get_stacks_header_column_from_table( + self.0, + &tenure_id_bhh.0, + column_name, + &|r| { + let proof = VRFProof::from_column(r, column_name).expect("FATAL: malformed proof"); + VRFSeed::from_proof(&proof) + }, + nakamoto, + ) } - fn get_miner_address(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.0, id_bhh, "address", |r| { + fn get_miner_address( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, epoch); + get_miner_column(self.0, &tenure_id_bhh, "address", |r| { let s: String = r.get_unwrap("address"); let addr = StacksAddress::from_string(&s).expect("FATAL: malformed address"); addr }) } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.0, id_bhh, "burnchain_sortition_burn", |r| { + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, epoch); + get_miner_column(self.0, &tenure_id_bhh, "burnchain_sortition_burn", |r| { u64::from_row(r).expect("FATAL: malformed sortition burn") }) .map(|x| x.into()) } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.0, id_bhh, "burnchain_commit_burn", |r| { + fn get_burnchain_tokens_spent_for_winning_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, epoch); + get_miner_column(self.0, &tenure_id_bhh, "burnchain_commit_burn", |r| { u64::from_row(r).expect("FATAL: malformed commit burn") }) .map(|x| x.into()) } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_matured_reward(self.0, id_bhh).map(|x| x.total().into()) + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, epoch); + get_matured_reward(self.0, &tenure_id_bhh, epoch).map(|x| x.total().into()) } } @@ -153,9 +193,13 @@ impl<'a> HeadersDB for ChainstateTx<'a> { } fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column_nakamoto(self.deref().deref(), id_bhh, "timestamp", |r| { - u64::from_row(r).expect("FATAL: malformed timestamp") - }) + get_stacks_header_column_from_table( + self.deref().deref(), + id_bhh, + "timestamp", + &|r| u64::from_row(r).expect("FATAL: malformed timestamp"), + true, + ) } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { @@ -167,40 +211,79 @@ impl<'a> HeadersDB for ChainstateTx<'a> { }) } - fn get_vrf_seed_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.deref().deref(), id_bhh, "proof", |r| { - let proof = VRFProof::from_column(r, "proof").expect("FATAL: malformed proof"); - VRFSeed::from_proof(&proof) - }) + fn get_vrf_seed_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, epoch); + let (column_name, nakamoto) = if epoch.uses_nakamoto_blocks() { + ("vrf_proof", true) + } else { + ("proof", false) + }; + get_stacks_header_column_from_table( + self.deref().deref(), + &tenure_id_bhh.0, + column_name, + &|r| { + let proof = VRFProof::from_column(r, column_name).expect("FATAL: malformed proof"); + VRFSeed::from_proof(&proof) + }, + nakamoto, + ) } - fn get_miner_address(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.deref().deref(), id_bhh, "address", |r| { + fn get_miner_address( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, epoch); + get_miner_column(self.deref().deref(), &tenure_id_bhh, "address", |r| { let s: String = r.get_unwrap("address"); let addr = StacksAddress::from_string(&s).expect("FATAL: malformed address"); addr }) } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, epoch); get_miner_column( self.deref().deref(), - id_bhh, + &tenure_id_bhh, "burnchain_sortition_burn", |r| u64::from_row(r).expect("FATAL: malformed sortition burn"), ) .map(|x| x.into()) } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.deref().deref(), id_bhh, "burnchain_commit_burn", |r| { - u64::from_row(r).expect("FATAL: malformed commit burn") - }) + fn get_burnchain_tokens_spent_for_winning_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, epoch); + get_miner_column( + self.deref().deref(), + &tenure_id_bhh, + "burnchain_commit_burn", + |r| u64::from_row(r).expect("FATAL: malformed commit burn"), + ) .map(|x| x.into()) } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_matured_reward(self.deref().deref(), id_bhh).map(|x| x.total().into()) + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, epoch); + get_matured_reward(self.deref().deref(), &tenure_id_bhh, epoch).map(|x| x.total().into()) } } @@ -236,9 +319,13 @@ impl HeadersDB for MARF { } fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column_nakamoto(self.sqlite_conn(), id_bhh, "timestamp", |r| { - u64::from_row(r).expect("FATAL: malformed timestamp") - }) + get_stacks_header_column_from_table( + self.sqlite_conn(), + id_bhh, + "timestamp", + &|r| u64::from_row(r).expect("FATAL: malformed timestamp"), + true, + ) } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { @@ -250,86 +337,108 @@ impl HeadersDB for MARF { }) } - fn get_vrf_seed_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.sqlite_conn(), id_bhh, "proof", |r| { - let proof = VRFProof::from_column(r, "proof").expect("FATAL: malformed proof"); - VRFSeed::from_proof(&proof) - }) + fn get_vrf_seed_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, epoch); + let (column_name, nakamoto) = if epoch.uses_nakamoto_blocks() { + ("vrf_proof", true) + } else { + ("proof", false) + }; + get_stacks_header_column_from_table( + self.sqlite_conn(), + &tenure_id_bhh.0, + column_name, + &|r| { + let proof = VRFProof::from_column(r, column_name).expect("FATAL: malformed proof"); + VRFSeed::from_proof(&proof) + }, + nakamoto, + ) } - fn get_miner_address(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.sqlite_conn(), id_bhh, "address", |r| { + fn get_miner_address( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, epoch); + get_miner_column(self.sqlite_conn(), &tenure_id_bhh, "address", |r| { let s: String = r.get_unwrap("address"); let addr = StacksAddress::from_string(&s).expect("FATAL: malformed address"); addr }) } - fn get_burnchain_tokens_spent_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burnchain_tokens_spent_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, epoch); get_miner_column( self.sqlite_conn(), - id_bhh, + &tenure_id_bhh, "burnchain_sortition_burn", |r| u64::from_row(r).expect("FATAL: malformed sortition burn"), ) .map(|x| x.into()) } - fn get_burnchain_tokens_spent_for_winning_block(&self, id_bhh: &StacksBlockId) -> Option { - get_miner_column(self.sqlite_conn(), id_bhh, "burnchain_commit_burn", |r| { - u64::from_row(r).expect("FATAL: malformed commit burn") - }) + fn get_burnchain_tokens_spent_for_winning_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, epoch); + get_miner_column( + self.sqlite_conn(), + &tenure_id_bhh, + "burnchain_commit_burn", + |r| u64::from_row(r).expect("FATAL: malformed commit burn"), + ) .map(|x| x.into()) } - fn get_tokens_earned_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_matured_reward(self.sqlite_conn(), id_bhh).map(|x| x.total().into()) + fn get_tokens_earned_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, epoch); + get_matured_reward(self.sqlite_conn(), &tenure_id_bhh, epoch).map(|x| x.total().into()) } } -fn get_stacks_header_column_internal( +/// Select a specific column from the headers table, specifying whether to use +/// the original block headers table or the Nakamoto block headers table. +pub fn get_stacks_header_column_from_table( conn: &DBConn, id_bhh: &StacksBlockId, column_name: &str, - loader: F, - nakamoto_only: bool, + loader: &F, + nakamoto: bool, ) -> Option where F: Fn(&Row) -> R, { let args: &[&dyn ToSql] = &[id_bhh]; - if !nakamoto_only { - if let Some(result) = conn - .query_row( - &format!( - "SELECT {} FROM block_headers WHERE index_block_hash = ?", - column_name - ), - args, - |x| Ok(loader(x)), - ) - .optional() - .unwrap_or_else(|_| { - panic!( - "Unexpected SQL failure querying block header table for '{}'", - column_name - ) - }) - { - return Some(result); - } - } - // if `nakamoto_only` or nothing was found in `block_headers`, try `nakamoto_block_headers` + let table_name = if nakamoto { + "nakamoto_block_headers" + } else { + "block_headers" + }; + conn.query_row( - &format!( - "SELECT {} FROM nakamoto_block_headers WHERE index_block_hash = ?", - column_name - ), + &format!("SELECT {column_name} FROM {table_name} WHERE index_block_hash = ?",), args, |x| Ok(loader(x)), ) .optional() - .unwrap_or_else(|_| { + .unwrap_or_else(|e| { panic!( "Unexpected SQL failure querying block header table for '{}'", column_name @@ -346,31 +455,60 @@ fn get_stacks_header_column( where F: Fn(&Row) -> R, { - get_stacks_header_column_internal(conn, id_bhh, column_name, loader, false) + match get_stacks_header_column_from_table(conn, id_bhh, column_name, &loader, false) { + Some(x) => Some(x), + None => get_stacks_header_column_from_table(conn, id_bhh, column_name, &loader, true), + } } -fn get_stacks_header_column_nakamoto( +fn get_first_block_in_tenure( conn: &DBConn, id_bhh: &StacksBlockId, - column_name: &str, - loader: F, -) -> Option -where - F: Fn(&Row) -> R, -{ - get_stacks_header_column_internal(conn, id_bhh, column_name, loader, true) + epoch: &StacksEpochId, +) -> TenureBlockId { + // Pre-nakamoto, each block is a tenure + if !epoch.uses_nakamoto_blocks() { + return id_bhh.clone().into(); + } + + let args: &[&dyn ToSql] = &[id_bhh]; + conn.query_row( + " + WITH BlockInTenure AS ( + SELECT consensus_hash + FROM nakamoto_block_headers + WHERE index_block_hash = ? + ) + SELECT nbh.index_block_hash + FROM nakamoto_block_headers nbh + JOIN BlockInTenure bit ON nbh.consensus_hash = bit.consensus_hash + WHERE nbh.block_height = ( + SELECT MIN(block_height) + FROM nakamoto_block_headers + WHERE consensus_hash = bit.consensus_hash + )", + args, + |x| { + Ok(StacksBlockId::from_column(x, "index_block_hash") + .expect("Bad index_block_hash in database") + .into()) + }, + ) + .unwrap_or_else(|e| { + panic!("Unexpected SQL failure querying block header table for 'index_block_hash'") + }) } fn get_miner_column( conn: &DBConn, - id_bhh: &StacksBlockId, + id_bhh: &TenureBlockId, column_name: &str, loader: F, ) -> Option where F: FnOnce(&Row) -> R, { - let args: &[&dyn ToSql] = &[id_bhh]; + let args: &[&dyn ToSql] = &[&id_bhh.0]; conn.query_row( &format!( "SELECT {} FROM payments WHERE index_block_hash = ? AND miner = 1", @@ -388,11 +526,20 @@ where }) } -fn get_matured_reward(conn: &DBConn, child_id_bhh: &StacksBlockId) -> Option { +fn get_matured_reward( + conn: &DBConn, + child_id_bhh: &TenureBlockId, + epoch: &StacksEpochId, +) -> Option { + let table_name = if epoch.uses_nakamoto_blocks() { + "nakamoto_block_headers" + } else { + "block_headers" + }; let parent_id_bhh = conn .query_row( - "SELECT parent_block_id FROM block_headers WHERE index_block_hash = ?", - [child_id_bhh].iter(), + &format!("SELECT parent_block_id FROM {table_name} WHERE index_block_hash = ?"), + [child_id_bhh.0].iter(), |x| { Ok(StacksBlockId::from_column(x, "parent_block_id") .expect("Bad parent_block_id in database")) @@ -402,7 +549,8 @@ fn get_matured_reward(conn: &DBConn, child_id_bhh: &StacksBlockId) -> Option. -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::net::ToSocketAddrs; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::mpsc::{channel, Receiver, Sender}; @@ -25,7 +25,7 @@ use std::{env, thread}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; -use clarity::vm::{ClarityVersion, Value}; +use clarity::vm::{ClarityName, ClarityVersion, Value}; use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::SignerMessage as SignerMessageV0; @@ -49,7 +49,8 @@ use stacks::chainstate::stacks::boot::{ }; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{ - BlockBuilder, BlockLimitFunction, TransactionEvent, TransactionResult, TransactionSuccessEvent, + self, BlockBuilder, BlockLimitFunction, TransactionEvent, TransactionResult, + TransactionSuccessEvent, }; use stacks::chainstate::stacks::{ SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangePayload, @@ -5417,3 +5418,542 @@ fn check_block_times() { run_loop_thread.join().unwrap(); } + +fn assert_block_info( + tuple0: &BTreeMap, + miner: &clarity::vm::Value, + miner_spend: &clarity::vm::Value, +) { + assert!(tuple0 + .get("burnchain-header-hash") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .is_some()); + assert!(tuple0 + .get("id-header-hash") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .is_some()); + assert!(tuple0 + .get("header-hash") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .is_some()); + assert_eq!( + &tuple0 + .get("miner-address") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .unwrap(), + miner + ); + assert!(tuple0 + .get("time") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .is_some()); + assert!(tuple0 + .get("vrf-seed") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .is_some()); + assert!(tuple0 + .get("block-reward") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .is_none()); // not yet mature + assert_eq!( + &tuple0 + .get("miner-spend-total") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .unwrap(), + miner_spend + ); + assert_eq!( + &tuple0 + .get("miner-spend-winner") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .unwrap(), + miner_spend + ); +} + +#[test] +#[ignore] +/// Verify all properties in `get-block-info?`, `get-stacks-block-info?`, and `get-tenure-info?`. +fn check_block_info() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let deploy_fee = 3000; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 3 * deploy_fee + (send_amt + send_fee) * 2, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + let mut sender_nonce = 0; + + let miner = clarity::vm::Value::Principal( + PrincipalData::parse_standard_principal("ST25WA53N4PWF8XZGQH2J5A4CGCWV4JADPM8MHTRV") + .unwrap() + .into(), + ); + let miner_spend = clarity::vm::Value::UInt(20000); + + // Deploy this version with the Clarity 1 / 2 before epoch 3 + let contract0_name = "test-contract-0"; + let contract_clarity1 = "(define-read-only (get-info (height uint)) + { + burnchain-header-hash: (get-block-info? burnchain-header-hash height), + id-header-hash: (get-block-info? id-header-hash height), + header-hash: (get-block-info? header-hash height), + miner-address: (get-block-info? miner-address height), + time: (get-block-info? time height), + vrf-seed: (get-block-info? vrf-seed height), + block-reward: (get-block-info? block-reward height), + miner-spend-total: (get-block-info? miner-spend-total height), + miner-spend-winner: (get-block-info? miner-spend-winner height), + } + )"; + + let contract_tx0 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract0_name, + contract_clarity1, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx0); + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(1)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + info!("Info from pre-epoch 3.0: {:?}", tuple0); + + // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + // This version uses the Clarity 1 / 2 function + let contract1_name = "test-contract-1"; + let contract_tx1 = make_contract_publish_versioned( + &sender_sk, + sender_nonce, + deploy_fee, + contract1_name, + contract_clarity1, + Some(ClarityVersion::Clarity2), + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx1); + + // This version uses the Clarity 3 functions + let contract3_name = "test-contract-3"; + let contract_clarity3 = "(define-read-only (get-block-info (height uint)) + { + id-header-hash: (get-stacks-block-info? id-header-hash height), + header-hash: (get-stacks-block-info? header-hash height), + time: (get-stacks-block-info? time height), + } + ) + (define-read-only (get-tenure-info (height uint)) + { + burnchain-header-hash: (get-tenure-info? burnchain-header-hash height), + miner-address: (get-tenure-info? miner-address height), + time: (get-tenure-info? time height), + vrf-seed: (get-tenure-info? vrf-seed height), + block-reward: (get-tenure-info? block-reward height), + miner-spend-total: (get-tenure-info? miner-spend-total height), + miner-spend-winner: (get-tenure-info? miner-spend-winner height), + } + )"; + + let contract_tx3 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract3_name, + contract_clarity3, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx3); + + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let info = get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let last_stacks_block_height = info.stacks_tip_height as u128; + + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + assert_block_info(&tuple0, &miner, &miner_spend); + + let result1 = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple1 = result1.expect_tuple().unwrap().data_map; + assert_eq!(tuple0, tuple1); + + let result3_tenure = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple3_tenure0 = result3_tenure.expect_tuple().unwrap().data_map; + assert_eq!( + tuple3_tenure0.get("burnchain-header-hash"), + tuple0.get("burnchain-header-hash") + ); + assert_eq!( + tuple3_tenure0.get("miner-address"), + tuple0.get("miner-address") + ); + assert_eq!(tuple3_tenure0.get("time"), tuple0.get("time")); + assert_eq!(tuple3_tenure0.get("vrf-seed"), tuple0.get("vrf-seed")); + assert_eq!( + tuple3_tenure0.get("block-reward"), + tuple0.get("block-reward") + ); + assert_eq!( + tuple3_tenure0.get("miner-spend-total"), + tuple0.get("miner-spend-total") + ); + assert_eq!( + tuple3_tenure0.get("miner-spend-winner"), + tuple0.get("miner-spend-winner") + ); + + let result3_block = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple3_block1 = result3_block.expect_tuple().unwrap().data_map; + assert_eq!( + tuple3_block1.get("id-header-hash"), + tuple0.get("id-header-hash") + ); + assert_eq!(tuple3_block1.get("header-hash"), tuple0.get("header-hash")); + assert!(tuple3_block1 + .get("time") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .is_some()); + + // Sleep to ensure the seconds have changed + thread::sleep(Duration::from_secs(1)); + + // Mine a Nakamoto block + info!("Mining Nakamoto block"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let last_stacks_block_height = info.stacks_tip_height as u128; + + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + assert_block_info(&tuple0, &miner, &miner_spend); + + let result1 = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple1 = result1.expect_tuple().unwrap().data_map; + assert_eq!(tuple0, tuple1); + + let result3_tenure = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple3_tenure1 = result3_tenure.expect_tuple().unwrap().data_map; + // There should have been a tenure change, so these should be different. + assert_ne!(tuple3_tenure0, tuple3_tenure1); + assert_eq!( + tuple3_tenure1.get("burnchain-header-hash"), + tuple0.get("burnchain-header-hash") + ); + assert_eq!( + tuple3_tenure1.get("miner-address"), + tuple0.get("miner-address") + ); + assert_eq!(tuple3_tenure1.get("time"), tuple0.get("time")); + assert_eq!(tuple3_tenure1.get("vrf-seed"), tuple0.get("vrf-seed")); + assert_eq!( + tuple3_tenure1.get("block-reward"), + tuple0.get("block-reward") + ); + assert_eq!( + tuple3_tenure1.get("miner-spend-total"), + tuple0.get("miner-spend-total") + ); + assert_eq!( + tuple3_tenure1.get("miner-spend-winner"), + tuple0.get("miner-spend-winner") + ); + + let result3_block = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple3_block2 = result3_block.expect_tuple().unwrap().data_map; + // There should have been a block change, so these should be different. + assert_ne!(tuple3_block1, tuple3_block2); + assert_eq!( + tuple3_block2.get("id-header-hash"), + tuple0.get("id-header-hash") + ); + assert_eq!(tuple3_block2.get("header-hash"), tuple0.get("header-hash")); + assert!(tuple3_block2 + .get("time") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .is_some()); + + // Sleep to ensure the seconds have changed + thread::sleep(Duration::from_secs(1)); + + // Mine a Nakamoto block + info!("Mining Nakamoto block"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let last_stacks_block_height = info.stacks_tip_height as u128; + + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + assert_block_info(&tuple0, &miner, &miner_spend); + + let result1 = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple1 = result1.expect_tuple().unwrap().data_map; + assert_eq!(tuple0, tuple1); + + let result3_tenure = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple3_tenure1a = result3_tenure.expect_tuple().unwrap().data_map; + assert_eq!(tuple3_tenure1, tuple3_tenure1a); + + let result3_block = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + ); + let tuple3_block3 = result3_block.expect_tuple().unwrap().data_map; + // There should have been a block change, so these should be different. + assert_ne!(tuple3_block3, tuple3_block2); + assert_eq!( + tuple3_block3.get("id-header-hash"), + tuple0.get("id-header-hash") + ); + assert_eq!(tuple3_block3.get("header-hash"), tuple0.get("header-hash")); + assert!(tuple3_block3 + .get("time") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .is_some()); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} From 9ab01981f367bb00cd6bcc948855b6fdbde67a5c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 16 Jun 2024 12:30:23 -0400 Subject: [PATCH 0329/1400] refactor: avoid searching both headers DBs when possible --- clarity/src/vm/database/clarity_db.rs | 52 ++++-- clarity/src/vm/docs/mod.rs | 8 +- clarity/src/vm/test_util/mod.rs | 15 +- .../chainstate/stacks/boot/contract_tests.rs | 8 +- .../src/chainstate/stacks/db/unconfirmed.rs | 2 +- stackslib/src/clarity_cli.rs | 15 +- stackslib/src/clarity_vm/database/mod.rs | 163 ++++++++++++++---- .../src/tests/nakamoto_integrations.rs | 3 +- 8 files changed, 196 insertions(+), 70 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 4bd6a9d5df..7c2dce309a 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -88,17 +88,26 @@ pub trait HeadersDB { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + epoch: &StacksEpochId, ) -> Option; fn get_burn_header_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option; - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option; + fn get_consensus_hash_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option; fn get_vrf_seed_for_block( &self, id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option; - fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option; - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option; + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option; + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: Option<&StacksEpochId>, + ) -> Option; fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option; fn get_miner_address( &self, @@ -211,6 +220,7 @@ impl HeadersDB for NullHeadersDB { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, ) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { @@ -219,10 +229,18 @@ impl HeadersDB for NullHeadersDB { None } } - fn get_consensus_hash_for_block(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_consensus_hash_for_block( + &self, + _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { None } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: Option<&StacksEpochId>, + ) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { Some(BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64) @@ -230,7 +248,7 @@ impl HeadersDB for NullHeadersDB { None } } - fn get_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_stacks_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option { None } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { @@ -939,8 +957,9 @@ impl<'a> ClarityDatabase<'a> { pub fn get_block_header_hash(&mut self, block_height: u32) -> Result { let id_bhh = self.get_index_block_header_hash(block_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; self.headers_db - .get_stacks_block_header_hash_for_block(&id_bhh) + .get_stacks_block_header_hash_for_block(&id_bhh, &epoch) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } @@ -953,27 +972,21 @@ impl<'a> ClarityDatabase<'a> { Some(x) => x, None => self.get_index_block_header_hash(block_height)?, }; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; self.headers_db - .get_burn_block_time_for_block(&id_bhh) + .get_burn_block_time_for_block(&id_bhh, Some(&epoch)) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } pub fn get_block_time(&mut self, block_height: u32) -> Result { let id_bhh = self.get_index_block_header_hash(block_height)?; - let burn_block_height = self - .get_burnchain_block_height(&id_bhh) - .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()))?; - let epoch = self.get_stacks_epoch(burn_block_height).ok_or_else(|| { - InterpreterError::Expect( - format!("Failed to get epoch for block height {block_height}.)").into(), - ) - })?; - if !epoch.epoch_id.uses_nakamoto_blocks() { + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; + if !epoch.uses_nakamoto_blocks() { return self.get_burn_block_time(block_height, Some(id_bhh)); } self.headers_db - .get_block_time_for_block(&id_bhh) + .get_stacks_block_time_for_block(&id_bhh) .ok_or_else(|| InterpreterError::Expect("Failed to get block data.".into()).into()) } @@ -1009,12 +1022,13 @@ impl<'a> ClarityDatabase<'a> { // this is the StacksBlockId of the last block evaluated in this fork let parent_id_bhh = self.get_index_block_header_hash(current_stacks_height - 1)?; + let epoch = self.get_stacks_epoch_for_block(&parent_id_bhh)?; // infallible, since we always store the consensus hash with the StacksBlockId in the // headers DB let consensus_hash = self .headers_db - .get_consensus_hash_for_block(&parent_id_bhh) + .get_consensus_hash_for_block(&parent_id_bhh, &epoch) .ok_or_else(|| { InterpreterError::Expect(format!( "FATAL: no consensus hash found for StacksBlockId {}", diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 6026d1c2b7..3e1530de6f 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2815,10 +2815,14 @@ mod test { .unwrap(), ) } - fn get_burn_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_burn_block_time_for_block( + &self, + _id_bhh: &StacksBlockId, + _epoch: Option<&StacksEpochId>, + ) -> Option { Some(1557860301) } - fn get_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option { + fn get_stacks_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option { Some(1557860302) } fn get_burn_block_height_for_block(&self, _id_bhh: &StacksBlockId) -> Option { diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index 49f7d76c41..605180d005 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -146,6 +146,7 @@ impl HeadersDB for UnitTestHeaderDB { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, ) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { @@ -154,7 +155,11 @@ impl HeadersDB for UnitTestHeaderDB { None } } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: Option<&StacksEpochId>, + ) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { Some(BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64) @@ -165,7 +170,7 @@ impl HeadersDB for UnitTestHeaderDB { Some(1 + 10 * (id_bhh.as_bytes()[0] as u64)) } } - fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { Some(1713799973 + 10 * (id_bhh.as_bytes()[0] as u64)) } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { @@ -184,7 +189,11 @@ impl HeadersDB for UnitTestHeaderDB { None } - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_consensus_hash_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { if *id_bhh == StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { Some(FIRST_BURNCHAIN_CONSENSUS_HASH) diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 233fb3f23a..fba155edd6 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -571,7 +571,11 @@ impl HeadersDB for TestSimHeadersDB { } } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: Option<&StacksEpochId>, + ) -> Option { if *id_bhh == *FIRST_INDEX_BLOCK_HASH { Some(BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64) } else { @@ -583,7 +587,7 @@ impl HeadersDB for TestSimHeadersDB { } } - fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { let block_height = test_sim_hash_to_height(&id_bhh.0)?; Some(1713799973 + block_height) } diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index fc928fa196..f986503ec6 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -240,7 +240,7 @@ impl UnconfirmedState { .get_burn_block_height_for_block(&self.confirmed_chain_tip) .expect("BUG: unable to get burn block height based on chain tip"); let burn_block_timestamp = headers_db - .get_burn_block_time_for_block(&self.confirmed_chain_tip) + .get_burn_block_time_for_block(&self.confirmed_chain_tip, None) .expect("BUG: unable to get burn block timestamp based on chain tip"); let ast_rules = burn_dbconn.get_ast_rules(burn_block_height); diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 89c507390c..7d2b53eb0e 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -655,7 +655,11 @@ impl HeadersDB for CLIHeadersDB { } } - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_consensus_hash_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // mock it let conn = self.conn(); if let Some(_) = get_cli_block_height(&conn, id_bhh) { @@ -685,6 +689,7 @@ impl HeadersDB for CLIHeadersDB { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); if let Some(_) = get_cli_block_height(&conn, id_bhh) { @@ -698,7 +703,11 @@ impl HeadersDB for CLIHeadersDB { } } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + _epoch: Option<&StacksEpochId>, + ) -> Option { let conn = self.conn(); if let Some(height) = get_cli_block_height(&conn, id_bhh) { Some((height * 600 + 1231006505) as u64) @@ -707,7 +716,7 @@ impl HeadersDB for CLIHeadersDB { } } - fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { let conn = self.conn(); if let Some(height) = get_cli_block_height(&conn, id_bhh) { Some((height * 10 + 1713799973) as u64) diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 4720539229..ed60af1762 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -45,10 +45,17 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + epoch: &StacksEpochId, ) -> Option { - get_stacks_header_column(self.0, id_bhh, "block_hash", |r| { - BlockHeaderHash::from_column(r, "block_hash").expect("FATAL: malformed block hash") - }) + get_stacks_header_column_from_table( + self.0, + id_bhh, + "block_hash", + &|r| { + BlockHeaderHash::from_column(r, "block_hash").expect("FATAL: malformed block hash") + }, + epoch.uses_nakamoto_blocks(), + ) } fn get_burn_header_hash_for_block( @@ -60,19 +67,41 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { }) } - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.0, id_bhh, "consensus_hash", |r| { - ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash") - }) + fn get_consensus_hash_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + get_stacks_header_column_from_table( + self.0, + id_bhh, + "consensus_hash", + &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), + epoch.uses_nakamoto_blocks(), + ) } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.0, id_bhh, "burn_header_timestamp", |r| { - u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") - }) + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + epoch_opt: Option<&StacksEpochId>, + ) -> Option { + if let Some(epoch) = epoch_opt { + get_stacks_header_column_from_table( + self.0, + id_bhh, + "burn_header_timestamp", + &|r| u64::from_row(r).expect("FATAL: malformed burn_header_timestamp"), + epoch.uses_nakamoto_blocks(), + ) + } else { + get_stacks_header_column(self.0, id_bhh, "burn_header_timestamp", |r| { + u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") + }) + } } - fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { get_stacks_header_column_from_table( self.0, id_bhh, @@ -165,10 +194,17 @@ impl<'a> HeadersDB for ChainstateTx<'a> { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + epoch: &StacksEpochId, ) -> Option { - get_stacks_header_column(self.deref().deref(), id_bhh, "block_hash", |r| { - BlockHeaderHash::from_column(r, "block_hash").expect("FATAL: malformed block hash") - }) + get_stacks_header_column_from_table( + self.deref().deref(), + id_bhh, + "block_hash", + &|r| { + BlockHeaderHash::from_column(r, "block_hash").expect("FATAL: malformed block hash") + }, + epoch.uses_nakamoto_blocks(), + ) } fn get_burn_header_hash_for_block( @@ -180,19 +216,41 @@ impl<'a> HeadersDB for ChainstateTx<'a> { }) } - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.deref().deref(), id_bhh, "consensus_hash", |r| { - ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash") - }) + fn get_consensus_hash_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + get_stacks_header_column_from_table( + self.deref().deref(), + id_bhh, + "consensus_hash", + &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), + epoch.uses_nakamoto_blocks(), + ) } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.deref().deref(), id_bhh, "burn_header_timestamp", |r| { - u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") - }) + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + epoch_opt: Option<&StacksEpochId>, + ) -> Option { + if let Some(epoch) = epoch_opt { + get_stacks_header_column_from_table( + self.deref().deref(), + id_bhh, + "burn_header_timestamp", + &|r| u64::from_row(r).expect("FATAL: malformed burn_header_timestamp"), + epoch.uses_nakamoto_blocks(), + ) + } else { + get_stacks_header_column(self.deref().deref(), id_bhh, "burn_header_timestamp", |r| { + u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") + }) + } } - fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { get_stacks_header_column_from_table( self.deref().deref(), id_bhh, @@ -291,10 +349,17 @@ impl HeadersDB for MARF { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + epoch: &StacksEpochId, ) -> Option { - get_stacks_header_column(self.sqlite_conn(), id_bhh, "block_hash", |r| { - BlockHeaderHash::from_column(r, "block_hash").expect("FATAL: malformed block hash") - }) + get_stacks_header_column_from_table( + self.sqlite_conn(), + id_bhh, + "block_hash", + &|r| { + BlockHeaderHash::from_column(r, "block_hash").expect("FATAL: malformed block hash") + }, + epoch.uses_nakamoto_blocks(), + ) } fn get_burn_header_hash_for_block( @@ -306,19 +371,41 @@ impl HeadersDB for MARF { }) } - fn get_consensus_hash_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.sqlite_conn(), id_bhh, "consensus_hash", |r| { - ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash") - }) + fn get_consensus_hash_for_block( + &self, + id_bhh: &StacksBlockId, + epoch: &StacksEpochId, + ) -> Option { + get_stacks_header_column_from_table( + self.sqlite_conn(), + id_bhh, + "consensus_hash", + &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), + epoch.uses_nakamoto_blocks(), + ) } - fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.sqlite_conn(), id_bhh, "burn_header_timestamp", |r| { - u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") - }) + fn get_burn_block_time_for_block( + &self, + id_bhh: &StacksBlockId, + epoch_opt: Option<&StacksEpochId>, + ) -> Option { + if let Some(epoch) = epoch_opt { + get_stacks_header_column_from_table( + self.sqlite_conn(), + id_bhh, + "burn_header_timestamp", + &|r| u64::from_row(r).expect("FATAL: malformed burn_header_timestamp"), + epoch.uses_nakamoto_blocks(), + ) + } else { + get_stacks_header_column(self.sqlite_conn(), id_bhh, "burn_header_timestamp", |r| { + u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") + }) + } } - fn get_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { + fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { get_stacks_header_column_from_table( self.sqlite_conn(), id_bhh, @@ -438,7 +525,7 @@ where |x| Ok(loader(x)), ) .optional() - .unwrap_or_else(|e| { + .unwrap_or_else(|_| { panic!( "Unexpected SQL failure querying block header table for '{}'", column_name @@ -494,7 +581,7 @@ fn get_first_block_in_tenure( .into()) }, ) - .unwrap_or_else(|e| { + .unwrap_or_else(|_| { panic!("Unexpected SQL failure querying block header table for 'index_block_hash'") }) } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 48cc105290..69bb00ef2f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -49,8 +49,7 @@ use stacks::chainstate::stacks::boot::{ }; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{ - self, BlockBuilder, BlockLimitFunction, TransactionEvent, TransactionResult, - TransactionSuccessEvent, + BlockBuilder, BlockLimitFunction, TransactionEvent, TransactionResult, TransactionSuccessEvent, }; use stacks::chainstate::stacks::{ SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangePayload, From 945515efa4549508115a0828c21647a7d2d21ec4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 17 Jun 2024 08:30:33 -0400 Subject: [PATCH 0330/1400] fix: missed change from last refactor --- clarity/src/vm/docs/mod.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 3e1530de6f..0c660d7e67 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2789,7 +2789,11 @@ mod test { ) -> Option { None } - fn get_consensus_hash_for_block(&self, _bhh: &StacksBlockId) -> Option { + fn get_consensus_hash_for_block( + &self, + _bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { Some(ConsensusHash([0; 20])) } fn get_vrf_seed_for_block( @@ -2807,6 +2811,7 @@ mod test { fn get_stacks_block_header_hash_for_block( &self, _id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, ) -> Option { Some( BlockHeaderHash::from_hex( From da357507d497bbe4a6135af12ac7cc1a5283cd37 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 17 Jun 2024 09:25:59 -0400 Subject: [PATCH 0331/1400] fix: missed change from last refactor --- stackslib/src/chainstate/stacks/boot/contract_tests.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index fba155edd6..1a47613c89 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -549,7 +549,11 @@ impl HeadersDB for TestSimHeadersDB { None } - fn get_consensus_hash_for_block(&self, bhh: &StacksBlockId) -> Option { + fn get_consensus_hash_for_block( + &self, + bhh: &StacksBlockId, + _epoch: &StacksEpochId, + ) -> Option { // capture the first 20 bytes of the block ID, which in this case captures the height and // fork ID. let mut bytes_20 = [0u8; 20]; @@ -560,6 +564,7 @@ impl HeadersDB for TestSimHeadersDB { fn get_stacks_block_header_hash_for_block( &self, id_bhh: &StacksBlockId, + _epoch: &StacksEpochId, ) -> Option { if *id_bhh == *FIRST_INDEX_BLOCK_HASH { Some(FIRST_STACKS_BLOCK_HASH) From 63946394047b4a59ed79e0db86bb096c63e40e90 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 17 Jun 2024 09:31:44 -0400 Subject: [PATCH 0332/1400] fix: resolve errors after merge --- stackslib/src/chainstate/stacks/mod.rs | 2 +- stackslib/src/net/tests/relay/nakamoto.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 3c117de018..92da8ac283 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1723,7 +1723,7 @@ pub mod test { timestamp: get_epoch_time_secs(), miner_signature: MessageSignature::empty(), signer_signature: Vec::new(), - signer_bitvec: BitVec::ones(8).unwrap(), + pox_treatment: BitVec::ones(8).unwrap(), }; NakamotoBlock { diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 1c456e539e..4df3171474 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -399,6 +399,7 @@ fn test_buffer_data_message() { parent_block_id: StacksBlockId([0x03; 32]), tx_merkle_root: Sha512Trunc256Sum([0x05; 32]), state_index_root: TrieHash([0x07; 32]), + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![], pox_treatment: BitVec::zeros(1).unwrap(), From b4d88b745f8b6cb263fb5067cd4986a3b784c946 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 17 Jun 2024 13:05:04 -0400 Subject: [PATCH 0333/1400] fix: get first block in tenure when epoch is not known --- stackslib/src/clarity_vm/database/mod.rs | 95 +++++++++++++++--------- 1 file changed, 59 insertions(+), 36 deletions(-) diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index ed60af1762..ab859da00f 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -125,7 +125,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, Some(epoch)); let (column_name, nakamoto) = if epoch.uses_nakamoto_blocks() { ("vrf_proof", true) } else { @@ -148,7 +148,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, Some(epoch)); get_miner_column(self.0, &tenure_id_bhh, "address", |r| { let s: String = r.get_unwrap("address"); let addr = StacksAddress::from_string(&s).expect("FATAL: malformed address"); @@ -161,7 +161,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, Some(epoch)); get_miner_column(self.0, &tenure_id_bhh, "burnchain_sortition_burn", |r| { u64::from_row(r).expect("FATAL: malformed sortition burn") }) @@ -173,7 +173,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, Some(epoch)); get_miner_column(self.0, &tenure_id_bhh, "burnchain_commit_burn", |r| { u64::from_row(r).expect("FATAL: malformed commit burn") }) @@ -185,7 +185,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, Some(epoch)); get_matured_reward(self.0, &tenure_id_bhh, epoch).map(|x| x.total().into()) } } @@ -274,7 +274,7 @@ impl<'a> HeadersDB for ChainstateTx<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, Some(epoch)); let (column_name, nakamoto) = if epoch.uses_nakamoto_blocks() { ("vrf_proof", true) } else { @@ -297,7 +297,7 @@ impl<'a> HeadersDB for ChainstateTx<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, Some(epoch)); get_miner_column(self.deref().deref(), &tenure_id_bhh, "address", |r| { let s: String = r.get_unwrap("address"); let addr = StacksAddress::from_string(&s).expect("FATAL: malformed address"); @@ -310,7 +310,7 @@ impl<'a> HeadersDB for ChainstateTx<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, Some(epoch)); get_miner_column( self.deref().deref(), &tenure_id_bhh, @@ -325,7 +325,7 @@ impl<'a> HeadersDB for ChainstateTx<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, Some(epoch)); get_miner_column( self.deref().deref(), &tenure_id_bhh, @@ -340,7 +340,7 @@ impl<'a> HeadersDB for ChainstateTx<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, Some(epoch)); get_matured_reward(self.deref().deref(), &tenure_id_bhh, epoch).map(|x| x.total().into()) } } @@ -429,7 +429,7 @@ impl HeadersDB for MARF { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, Some(epoch)); let (column_name, nakamoto) = if epoch.uses_nakamoto_blocks() { ("vrf_proof", true) } else { @@ -452,7 +452,7 @@ impl HeadersDB for MARF { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, Some(epoch)); get_miner_column(self.sqlite_conn(), &tenure_id_bhh, "address", |r| { let s: String = r.get_unwrap("address"); let addr = StacksAddress::from_string(&s).expect("FATAL: malformed address"); @@ -465,7 +465,7 @@ impl HeadersDB for MARF { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, Some(epoch)); get_miner_column( self.sqlite_conn(), &tenure_id_bhh, @@ -480,7 +480,7 @@ impl HeadersDB for MARF { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, Some(epoch)); get_miner_column( self.sqlite_conn(), &tenure_id_bhh, @@ -495,7 +495,7 @@ impl HeadersDB for MARF { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, epoch); + let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, Some(epoch)); get_matured_reward(self.sqlite_conn(), &tenure_id_bhh, epoch).map(|x| x.total().into()) } } @@ -551,29 +551,52 @@ where fn get_first_block_in_tenure( conn: &DBConn, id_bhh: &StacksBlockId, - epoch: &StacksEpochId, + epoch_opt: Option<&StacksEpochId>, ) -> TenureBlockId { - // Pre-nakamoto, each block is a tenure - if !epoch.uses_nakamoto_blocks() { - return id_bhh.clone().into(); - } - - let args: &[&dyn ToSql] = &[id_bhh]; + let consensus_hash = match epoch_opt { + Some(epoch) => { + if !epoch.uses_nakamoto_blocks() { + return id_bhh.clone().into(); + } else { + get_stacks_header_column_from_table( + conn, + id_bhh, + "consensus_hash", + &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), + true, + ) + } + } + None => { + if let Some(ch) = get_stacks_header_column_from_table( + conn, + id_bhh, + "consensus_hash", + &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), + false, + ) { + return id_bhh.clone().into(); + } else { + get_stacks_header_column_from_table( + conn, + id_bhh, + "consensus_hash", + &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), + true, + ) + } + } + }; + let ch = consensus_hash + .expect("Unexpected SQL failure querying block header table for 'consensus_hash'"); + let args: &[&dyn ToSql] = &[&ch]; conn.query_row( " - WITH BlockInTenure AS ( - SELECT consensus_hash - FROM nakamoto_block_headers - WHERE index_block_hash = ? - ) - SELECT nbh.index_block_hash - FROM nakamoto_block_headers nbh - JOIN BlockInTenure bit ON nbh.consensus_hash = bit.consensus_hash - WHERE nbh.block_height = ( - SELECT MIN(block_height) - FROM nakamoto_block_headers - WHERE consensus_hash = bit.consensus_hash - )", + SELECT index_block_hash + FROM nakamoto_block_headers + WHERE consensus_hash = ? + ORDER BY block_height ASC + LIMIT 1;", args, |x| { Ok(StacksBlockId::from_column(x, "index_block_hash") @@ -636,7 +659,7 @@ fn get_matured_reward( .expect("Unexpected SQL failure querying parent block ID"); if let Some(parent_id_bhh) = parent_id_bhh { - let parent_tenure_id = get_first_block_in_tenure(conn, &parent_id_bhh, epoch); + let parent_tenure_id = get_first_block_in_tenure(conn, &parent_id_bhh, None); StacksChainState::get_matured_miner_payment(conn, &parent_tenure_id, child_id_bhh) .expect("Unexpected SQL failure querying miner reward table") } else { From 1fbbdf1db03115b9bb50162d18a5f985fc97dd5a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 17 Jun 2024 13:26:59 -0400 Subject: [PATCH 0334/1400] test: add test for `block-reward` property This is separated out as a "slow" test because it requires mining 100 blocks to wait for the rewards to mature. --- .github/workflows/slow-tests.yml | 1 + .../src/tests/nakamoto_integrations.rs | 344 ++++++++++++++++++ 2 files changed, 345 insertions(+) diff --git a/.github/workflows/slow-tests.yml b/.github/workflows/slow-tests.yml index bce6a15a1f..02c5bdf552 100644 --- a/.github/workflows/slow-tests.yml +++ b/.github/workflows/slow-tests.yml @@ -32,6 +32,7 @@ jobs: test-name: - tests::epoch_21::test_pox_reorg_flap_duel - tests::epoch_21::test_pox_reorg_flap_reward_cycles + - tests::nakamoto_integrations::check_block_info_rewards steps: ## Setup test environment - name: Setup Test Environment diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 59f1f3c1ca..c75f18d619 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5956,3 +5956,347 @@ fn check_block_info() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// Verify `block-reward` property in `get-block-info?` and `get-tenure-info?`. +/// This test is separated from `check_block_info` above because it needs to +/// mine 100+ blocks to mature the block reward, so it is slow. +fn check_block_info_rewards() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signers = TestSigners::default(); + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let deploy_fee = 3000; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 3 * deploy_fee + (send_amt + send_fee) * 2, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_vrfs: vrfs_submitted, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + let mut sender_nonce = 0; + + // Deploy this version with the Clarity 1 / 2 before epoch 3 + let contract0_name = "test-contract-0"; + let contract_clarity1 = "(define-read-only (get-info (height uint)) + { + burnchain-header-hash: (get-block-info? burnchain-header-hash height), + id-header-hash: (get-block-info? id-header-hash height), + header-hash: (get-block-info? header-hash height), + miner-address: (get-block-info? miner-address height), + time: (get-block-info? time height), + vrf-seed: (get-block-info? vrf-seed height), + block-reward: (get-block-info? block-reward height), + miner-spend-total: (get-block-info? miner-spend-total height), + miner-spend-winner: (get-block-info? miner-spend-winner height), + } + )"; + + let contract_tx0 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract0_name, + contract_clarity1, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx0); + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(1)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + info!("Info from pre-epoch 3.0: {:?}", tuple0); + + // first block wakes up the run loop, wait until a key registration has been submitted. + next_block_and(&mut btc_regtest_controller, 60, || { + let vrf_count = vrfs_submitted.load(Ordering::SeqCst); + Ok(vrf_count >= 1) + }) + .unwrap(); + + // second block should confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + // This version uses the Clarity 1 / 2 function + let contract1_name = "test-contract-1"; + let contract_tx1 = make_contract_publish_versioned( + &sender_sk, + sender_nonce, + deploy_fee, + contract1_name, + contract_clarity1, + Some(ClarityVersion::Clarity2), + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx1); + + // This version uses the Clarity 3 functions + let contract3_name = "test-contract-3"; + let contract_clarity3 = "(define-read-only (get-tenure-info (height uint)) + { + burnchain-header-hash: (get-tenure-info? burnchain-header-hash height), + miner-address: (get-tenure-info? miner-address height), + time: (get-tenure-info? time height), + vrf-seed: (get-tenure-info? vrf-seed height), + block-reward: (get-tenure-info? block-reward height), + miner-spend-total: (get-tenure-info? miner-spend-total height), + miner-spend-winner: (get-tenure-info? miner-spend-winner height), + } + )"; + + let contract_tx3 = make_contract_publish( + &sender_sk, + sender_nonce, + deploy_fee, + contract3_name, + contract_clarity3, + ); + sender_nonce += 1; + submit_tx(&http_origin, &contract_tx3); + + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + // Sleep to ensure the seconds have changed + thread::sleep(Duration::from_secs(1)); + + // Mine a Nakamoto block + info!("Mining Nakamoto block"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + // Sleep to ensure the seconds have changed + thread::sleep(Duration::from_secs(1)); + + // Mine a Nakamoto block + info!("Mining Nakamoto block"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let last_stacks_block_height = info.stacks_tip_height as u128; + + // Mining 100 blocks takes a while, so only run this test if the env var is set + let last_nakamoto_block = last_stacks_block_height; + + // Mine 100+ burn blocks to get the block reward matured + info!("Mining 102 tenures to mature the block reward"); + for i in 0..102 { + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= commits_before + 1) + }) + .unwrap(); + info!("Mined a block ({i})"); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + info!("Chain info: {:?}", info); + let last_stacks_block_height = info.stacks_tip_height as u128; + + // Check the block reward is now matured in one of the tenure-change blocks + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 100)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + assert_eq!( + tuple0 + .get("block-reward") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .unwrap(), + Value::UInt(2040806360) + ); + + let result1 = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 100)], + ); + let tuple1 = result1.expect_tuple().unwrap().data_map; + assert_eq!(tuple0, tuple1); + + let result3_tenure = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-info", + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 100)], + ); + let tuple3_tenure = result3_tenure.expect_tuple().unwrap().data_map; + assert_eq!( + tuple3_tenure.get("block-reward"), + tuple0.get("block-reward") + ); + + // Check the block reward is now matured in one of the Nakamoto blocks + let result0 = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_nakamoto_block)], + ); + let tuple0 = result0.expect_tuple().unwrap().data_map; + assert_eq!( + tuple0 + .get("block-reward") + .unwrap() + .clone() + .expect_optional() + .unwrap() + .unwrap(), + Value::UInt(3061200000) + ); + + let result1 = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-info", + vec![&clarity::vm::Value::UInt(last_nakamoto_block)], + ); + let tuple1 = result1.expect_tuple().unwrap().data_map; + assert_eq!(tuple0, tuple1); + + let result3_tenure = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-info", + vec![&clarity::vm::Value::UInt(last_nakamoto_block)], + ); + let tuple3_tenure = result3_tenure.expect_tuple().unwrap().data_map; + assert_eq!( + tuple3_tenure.get("block-reward"), + tuple0.get("block-reward") + ); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} From 8281f17150ce6f8337868d503690199643e20768 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 17 Jun 2024 13:31:00 -0400 Subject: [PATCH 0335/1400] CRC: use the last snapshot with a sortition to determine if we should proceed with a continue_tenure Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/relayer.rs | 14 ++++++- .../src/tests/nakamoto_integrations.rs | 42 +++++++++++++++++-- 2 files changed, 50 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index ddad31ad72..0552851437 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -698,10 +698,20 @@ impl RelayerThread { return Ok(()); }; let mining_pkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(mining_key)); - if block_election_snapshot.miner_pk_hash != Some(mining_pkh) { + + let last_winner_snapshot = { + let ih = self.sortdb.index_handle(&burn_tip.sortition_id); + ih.get_last_snapshot_with_sortition(burn_tip.block_height) + .map_err(|e| { + error!("Relayer: failed to get last snapshot with sortition: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? + }; + + if last_winner_snapshot.miner_pk_hash != Some(mining_pkh) { debug!("Relayer: the miner did not win the last sortition. No tenure to continue."; "current_mining_pkh" => %mining_pkh, - "block_snapshot.miner_pk_hash" => ?block_election_snapshot.miner_pk_hash, + "last_winner_snapshot.miner_pk_hash" => ?last_winner_snapshot.miner_pk_hash, ); return Ok(()); } else { diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index d9045a820c..33daed875b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -52,9 +52,10 @@ use stacks::chainstate::stacks::miner::{ BlockBuilder, BlockLimitFunction, TransactionEvent, TransactionResult, TransactionSuccessEvent, }; use stacks::chainstate::stacks::{ - SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangePayload, - TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, - TransactionPublicKeyEncoding, TransactionSpendingCondition, TransactionVersion, MAX_BLOCK_LEN, + SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangeCause, + TenureChangePayload, TransactionAnchorMode, TransactionAuth, TransactionPayload, + TransactionPostConditionMode, TransactionPublicKeyEncoding, TransactionSpendingCondition, + TransactionVersion, MAX_BLOCK_LEN, }; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ @@ -5233,6 +5234,40 @@ fn continue_tenure_extend() { .unwrap() .unwrap(); + // assert that the tenure extend tx was observed + let extend_tx_included = test_observer::get_blocks() + .into_iter() + .find(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| { + let raw_tx = tx_json["raw_tx"].as_str().unwrap(); + if raw_tx == "0x00" { + return false; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match parsed.payload { + TransactionPayload::TenureChange(payload) => { + if payload.cause == TenureChangeCause::Extended { + return true; + } + } + _ => {} + }; + false + }) + .is_some() + }) + .is_some(); + assert!( + extend_tx_included, + "Nakamoto node failed to include the tenure extend tx" + ); + // assert that the transfer tx was observed let transfer_tx_included = test_observer::get_blocks() .into_iter() @@ -5245,7 +5280,6 @@ fn continue_tenure_extend() { .is_some() }) .is_some(); - assert!( transfer_tx_included, "Nakamoto node failed to include the transfer tx" From 11a6262651a82ad4d64f4a1148e9badfee209b7f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 17 Jun 2024 21:13:11 -0400 Subject: [PATCH 0336/1400] CRC: update continue tenure extend to include a check for block found tenure reasons Signed-off-by: Jacinta Ferrant --- .../src/tests/nakamoto_integrations.rs | 82 +++++++++---------- 1 file changed, 37 insertions(+), 45 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 33daed875b..4b74f6af8b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5010,10 +5010,13 @@ fn signer_chainstate() { /// This test spins up a nakamoto-neon node. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches /// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop -/// struct handles the epoch-2/3 tear-down and spin-up. +/// struct handles the epoch-2/3 tear-down and spin-up. It mines a regular Nakamoto tenure +/// before pausing the commit op to produce an empty sortition, forcing a tenure extend. +/// Commit ops are resumed, and an additional 15 nakamoto tenures mined. /// This test makes three assertions: -/// * 30 blocks are mined after 3.0 starts. This is enough to mine across 2 reward cycles +/// * 15 blocks are mined after 3.0 starts. /// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 +/// * A tenure extend transaction was successfully mined in 3.0 /// * The final chain tip is a nakamoto block fn continue_tenure_extend() { if env::var("BITCOIND_TEST") != Ok("1".into()) { @@ -5235,58 +5238,47 @@ fn continue_tenure_extend() { .unwrap(); // assert that the tenure extend tx was observed - let extend_tx_included = test_observer::get_blocks() - .into_iter() - .find(|block_json| { - block_json["transactions"] - .as_array() - .unwrap() - .iter() - .find(|tx_json| { - let raw_tx = tx_json["raw_tx"].as_str().unwrap(); - if raw_tx == "0x00" { - return false; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = - StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match parsed.payload { - TransactionPayload::TenureChange(payload) => { - if payload.cause == TenureChangeCause::Extended { - return true; - } - } - _ => {} - }; - false - }) - .is_some() - }) - .is_some(); + let mut tenure_extends = vec![]; + let mut tenure_block_founds = vec![]; + let mut transfer_tx_included = false; + for block in test_observer::get_blocks() { + for tx in block["transactions"].as_array().unwrap() { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + if raw_tx == &transfer_tx_hex { + transfer_tx_included = true; + continue; + } + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) => match payload.cause { + TenureChangeCause::Extended => tenure_extends.push(parsed), + TenureChangeCause::BlockFound => tenure_block_founds.push(parsed), + }, + _ => {} + }; + } + } assert!( - extend_tx_included, - "Nakamoto node failed to include the tenure extend tx" + !tenure_extends.is_empty(), + "Nakamoto node failed to include the tenure extend txs" + ); + + assert!( + tenure_block_founds.len() >= 17 - tenure_extends.len(), + "Nakamoto node failed to include the block found tx per winning sortition" ); - // assert that the transfer tx was observed - let transfer_tx_included = test_observer::get_blocks() - .into_iter() - .find(|block_json| { - block_json["transactions"] - .as_array() - .unwrap() - .iter() - .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) - .is_some() - }) - .is_some(); assert!( transfer_tx_included, "Nakamoto node failed to include the transfer tx" ); assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); - assert!(tip.stacks_block_height >= block_height_pre_3_0 + 15); + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 17); // make sure prometheus returns an updated height #[cfg(feature = "monitoring_prom")] From f460834a2b70b31e7b54e2fed97303e1372f8cff Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 18 Jun 2024 14:21:55 +0300 Subject: [PATCH 0337/1400] Remove types from `err_Commands` `check` functions This update simplifies the err_Commands generators file. --- .../tests/pox-4/err_Commands.ts | 245 ++++-------------- 1 file changed, 49 insertions(+), 196 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index c24c028f8d..8cac237fc7 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -73,10 +73,7 @@ export function ErrCommands( r.authId, r.period, r.margin, - function ( - this: StackStxAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -111,10 +108,7 @@ export function ErrCommands( r.authId, r.period, r.margin, - function ( - this: StackStxAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -149,10 +143,7 @@ export function ErrCommands( r.authId, r.period, r.margin, - function ( - this: StackStxAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -187,10 +178,7 @@ export function ErrCommands( r.authId, r.period, r.margin, - function ( - this: StackStxSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -225,10 +213,7 @@ export function ErrCommands( r.authId, r.period, r.margin, - function ( - this: StackStxSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -263,10 +248,7 @@ export function ErrCommands( r.authId, r.period, r.margin, - function ( - this: StackStxSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -292,10 +274,7 @@ export function ErrCommands( ) => new RevokeDelegateStxCommand_Err( r.wallet, - function ( - this: RevokeDelegateStxCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -330,10 +309,7 @@ export function ErrCommands( r.delegateTo, r.untilBurnHt, r.amount, - function ( - this: DelegateStxCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -357,10 +333,7 @@ export function ErrCommands( new StackAggregationCommitSigCommand_Err( r.wallet, r.authId, - function ( - this: StackAggregationCommitSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( @@ -386,10 +359,7 @@ export function ErrCommands( new StackAggregationCommitSigCommand_Err( r.wallet, r.authId, - function ( - this: StackAggregationCommitSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( @@ -415,10 +385,7 @@ export function ErrCommands( new StackAggregationCommitSigCommand_Err( r.wallet, r.authId, - function ( - this: StackAggregationCommitSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( @@ -443,10 +410,7 @@ export function ErrCommands( new StackAggregationCommitAuthCommand_Err( r.wallet, r.authId, - function ( - this: StackAggregationCommitAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( @@ -472,10 +436,7 @@ export function ErrCommands( new StackAggregationCommitAuthCommand_Err( r.wallet, r.authId, - function ( - this: StackAggregationCommitAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( @@ -501,10 +462,7 @@ export function ErrCommands( new StackAggregationCommitAuthCommand_Err( r.wallet, r.authId, - function ( - this: StackAggregationCommitAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( @@ -529,10 +487,7 @@ export function ErrCommands( new StackAggregationCommitIndexedSigCommand_Err( r.wallet, r.authId, - function ( - this: StackAggregationCommitIndexedSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( @@ -558,10 +513,7 @@ export function ErrCommands( new StackAggregationCommitIndexedSigCommand_Err( r.wallet, r.authId, - function ( - this: StackAggregationCommitIndexedSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( @@ -587,10 +539,7 @@ export function ErrCommands( new StackAggregationCommitIndexedSigCommand_Err( r.wallet, r.authId, - function ( - this: StackAggregationCommitIndexedSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( @@ -615,10 +564,7 @@ export function ErrCommands( new StackAggregationCommitIndexedAuthCommand_Err( r.wallet, r.authId, - function ( - this: StackAggregationCommitIndexedAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( @@ -644,10 +590,7 @@ export function ErrCommands( new StackAggregationCommitIndexedAuthCommand_Err( r.wallet, r.authId, - function ( - this: StackAggregationCommitIndexedAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( @@ -672,10 +615,7 @@ export function ErrCommands( new StackAggregationCommitIndexedAuthCommand_Err( r.wallet, r.authId, - function ( - this: StackAggregationCommitIndexedAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( @@ -715,10 +655,7 @@ export function ErrCommands( r.wallet, r.rewardCycleIndex, r.authId, - function ( - this: StackAggregationIncreaseCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( operator.lockedAddresses.length > 0 && @@ -782,10 +719,7 @@ export function ErrCommands( finalResult.period, finalResult.amount, finalResult.unlockBurnHt, - function ( - this: DelegateStackStxCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; if ( @@ -859,10 +793,7 @@ export function ErrCommands( finalResult.period, finalResult.amount, finalResult.unlockBurnHt, - function ( - this: DelegateStackStxCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; if ( @@ -933,10 +864,7 @@ export function ErrCommands( finalResult.period, finalResult.amount, finalResult.unlockBurnHt, - function ( - this: DelegateStackStxCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; if ( @@ -970,10 +898,7 @@ export function ErrCommands( r.operator, r.increaseBy, r.authId, - function ( - this: StackIncreaseSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -1004,10 +929,7 @@ export function ErrCommands( r.operator, r.increaseBy, r.authId, - function ( - this: StackIncreaseSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -1038,10 +960,7 @@ export function ErrCommands( r.operator, r.increaseBy, r.authId, - function ( - this: StackIncreaseSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -1072,10 +991,7 @@ export function ErrCommands( r.operator, r.increaseBy, r.authId, - function ( - this: StackIncreaseAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -1106,10 +1022,7 @@ export function ErrCommands( r.operator, r.increaseBy, r.authId, - function ( - this: StackIncreaseAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -1140,10 +1053,7 @@ export function ErrCommands( r.operator, r.increaseBy, r.authId, - function ( - this: StackIncreaseAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( model.stackingMinimum > 0 && @@ -1181,10 +1091,7 @@ export function ErrCommands( r.extendCount, r.authId, r.currentCycle, - function ( - this: StackExtendSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; const firstRewardCycle = @@ -1233,10 +1140,7 @@ export function ErrCommands( r.extendCount, r.authId, r.currentCycle, - function ( - this: StackExtendSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; const firstRewardCycle = @@ -1285,10 +1189,7 @@ export function ErrCommands( r.extendCount, r.authId, r.currentCycle, - function ( - this: StackExtendSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; const firstRewardCycle = @@ -1337,10 +1238,7 @@ export function ErrCommands( r.extendCount, r.authId, r.currentCycle, - function ( - this: StackExtendSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; const firstRewardCycle = @@ -1389,10 +1287,7 @@ export function ErrCommands( r.extendCount, r.authId, r.currentCycle, - function ( - this: StackExtendSigCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; const firstRewardCycle = @@ -1441,10 +1336,7 @@ export function ErrCommands( r.extendCount, r.authId, r.currentCycle, - function ( - this: StackExtendAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; const firstRewardCycle = @@ -1493,10 +1385,7 @@ export function ErrCommands( r.extendCount, r.authId, r.currentCycle, - function ( - this: StackExtendAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; const firstRewardCycle = @@ -1545,10 +1434,7 @@ export function ErrCommands( r.extendCount, r.authId, r.currentCycle, - function ( - this: StackExtendAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; const firstRewardCycle = @@ -1597,10 +1483,7 @@ export function ErrCommands( r.extendCount, r.authId, r.currentCycle, - function ( - this: StackExtendAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; const firstRewardCycle = @@ -1649,10 +1532,7 @@ export function ErrCommands( r.extendCount, r.authId, r.currentCycle, - function ( - this: StackExtendAuthCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; const firstRewardCycle = @@ -1714,10 +1594,7 @@ export function ErrCommands( final.stacker, final.extendCount, final.currentCycle, - function ( - this: DelegateStackExtendCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operatorWallet = model.stackers.get( this.operator.stxAddress, )!; @@ -1796,10 +1673,7 @@ export function ErrCommands( final.stacker, final.extendCount, final.currentCycle, - function ( - this: DelegateStackExtendCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operatorWallet = model.stackers.get( this.operator.stxAddress, )!; @@ -1877,10 +1751,7 @@ export function ErrCommands( final.stacker, final.extendCount, final.currentCycle, - function ( - this: DelegateStackExtendCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operatorWallet = model.stackers.get( this.operator.stxAddress, )!; @@ -1938,10 +1809,7 @@ export function ErrCommands( final.stacker, final.extendCount, final.currentCycle, - function ( - this: DelegateStackExtendCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operatorWallet = model.stackers.get( this.operator.stxAddress, )!; @@ -2016,10 +1884,7 @@ export function ErrCommands( final.operator, final.stacker, final.increaseBy, - function ( - this: DelegateStackIncreaseCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operatorWallet = model.stackers.get( this.operator.stxAddress, )!; @@ -2080,10 +1945,7 @@ export function ErrCommands( final.operator, final.stacker, final.increaseBy, - function ( - this: DelegateStackIncreaseCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operatorWallet = model.stackers.get( this.operator.stxAddress, )!; @@ -2142,10 +2004,7 @@ export function ErrCommands( final.operator, final.stacker, final.increaseBy, - function ( - this: DelegateStackIncreaseCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operatorWallet = model.stackers.get( this.operator.stxAddress, )!; @@ -2209,10 +2068,7 @@ export function ErrCommands( final.operator, final.stacker, final.increaseBy, - function ( - this: DelegateStackIncreaseCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const operatorWallet = model.stackers.get( this.operator.stxAddress, )!; @@ -2253,10 +2109,7 @@ export function ErrCommands( new DisallowContractCallerCommand_Err( r.stacker, r.callerToDisallow, - function ( - this: DisallowContractCallerCommand_Err, - model: Readonly, - ): boolean { + function (this, model) { const stacker = model.stackers.get(this.stacker.stxAddress)!; const callerToDisallow = model.stackers.get( this.callerToDisallow.stxAddress, From 7859b7112f181e702ef52d2ef933b3c5c1c26b88 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 18 Jun 2024 09:52:23 -0500 Subject: [PATCH 0338/1400] fix: signer slots count in staging --- stacks-common/src/libcommon.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index fef8f0bbad..b93c78c50f 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -63,5 +63,20 @@ pub mod consts { /// The number of StackerDB slots each signing key needs /// to use to participate in DKG and block validation signing. - pub const SIGNER_SLOTS_PER_USER: u32 = 14; + pub const SIGNER_SLOTS_PER_USER: u32 = 13; +} + +/// This test asserts that the constant above doesn't change. +/// This exists because the constant above is used by Epoch 2.5 instantiation code. +/// +/// Adding more slots will require instantiating more .signers contracts through either +/// consensus changes (i.e., a new epoch) or through non-consensus-critical contract +/// deployments. +#[test] +fn signer_slots_count_2_5() { + assert_eq!( + consts::SIGNER_SLOTS_PER_USER, + 13, + "The .signers-x-y contracts in Epoch 2.5 were instantiated with 13 slots" + ); } From 28a48b8d9aab601c5b2134d2798d7652b086f705 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 18 Jun 2024 11:01:46 -0500 Subject: [PATCH 0339/1400] test: update tests --- libsigner/src/v1/messages.rs | 2 ++ stacks-signer/src/client/stacks_client.rs | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/libsigner/src/v1/messages.rs b/libsigner/src/v1/messages.rs index c345cc3e13..30bd4a5769 100644 --- a/libsigner/src/v1/messages.rs +++ b/libsigner/src/v1/messages.rs @@ -1464,6 +1464,8 @@ mod test { use super::{StacksMessageCodecExtensions, *}; #[test] + #[should_panic] + // V1 signer slots do not have enough slots in Epoch 2.5. Something will need to be updated! fn signer_slots_count_is_sane() { let slot_identifiers_len = MessageSlotID::ALL.len(); assert!( diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 17a5916f6f..d85f6d9b7a 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -1205,7 +1205,7 @@ mod tests { let principal_data = StacksAddress::from_string(signer).unwrap().into(); let data_map = [ - ("num-slots".into(), ClarityValue::UInt(14)), + ("num-slots".into(), ClarityValue::UInt(13)), ( "signer".into(), ClarityValue::Principal(PrincipalData::Standard(principal_data)), From 212fa51bc171bc55a1f476e47d5d882d29f543fc Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 18 Jun 2024 11:56:48 -0500 Subject: [PATCH 0340/1400] test: miner correctly sets bitvec to reward set length in naka_node now --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c630776d5c..d87f1256f4 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2309,7 +2309,7 @@ fn miner_writes_proposed_block_to_stackerdb() { let signer_bitvec = BitVec::<4000>::consensus_deserialize(&mut signer_bitvec_bytes.as_slice()) .expect("Failed to deserialize signer bitvec"); - assert_eq!(signer_bitvec.len(), 1); + assert_eq!(signer_bitvec.len(), 30); assert_eq!( format!("0x{}", observed_block.block_hash), From 452d7bd89dda08d5af27167cfc951250121b148c Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 20 Jun 2024 14:58:34 +0300 Subject: [PATCH 0341/1400] Add an additional condition inside `DelegateStackIncreaseCommand` The added condition ensures that an active lock does not expire at the end of the current cycle, and avoids the PoX-4 Clarity error 2 - ERR_STACKING_INVALID_LOCK_PERIOD --- .../tests/pox-4/pox_DelegateStackIncreaseCommand.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts index 6a5837f48b..b78fe187bb 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts @@ -8,6 +8,7 @@ import { import { poxAddressToTuple } from "@stacks/stacking"; import { expect } from "vitest"; import { Cl } from "@stacks/transactions"; +import { REWARD_CYCLE_LENGTH } from "./pox_Commands.ts"; /** * The `DelegateStackIncreaseCommand` allows a pool operator to @@ -69,7 +70,8 @@ export class DelegateStackIncreaseCommand implements PoxCommand { stackerWallet.amountUnlocked >= this.increaseBy && stackerWallet.delegatedMaxAmount >= this.increaseBy + stackerWallet.amountLocked && - operatorWallet.lockedAddresses.indexOf(this.stacker.stxAddress) > -1 + operatorWallet.lockedAddresses.indexOf(this.stacker.stxAddress) > -1 && + stackerWallet.unlockHeight > model.burnBlockHeight + REWARD_CYCLE_LENGTH ); } From 90d4a2bb418abee7b08c83be38df1b0c94a09c39 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 20 Jun 2024 15:01:37 +0300 Subject: [PATCH 0342/1400] Fix typo in `DelegateStackStxCommand_Err` logging --- .../tests/pox-4/pox_DelegateStackStxCommand_Err.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts index d064d8b4cd..fdec28a355 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand_Err.ts @@ -86,7 +86,7 @@ export class DelegateStackStxCommand_Err implements PoxCommand { // test to pass but it is useful for debugging and eyeballing the test. logCommand( `₿ ${model.burnBlockHeight}`, - `✓ ${this.operator.label} Ӿ ${this.stacker.label}`, + `✗ ${this.operator.label} Ӿ ${this.stacker.label}`, "delegate-stack-stx", "lock-amount", this.amountUstx.toString(), From fa5b78f919b440db2f6b058bbd545f1bb6b50a83 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 20 Jun 2024 15:17:45 +0300 Subject: [PATCH 0343/1400] Update the err_Commands check functions to return the false first This commit refactors all the unhappy path check functions, returning early the false and removing the else branch. --- .../tests/pox-4/err_Commands.ts | 1172 +++++++++-------- 1 file changed, 590 insertions(+), 582 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 8cac237fc7..3d9738fefb 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -76,15 +76,15 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - !stacker.hasDelegated - ) { - model.trackCommandRun( - "StackStxAuthCommand_Err_Stacking_Already_Stacked_1", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + stacker.hasDelegated + ) return false; + + model.trackCommandRun( + "StackStxAuthCommand_Err_Stacking_Already_Stacked_1", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, ) @@ -111,15 +111,15 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.hasDelegated - ) { - model.trackCommandRun( - "StackStxAuthCommand_Err_Stacking_Already_Stacked_2", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + !stacker.hasDelegated + ) return false; + + model.trackCommandRun( + "StackStxAuthCommand_Err_Stacking_Already_Stacked_2", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, ) @@ -146,15 +146,15 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - !stacker.isStacking && - stacker.hasDelegated - ) { - model.trackCommandRun( - "StackStxAuthCommand_Err_Stacking_Already_Delegated", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + stacker.isStacking || + !stacker.hasDelegated + ) return false; + + model.trackCommandRun( + "StackStxAuthCommand_Err_Stacking_Already_Delegated", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ) @@ -181,15 +181,15 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - !stacker.hasDelegated - ) { - model.trackCommandRun( - "StackStxSigCommand_Err_Stacking_Already_Stacked_1", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + stacker.hasDelegated + ) return false; + + model.trackCommandRun( + "StackStxSigCommand_Err_Stacking_Already_Stacked_1", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, ) @@ -216,15 +216,15 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.hasDelegated - ) { - model.trackCommandRun( - "StackStxSigCommand_Err_Stacking_Already_Stacked_2", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + !stacker.hasDelegated + ) return false; + + model.trackCommandRun( + "StackStxSigCommand_Err_Stacking_Already_Stacked_2", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_ALREADY_STACKED, ) @@ -251,15 +251,15 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - !stacker.isStacking && - stacker.hasDelegated - ) { - model.trackCommandRun( - "StackStxSigCommand_Err_Stacking_Already_Delegated", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + stacker.isStacking || + !stacker.hasDelegated + ) return false; + + model.trackCommandRun( + "StackStxSigCommand_Err_Stacking_Already_Delegated", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ) @@ -277,14 +277,14 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - !stacker.hasDelegated - ) { - model.trackCommandRun( - "RevokeDelegateStxCommand_Err_Delegation_Already_Revoked", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + stacker.hasDelegated + ) return false; + + model.trackCommandRun( + "RevokeDelegateStxCommand_Err_Delegation_Already_Revoked", + ); + return true; }, POX_4_ERRORS.ERR_DELEGATION_ALREADY_REVOKED, ) @@ -312,14 +312,14 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - stacker.hasDelegated - ) { - model.trackCommandRun( - "DelegateStxCommand_Err_Stacking_Already_Delegated", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.hasDelegated + ) return false; + + model.trackCommandRun( + "DelegateStxCommand_Err_Stacking_Already_Delegated", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ) @@ -337,15 +337,15 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 && - !(operator.amountToCommit >= model.stackingMinimum) && - operator.amountToCommit > 0 - ) { - model.trackCommandRun( - "StackAggregationCommitSigCommand_Err_Stacking_Threshold_Not_Met", - ); - return true; - } else return false; + !(operator.lockedAddresses.length > 0) || + operator.amountToCommit >= model.stackingMinimum || + !(operator.amountToCommit > 0) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitSigCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, ), @@ -363,15 +363,15 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 && - !(operator.amountToCommit >= model.stackingMinimum) && - operator.amountToCommit === 0 - ) { - model.trackCommandRun( - "StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_1", - ); - return true; - } else return false; + !(operator.lockedAddresses.length > 0) || + (operator.amountToCommit >= model.stackingMinimum) || + operator.amountToCommit !== 0 + ) return false; + + model.trackCommandRun( + "StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), @@ -389,14 +389,14 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) && - !(operator.amountToCommit >= model.stackingMinimum) - ) { - model.trackCommandRun( - "StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_2", - ); - return true; - } else return false; + operator.lockedAddresses.length > 0 || + operator.amountToCommit >= model.stackingMinimum + ) return false; + + model.trackCommandRun( + "StackAggregationCommitSigCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), @@ -414,15 +414,15 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 && - !(operator.amountToCommit >= model.stackingMinimum) && - operator.amountToCommit > 0 - ) { - model.trackCommandRun( - "StackAggregationCommitAuthCommand_Err_Stacking_Threshold_Not_Met", - ); - return true; - } else return false; + !(operator.lockedAddresses.length > 0) || + operator.amountToCommit >= model.stackingMinimum || + !(operator.amountToCommit > 0) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitAuthCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, ), @@ -440,15 +440,15 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 && - !(operator.amountToCommit >= model.stackingMinimum) && - operator.amountToCommit === 0 - ) { - model.trackCommandRun( - "StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_1", - ); - return true; - } else return false; + !(operator.lockedAddresses.length > 0) || + operator.amountToCommit >= model.stackingMinimum || + !(operator.amountToCommit === 0) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), @@ -466,14 +466,14 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) && - !(operator.amountToCommit >= model.stackingMinimum) - ) { - model.trackCommandRun( - "StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_2", - ); - return true; - } else return false; + operator.lockedAddresses.length > 0 || + operator.amountToCommit >= model.stackingMinimum + ) return false; + + model.trackCommandRun( + "StackAggregationCommitAuthCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), @@ -491,15 +491,15 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 && - !(operator.amountToCommit >= model.stackingMinimum) && - operator.amountToCommit > 0 - ) { - model.trackCommandRun( - "StackAggregationCommitIndexedSigCommand_Err_Stacking_Threshold_Not_Met", - ); - return true; - } else return false; + !(operator.lockedAddresses.length > 0) || + operator.amountToCommit >= model.stackingMinimum || + !(operator.amountToCommit > 0) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitIndexedSigCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, ), @@ -517,15 +517,15 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 && - !(operator.amountToCommit >= model.stackingMinimum) && - !(operator.amountToCommit > 0) - ) { - model.trackCommandRun( - "StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_1", - ); - return true; - } else return false; + !(operator.lockedAddresses.length > 0) || + operator.amountToCommit >= model.stackingMinimum || + operator.amountToCommit > 0 + ) return false; + + model.trackCommandRun( + "StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), @@ -543,14 +543,14 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) && - !(operator.amountToCommit >= model.stackingMinimum) - ) { - model.trackCommandRun( - "StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_2", - ); - return true; - } else return false; + operator.lockedAddresses.length > 0 || + operator.amountToCommit >= model.stackingMinimum + ) return false; + + model.trackCommandRun( + "StackAggregationCommitIndexedSigCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), @@ -568,15 +568,15 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 && - !(operator.amountToCommit >= model.stackingMinimum) && - !(operator.amountToCommit > 0) - ) { - model.trackCommandRun( - "StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_1", - ); - return true; - } else return false; + !(operator.lockedAddresses.length > 0) || + operator.amountToCommit >= model.stackingMinimum || + operator.amountToCommit > 0 + ) return false; + + model.trackCommandRun( + "StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_1", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), @@ -594,14 +594,14 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) && - !(operator.amountToCommit >= model.stackingMinimum) - ) { - model.trackCommandRun( - "StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_2", - ); - return true; - } else return false; + operator.lockedAddresses.length > 0 || + operator.amountToCommit >= model.stackingMinimum + ) return false; + + model.trackCommandRun( + "StackAggregationCommitIndexedAuthCommand_Err_Stacking_No_Such_Principal_2", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), @@ -619,15 +619,15 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 && - !(operator.amountToCommit >= model.stackingMinimum) && - operator.amountToCommit > 0 - ) { - model.trackCommandRun( - "StackAggregationCommitIndexedAuthCommand_Err_Stacking_Threshold_Not_Met", - ); - return true; - } else return false; + !(operator.lockedAddresses.length > 0) || + operator.amountToCommit >= model.stackingMinimum || + !(operator.amountToCommit > 0) + ) return false; + + model.trackCommandRun( + "StackAggregationCommitIndexedAuthCommand_Err_Stacking_Threshold_Not_Met", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_THRESHOLD_NOT_MET, ), @@ -658,15 +658,15 @@ export function ErrCommands( function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 && - this.rewardCycleIndex >= 0 && - !(operator.amountToCommit > 0) - ) { - model.trackCommandRun( - "StackAggregationIncreaseCommand_Err_Stacking_No_Such_Principal", - ); - return true; - } else return false; + !(operator.lockedAddresses.length > 0) || + !(this.rewardCycleIndex >= 0) || + operator.amountToCommit > 0 + ) return false; + + model.trackCommandRun( + "StackAggregationIncreaseCommand_Err_Stacking_No_Such_Principal", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_NO_SUCH_PRINCIPAL, ), @@ -723,21 +723,21 @@ export function ErrCommands( const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; if ( - model.stackingMinimum > 0 && - !stackerWallet.isStacking && - stackerWallet.hasDelegated && - !(stackerWallet.delegatedMaxAmount >= Number(this.amountUstx)) && - Number(this.amountUstx) <= stackerWallet.ustxBalance && - Number(this.amountUstx) >= model.stackingMinimum && - operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - (stackerWallet.delegatedUntilBurnHt === undefined || + !(model.stackingMinimum > 0) || + stackerWallet.isStacking || + !(stackerWallet.hasDelegated) || + stackerWallet.delegatedMaxAmount >= Number(this.amountUstx) || + !(Number(this.amountUstx) <= stackerWallet.ustxBalance) || + !(Number(this.amountUstx) >= model.stackingMinimum) || + !(operatorWallet.poolMembers.includes(this.stacker.stxAddress)) || + !(stackerWallet.delegatedUntilBurnHt === undefined || this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) - ) { - model.trackCommandRun( - "DelegateStackStxCommand_Err_Delegation_Too_Much_Locked", - ); - return true; - } else return false; + ) return false; + + model.trackCommandRun( + "DelegateStackStxCommand_Err_Delegation_Too_Much_Locked", + ); + return true; }, POX_4_ERRORS.ERR_DELEGATION_TOO_MUCH_LOCKED, ); @@ -797,21 +797,21 @@ export function ErrCommands( const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; if ( - model.stackingMinimum > 0 && - !stackerWallet.isStacking && - stackerWallet.hasDelegated && - stackerWallet.delegatedMaxAmount >= Number(this.amountUstx) && - Number(this.amountUstx) <= stackerWallet.ustxBalance && - Number(this.amountUstx) >= model.stackingMinimum && - !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - (stackerWallet.delegatedUntilBurnHt === undefined || + !(model.stackingMinimum > 0) || + stackerWallet.isStacking || + !stackerWallet.hasDelegated || + !(stackerWallet.delegatedMaxAmount >= Number(this.amountUstx)) || + !(Number(this.amountUstx) <= stackerWallet.ustxBalance) || + !(Number(this.amountUstx) >= model.stackingMinimum) || + operatorWallet.poolMembers.includes(this.stacker.stxAddress) || + !(stackerWallet.delegatedUntilBurnHt === undefined || this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) - ) { - model.trackCommandRun( - "DelegateStackStxCommand_Err_Stacking_Permission_Denied_1", - ); - return true; - } else return false; + ) return false; + + model.trackCommandRun( + "DelegateStackStxCommand_Err_Stacking_Permission_Denied_1", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, ); @@ -868,21 +868,21 @@ export function ErrCommands( const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; if ( - model.stackingMinimum > 0 && - !stackerWallet.isStacking && - !(stackerWallet.hasDelegated) && - !(stackerWallet.delegatedMaxAmount >= Number(this.amountUstx)) && - Number(this.amountUstx) <= stackerWallet.ustxBalance && - Number(this.amountUstx) >= model.stackingMinimum && - !(operatorWallet.poolMembers.includes(this.stacker.stxAddress)) && - !(stackerWallet.delegatedUntilBurnHt === undefined || - this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) - ) { - model.trackCommandRun( - "DelegateStackStxCommand_Err_Stacking_Permission_Denied_2", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + stackerWallet.isStacking || + stackerWallet.hasDelegated || + stackerWallet.delegatedMaxAmount >= Number(this.amountUstx) || + !(Number(this.amountUstx) <= stackerWallet.ustxBalance) || + !(Number(this.amountUstx) >= model.stackingMinimum) || + operatorWallet.poolMembers.includes(this.stacker.stxAddress) && + (stackerWallet.delegatedUntilBurnHt === undefined || + this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) + ) return false; + + model.trackCommandRun( + "DelegateStackStxCommand_Err_Stacking_Permission_Denied_2", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, ); @@ -901,19 +901,19 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - !stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - this.increaseBy <= stacker.amountUnlocked && - this.increaseBy >= 1 - ) { - model.trackCommandRun( - "StackIncreaseSigCommand_Err_Stacking_Is_Delegated", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + stacker.isStackingSolo || + stacker.hasDelegated || + !(stacker.amountLocked > 0) || + !(this.increaseBy <= stacker.amountUnlocked) || + !(this.increaseBy >= 1) + ) return false; + + model.trackCommandRun( + "StackIncreaseSigCommand_Err_Stacking_Is_Delegated", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, ), @@ -932,19 +932,19 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - !(this.increaseBy <= stacker.amountUnlocked) && - this.increaseBy >= 1 - ) { - model.trackCommandRun( - "StackIncreaseSigCommand_Err_Stacking_Insufficient_Funds", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + !stacker.isStackingSolo || + stacker.hasDelegated || + !(stacker.amountLocked > 0) || + this.increaseBy <= stacker.amountUnlocked || + !(this.increaseBy >= 1) + ) return false; + + model.trackCommandRun( + "StackIncreaseSigCommand_Err_Stacking_Insufficient_Funds", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS, ), @@ -963,19 +963,19 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - this.increaseBy <= stacker.amountUnlocked && - !(this.increaseBy >= 1) - ) { - model.trackCommandRun( - "StackIncreaseSigCommand_Err_Stacking_Invalid_Amount", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + !stacker.isStackingSolo || + stacker.hasDelegated || + !(stacker.amountLocked > 0) || + !(this.increaseBy <= stacker.amountUnlocked) || + this.increaseBy >= 1 + ) return false; + + model.trackCommandRun( + "StackIncreaseSigCommand_Err_Stacking_Invalid_Amount", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, ), @@ -994,19 +994,19 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - !stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - this.increaseBy <= stacker.amountUnlocked && - this.increaseBy >= 1 - ) { - model.trackCommandRun( - "StackIncreaseAuthCommand_Err_Stacking_Is_Delegated", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + stacker.isStackingSolo || + stacker.hasDelegated || + !(stacker.amountLocked > 0) || + !(this.increaseBy <= stacker.amountUnlocked) || + !(this.increaseBy >= 1) + ) return false; + + model.trackCommandRun( + "StackIncreaseAuthCommand_Err_Stacking_Is_Delegated", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, ), @@ -1025,19 +1025,19 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - !(this.increaseBy <= stacker.amountUnlocked) && - this.increaseBy >= 1 - ) { - model.trackCommandRun( - "StackIncreaseAuthCommand_Err_Stacking_Insufficient_Funds", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + !stacker.isStackingSolo || + stacker.hasDelegated || + !(stacker.amountLocked > 0) || + this.increaseBy <= stacker.amountUnlocked || + !(this.increaseBy >= 1) + ) return false; + + model.trackCommandRun( + "StackIncreaseAuthCommand_Err_Stacking_Insufficient_Funds", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS, ), @@ -1056,19 +1056,19 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - this.increaseBy <= stacker.amountUnlocked && - !(this.increaseBy >= 1) - ) { - model.trackCommandRun( - "StackIncreaseAuthCommand_Err_Stacking_Invalid_Amount", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + !stacker.isStackingSolo || + stacker.hasDelegated || + !(stacker.amountLocked > 0) || + !(this.increaseBy <= stacker.amountUnlocked) || + this.increaseBy >= 1 + ) return false; + + model.trackCommandRun( + "StackIncreaseAuthCommand_Err_Stacking_Invalid_Amount", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, ), @@ -1105,19 +1105,19 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - !stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - stacker.poolMembers.length === 0 && - totalPeriod <= 12 - ) { - model.trackCommandRun( - "StackExtendSigCommand_Err_Stacking_Is_Delegated_1", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + stacker.isStackingSolo || + stacker.hasDelegated || + !(stacker.amountLocked > 0) || + !(stacker.poolMembers.length === 0) || + !(totalPeriod <= 12) + ) return false; + + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Is_Delegated_1", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, ), @@ -1154,19 +1154,19 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - !stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - !(stacker.poolMembers.length === 0) && - totalPeriod <= 12 - ) { - model.trackCommandRun( - "StackExtendSigCommand_Err_Stacking_Is_Delegated_2", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + stacker.isStackingSolo || + stacker.hasDelegated || + !(stacker.amountLocked > 0) || + stacker.poolMembers.length === 0 || + !(totalPeriod <= 12) + ) return false; + + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Is_Delegated_2", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, ), @@ -1203,19 +1203,19 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.isStackingSolo && - stacker.hasDelegated && - stacker.amountLocked > 0 && - stacker.poolMembers.length === 0 && - totalPeriod <= 12 - ) { - model.trackCommandRun( - "StackExtendSigCommand_Err_Stacking_Already_Delegated", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + !stacker.isStackingSolo || + !stacker.hasDelegated || + !(stacker.amountLocked > 0) || + !(stacker.poolMembers.length === 0) || + !(totalPeriod <= 12) + ) return false; + + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Already_Delegated", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ), @@ -1252,19 +1252,19 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - stacker.poolMembers.length === 0 && - !(totalPeriod <= 12) - ) { - model.trackCommandRun( - "StackExtendSigCommand_Err_Stacking_Invalid_Lock_Period", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + !stacker.isStackingSolo || + stacker.hasDelegated || + !(stacker.amountLocked > 0) || + !(stacker.poolMembers.length === 0) || + totalPeriod <= 12 + ) return false; + + model.trackCommandRun( + "StackExtendSigCommand_Err_Stacking_Invalid_Lock_Period", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD, ), @@ -1301,19 +1301,19 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - model.stackingMinimum > 0 && - !stacker.isStacking && - !stacker.isStackingSolo && - !stacker.hasDelegated && - !(stacker.amountLocked > 0) && - stacker.poolMembers.length === 0 && - totalPeriod <= 12 - ) { - model.trackCommandRun( - "StackExtendSigCommand_Err_Stack_Extend_Not_Locked", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + stacker.isStacking || + stacker.isStackingSolo || + stacker.hasDelegated || + stacker.amountLocked > 0 || + !(stacker.poolMembers.length === 0) || + !(totalPeriod <= 12) + ) return false; + + model.trackCommandRun( + "StackExtendSigCommand_Err_Stack_Extend_Not_Locked", + ); + return true; }, POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, ), @@ -1350,19 +1350,19 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - !stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - stacker.poolMembers.length === 0 && - totalPeriod <= 12 - ) { - model.trackCommandRun( - "StackExtendAuthCommand_Err_Stacking_Is_Delegated_1", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + stacker.isStackingSolo || + stacker.hasDelegated || + !(stacker.amountLocked > 0) || + !(stacker.poolMembers.length === 0) || + !(totalPeriod <= 12) + ) return false; + + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Is_Delegated_1", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, ), @@ -1399,19 +1399,19 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - !stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - !(stacker.poolMembers.length === 0) && - totalPeriod <= 12 - ) { - model.trackCommandRun( - "StackExtendAuthCommand_Err_Stacking_Is_Delegated_2", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + stacker.isStackingSolo || + stacker.hasDelegated || + !(stacker.amountLocked > 0) || + stacker.poolMembers.length === 0 || + !(totalPeriod <= 12) + ) return false; + + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Is_Delegated_2", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_IS_DELEGATED, ), @@ -1448,19 +1448,19 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.isStackingSolo && - stacker.hasDelegated && - stacker.amountLocked > 0 && - stacker.poolMembers.length === 0 && - totalPeriod <= 12 - ) { - model.trackCommandRun( - "StackExtendAuthCommand_Err_Stacking_Already_Delegated", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + !stacker.isStackingSolo || + !stacker.hasDelegated || + !(stacker.amountLocked > 0) || + !(stacker.poolMembers.length === 0) || + !(totalPeriod <= 12) + ) return false; + + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Already_Delegated", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_ALREADY_DELEGATED, ), @@ -1497,19 +1497,19 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - stacker.poolMembers.length === 0 && - !(totalPeriod <= 12) - ) { - model.trackCommandRun( - "StackExtendAuthCommand_Err_Stacking_Invalid_Lock_Period", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + !stacker.isStacking || + !stacker.isStackingSolo || + stacker.hasDelegated || + !(stacker.amountLocked > 0) || + !(stacker.poolMembers.length === 0) || + totalPeriod <= 12 + ) return false; + + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stacking_Invalid_Lock_Period", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD, ), @@ -1546,19 +1546,19 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - model.stackingMinimum > 0 && - !stacker.isStacking && - !stacker.isStackingSolo && - !stacker.hasDelegated && - !(stacker.amountLocked > 0) && - stacker.poolMembers.length === 0 && - totalPeriod <= 12 - ) { - model.trackCommandRun( - "StackExtendAuthCommand_Err_Stack_Extend_Not_Locked", - ); - return true; - } else return false; + !(model.stackingMinimum > 0) || + stacker.isStacking || + stacker.isStackingSolo || + stacker.hasDelegated || + stacker.amountLocked > 0 || + !(stacker.poolMembers.length === 0) || + !(totalPeriod <= 12) + ) return false; + + model.trackCommandRun( + "StackExtendAuthCommand_Err_Stack_Extend_Not_Locked", + ); + return true; }, POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, ), @@ -1618,24 +1618,24 @@ export function ErrCommands( const stackedAmount = stackerWallet.amountLocked; if ( - stackerWallet.amountLocked > 0 && - stackerWallet.hasDelegated === true && - stackerWallet.isStacking === true && - stackerWallet.delegatedTo === this.operator.stxAddress && - !(stackerWallet.delegatedUntilBurnHt === undefined || - stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && - stackerWallet.delegatedMaxAmount >= stackedAmount && - operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - operatorWallet.lockedAddresses.includes( + !(stackerWallet.amountLocked > 0) || + !stackerWallet.hasDelegated || + !stackerWallet.isStacking || + !(stackerWallet.delegatedTo === this.operator.stxAddress) || + (stackerWallet.delegatedUntilBurnHt === undefined || + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) || + !(stackerWallet.delegatedMaxAmount >= stackedAmount) || + !(operatorWallet.poolMembers.includes(this.stacker.stxAddress)) || + !operatorWallet.lockedAddresses.includes( this.stacker.stxAddress, - ) && - !(totalPeriod <= 12) - ) { - model.trackCommandRun( - "DelegateStackExtendCommand_Err_Stacking_Invalid_Lock_Period", - ); - return true; - } else return false; + ) || + totalPeriod <= 12 + ) return false; + + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stacking_Invalid_Lock_Period", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_INVALID_LOCK_PERIOD, ), @@ -1697,25 +1697,25 @@ export function ErrCommands( const stackedAmount = stackerWallet.amountLocked; if ( - stackerWallet.amountLocked > 0 && - !(stackerWallet.hasDelegated === true) && - stackerWallet.isStacking === true && - stackerWallet.isStackingSolo === true && - !(stackerWallet.delegatedTo === this.operator.stxAddress) && - !(stackerWallet.delegatedUntilBurnHt === undefined || - stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && - !(stackerWallet.delegatedMaxAmount >= stackedAmount) && - !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - !operatorWallet.lockedAddresses.includes( + !(stackerWallet.amountLocked > 0) || + stackerWallet.hasDelegated || + !stackerWallet.isStacking || + !stackerWallet.isStackingSolo || + stackerWallet.delegatedTo === this.operator.stxAddress || + (stackerWallet.delegatedUntilBurnHt === undefined || + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) || + stackerWallet.delegatedMaxAmount >= stackedAmount || + operatorWallet.poolMembers.includes(this.stacker.stxAddress) || + operatorWallet.lockedAddresses.includes( this.stacker.stxAddress, - ) && - totalPeriod <= 12 - ) { - model.trackCommandRun( - "DelegateStackExtendCommand_Err_Stacking_Not_Delegated", - ); - return true; - } else return false; + ) || + !(totalPeriod <= 12) + ) return false; + + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stacking_Not_Delegated", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_NOT_DELEGATED, ), @@ -1774,24 +1774,24 @@ export function ErrCommands( FIRST_BURNCHAIN_BLOCK_HEIGHT; const stackedAmount = stackerWallet.amountLocked; if ( - !(stackerWallet.amountLocked > 0) && - stackerWallet.hasDelegated === true && - !(stackerWallet.isStacking === true) && - !(stackerWallet.delegatedTo === this.operator.stxAddress) && - (stackerWallet.delegatedUntilBurnHt === undefined || - stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && - stackerWallet.delegatedMaxAmount >= stackedAmount && - !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - !operatorWallet.lockedAddresses.includes( + stackerWallet.amountLocked > 0 || + !stackerWallet.hasDelegated || + stackerWallet.isStacking || + stackerWallet.delegatedTo === this.operator.stxAddress || + !(stackerWallet.delegatedUntilBurnHt === undefined || + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) || + stackerWallet.delegatedMaxAmount >= stackedAmount || + operatorWallet.poolMembers.includes(this.stacker.stxAddress) || + operatorWallet.lockedAddresses.includes( this.stacker.stxAddress, - ) && - totalPeriod <= 12 - ) { - model.trackCommandRun( - "DelegateStackExtendCommand_Err_Stack_Extend_Not_Locked", - ); - return true; - } else return false; + ) || + !(totalPeriod <= 12) + ) return false; + + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stack_Extend_Not_Locked", + ); + return true; }, POX_4_ERRORS.ERR_STACK_EXTEND_NOT_LOCKED, ), @@ -1833,24 +1833,24 @@ export function ErrCommands( const stackedAmount = stackerWallet.amountLocked; if ( - stackerWallet.amountLocked > 0 && - !(stackerWallet.hasDelegated === true) && - stackerWallet.isStacking === true && - !(stackerWallet.delegatedTo === this.operator.stxAddress) && - !(stackerWallet.delegatedUntilBurnHt === undefined || - stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && - !(stackerWallet.delegatedMaxAmount >= stackedAmount) && - !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - operatorWallet.lockedAddresses.includes( + !(stackerWallet.amountLocked > 0) || + stackerWallet.hasDelegated || + !stackerWallet.isStacking || + stackerWallet.delegatedTo === this.operator.stxAddress || + (stackerWallet.delegatedUntilBurnHt === undefined || + stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) || + stackerWallet.delegatedMaxAmount >= stackedAmount || + operatorWallet.poolMembers.includes(this.stacker.stxAddress) || + !operatorWallet.lockedAddresses.includes( this.stacker.stxAddress, - ) && - totalPeriod <= 12 - ) { - model.trackCommandRun( - "DelegateStackExtendCommand_Err_Stacking_Permission_Denied", - ); - return true; - } else return false; + ) || + !(totalPeriod <= 12) + ) return false; + + model.trackCommandRun( + "DelegateStackExtendCommand_Err_Stacking_Permission_Denied", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, ), @@ -1893,25 +1893,27 @@ export function ErrCommands( )!; if ( - stackerWallet.amountLocked > 0 && - stackerWallet.hasDelegated === true && - stackerWallet.isStacking === true && - this.increaseBy > 0 && - operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - !(stackerWallet.amountUnlocked >= this.increaseBy) && - !( + !(stackerWallet.amountLocked > 0) || + !(stackerWallet.hasDelegated) || + !stackerWallet.isStacking || + !(this.increaseBy > 0) || + !operatorWallet.poolMembers.includes(this.stacker.stxAddress) || + stackerWallet.amountUnlocked >= this.increaseBy || + ( stackerWallet.delegatedMaxAmount >= this.increaseBy + stackerWallet.amountLocked - ) && - operatorWallet.lockedAddresses.indexOf( - this.stacker.stxAddress, - ) > -1 - ) { - model.trackCommandRun( - "DelegateStackIncreaseCommand_Err_Stacking_Insufficient_Funds", - ); - return true; - } else return false; + ) || + !(operatorWallet.lockedAddresses.indexOf( + this.stacker.stxAddress, + ) > -1) || + !(stackerWallet.unlockHeight > + model.burnBlockHeight + REWARD_CYCLE_LENGTH) + ) return false; + + model.trackCommandRun( + "DelegateStackIncreaseCommand_Err_Stacking_Insufficient_Funds", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_INSUFFICIENT_FUNDS, ), @@ -1954,23 +1956,25 @@ export function ErrCommands( )!; if ( - stackerWallet.amountLocked > 0 && - stackerWallet.hasDelegated === true && - stackerWallet.isStacking === true && - !(this.increaseBy > 0) && - operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - stackerWallet.amountUnlocked >= this.increaseBy && - stackerWallet.delegatedMaxAmount >= - this.increaseBy + stackerWallet.amountLocked && - operatorWallet.lockedAddresses.indexOf( - this.stacker.stxAddress, - ) > -1 - ) { - model.trackCommandRun( - "DelegateStackIncreaseCommand_Err_Stacking_Invalid_Amount", - ); - return true; - } else return false; + !(stackerWallet.amountLocked > 0) || + !stackerWallet.hasDelegated || + !stackerWallet.isStacking || + this.increaseBy > 0 || + !operatorWallet.poolMembers.includes(this.stacker.stxAddress) || + !(stackerWallet.amountUnlocked >= this.increaseBy) || + !(stackerWallet.delegatedMaxAmount >= + this.increaseBy + stackerWallet.amountLocked) || + !(operatorWallet.lockedAddresses.indexOf( + this.stacker.stxAddress, + ) > -1) || + !(stackerWallet.unlockHeight > + model.burnBlockHeight + REWARD_CYCLE_LENGTH) + ) return false; + + model.trackCommandRun( + "DelegateStackIncreaseCommand_Err_Stacking_Invalid_Amount", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_INVALID_AMOUNT, ), @@ -2013,28 +2017,30 @@ export function ErrCommands( )!; if ( - stackerWallet.amountLocked > 0 && - !(stackerWallet.hasDelegated === true) && - stackerWallet.isStacking === true && - stackerWallet.isStackingSolo === true && - this.increaseBy > 0 && - !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - stackerWallet.amountUnlocked >= this.increaseBy && - !( + !(stackerWallet.amountLocked > 0) || + stackerWallet.hasDelegated || + !stackerWallet.isStacking || + !stackerWallet.isStackingSolo || + !(this.increaseBy > 0) || + operatorWallet.poolMembers.includes(this.stacker.stxAddress) || + !(stackerWallet.amountUnlocked >= this.increaseBy) || + ( stackerWallet.delegatedMaxAmount >= this.increaseBy + stackerWallet.amountLocked - ) && - !( + ) || + ( operatorWallet.lockedAddresses.indexOf( this.stacker.stxAddress, ) > -1 - ) - ) { - model.trackCommandRun( - "DelegateStackIncreaseCommand_Err_Stacking_Not_Delegated", - ); - return true; - } else return false; + ) || + !(stackerWallet.unlockHeight > + model.burnBlockHeight + REWARD_CYCLE_LENGTH) + ) return false; + + model.trackCommandRun( + "DelegateStackIncreaseCommand_Err_Stacking_Not_Delegated", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_NOT_DELEGATED, ), @@ -2077,25 +2083,27 @@ export function ErrCommands( )!; if ( - stackerWallet.amountLocked > 0 && - !(stackerWallet.hasDelegated === true) && - stackerWallet.isStacking === true && - this.increaseBy > 0 && - !operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - stackerWallet.amountUnlocked >= this.increaseBy && - !( + !(stackerWallet.amountLocked > 0) || + stackerWallet.hasDelegated || + !stackerWallet.isStacking || + !(this.increaseBy > 0) || + operatorWallet.poolMembers.includes(this.stacker.stxAddress) || + !(stackerWallet.amountUnlocked >= this.increaseBy) || + ( stackerWallet.delegatedMaxAmount >= this.increaseBy + stackerWallet.amountLocked - ) && - operatorWallet.lockedAddresses.indexOf( - this.stacker.stxAddress, - ) > -1 - ) { - model.trackCommandRun( - "DelegateStackIncreaseCommand_Err_Stacking_Permission_Denied", - ); - return true; - } else return false; + ) || + !(operatorWallet.lockedAddresses.indexOf( + this.stacker.stxAddress, + ) > -1) || + !(stackerWallet.unlockHeight > + model.burnBlockHeight + REWARD_CYCLE_LENGTH) + ) return false; + + model.trackCommandRun( + "DelegateStackIncreaseCommand_Err_Stacking_Permission_Denied", + ); + return true; }, POX_4_ERRORS.ERR_STACKING_PERMISSION_DENIED, ), @@ -2115,18 +2123,18 @@ export function ErrCommands( this.callerToDisallow.stxAddress, )!; if ( - !stacker.allowedContractCallers.includes( + stacker.allowedContractCallers.includes( this.callerToDisallow.stxAddress, - ) && - !callerToDisallow.callerAllowedBy.includes( - this.stacker.stxAddress, - ) === true - ) { - model.trackCommandRun( - "DisallowContractCallerCommand_Err", - ); - return true; - } else return false; + ) || + callerToDisallow.callerAllowedBy.includes( + this.stacker.stxAddress, + ) + ) return false; + + model.trackCommandRun( + "DisallowContractCallerCommand_Err", + ); + return true; }, ), ), From d10d7600a69470220b736f01e9a4635e3248c6ce Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 20 Jun 2024 15:30:59 +0300 Subject: [PATCH 0344/1400] Update the way `firstRewardCycle` is calculated This commit: - simplifies the way firstRewardCycle is calculated, replacing the ternary operator with the max Math method. - standardizes the const name of the stacker retrieved from the model. --- .../tests/pox-4/err_Commands.ts | 218 +++++++++--------- 1 file changed, 109 insertions(+), 109 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 3d9738fefb..916c3cba09 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -1094,10 +1094,10 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; - const firstRewardCycle = - stacker.firstLockedRewardCycle < this.currentCycle - ? this.currentCycle - : stacker.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, @@ -1143,10 +1143,10 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; - const firstRewardCycle = - stacker.firstLockedRewardCycle < this.currentCycle - ? this.currentCycle - : stacker.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, @@ -1192,10 +1192,10 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; - const firstRewardCycle = - stacker.firstLockedRewardCycle < this.currentCycle - ? this.currentCycle - : stacker.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, @@ -1241,10 +1241,10 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; - const firstRewardCycle = - stacker.firstLockedRewardCycle < this.currentCycle - ? this.currentCycle - : stacker.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, @@ -1290,10 +1290,10 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; - const firstRewardCycle = - stacker.firstLockedRewardCycle < this.currentCycle - ? this.currentCycle - : stacker.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, @@ -1339,10 +1339,10 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; - const firstRewardCycle = - stacker.firstLockedRewardCycle < this.currentCycle - ? this.currentCycle - : stacker.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, @@ -1388,10 +1388,10 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; - const firstRewardCycle = - stacker.firstLockedRewardCycle < this.currentCycle - ? this.currentCycle - : stacker.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, @@ -1437,10 +1437,10 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; - const firstRewardCycle = - stacker.firstLockedRewardCycle < this.currentCycle - ? this.currentCycle - : stacker.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, @@ -1486,10 +1486,10 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; - const firstRewardCycle = - stacker.firstLockedRewardCycle < this.currentCycle - ? this.currentCycle - : stacker.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, @@ -1535,10 +1535,10 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; - const firstRewardCycle = - stacker.firstLockedRewardCycle < this.currentCycle - ? this.currentCycle - : stacker.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, @@ -1595,19 +1595,19 @@ export function ErrCommands( final.extendCount, final.currentCycle, function (this, model) { - const operatorWallet = model.stackers.get( + const operator = model.stackers.get( this.operator.stxAddress, )!; - const stackerWallet = model.stackers.get( + const stacker = model.stackers.get( this.stacker.stxAddress, )!; - const firstRewardCycle = - this.currentCycle > stackerWallet.firstLockedRewardCycle - ? this.currentCycle - : stackerWallet.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( - (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, ); const lastExtendCycle = firstExtendCycle + this.extendCount - 1; @@ -1615,18 +1615,18 @@ export function ErrCommands( const newUnlockHeight = REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + FIRST_BURNCHAIN_BLOCK_HEIGHT; - const stackedAmount = stackerWallet.amountLocked; + const stackedAmount = stacker.amountLocked; if ( - !(stackerWallet.amountLocked > 0) || - !stackerWallet.hasDelegated || - !stackerWallet.isStacking || - !(stackerWallet.delegatedTo === this.operator.stxAddress) || - (stackerWallet.delegatedUntilBurnHt === undefined || - stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) || - !(stackerWallet.delegatedMaxAmount >= stackedAmount) || - !(operatorWallet.poolMembers.includes(this.stacker.stxAddress)) || - !operatorWallet.lockedAddresses.includes( + !(stacker.amountLocked > 0) || + !stacker.hasDelegated || + !stacker.isStacking || + !(stacker.delegatedTo === this.operator.stxAddress) || + (stacker.delegatedUntilBurnHt === undefined || + stacker.delegatedUntilBurnHt >= newUnlockHeight) || + !(stacker.delegatedMaxAmount >= stackedAmount) || + !(operator.poolMembers.includes(this.stacker.stxAddress)) || + !operator.lockedAddresses.includes( this.stacker.stxAddress, ) || totalPeriod <= 12 @@ -1674,19 +1674,19 @@ export function ErrCommands( final.extendCount, final.currentCycle, function (this, model) { - const operatorWallet = model.stackers.get( + const operator = model.stackers.get( this.operator.stxAddress, )!; - const stackerWallet = model.stackers.get( + const stacker = model.stackers.get( this.stacker.stxAddress, )!; - const firstRewardCycle = - this.currentCycle > stackerWallet.firstLockedRewardCycle - ? this.currentCycle - : stackerWallet.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( - (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, ); const lastExtendCycle = firstExtendCycle + this.extendCount - 1; @@ -1694,19 +1694,19 @@ export function ErrCommands( const newUnlockHeight = REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + FIRST_BURNCHAIN_BLOCK_HEIGHT; - const stackedAmount = stackerWallet.amountLocked; + const stackedAmount = stacker.amountLocked; if ( - !(stackerWallet.amountLocked > 0) || - stackerWallet.hasDelegated || - !stackerWallet.isStacking || - !stackerWallet.isStackingSolo || - stackerWallet.delegatedTo === this.operator.stxAddress || - (stackerWallet.delegatedUntilBurnHt === undefined || - stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) || - stackerWallet.delegatedMaxAmount >= stackedAmount || - operatorWallet.poolMembers.includes(this.stacker.stxAddress) || - operatorWallet.lockedAddresses.includes( + !(stacker.amountLocked > 0) || + stacker.hasDelegated || + !stacker.isStacking || + !stacker.isStackingSolo || + stacker.delegatedTo === this.operator.stxAddress || + (stacker.delegatedUntilBurnHt === undefined || + stacker.delegatedUntilBurnHt >= newUnlockHeight) || + stacker.delegatedMaxAmount >= stackedAmount || + operator.poolMembers.includes(this.stacker.stxAddress) || + operator.lockedAddresses.includes( this.stacker.stxAddress, ) || !(totalPeriod <= 12) @@ -1752,19 +1752,19 @@ export function ErrCommands( final.extendCount, final.currentCycle, function (this, model) { - const operatorWallet = model.stackers.get( + const operator = model.stackers.get( this.operator.stxAddress, )!; - const stackerWallet = model.stackers.get( + const stacker = model.stackers.get( this.stacker.stxAddress, )!; - const firstRewardCycle = - this.currentCycle > stackerWallet.firstLockedRewardCycle - ? this.currentCycle - : stackerWallet.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( - (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, ); const lastExtendCycle = firstExtendCycle + this.extendCount - 1; @@ -1772,17 +1772,17 @@ export function ErrCommands( const newUnlockHeight = REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + FIRST_BURNCHAIN_BLOCK_HEIGHT; - const stackedAmount = stackerWallet.amountLocked; + const stackedAmount = stacker.amountLocked; if ( - stackerWallet.amountLocked > 0 || - !stackerWallet.hasDelegated || - stackerWallet.isStacking || - stackerWallet.delegatedTo === this.operator.stxAddress || - !(stackerWallet.delegatedUntilBurnHt === undefined || - stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) || - stackerWallet.delegatedMaxAmount >= stackedAmount || - operatorWallet.poolMembers.includes(this.stacker.stxAddress) || - operatorWallet.lockedAddresses.includes( + stacker.amountLocked > 0 || + !stacker.hasDelegated || + stacker.isStacking || + stacker.delegatedTo === this.operator.stxAddress || + !(stacker.delegatedUntilBurnHt === undefined || + stacker.delegatedUntilBurnHt >= newUnlockHeight) || + stacker.delegatedMaxAmount >= stackedAmount || + operator.poolMembers.includes(this.stacker.stxAddress) || + operator.lockedAddresses.includes( this.stacker.stxAddress, ) || !(totalPeriod <= 12) @@ -1810,19 +1810,19 @@ export function ErrCommands( final.extendCount, final.currentCycle, function (this, model) { - const operatorWallet = model.stackers.get( + const operator = model.stackers.get( this.operator.stxAddress, )!; - const stackerWallet = model.stackers.get( + const stacker = model.stackers.get( this.stacker.stxAddress, )!; - const firstRewardCycle = - this.currentCycle > stackerWallet.firstLockedRewardCycle - ? this.currentCycle - : stackerWallet.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( - (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / + (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, ); const lastExtendCycle = firstExtendCycle + this.extendCount - 1; @@ -1830,18 +1830,18 @@ export function ErrCommands( const newUnlockHeight = REWARD_CYCLE_LENGTH * (firstRewardCycle + totalPeriod - 1) + FIRST_BURNCHAIN_BLOCK_HEIGHT; - const stackedAmount = stackerWallet.amountLocked; + const stackedAmount = stacker.amountLocked; if ( - !(stackerWallet.amountLocked > 0) || - stackerWallet.hasDelegated || - !stackerWallet.isStacking || - stackerWallet.delegatedTo === this.operator.stxAddress || - (stackerWallet.delegatedUntilBurnHt === undefined || - stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) || - stackerWallet.delegatedMaxAmount >= stackedAmount || - operatorWallet.poolMembers.includes(this.stacker.stxAddress) || - !operatorWallet.lockedAddresses.includes( + !(stacker.amountLocked > 0) || + stacker.hasDelegated || + !stacker.isStacking || + stacker.delegatedTo === this.operator.stxAddress || + (stacker.delegatedUntilBurnHt === undefined || + stacker.delegatedUntilBurnHt >= newUnlockHeight) || + stacker.delegatedMaxAmount >= stackedAmount || + operator.poolMembers.includes(this.stacker.stxAddress) || + !operator.lockedAddresses.includes( this.stacker.stxAddress, ) || !(totalPeriod <= 12) From 7c8c540192f6917a7a6925ed822eb000d74466c4 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 20 Jun 2024 16:29:32 +0300 Subject: [PATCH 0345/1400] Remove unused `Stub` import inside `err_Commands` --- .../tests/pox-4/err_Commands.ts | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index 916c3cba09..cbf657fd96 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -1,12 +1,6 @@ import fc from "fast-check"; import { Simnet } from "@hirosystems/clarinet-sdk"; -import { - PoxCommand, - Stacker, - Stub, - StxAddress, - Wallet, -} from "./pox_CommandModel"; +import { PoxCommand, Stacker, StxAddress, Wallet } from "./pox_CommandModel"; import { currentCycle, currentCycleFirstBlock, From 934d55423a7d2ba6122b1cc05148ba07c3ff41f3 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Thu, 20 Jun 2024 16:35:03 +0300 Subject: [PATCH 0346/1400] Update the way `firstRewardCycle` is calculated all over the stateful testing environment --- .../tests/pox-4/pox_DelegateStackExtendCommand.ts | 8 ++++---- .../tests/pox-4/pox_StackExtendAuthCommand.ts | 7 ++++--- .../tests/pox-4/pox_StackExtendSigCommand.ts | 7 ++++--- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts index 0151967932..2db12e5f40 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts @@ -64,10 +64,10 @@ export class DelegateStackExtendCommand implements PoxCommand { const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; - const firstRewardCycle = - this.currentCycle > stackerWallet.firstLockedRewardCycle - ? this.currentCycle - : stackerWallet.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stackerWallet.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stackerWallet.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts index 13a835347f..ffcae01512 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts @@ -51,9 +51,10 @@ export class StackExtendAuthCommand implements PoxCommand { // - The new lock period must be less than or equal to 12. const stacker = model.stackers.get(this.wallet.stxAddress)!; - const firstRewardCycle = stacker.firstLockedRewardCycle < this.currentCycle - ? this.currentCycle - : stacker.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts index 5b23d021f6..4ac87cd853 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts @@ -50,9 +50,10 @@ export class StackExtendSigCommand implements PoxCommand { // - The new lock period must be less than or equal to 12. const stacker = model.stackers.get(this.wallet.stxAddress)!; - const firstRewardCycle = stacker.firstLockedRewardCycle < this.currentCycle - ? this.currentCycle - : stacker.firstLockedRewardCycle; + const firstRewardCycle = Math.max( + stacker.firstLockedRewardCycle, + this.currentCycle, + ); const firstExtendCycle = Math.floor( (stacker.unlockHeight - FIRST_BURNCHAIN_BLOCK_HEIGHT) / REWARD_CYCLE_LENGTH, From e8487e7f540671fd540baa2b9321bfe97cbacc5a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 20 Jun 2024 10:39:09 -0500 Subject: [PATCH 0347/1400] chore: do not consider orphaned blocks during replay-block, they wont be loaded by the staging db in any event --- stackslib/src/main.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 99d56a4361..63a788cdf6 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -899,11 +899,11 @@ simulating a miner. let query = match mode { Some("prefix") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE index_block_hash LIKE \"{}%\"", + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", argv[4] ), Some("first") => format!( - "SELECT index_block_hash FROM staging_blocks ORDER BY height ASC LIMIT {}", + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", argv[4] ), Some("range") => { @@ -913,7 +913,7 @@ simulating a miner. let arg5 = argv[5].parse::().expect(" not a valid u64"); let start = arg4.saturating_sub(1); let blocks = arg5.saturating_sub(arg4); - format!("SELECT index_block_hash FROM staging_blocks ORDER BY height ASC LIMIT {start}, {blocks}") + format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}") } Some("index-range") => { let start = argv[4] @@ -921,15 +921,15 @@ simulating a miner. .expect(" not a valid u64"); let end = argv[5].parse::().expect(" not a valid u64"); let blocks = end.saturating_sub(start); - format!("SELECT index_block_hash FROM staging_blocks ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") + format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") } Some("last") => format!( - "SELECT index_block_hash FROM staging_blocks ORDER BY height DESC LIMIT {}", + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", argv[4] ), Some(_) => print_help_and_exit(), // Default to ALL blocks - None => "SELECT index_block_hash FROM staging_blocks".into(), + None => "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0".into(), }; let mut stmt = conn.prepare(&query).unwrap(); From 86360aeaefaa220bd7001b85ae07e9f7fd01ebba Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 20 Jun 2024 16:53:11 -0400 Subject: [PATCH 0348/1400] refactor: Minor improvements --- stackslib/src/chainstate/coordinator/comm.rs | 4 +-- .../stacks-node/src/nakamoto_node/relayer.rs | 33 +++++++++---------- testnet/stacks-node/src/neon_node.rs | 9 +++-- 3 files changed, 21 insertions(+), 25 deletions(-) diff --git a/stackslib/src/chainstate/coordinator/comm.rs b/stackslib/src/chainstate/coordinator/comm.rs index 374ab72996..cc6c2f1b3b 100644 --- a/stackslib/src/chainstate/coordinator/comm.rs +++ b/stackslib/src/chainstate/coordinator/comm.rs @@ -247,8 +247,8 @@ impl CoordinatorCommunication { }; let rcvrs = CoordinatorReceivers { - signal_bools: signal_bools, - signal_wakeup: signal_wakeup, + signal_bools, + signal_wakeup, stacks_blocks_processed, sortitions_processed, refresh_stacker_db, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 751649c627..89a5a13a0a 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -741,25 +741,22 @@ impl RelayerThread { return None; } - // do we need a VRF key registration? - if matches!( - self.globals.get_leader_key_registration_state(), - LeaderKeyRegistrationState::Inactive - ) { - let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) else { - warn!("Failed to fetch sortition tip while needing to register VRF key"); + match self.globals.get_leader_key_registration_state() { + // do we need a VRF key registration? + LeaderKeyRegistrationState::Inactive => { + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) + else { + warn!("Failed to fetch sortition tip while needing to register VRF key"); + return None; + }; + return Some(RelayerDirective::RegisterKey(sort_tip)); + } + // are we still waiting on a pending registration? + LeaderKeyRegistrationState::Pending(..) => { return None; - }; - return Some(RelayerDirective::RegisterKey(sort_tip)); - } - - // are we still waiting on a pending registration? - if !matches!( - self.globals.get_leader_key_registration_state(), - LeaderKeyRegistrationState::Active(_) - ) { - return None; - } + } + LeaderKeyRegistrationState::Active(_) => {} + }; // has there been a new sortition let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) else { diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index a27564a556..aa88f6bb00 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -4835,15 +4835,14 @@ impl StacksNode { memo: vec![], }) } else { + // Warn the user that they need to set up a miner key + if miner && config.miner.mining_key.is_none() { + warn!("`[miner.mining_key]` not set in config file. This will be required to mine in Epoch 3.0!") + } LeaderKeyRegistrationState::Inactive }; globals.set_initial_leader_key_registration_state(leader_key_registration_state); - // Warn the user that they need to set up a miner key - if miner && !mock_mining && config.miner.mining_key.is_none() { - warn!("`[miner.mining_key]` not set in config file. This will be required to mine in Epoch 3.0!") - } - let relayer_thread = RelayerThread::new(runloop, local_peer.clone(), relayer); StacksNode::set_monitoring_miner_address(&keychain, &relayer_thread); From 8c79184292f640fe69912d1fb3604b6bc243f5b4 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 20 Jun 2024 16:54:03 -0400 Subject: [PATCH 0349/1400] fix: Use Aaron's suggestion to get Bitcoin integration tests working again --- .../src/tests/nakamoto_integrations.rs | 117 +++--------------- 1 file changed, 14 insertions(+), 103 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c630776d5c..f82ba7c010 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1133,14 +1133,7 @@ fn simple_neon_integration() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -1362,14 +1355,7 @@ fn mine_multiple_per_tenure_integration() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -1687,14 +1673,7 @@ fn correct_burn_outs() { ); assert_eq!(stacker_response.stacker_set.rewarded_addresses.len(), 1); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -1897,14 +1876,7 @@ fn block_proposal_api_endpoint() { info!("Nakamoto miner started..."); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -2248,14 +2220,8 @@ fn miner_writes_proposed_block_to_stackerdb() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -2390,14 +2356,8 @@ fn vote_for_aggregate_key_burn_op() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -2657,14 +2617,7 @@ fn follower_bootup() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -2887,14 +2840,8 @@ fn stack_stx_burn_op_integration_test() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -3339,18 +3286,10 @@ fn forked_tenure_is_ignored() { blind_signer(&naka_conf, &signers, proposals_submitted); info!("Starting tenure A."); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted - let commits_before = commits_submitted.load(Ordering::SeqCst); + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count > commits_before) + Ok(commits_count >= 1) }) .unwrap(); @@ -3648,14 +3587,7 @@ fn check_block_heights() { let preheights = heights0_value.expect_tuple().unwrap(); info!("Heights from pre-epoch 3.0: {}", preheights); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -4045,14 +3977,7 @@ fn nakamoto_attempt_time() { info!("Nakamoto miner started..."); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -4318,14 +4243,7 @@ fn clarity_burn_state() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -4601,14 +4519,7 @@ fn signer_chainstate() { false, ); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) From 3057aaae249b2e186f5bb7b66024b5c80f5bf356 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 21 Jun 2024 02:15:15 +0300 Subject: [PATCH 0350/1400] Add helper functions to use in the commands' check methods --- .../tests/pox-4/pox_CommandModel.ts | 255 ++++++++++++++++++ ...tackAggregationCommitIndexedAuthCommand.ts | 6 +- ...StackAggregationCommitIndexedSigCommand.ts | 8 +- .../pox_StackAggregationCommitSigCommand.ts | 10 +- .../pox_StackAggregationIncreaseCommand.ts | 9 +- .../tests/pox-4/pox_StackExtendAuthCommand.ts | 29 +- .../tests/pox-4/pox_StackExtendSigCommand.ts | 29 +- .../pox-4/pox_StackIncreaseAuthCommand.ts | 29 +- .../pox-4/pox_StackIncreaseSigCommand.ts | 29 +- .../tests/pox-4/pox_StackStxAuthCommand.ts | 7 +- .../tests/pox-4/pox_StackStxSigCommand.ts | 7 +- 11 files changed, 373 insertions(+), 45 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts index 653a1acbff..5b6cb95c27 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_CommandModel.ts @@ -255,3 +255,258 @@ export const logCommand = (...items: (string | undefined)[]) => { process.stdout.write(prettyPrint.join("")); }; + +/** + * Helper function that checks if the minimum uSTX threshold was set in the model. + * @param model - the model at a given moment in time. + * @returns boolean. + */ +export const isStackingMinimumCalculated = (model: Readonly): boolean => + model.stackingMinimum > 0; + +/** + * Helper function that checks if a stacker is currently stacking. + * @param stacker - the stacker's state at a given moment in time. + * @returns boolean. + */ +export const isStacking = (stacker: Stacker): boolean => + stacker.isStacking; + +/** + * Helper function that checks if a stacker has an active delegation. + * @param stacker - the stacker's state at a given moment in time. + * @returns boolean. + */ +export const isDelegating = (stacker: Stacker): boolean => + stacker.hasDelegated; + +/** + * Helper function that checks if the stacker is stacking using solo + * stacking methods. + * @param stacker - the stacker's state at a given moment in time. + * @returns boolean. + */ +export const isStackingSolo = (stacker: Stacker): boolean => + stacker.isStackingSolo; + +/** + * Helper function that checks if the stacker has locked uSTX. + * @param stacker - the stacker's state at a given moment in time. + * @returns boolean. + */ +export const isAmountLockedPositive = (stacker: Stacker): boolean => + stacker.amountLocked > 0; + +/** + * Helper function that checks if an operator has locked uSTX on + * behalf of at least one stacker. + * @param operator - the operator's state at a given moment in time. + * @returns boolean. + */ +export const hasLockedStackers = (operator: Stacker): boolean => + operator.lockedAddresses.length > 0; + +/** + * Helper function that checks if an operator has uSTX that was not + * yet committed. + * @param operator - the operator's state at a given moment in time. + * @returns boolean. + * + * NOTE: ATC is an abbreviation for "amount to commit". + */ +export const isATCPositive = (operator: Stacker): boolean => + operator.amountToCommit > 0; + +/** + * Helper function that checks if an operator's not committed uSTX + * amount is above the minimum stacking threshold. + * @param operator - the operator's state at a given moment in time. + * @param model - the model at a given moment in time. + * @returns boolean. + * + * NOTE: ATC is an abbreviation for "amount to commit". + */ export const isATCAboveThreshold = ( + operator: Stacker, + model: Readonly, +): boolean => operator.amountToCommit >= model.stackingMinimum; + +/** + * Helper function that checks if a uSTX amount fits within a stacker's + * delegation limit. + * @param stacker - the stacker's state at a given moment in time. + * @param amountToCheck - the uSTX amount to check. + * @returns boolean. + */ +export const isAmountWithinDelegationLimit = ( + stacker: Stacker, + amountToCheck: bigint | number, +): boolean => stacker.delegatedMaxAmount >= Number(amountToCheck); + +/** + * Helper function that checks if a given unlock burn height is within + * a stacker's delegation limit. + * @param stacker - the stacker's state at a given moment in time. + * @param unlockBurnHt - the verified unlock burn height. + * @returns boolean. + * + * NOTE: UBH is an abbreviation for "unlock burn height". + */ +export const isUBHWithinDelegationLimit = ( + stacker: Stacker, + unlockBurnHt: number, +): boolean => + stacker.delegatedUntilBurnHt === undefined || + unlockBurnHt <= stacker.delegatedUntilBurnHt; + +/** + * Helper function that checks if a given amount is within a stacker's + * unlocked uSTX balance. + * @param stacker - the stacker's state at a given moment in time. + * @param amountToCheck - the amount to check. + * @returns boolean. + */ +export const isAmountWithinBalance = ( + stacker: Stacker, + amountToCheck: bigint | number, +): boolean => stacker.ustxBalance >= Number(amountToCheck); + +/** + * Helper function that checks if a given amount is above the minimum + * stacking threshold. + * @param model - the model at a given moment in time. + * @param amountToCheck - the amount to check. + * @returns boolean. + */ +export const isAmountAboveThreshold = ( + model: Readonly, + amountToCheck: bigint | number, +): boolean => Number(amountToCheck) >= model.stackingMinimum; + +/** + * Helper function that checks if an operator has at least one pool + * participant. + * @param operator - the operator's state at a given moment in time. + * @returns boolean. + */ +export const hasPoolMembers = (operator: Stacker): boolean => + operator.poolMembers.length > 0; + +/** + * Helper function that checks if a stacker is a pool member of a + * given operator. + * @param operator - the operator's state at a given moment in time. + * @param stacker - the stacker's state at a given moment in time. + * @returns boolean + */ +export const isStackerInOperatorPool = ( + operator: Stacker, + stacker: Wallet, +): boolean => operator.poolMembers.includes(stacker.stxAddress); + +/** + * Helper function that checks if a given stacker's funds are locked + * by a given operator. + * @param stacker - the stacker's state at a given moment in time. + * @param operator - the operator's state at a given moment in time. + * @returns boolean. + */ +export const isStackerLockedByOperator = ( + operator: Stacker, + stacker: Wallet, +): boolean => + operator.lockedAddresses.includes( + stacker.stxAddress, + ); + +/** + * Helper function that checks if a given stacker's unlock height is + * within the current reward cycle. + * @param stacker - the stacker's state at a given moment in time. + * @param model - the model at a given moment in time. + * @returns boolean. + * + * NOTE: RC is an abbreviation for "reward cycle". + */ +export const isUnlockedWithinCurrentRC = ( + stackerWallet: Stacker, + model: Readonly, +): boolean => (stackerWallet.unlockHeight <= + model.burnBlockHeight + REWARD_CYCLE_LENGTH); + +/** + * Helper function that checks if the increase amount is within a given + * stacker's unlocked balance. + * @param stacker - the stacker's state at a given moment in time. + * @param increaseBy - the increase amount to check. + * @returns boolean. + */ +export const isIncreaseByWithinUnlockedBalance = ( + stacker: Stacker, + increaseBy: number, +): boolean => increaseBy <= stacker.amountUnlocked; + +/** + * Helper function that checks if the increase amount is greater than zero. + * @param increaseBy - the increase amount to check. + * @returns boolean. + */ +export const isIncreaseByGTZero = (increaseBy: number): boolean => + increaseBy >= 1; + +/** + * Helper function that checks if the increase amount does not exceed the + * PoX-4 maximum lock period. + * @param period - the period to check. + * @returns boolean. + */ +export const isPeriodWithinMax = (period: number) => period <= 12; + +/** + * Helper function that checks if a given stacker is currently delegating + * to a given operator. + * @param stacker - the stacker's state at a given moment in time. + * @param operator - the operator's state at a given moment in time. + * @returns boolean. + */ +export const isStackerDelegatingToOperator = ( + stacker: Stacker, + operator: Wallet, +): boolean => stacker.delegatedTo === operator.stxAddress; + +/** + * Helper function that checks if a given increase amount is greater than + * zero. + * @param increaseAmount - the increase amount to check + * @returns boolean. + */ +export const isIncreaseAmountGTZero = (increaseAmount: number): boolean => + increaseAmount > 0; + +/** + * Helper function that checks if a given stacker's has issued an allowance + * to a potential contract caller. + * @param stacker - the stacker's state at a given moment in time. + * @param potentialAllowedStacker - the potential contract caller's state. + * @returns boolean. + */ +export const isAllowedContractCaller = ( + stacker: Stacker, + potentialAllowedStacker: Wallet, +): boolean => + stacker.allowedContractCallers.includes( + potentialAllowedStacker.stxAddress, + ); + +/** + * Helper function that checks if a given contract caller has been allowed by + * a given stacker. + * @param stacker - the stacker's state at a given moment in time. + * @param caller - the contract caller's state. + * @returns boolean. + */ +export const isCallerAllowedByStacker = ( + stacker: Wallet, + caller: Stacker, +): boolean => caller.callerAllowedBy.includes(stacker.stxAddress); + +export const isPositive = (value: number): boolean => value >= 0; \ No newline at end of file diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts index 926c923135..ba9679e639 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedAuthCommand.ts @@ -1,4 +1,6 @@ import { + hasLockedStackers, + isATCAboveThreshold, logCommand, PoxCommand, Real, @@ -55,8 +57,8 @@ export class StackAggregationCommitIndexedAuthCommand implements PoxCommand { const operator = model.stackers.get(this.operator.stxAddress)!; return ( - operator.lockedAddresses.length > 0 && - operator.amountToCommit >= model.stackingMinimum + hasLockedStackers(operator) && + isATCAboveThreshold(operator, model) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts index 712706d156..beb91ea87f 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitIndexedSigCommand.ts @@ -1,4 +1,6 @@ import { + hasLockedStackers, + isATCAboveThreshold, logCommand, PoxCommand, Real, @@ -33,7 +35,7 @@ export class StackAggregationCommitIndexedSigCommand implements PoxCommand { readonly authId: number; /** - * Constructs a `StackAggregationCommitIndexedSigCommand` to commit partially + * Constructs a `StackAggregationCommitIndexedSigCommand` to commit partially * locked uSTX. * * @param operator - Represents the `Operator`'s wallet. @@ -55,8 +57,8 @@ export class StackAggregationCommitIndexedSigCommand implements PoxCommand { const operator = model.stackers.get(this.operator.stxAddress)!; return ( - operator.lockedAddresses.length > 0 && - operator.amountToCommit >= model.stackingMinimum + hasLockedStackers(operator) && + isATCAboveThreshold(operator, model) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts index cda1d9cd96..9e6bfd1bde 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitSigCommand.ts @@ -1,4 +1,6 @@ import { + hasLockedStackers, + isATCAboveThreshold, logCommand, PoxCommand, Real, @@ -31,7 +33,7 @@ export class StackAggregationCommitSigCommand implements PoxCommand { readonly authId: number; /** - * Constructs a `StackAggregationCommitSigCommand` to commit partially + * Constructs a `StackAggregationCommitSigCommand` to commit partially * locked uSTX. * * @param operator - Represents the `Operator`'s wallet. @@ -52,8 +54,10 @@ export class StackAggregationCommitSigCommand implements PoxCommand { // stackers has to be greater than the uSTX threshold. const operator = model.stackers.get(this.operator.stxAddress)!; - return operator.lockedAddresses.length > 0 && - operator.amountToCommit >= model.stackingMinimum; + return ( + hasLockedStackers(operator) && + isATCAboveThreshold(operator, model) + ); } run(model: Stub, real: Real): void { diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand.ts index 22ae0a0bea..80e1950abb 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationIncreaseCommand.ts @@ -1,4 +1,7 @@ import { + hasLockedStackers, + isATCPositive, + isPositive, logCommand, PoxCommand, Real, @@ -56,9 +59,9 @@ export class StackAggregationIncreaseCommand implements PoxCommand { // - The Reward Cycle Index must be positive. const operator = model.stackers.get(this.operator.stxAddress)!; return ( - operator.lockedAddresses.length > 0 && - this.rewardCycleIndex >= 0 && - operator.amountToCommit > 0 + hasLockedStackers(operator) && + isPositive(this.rewardCycleIndex) && + isATCPositive(operator) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts index ffcae01512..203bef86b9 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendAuthCommand.ts @@ -1,5 +1,18 @@ import { poxAddressToTuple } from "@stacks/stacking"; -import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { + hasPoolMembers, + isAmountLockedPositive, + isPeriodWithinMax, + isDelegating, + isStacking, + isStackingSolo, + isStackingMinimumCalculated, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel"; import { currentCycle, FIRST_BURNCHAIN_BLOCK_HEIGHT, @@ -63,13 +76,13 @@ export class StackExtendAuthCommand implements PoxCommand { const totalPeriod = lastExtendCycle - firstRewardCycle + 1; return ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - stacker.poolMembers.length === 0 && - totalPeriod <= 12 + isStackingMinimumCalculated(model) && + isStacking(stacker) && + isStackingSolo(stacker) && + !isDelegating(stacker) && + isAmountLockedPositive(stacker) && + !hasPoolMembers(stacker) && + isPeriodWithinMax(totalPeriod) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts index 4ac87cd853..b937b61207 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackExtendSigCommand.ts @@ -1,5 +1,18 @@ import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; -import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { + hasPoolMembers, + isAmountLockedPositive, + isPeriodWithinMax, + isDelegating, + isStacking, + isStackingSolo, + isStackingMinimumCalculated, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel"; import { currentCycle, FIRST_BURNCHAIN_BLOCK_HEIGHT, @@ -62,13 +75,13 @@ export class StackExtendSigCommand implements PoxCommand { const totalPeriod = lastExtendCycle - firstRewardCycle + 1; return ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - stacker.poolMembers.length === 0 && - totalPeriod <= 12 + isStackingMinimumCalculated(model) && + isStacking(stacker) && + isStackingSolo(stacker) && + !isDelegating(stacker) && + isAmountLockedPositive(stacker) && + !hasPoolMembers(stacker) && + isPeriodWithinMax(totalPeriod) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts index cd802b1b88..d819a82215 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseAuthCommand.ts @@ -1,5 +1,18 @@ import { Pox4SignatureTopic, poxAddressToTuple } from "@stacks/stacking"; -import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { + isAmountLockedPositive, + isIncreaseAmountGTZero, + isIncreaseByWithinUnlockedBalance, + isDelegating, + isStacking, + isStackingSolo, + isStackingMinimumCalculated, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel"; import { currentCycle } from "./pox_Commands"; import { Cl, cvToJSON } from "@stacks/transactions"; import { expect } from "vitest"; @@ -47,13 +60,13 @@ export class StackIncreaseAuthCommand implements PoxCommand { const stacker = model.stackers.get(this.wallet.stxAddress)!; return ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - this.increaseBy <= stacker.amountUnlocked && - this.increaseBy >= 1 + isStackingMinimumCalculated(model) && + isStacking(stacker) && + isStackingSolo(stacker) && + !isDelegating(stacker) && + isAmountLockedPositive(stacker) && + isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) && + isIncreaseAmountGTZero(this.increaseBy) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts index 1bd50691b6..899be8900e 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackIncreaseSigCommand.ts @@ -1,5 +1,18 @@ import { Pox4SignatureTopic } from "@stacks/stacking"; -import { logCommand, PoxCommand, Real, Stub, Wallet } from "./pox_CommandModel"; +import { + isAmountLockedPositive, + isIncreaseAmountGTZero, + isIncreaseByWithinUnlockedBalance, + isDelegating, + isStacking, + isStackingSolo, + isStackingMinimumCalculated, + logCommand, + PoxCommand, + Real, + Stub, + Wallet, +} from "./pox_CommandModel"; import { Cl, ClarityType, @@ -53,13 +66,13 @@ export class StackIncreaseSigCommand implements PoxCommand { const stacker = model.stackers.get(this.wallet.stxAddress)!; return ( - model.stackingMinimum > 0 && - stacker.isStacking && - stacker.isStackingSolo && - !stacker.hasDelegated && - stacker.amountLocked > 0 && - this.increaseBy <= stacker.amountUnlocked && - this.increaseBy >= 1 + isStackingMinimumCalculated(model) && + isStacking(stacker) && + isStackingSolo(stacker) && + !isDelegating(stacker) && + isAmountLockedPositive(stacker) && + isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) && + isIncreaseAmountGTZero(this.increaseBy) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts index 53f34ca0bb..de3bc96964 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxAuthCommand.ts @@ -1,4 +1,7 @@ import { + isDelegating, + isStacking, + isStackingMinimumCalculated, logCommand, PoxCommand, Real, @@ -67,7 +70,9 @@ export class StackStxAuthCommand implements PoxCommand { const stacker = model.stackers.get(this.wallet.stxAddress)!; return ( - model.stackingMinimum > 0 && !stacker.isStacking && !stacker.hasDelegated + isStackingMinimumCalculated(model) && + !isStacking(stacker) && + !isDelegating(stacker) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts index 100d84a6e0..d397297037 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackStxSigCommand.ts @@ -1,4 +1,7 @@ import { + isDelegating, + isStacking, + isStackingMinimumCalculated, logCommand, PoxCommand, Real, @@ -66,7 +69,9 @@ export class StackStxSigCommand implements PoxCommand { const stacker = model.stackers.get(this.wallet.stxAddress)!; return ( - model.stackingMinimum > 0 && !stacker.isStacking && !stacker.hasDelegated + isStackingMinimumCalculated(model) && + !isStacking(stacker) && + !isDelegating(stacker) ); } From dbaf000e9008cf1aa78e1a148a4b5021fbb6287d Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 21 Jun 2024 02:16:02 +0300 Subject: [PATCH 0351/1400] Update `err_Commands` check functions to use suggestive helpers --- .../tests/pox-4/err_Commands.ts | 627 +++++++++--------- 1 file changed, 318 insertions(+), 309 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts index cbf657fd96..7e7a4f0e95 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/err_Commands.ts @@ -1,6 +1,34 @@ import fc from "fast-check"; import { Simnet } from "@hirosystems/clarinet-sdk"; -import { PoxCommand, Stacker, StxAddress, Wallet } from "./pox_CommandModel"; +import { + hasLockedStackers, + hasPoolMembers, + isAllowedContractCaller, + isAmountAboveThreshold, + isAmountLockedPositive, + isAmountWithinBalance, + isAmountWithinDelegationLimit, + isATCAboveThreshold, + isATCPositive, + isCallerAllowedByStacker, + isIncreaseByGTZero, + isIncreaseByWithinUnlockedBalance, + isPeriodWithinMax, + isStackerDelegatingToOperator, + isDelegating, + isStacking, + isStackingSolo, + isStackingMinimumCalculated, + isUBHWithinDelegationLimit, + isUnlockedWithinCurrentRC, + isStackerInOperatorPool, + isStackerLockedByOperator, + PoxCommand, + Stacker, + StxAddress, + Wallet, + isPositive, +} from "./pox_CommandModel"; import { currentCycle, currentCycleFirstBlock, @@ -70,9 +98,9 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - stacker.hasDelegated + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isDelegating(stacker) ) return false; model.trackCommandRun( @@ -105,9 +133,9 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - !stacker.hasDelegated + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isDelegating(stacker) ) return false; model.trackCommandRun( @@ -140,9 +168,9 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - stacker.isStacking || - !stacker.hasDelegated + !isStackingMinimumCalculated(model) || + isStacking(stacker) || + !isDelegating(stacker) ) return false; model.trackCommandRun( @@ -175,9 +203,9 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - stacker.hasDelegated + !(isStackingMinimumCalculated(model)) || + !isStacking(stacker) || + isDelegating(stacker) ) return false; model.trackCommandRun( @@ -210,9 +238,9 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - !stacker.hasDelegated + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isDelegating(stacker) ) return false; model.trackCommandRun( @@ -245,9 +273,9 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - stacker.isStacking || - !stacker.hasDelegated + !isStackingMinimumCalculated(model) || + isStacking(stacker) || + !isDelegating(stacker) ) return false; model.trackCommandRun( @@ -271,8 +299,8 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - stacker.hasDelegated + !isStackingMinimumCalculated(model) || + isDelegating(stacker) ) return false; model.trackCommandRun( @@ -306,8 +334,8 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - !stacker.hasDelegated + !isStackingMinimumCalculated(model) || + !isDelegating(stacker) ) return false; model.trackCommandRun( @@ -331,9 +359,9 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) || - operator.amountToCommit >= model.stackingMinimum || - !(operator.amountToCommit > 0) + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + !isATCPositive(operator) ) return false; model.trackCommandRun( @@ -357,9 +385,9 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) || - (operator.amountToCommit >= model.stackingMinimum) || - operator.amountToCommit !== 0 + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + isATCPositive(operator) ) return false; model.trackCommandRun( @@ -383,8 +411,8 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 || - operator.amountToCommit >= model.stackingMinimum + hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) ) return false; model.trackCommandRun( @@ -408,9 +436,9 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) || - operator.amountToCommit >= model.stackingMinimum || - !(operator.amountToCommit > 0) + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + !isATCPositive(operator) ) return false; model.trackCommandRun( @@ -434,9 +462,9 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) || - operator.amountToCommit >= model.stackingMinimum || - !(operator.amountToCommit === 0) + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + isATCPositive(operator) ) return false; model.trackCommandRun( @@ -460,8 +488,8 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 || - operator.amountToCommit >= model.stackingMinimum + hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) ) return false; model.trackCommandRun( @@ -485,9 +513,9 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) || - operator.amountToCommit >= model.stackingMinimum || - !(operator.amountToCommit > 0) + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + !isATCPositive(operator) ) return false; model.trackCommandRun( @@ -511,9 +539,9 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) || - operator.amountToCommit >= model.stackingMinimum || - operator.amountToCommit > 0 + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + isATCPositive(operator) ) return false; model.trackCommandRun( @@ -537,8 +565,8 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 || - operator.amountToCommit >= model.stackingMinimum + hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) ) return false; model.trackCommandRun( @@ -562,9 +590,9 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) || - operator.amountToCommit >= model.stackingMinimum || - operator.amountToCommit > 0 + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + isATCPositive(operator) ) return false; model.trackCommandRun( @@ -588,8 +616,8 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - operator.lockedAddresses.length > 0 || - operator.amountToCommit >= model.stackingMinimum + hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) ) return false; model.trackCommandRun( @@ -613,9 +641,9 @@ export function ErrCommands( const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) || - operator.amountToCommit >= model.stackingMinimum || - !(operator.amountToCommit > 0) + !hasLockedStackers(operator) || + isATCAboveThreshold(operator, model) || + !isATCPositive(operator) ) return false; model.trackCommandRun( @@ -652,9 +680,9 @@ export function ErrCommands( function (this, model) { const operator = model.stackers.get(this.operator.stxAddress)!; if ( - !(operator.lockedAddresses.length > 0) || - !(this.rewardCycleIndex >= 0) || - operator.amountToCommit > 0 + !hasLockedStackers(operator) || + !isPositive(this.rewardCycleIndex) || + isATCPositive(operator) ) return false; model.trackCommandRun( @@ -717,15 +745,14 @@ export function ErrCommands( const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - stackerWallet.isStacking || - !(stackerWallet.hasDelegated) || - stackerWallet.delegatedMaxAmount >= Number(this.amountUstx) || - !(Number(this.amountUstx) <= stackerWallet.ustxBalance) || - !(Number(this.amountUstx) >= model.stackingMinimum) || - !(operatorWallet.poolMembers.includes(this.stacker.stxAddress)) || - !(stackerWallet.delegatedUntilBurnHt === undefined || - this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) + !isStackingMinimumCalculated(model) || + isStacking(stackerWallet) || + !isDelegating(stackerWallet) || + isAmountWithinDelegationLimit(stackerWallet, this.amountUstx) || + !isAmountWithinBalance(stackerWallet, this.amountUstx) || + !isAmountAboveThreshold(model, this.amountUstx) || + !isStackerInOperatorPool(operatorWallet, this.stacker) || + !isUBHWithinDelegationLimit(stackerWallet, this.unlockBurnHt) ) return false; model.trackCommandRun( @@ -791,15 +818,14 @@ export function ErrCommands( const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - stackerWallet.isStacking || - !stackerWallet.hasDelegated || - !(stackerWallet.delegatedMaxAmount >= Number(this.amountUstx)) || - !(Number(this.amountUstx) <= stackerWallet.ustxBalance) || - !(Number(this.amountUstx) >= model.stackingMinimum) || - operatorWallet.poolMembers.includes(this.stacker.stxAddress) || - !(stackerWallet.delegatedUntilBurnHt === undefined || - this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) + !isStackingMinimumCalculated(model) || + isStacking(stackerWallet) || + !isDelegating(stackerWallet) || + !isAmountWithinDelegationLimit(stackerWallet, this.amountUstx) || + !isAmountWithinBalance(stackerWallet, this.amountUstx) || + !isAmountAboveThreshold(model, this.amountUstx) || + isStackerInOperatorPool(operatorWallet, this.stacker) || + !isUBHWithinDelegationLimit(stackerWallet, this.unlockBurnHt) ) return false; model.trackCommandRun( @@ -862,15 +888,14 @@ export function ErrCommands( const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - stackerWallet.isStacking || - stackerWallet.hasDelegated || - stackerWallet.delegatedMaxAmount >= Number(this.amountUstx) || - !(Number(this.amountUstx) <= stackerWallet.ustxBalance) || - !(Number(this.amountUstx) >= model.stackingMinimum) || - operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - (stackerWallet.delegatedUntilBurnHt === undefined || - this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) + !isStackingMinimumCalculated(model) || + isStacking(stackerWallet) || + isDelegating(stackerWallet) || + isAmountWithinDelegationLimit(stackerWallet, this.amountUstx) || + !isAmountWithinBalance(stackerWallet, this.amountUstx) || + !isAmountAboveThreshold(model, this.amountUstx) || + isStackerInOperatorPool(operatorWallet, this.stacker) || + isUBHWithinDelegationLimit(stackerWallet, this.unlockBurnHt) ) return false; model.trackCommandRun( @@ -895,13 +920,13 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - stacker.isStackingSolo || - stacker.hasDelegated || - !(stacker.amountLocked > 0) || - !(this.increaseBy <= stacker.amountUnlocked) || - !(this.increaseBy >= 1) + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + !isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) || + !isIncreaseByGTZero(this.increaseBy) ) return false; model.trackCommandRun( @@ -926,13 +951,13 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - !stacker.isStackingSolo || - stacker.hasDelegated || - !(stacker.amountLocked > 0) || - this.increaseBy <= stacker.amountUnlocked || - !(this.increaseBy >= 1) + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) || + !isIncreaseByGTZero(this.increaseBy) ) return false; model.trackCommandRun( @@ -957,13 +982,13 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - !stacker.isStackingSolo || - stacker.hasDelegated || - !(stacker.amountLocked > 0) || - !(this.increaseBy <= stacker.amountUnlocked) || - this.increaseBy >= 1 + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + !isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) || + isIncreaseByGTZero(this.increaseBy) ) return false; model.trackCommandRun( @@ -988,13 +1013,13 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - stacker.isStackingSolo || - stacker.hasDelegated || - !(stacker.amountLocked > 0) || - !(this.increaseBy <= stacker.amountUnlocked) || - !(this.increaseBy >= 1) + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + !isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) || + !isIncreaseByGTZero(this.increaseBy) ) return false; model.trackCommandRun( @@ -1019,13 +1044,13 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - !stacker.isStackingSolo || - stacker.hasDelegated || - !(stacker.amountLocked > 0) || - this.increaseBy <= stacker.amountUnlocked || - !(this.increaseBy >= 1) + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) || + !isIncreaseByGTZero(this.increaseBy) ) return false; model.trackCommandRun( @@ -1050,13 +1075,13 @@ export function ErrCommands( function (this, model) { const stacker = model.stackers.get(this.wallet.stxAddress)!; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - !stacker.isStackingSolo || - stacker.hasDelegated || - !(stacker.amountLocked > 0) || - !(this.increaseBy <= stacker.amountUnlocked) || - this.increaseBy >= 1 + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + !isIncreaseByWithinUnlockedBalance(stacker, this.increaseBy) || + isIncreaseByGTZero(this.increaseBy) ) return false; model.trackCommandRun( @@ -1099,13 +1124,13 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - stacker.isStackingSolo || - stacker.hasDelegated || - !(stacker.amountLocked > 0) || - !(stacker.poolMembers.length === 0) || - !(totalPeriod <= 12) + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + !isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1148,13 +1173,13 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - stacker.isStackingSolo || - stacker.hasDelegated || - !(stacker.amountLocked > 0) || - stacker.poolMembers.length === 0 || - !(totalPeriod <= 12) + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + !hasPoolMembers(stacker) || + !isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1197,13 +1222,13 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - !stacker.isStackingSolo || - !stacker.hasDelegated || - !(stacker.amountLocked > 0) || - !(stacker.poolMembers.length === 0) || - !(totalPeriod <= 12) + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + !isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + !isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1246,13 +1271,13 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - !stacker.isStackingSolo || - stacker.hasDelegated || - !(stacker.amountLocked > 0) || - !(stacker.poolMembers.length === 0) || - totalPeriod <= 12 + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1295,13 +1320,13 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - !(model.stackingMinimum > 0) || - stacker.isStacking || - stacker.isStackingSolo || - stacker.hasDelegated || - stacker.amountLocked > 0 || - !(stacker.poolMembers.length === 0) || - !(totalPeriod <= 12) + !isStackingMinimumCalculated(model) || + isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + !isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1344,13 +1369,13 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - stacker.isStackingSolo || - stacker.hasDelegated || - !(stacker.amountLocked > 0) || - !(stacker.poolMembers.length === 0) || - !(totalPeriod <= 12) + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + !isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1393,13 +1418,13 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - stacker.isStackingSolo || - stacker.hasDelegated || - !(stacker.amountLocked > 0) || - stacker.poolMembers.length === 0 || - !(totalPeriod <= 12) + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + !hasPoolMembers(stacker) || + !isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1442,13 +1467,13 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - !stacker.isStackingSolo || - !stacker.hasDelegated || - !(stacker.amountLocked > 0) || - !(stacker.poolMembers.length === 0) || - !(totalPeriod <= 12) + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + !isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + !isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1491,13 +1516,13 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - !(model.stackingMinimum > 0) || - !stacker.isStacking || - !stacker.isStackingSolo || - stacker.hasDelegated || - !(stacker.amountLocked > 0) || - !(stacker.poolMembers.length === 0) || - totalPeriod <= 12 + !isStackingMinimumCalculated(model) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isDelegating(stacker) || + !isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1540,13 +1565,13 @@ export function ErrCommands( const lastExtendCycle = firstExtendCycle + this.extendCount - 1; const totalPeriod = lastExtendCycle - firstRewardCycle + 1; if ( - !(model.stackingMinimum > 0) || - stacker.isStacking || - stacker.isStackingSolo || - stacker.hasDelegated || - stacker.amountLocked > 0 || - !(stacker.poolMembers.length === 0) || - !(totalPeriod <= 12) + !isStackingMinimumCalculated(model) || + isStacking(stacker) || + isStackingSolo(stacker) || + isDelegating(stacker) || + isAmountLockedPositive(stacker) || + hasLockedStackers(stacker) || + !isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1612,18 +1637,15 @@ export function ErrCommands( const stackedAmount = stacker.amountLocked; if ( - !(stacker.amountLocked > 0) || - !stacker.hasDelegated || - !stacker.isStacking || - !(stacker.delegatedTo === this.operator.stxAddress) || - (stacker.delegatedUntilBurnHt === undefined || - stacker.delegatedUntilBurnHt >= newUnlockHeight) || - !(stacker.delegatedMaxAmount >= stackedAmount) || - !(operator.poolMembers.includes(this.stacker.stxAddress)) || - !operator.lockedAddresses.includes( - this.stacker.stxAddress, - ) || - totalPeriod <= 12 + !isAmountLockedPositive(stacker) || + !isDelegating(stacker) || + !isStacking(stacker) || + !isStackerDelegatingToOperator(stacker, this.operator) || + isUBHWithinDelegationLimit(stacker, newUnlockHeight) || + !isAmountWithinDelegationLimit(stacker, stackedAmount) || + !isStackerInOperatorPool(operator, this.stacker) || + !isStackerLockedByOperator(operator, this.stacker) || + isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1691,19 +1713,16 @@ export function ErrCommands( const stackedAmount = stacker.amountLocked; if ( - !(stacker.amountLocked > 0) || - stacker.hasDelegated || - !stacker.isStacking || - !stacker.isStackingSolo || - stacker.delegatedTo === this.operator.stxAddress || - (stacker.delegatedUntilBurnHt === undefined || - stacker.delegatedUntilBurnHt >= newUnlockHeight) || - stacker.delegatedMaxAmount >= stackedAmount || - operator.poolMembers.includes(this.stacker.stxAddress) || - operator.lockedAddresses.includes( - this.stacker.stxAddress, - ) || - !(totalPeriod <= 12) + !isAmountLockedPositive(stacker) || + isDelegating(stacker) || + !isStacking(stacker) || + !isStackingSolo(stacker) || + isStackerDelegatingToOperator(stacker, this.operator) || + isUBHWithinDelegationLimit(stacker, newUnlockHeight) || + isAmountWithinDelegationLimit(stacker, stackedAmount) || + isStackerInOperatorPool(operator, this.stacker) || + isStackerLockedByOperator(operator, this.stacker) || + !isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1768,18 +1787,15 @@ export function ErrCommands( FIRST_BURNCHAIN_BLOCK_HEIGHT; const stackedAmount = stacker.amountLocked; if ( - stacker.amountLocked > 0 || - !stacker.hasDelegated || - stacker.isStacking || - stacker.delegatedTo === this.operator.stxAddress || - !(stacker.delegatedUntilBurnHt === undefined || - stacker.delegatedUntilBurnHt >= newUnlockHeight) || - stacker.delegatedMaxAmount >= stackedAmount || - operator.poolMembers.includes(this.stacker.stxAddress) || - operator.lockedAddresses.includes( - this.stacker.stxAddress, - ) || - !(totalPeriod <= 12) + isAmountLockedPositive(stacker) || + !isDelegating(stacker) || + isStacking(stacker) || + isStackerDelegatingToOperator(stacker, this.operator) || + !isUBHWithinDelegationLimit(stacker, newUnlockHeight) || + isAmountWithinDelegationLimit(stacker, stackedAmount) || + isStackerInOperatorPool(operator, this.stacker) || + isStackerLockedByOperator(operator, this.stacker) || + !isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1827,18 +1843,15 @@ export function ErrCommands( const stackedAmount = stacker.amountLocked; if ( - !(stacker.amountLocked > 0) || - stacker.hasDelegated || - !stacker.isStacking || - stacker.delegatedTo === this.operator.stxAddress || - (stacker.delegatedUntilBurnHt === undefined || - stacker.delegatedUntilBurnHt >= newUnlockHeight) || - stacker.delegatedMaxAmount >= stackedAmount || - operator.poolMembers.includes(this.stacker.stxAddress) || - !operator.lockedAddresses.includes( - this.stacker.stxAddress, - ) || - !(totalPeriod <= 12) + !isAmountLockedPositive(stacker) || + isDelegating(stacker) || + !isStacking(stacker) || + isStackerDelegatingToOperator(stacker, this.operator) || + isUBHWithinDelegationLimit(stacker, newUnlockHeight) || + isAmountWithinDelegationLimit(stacker, stackedAmount) || + isStackerInOperatorPool(operator, this.stacker) || + !isStackerLockedByOperator(operator, this.stacker) || + !isPeriodWithinMax(totalPeriod) ) return false; model.trackCommandRun( @@ -1887,21 +1900,21 @@ export function ErrCommands( )!; if ( - !(stackerWallet.amountLocked > 0) || - !(stackerWallet.hasDelegated) || - !stackerWallet.isStacking || - !(this.increaseBy > 0) || - !operatorWallet.poolMembers.includes(this.stacker.stxAddress) || - stackerWallet.amountUnlocked >= this.increaseBy || - ( - stackerWallet.delegatedMaxAmount >= - this.increaseBy + stackerWallet.amountLocked + !isAmountLockedPositive(stackerWallet) || + !isDelegating(stackerWallet) || + !isStacking(stackerWallet) || + !isIncreaseByGTZero(this.increaseBy) || + !isStackerInOperatorPool(operatorWallet, this.stacker) || + isIncreaseByWithinUnlockedBalance( + stackerWallet, + this.increaseBy, ) || - !(operatorWallet.lockedAddresses.indexOf( - this.stacker.stxAddress, - ) > -1) || - !(stackerWallet.unlockHeight > - model.burnBlockHeight + REWARD_CYCLE_LENGTH) + isAmountWithinDelegationLimit( + stackerWallet, + this.increaseBy + stackerWallet.amountLocked, + ) || + !isStackerLockedByOperator(operatorWallet, this.stacker) || + isUnlockedWithinCurrentRC(stackerWallet, model) ) return false; model.trackCommandRun( @@ -1950,19 +1963,21 @@ export function ErrCommands( )!; if ( - !(stackerWallet.amountLocked > 0) || - !stackerWallet.hasDelegated || - !stackerWallet.isStacking || - this.increaseBy > 0 || - !operatorWallet.poolMembers.includes(this.stacker.stxAddress) || - !(stackerWallet.amountUnlocked >= this.increaseBy) || - !(stackerWallet.delegatedMaxAmount >= - this.increaseBy + stackerWallet.amountLocked) || - !(operatorWallet.lockedAddresses.indexOf( - this.stacker.stxAddress, - ) > -1) || - !(stackerWallet.unlockHeight > - model.burnBlockHeight + REWARD_CYCLE_LENGTH) + !isAmountLockedPositive(stackerWallet) || + !isDelegating(stackerWallet) || + !isStacking(stackerWallet) || + isIncreaseByGTZero(this.increaseBy) || + !isStackerInOperatorPool(operatorWallet, this.stacker) || + !isIncreaseByWithinUnlockedBalance( + stackerWallet, + this.increaseBy, + ) || + !isAmountWithinDelegationLimit( + stackerWallet, + this.increaseBy + stackerWallet.amountLocked, + ) || + !isStackerLockedByOperator(operatorWallet, this.stacker) || + isUnlockedWithinCurrentRC(stackerWallet, model) ) return false; model.trackCommandRun( @@ -2011,24 +2026,22 @@ export function ErrCommands( )!; if ( - !(stackerWallet.amountLocked > 0) || - stackerWallet.hasDelegated || - !stackerWallet.isStacking || - !stackerWallet.isStackingSolo || - !(this.increaseBy > 0) || - operatorWallet.poolMembers.includes(this.stacker.stxAddress) || - !(stackerWallet.amountUnlocked >= this.increaseBy) || - ( - stackerWallet.delegatedMaxAmount >= - this.increaseBy + stackerWallet.amountLocked + !isAmountLockedPositive(stackerWallet) || + isDelegating(stackerWallet) || + !isStacking(stackerWallet) || + !isStackingSolo(stackerWallet) || + !isIncreaseByGTZero(this.increaseBy) || + isStackerInOperatorPool(operatorWallet, this.stacker) || + !isIncreaseByWithinUnlockedBalance( + stackerWallet, + this.increaseBy, ) || - ( - operatorWallet.lockedAddresses.indexOf( - this.stacker.stxAddress, - ) > -1 + isAmountWithinDelegationLimit( + stackerWallet, + this.increaseBy + stackerWallet.amountLocked, ) || - !(stackerWallet.unlockHeight > - model.burnBlockHeight + REWARD_CYCLE_LENGTH) + isStackerLockedByOperator(operatorWallet, this.stacker) || + isUnlockedWithinCurrentRC(stackerWallet, model) ) return false; model.trackCommandRun( @@ -2077,21 +2090,21 @@ export function ErrCommands( )!; if ( - !(stackerWallet.amountLocked > 0) || - stackerWallet.hasDelegated || - !stackerWallet.isStacking || - !(this.increaseBy > 0) || - operatorWallet.poolMembers.includes(this.stacker.stxAddress) || - !(stackerWallet.amountUnlocked >= this.increaseBy) || - ( - stackerWallet.delegatedMaxAmount >= - this.increaseBy + stackerWallet.amountLocked + !isAmountLockedPositive(stackerWallet) || + isDelegating(stackerWallet) || + !isStacking(stackerWallet) || + !isIncreaseByGTZero(this.increaseBy) || + isStackerInOperatorPool(operatorWallet, this.stacker) || + !isIncreaseByWithinUnlockedBalance( + stackerWallet, + this.increaseBy, ) || - !(operatorWallet.lockedAddresses.indexOf( - this.stacker.stxAddress, - ) > -1) || - !(stackerWallet.unlockHeight > - model.burnBlockHeight + REWARD_CYCLE_LENGTH) + isAmountWithinDelegationLimit( + stackerWallet, + this.increaseBy + stackerWallet.amountLocked, + ) || + !isStackerLockedByOperator(operatorWallet, this.stacker) || + isUnlockedWithinCurrentRC(stackerWallet, model) ) return false; model.trackCommandRun( @@ -2117,12 +2130,8 @@ export function ErrCommands( this.callerToDisallow.stxAddress, )!; if ( - stacker.allowedContractCallers.includes( - this.callerToDisallow.stxAddress, - ) || - callerToDisallow.callerAllowedBy.includes( - this.stacker.stxAddress, - ) + isAllowedContractCaller(stacker, this.callerToDisallow) || + isCallerAllowedByStacker(this.stacker, callerToDisallow) ) return false; model.trackCommandRun( From 00c07381c09fba8b2e8268e50c0c3404bcac072d Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Fri, 21 Jun 2024 02:16:59 +0300 Subject: [PATCH 0352/1400] Update happy path commands check functions to use suggestive helpers --- .../pox-4/pox_DelegateStackExtendCommand.ts | 28 ++++++++++------ .../pox-4/pox_DelegateStackIncreaseCommand.ts | 32 ++++++++++++------- .../pox-4/pox_DelegateStackStxCommand.ts | 26 +++++++++------ .../tests/pox-4/pox_DelegateStxCommand.ts | 7 ++-- .../pox_DisallowContractCallerCommand.ts | 12 +++---- .../pox-4/pox_RevokeDelegateStxCommand.ts | 13 +++++--- .../pox_StackAggregationCommitAuthCommand.ts | 18 +++++++---- 7 files changed, 84 insertions(+), 52 deletions(-) diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts index 2db12e5f40..a3fe2a5f1a 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackExtendCommand.ts @@ -1,4 +1,13 @@ import { + isAmountLockedPositive, + isAmountWithinDelegationLimit, + isPeriodWithinMax, + isStackerDelegatingToOperator, + isDelegating, + isStacking, + isUBHWithinDelegationLimit, + isStackerInOperatorPool, + isStackerLockedByOperator, logCommand, PoxCommand, Real, @@ -80,16 +89,15 @@ export class DelegateStackExtendCommand implements PoxCommand { const stackedAmount = stackerWallet.amountLocked; return ( - stackerWallet.amountLocked > 0 && - stackerWallet.hasDelegated === true && - stackerWallet.isStacking === true && - stackerWallet.delegatedTo === this.operator.stxAddress && - (stackerWallet.delegatedUntilBurnHt === undefined || - stackerWallet.delegatedUntilBurnHt >= newUnlockHeight) && - stackerWallet.delegatedMaxAmount >= stackedAmount && - operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - operatorWallet.lockedAddresses.includes(this.stacker.stxAddress) && - totalPeriod <= 12 + isAmountLockedPositive(stackerWallet) && + isDelegating(stackerWallet) && + isStacking(stackerWallet) && + isStackerDelegatingToOperator(stackerWallet, this.operator) && + isUBHWithinDelegationLimit(stackerWallet, newUnlockHeight) && + isAmountWithinDelegationLimit(stackerWallet, stackedAmount) && + isStackerInOperatorPool(operatorWallet, this.stacker) && + isStackerLockedByOperator(operatorWallet, this.stacker) && + isPeriodWithinMax(totalPeriod) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts index b78fe187bb..43b6a0473a 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackIncreaseCommand.ts @@ -1,4 +1,13 @@ import { + isAmountLockedPositive, + isAmountWithinDelegationLimit, + isIncreaseAmountGTZero, + isIncreaseByWithinUnlockedBalance, + isStackerDelegatingToOperator, + isDelegating, + isStacking, + isUnlockedWithinCurrentRC, + isStackerLockedByOperator, logCommand, PoxCommand, Real, @@ -8,7 +17,6 @@ import { import { poxAddressToTuple } from "@stacks/stacking"; import { expect } from "vitest"; import { Cl } from "@stacks/transactions"; -import { REWARD_CYCLE_LENGTH } from "./pox_Commands.ts"; /** * The `DelegateStackIncreaseCommand` allows a pool operator to @@ -62,16 +70,18 @@ export class DelegateStackIncreaseCommand implements PoxCommand { const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; return ( - stackerWallet.amountLocked > 0 && - stackerWallet.hasDelegated === true && - stackerWallet.isStacking === true && - this.increaseBy > 0 && - operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - stackerWallet.amountUnlocked >= this.increaseBy && - stackerWallet.delegatedMaxAmount >= - this.increaseBy + stackerWallet.amountLocked && - operatorWallet.lockedAddresses.indexOf(this.stacker.stxAddress) > -1 && - stackerWallet.unlockHeight > model.burnBlockHeight + REWARD_CYCLE_LENGTH + isAmountLockedPositive(stackerWallet) && + isDelegating(stackerWallet) && + isStacking(stackerWallet) && + isIncreaseAmountGTZero(this.increaseBy) && + isStackerDelegatingToOperator(stackerWallet, this.operator) && + isIncreaseByWithinUnlockedBalance(stackerWallet, this.increaseBy) && + isAmountWithinDelegationLimit( + stackerWallet, + this.increaseBy + stackerWallet.amountLocked, + ) && + isStackerLockedByOperator(operatorWallet, this.stacker) && + isUnlockedWithinCurrentRC(stackerWallet, model) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts index c284975ae0..70f56fc191 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStackStxCommand.ts @@ -1,4 +1,12 @@ import { + isAmountAboveThreshold, + isAmountWithinBalance, + isAmountWithinDelegationLimit, + isStackerDelegatingToOperator, + isDelegating, + isStacking, + isStackingMinimumCalculated, + isUBHWithinDelegationLimit, logCommand, PoxCommand, Real, @@ -83,19 +91,17 @@ export class DelegateStackStxCommand implements PoxCommand { // - The Operator has to currently be delegated by the Stacker. // - The Period has to fit the last delegation burn block height. - const operatorWallet = model.stackers.get(this.operator.stxAddress)!; const stackerWallet = model.stackers.get(this.stacker.stxAddress)!; return ( - model.stackingMinimum > 0 && - !stackerWallet.isStacking && - stackerWallet.hasDelegated && - stackerWallet.delegatedMaxAmount >= Number(this.amountUstx) && - Number(this.amountUstx) <= stackerWallet.ustxBalance && - Number(this.amountUstx) >= model.stackingMinimum && - operatorWallet.poolMembers.includes(this.stacker.stxAddress) && - (stackerWallet.delegatedUntilBurnHt === undefined || - this.unlockBurnHt <= stackerWallet.delegatedUntilBurnHt) + isStackingMinimumCalculated(model) && + !isStacking(stackerWallet) && + isDelegating(stackerWallet) && + isAmountWithinDelegationLimit(stackerWallet, this.amountUstx) && + isAmountWithinBalance(stackerWallet, this.amountUstx) && + isAmountAboveThreshold(model, this.amountUstx) && + isStackerDelegatingToOperator(stackerWallet, this.operator) && + isUBHWithinDelegationLimit(stackerWallet, this.unlockBurnHt) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts index 836b7d5162..cd14c39ce3 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DelegateStxCommand.ts @@ -1,4 +1,6 @@ import { + isDelegating, + isStackingMinimumCalculated, logCommand, PoxCommand, Real, @@ -55,10 +57,11 @@ export class DelegateStxCommand implements PoxCommand { check(model: Readonly): boolean { // Constraints for running this command include: // - The Stacker cannot currently be a delegator in another delegation. + const stackerWallet = model.stackers.get(this.wallet.stxAddress)!; return ( - model.stackingMinimum > 0 && - !model.stackers.get(this.wallet.stxAddress)?.hasDelegated + isStackingMinimumCalculated(model) && + !isDelegating(stackerWallet) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts index 16b830b5fb..6108a5973f 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_DisallowContractCallerCommand.ts @@ -1,4 +1,6 @@ import { + isAllowedContractCaller, + isCallerAllowedByStacker, logCommand, PoxCommand, Real, @@ -41,14 +43,10 @@ export class DisallowContractCallerCommand implements PoxCommand { const callerToDisallow = model.stackers.get( this.callerToDisallow.stxAddress, )!; + return ( - stacker.allowedContractCallers.includes( - this.callerToDisallow.stxAddress, - ) && - callerToDisallow.callerAllowedBy.includes( - this.stacker.stxAddress, - ) === - true + isAllowedContractCaller(stacker, this.callerToDisallow) && + isCallerAllowedByStacker(this.stacker, callerToDisallow) ); } diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts index 2c3593f27d..98e2349a1f 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_RevokeDelegateStxCommand.ts @@ -1,4 +1,7 @@ import { + isDelegating, + isStackingMinimumCalculated, + isUBHWithinDelegationLimit, logCommand, PoxCommand, Real, @@ -34,11 +37,11 @@ export class RevokeDelegateStxCommand implements PoxCommand { // - The Stacker has to currently be delegating. // - The Stacker's delegation must not be expired. const stacker = model.stackers.get(this.wallet.stxAddress)!; + return ( - model.stackingMinimum > 0 && - stacker.hasDelegated === true && - (stacker.delegatedUntilBurnHt === undefined || - stacker.delegatedUntilBurnHt > model.burnBlockHeight) + isStackingMinimumCalculated(model) && + isDelegating(stacker) && + isUBHWithinDelegationLimit(stacker, model.burnBlockHeight) ); } @@ -80,7 +83,7 @@ export class RevokeDelegateStxCommand implements PoxCommand { // Update model so that we know this wallet is not delegating anymore. // This is important in order to prevent the test from revoking the // delegation multiple times with the same address. - // We update delegatedUntilBurnHt to 0, and not undefined. Undefined + // We update delegatedUntilBurnHt to 0, and not undefined. Undefined // stands for indefinite delegation. wallet.hasDelegated = false; wallet.delegatedTo = ""; diff --git a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts index 999aa2f5b0..7145c673d4 100644 --- a/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts +++ b/contrib/boot-contracts-stateful-prop-tests/tests/pox-4/pox_StackAggregationCommitAuthCommand.ts @@ -1,4 +1,6 @@ import { + hasLockedStackers, + isATCAboveThreshold, logCommand, PoxCommand, Real, @@ -31,7 +33,7 @@ export class StackAggregationCommitAuthCommand implements PoxCommand { readonly authId: number; /** - * Constructs a `StackAggregationCommitAuthCommand` to commit partially + * Constructs a `StackAggregationCommitAuthCommand` to commit partially * locked uSTX. * * @param operator - Represents the `Operator`'s wallet. @@ -52,8 +54,10 @@ export class StackAggregationCommitAuthCommand implements PoxCommand { // stackers has to be greater than the uSTX threshold. const operator = model.stackers.get(this.operator.stxAddress)!; - return operator.lockedAddresses.length > 0 && - operator.amountToCommit >= model.stackingMinimum; + return ( + hasLockedStackers(operator) && + isATCAboveThreshold(operator, model) + ); } run(model: Stub, real: Real): void { @@ -64,10 +68,10 @@ export class StackAggregationCommitAuthCommand implements PoxCommand { // Act - // Include the authorization and the `stack-aggregation-commit` transactions - // in a single block. This way we ensure both the authorization and the - // stack-aggregation-commit transactions are called during the same reward - // cycle, so the authorization currentRewCycle param is relevant for the + // Include the authorization and the `stack-aggregation-commit` transactions + // in a single block. This way we ensure both the authorization and the + // stack-aggregation-commit transactions are called during the same reward + // cycle, so the authorization currentRewCycle param is relevant for the // upcoming stack-aggregation-commit call. const block = real.network.mineBlock([ tx.callPublicFn( From bb402093f8dcfa963c46a31e99f9a2bed56e12f8 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 21 Jun 2024 10:58:04 -0400 Subject: [PATCH 0353/1400] fix: Warnings and build error --- .../stacks-node/src/nakamoto_node/relayer.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 23 +------------------ 2 files changed, 2 insertions(+), 23 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 9355558de0..21a993e899 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -44,7 +44,7 @@ use stacks::net::db::LocalPeer; use stacks::net::relay::Relayer; use stacks::net::NetworkResult; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, VRFSeed, + BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksPublicKey, VRFSeed, }; use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_ms; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e84deea2cf..c32deac7f2 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1079,7 +1079,6 @@ fn simple_neon_integration() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -1312,7 +1311,6 @@ fn mine_multiple_per_tenure_integration() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -1502,7 +1500,6 @@ fn correct_burn_outs() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -1836,7 +1833,6 @@ fn block_proposal_api_endpoint() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -2200,7 +2196,6 @@ fn miner_writes_proposed_block_to_stackerdb() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -2321,7 +2316,6 @@ fn vote_for_aggregate_key_burn_op() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -2575,7 +2569,6 @@ fn follower_bootup() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -2815,7 +2808,6 @@ fn stack_stx_burn_op_integration_test() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -3251,7 +3243,6 @@ fn forked_tenure_is_ignored() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, naka_mined_blocks: mined_blocks, @@ -3517,7 +3508,6 @@ fn check_block_heights() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -3938,7 +3928,6 @@ fn nakamoto_attempt_time() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -4217,7 +4206,6 @@ fn clarity_burn_state() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -4460,7 +4448,6 @@ fn signer_chainstate() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -4975,7 +4962,6 @@ fn continue_tenure_extend() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -5030,14 +5016,7 @@ fn continue_tenure_extend() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) From d2bc1276a3c2695c3566cb51cde805cf1e79bf93 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 21 Jun 2024 13:13:09 -0400 Subject: [PATCH 0354/1400] chore: remove unnecessary fully-qualified type --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f299367629..5199d54a14 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5720,7 +5720,7 @@ fn check_block_times() { fn assert_block_info( tuple0: &BTreeMap, - miner: &clarity::vm::Value, + miner: &Value, miner_spend: &clarity::vm::Value, ) { assert!(tuple0 From eb9b3a1822fb16e4b0e114d0d81e3b106a631cca Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 21 Jun 2024 13:23:53 -0400 Subject: [PATCH 0355/1400] docs: add documentation for `TenureBlockId` --- stacks-common/src/types/chainstate.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index d61f78581f..47d6c3c499 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -247,6 +247,9 @@ impl_array_hexstring_fmt!(StacksBlockId); impl_byte_array_newtype!(StacksBlockId, u8, 32); impl_byte_array_serde!(StacksBlockId); +/// A newtype for `StacksBlockId` that indicates a block is a tenure-change +/// block. This helps to explicitly differentiate tenure-change blocks in the +/// code. pub struct TenureBlockId(pub StacksBlockId); impl From for TenureBlockId { fn from(id: StacksBlockId) -> TenureBlockId { From f0bfea323b4cdeabf75123282cd6f235d4bad23c Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 21 Jun 2024 12:47:30 -0400 Subject: [PATCH 0356/1400] fix: `signer::v0` Bitcoin integration tests --- testnet/stacks-node/src/tests/signer/v0.rs | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 85b971a426..29c9c001dd 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -100,20 +100,10 @@ impl SignerTest { self.run_until_epoch_3_boundary(); - let (vrfs_submitted, commits_submitted) = ( - self.running_nodes.vrfs_submitted.clone(), - self.running_nodes.commits_submitted.clone(), - ); - info!("Submitting 1 BTC block for miner VRF key registration"); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); + let commits_submitted = self.running_nodes.commits_submitted.clone(); - info!("Successfully triggered first block to wake up the miner runloop."); - // second block should confirm the VRF register, wait until a block commit is submitted + info!("Waiting 1 burnchain block for miner VRF key confirmation"); + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) From 588d22dce44cc40670ac9fe171c71fd3c1491f66 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 21 Jun 2024 13:09:08 -0500 Subject: [PATCH 0357/1400] ci: disable the signer::v1 integration tests --- .github/workflows/bitcoin-tests.yml | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index ea5833f68f..6557b368bd 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -85,15 +85,6 @@ jobs: - tests::nakamoto_integrations::nakamoto_attempt_time - tests::signer::v0::block_proposal_rejection - tests::signer::v0::miner_gather_signatures - - tests::signer::v1::dkg - - tests::signer::v1::sign_request_rejected - # TODO: enable these once v1 signer is fixed - # - tests::signer::v1::filter_bad_transactions - - tests::signer::v1::delayed_dkg - # TODO: enable these once v1 signer is fixed - # - tests::signer::v1::mine_2_nakamoto_reward_cycles - # - tests::signer::v1::sign_after_signer_reboot - # - tests::signer::v1::block_proposal - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state @@ -101,6 +92,14 @@ jobs: # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower + # TODO: enable these once v1 signer is supported by a new nakamoto epoch + # - tests::signer::v1::dkg + # - tests::signer::v1::sign_request_rejected + # - tests::signer::v1::filter_bad_transactions + # - tests::signer::v1::delayed_dkg + # - tests::signer::v1::mine_2_nakamoto_reward_cycles + # - tests::signer::v1::sign_after_signer_reboot + # - tests::signer::v1::block_proposal steps: ## Setup test environment - name: Setup Test Environment From 9b721cc7080d463a806a03af15fbe378a843c17a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 21 Jun 2024 16:08:40 -0400 Subject: [PATCH 0358/1400] fix: changes needed to integration tests after merge --- stackslib/src/clarity_vm/database/mod.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 30 ++----------------- 2 files changed, 4 insertions(+), 28 deletions(-) diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index ab859da00f..8fffc76d7d 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -568,7 +568,7 @@ fn get_first_block_in_tenure( } } None => { - if let Some(ch) = get_stacks_header_column_from_table( + if let Some(_) = get_stacks_header_column_from_table( conn, id_bhh, "consensus_hash", diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5a630e9f45..8570b0aeff 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5258,7 +5258,6 @@ fn check_block_times() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -5318,14 +5317,7 @@ fn check_block_times() { .unwrap(); info!("Time from pre-epoch 3.0: {}", time0); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -5738,7 +5730,6 @@ fn check_block_info() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -5811,14 +5802,7 @@ fn check_block_info() { let tuple0 = result0.expect_tuple().unwrap().data_map; info!("Info from pre-epoch 3.0: {:?}", tuple0); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) @@ -6200,7 +6184,6 @@ fn check_block_info_rewards() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. @@ -6266,14 +6249,7 @@ fn check_block_info_rewards() { let tuple0 = result0.expect_tuple().unwrap().data_map; info!("Info from pre-epoch 3.0: {:?}", tuple0); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted + // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count >= 1) From ab7fa74d6f4d6318c346316aea03b3e1a441019f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 24 Jun 2024 08:44:17 -0400 Subject: [PATCH 0359/1400] Add mine_2_nakamoto_reward_cycles test to v0 signer Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/mod.rs | 13 -- testnet/stacks-node/src/tests/signer/v0.rs | 154 +++++++++++++++----- testnet/stacks-node/src/tests/signer/v1.rs | 13 ++ 4 files changed, 130 insertions(+), 51 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index b7b7c2a977..1bb4bd17f4 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -85,6 +85,7 @@ jobs: - tests::nakamoto_integrations::nakamoto_attempt_time - tests::signer::v0::block_proposal_rejection - tests::signer::v0::miner_gather_signatures + - tests::signer::v0::mine_2_nakamoto_reward_cycles - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 31d2dabc11..72db269423 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -51,7 +51,6 @@ use stacks_signer::client::{SignerSlotID, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State}; use stacks_signer::{Signer, SpawnedSigner}; -use wsts::curve::point::Point; use wsts::state_machine::PublicKeys; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; @@ -237,18 +236,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest MinedNakamotoBlockEvent { - let new_block = self.mine_nakamoto_block(timeout); - let signer_sighash = new_block.signer_signature_hash.clone(); - let signature = self.wait_for_confirmed_block_v1(&signer_sighash, timeout); - assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes())); - new_block - } - fn mine_nakamoto_block(&mut self, timeout: Duration) -> MinedNakamotoBlockEvent { let commits_submitted = self.running_nodes.commits_submitted.clone(); let mined_block_time = Instant::now(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 29c9c001dd..e5084c6f78 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -111,6 +111,64 @@ impl SignerTest { .unwrap(); info!("Ready to mine Nakamoto blocks!"); } + + // Only call after already past the epoch 3.0 boundary + fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { + info!("------------------------- Try mining one block -------------------------"); + self.mine_nakamoto_block(timeout); + + // Verify that the signers accepted the proposed block, sending back a validate ok response + let proposed_signer_signature_hash = self.wait_for_validate_ok_response(timeout); + let message = proposed_signer_signature_hash.0; + + info!("------------------------- Test Block Signed -------------------------"); + // Verify that the signers signed the proposed block + let signature = self.wait_for_confirmed_block_v0(&proposed_signer_signature_hash, timeout); + + info!("Got {} signatures", signature.len()); + + assert_eq!(signature.len(), num_signers); + + let reward_cycle = self.get_current_reward_cycle(); + let signers = self.get_reward_set_signers(reward_cycle); + + // Verify that the signers signed the proposed block + let all_signed = signers.iter().zip(signature).all(|(signer, signature)| { + let stacks_public_key = Secp256k1PublicKey::from_slice(signer.signing_key.as_slice()) + .expect("Failed to convert signing key to StacksPublicKey"); + + // let valid = stacks_public_key.verify(message, signature); + let valid = stacks_public_key + .verify(&message, &signature) + .expect("Failed to verify signature"); + if !valid { + error!( + "Failed to verify signature for signer: {:?}", + stacks_public_key + ); + } + valid + }); + assert!(all_signed); + } + + // Only call after already past the epoch 3.0 boundary + fn run_until_burnchain_height_nakamoto( + &mut self, + timeout: Duration, + burnchain_height: u64, + num_signers: usize, + ) { + let current_block_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let total_nmb_blocks_to_mine = burnchain_height.saturating_sub(current_block_height); + debug!("Mining {total_nmb_blocks_to_mine} Nakamoto block(s) to reach burnchain height {burnchain_height}"); + for _ in 0..total_nmb_blocks_to_mine { + self.mine_and_verify_confirmed_naka_block(timeout, num_signers); + } + } } #[test] @@ -247,44 +305,8 @@ fn miner_gather_signatures() { signer_test.boot_to_epoch_3(); let timeout = Duration::from_secs(30); - info!("------------------------- Try mining one block -------------------------"); - signer_test.mine_nakamoto_block(timeout); - - // Verify that the signers accepted the proposed block, sending back a validate ok response - let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(timeout); - let message = proposed_signer_signature_hash.0; - - info!("------------------------- Test Block Signed -------------------------"); - // Verify that the signers signed the proposed block - let signature = - signer_test.wait_for_confirmed_block_v0(&proposed_signer_signature_hash, timeout); - - info!("Got {} signatures", signature.len()); - - assert_eq!(signature.len(), num_signers); - - let reward_cycle = signer_test.get_current_reward_cycle(); - let signers = signer_test.get_reward_set_signers(reward_cycle); - - // Verify that the signers signed the proposed block - - let all_signed = signers.iter().zip(signature).all(|(signer, signature)| { - let stacks_public_key = Secp256k1PublicKey::from_slice(signer.signing_key.as_slice()) - .expect("Failed to convert signing key to StacksPublicKey"); - - // let valid = stacks_public_key.verify(message, signature); - let valid = stacks_public_key - .verify(&message, &signature) - .expect("Failed to verify signature"); - if !valid { - error!( - "Failed to verify signature for signer: {:?}", - stacks_public_key - ); - } - valid - }); - assert!(all_signed); + info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); // Test prometheus metrics response #[cfg(feature = "monitoring_prom")] @@ -303,3 +325,59 @@ fn miner_gather_signatures() { assert!(metrics_response.contains(&expected_result)); } } + +#[test] +#[ignore] +/// Test that signers can handle a transition between Nakamoto reward cycles +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 2 full Nakamoto reward cycles, sending blocks to observing signers to sign and return. +/// +/// Test Assertion: +/// All signers sign all blocks successfully. +/// The chain advances 2 full reward cycles. +fn mine_2_nakamoto_reward_cycles() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let nmb_reward_cycles = 2; + let num_signers = 5; + let mut signer_test: SignerTest = SignerTest::new(num_signers); + let timeout = Duration::from_secs(200); + signer_test.boot_to_epoch_3(); + let curr_reward_cycle = signer_test.get_current_reward_cycle(); + // Mine 2 full Nakamoto reward cycles (epoch 3 starts in the middle of one, hence the + 1) + let next_reward_cycle = curr_reward_cycle.saturating_add(1); + let final_reward_cycle = next_reward_cycle.saturating_add(nmb_reward_cycles); + let final_reward_cycle_height_boundary = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(final_reward_cycle) + .saturating_sub(1); + + info!("------------------------- Test Mine 2 Nakamoto Reward Cycles -------------------------"); + signer_test.run_until_burnchain_height_nakamoto( + timeout, + final_reward_cycle_height_boundary, + num_signers, + ); + + let current_burnchain_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + assert_eq!(current_burnchain_height, final_reward_cycle_height_boundary); + signer_test.shutdown(); +} diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 09599515ee..eef3193fbb 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -55,6 +55,7 @@ use wsts::net::Message; use wsts::state_machine::OperationResult; use super::SignerTest; +use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::tests::nakamoto_integrations::{ boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, next_block_and, }; @@ -196,6 +197,18 @@ impl SignerTest { points } + fn mine_and_verify_confirmed_naka_block( + &mut self, + agg_key: &Point, + timeout: Duration, + ) -> MinedNakamotoBlockEvent { + let new_block = self.mine_nakamoto_block(timeout); + let signer_sighash = new_block.signer_signature_hash.clone(); + let signature = self.wait_for_confirmed_block_v1(&signer_sighash, timeout); + assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes())); + new_block + } + fn wait_for_dkg(&mut self, timeout: Duration) -> Point { debug!("Waiting for DKG..."); let mut key = Point::default(); From 4f0f7162b699103623ab0209c748eff1c965ec0e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 21 Jun 2024 13:07:07 -0500 Subject: [PATCH 0360/1400] feat: push blocks to signer set * add /v3/blocks/upload, and relayer handling for it * push blocks from miner to signer set, signer set pushes to their stacks-node * configure the signer set test to rely on signers to push the block (by disabling p2p block push) --- libsigner/src/libsigner.rs | 2 +- libsigner/src/v0/messages.rs | 65 ++++--- libsigner/src/v1/messages.rs | 4 +- stacks-signer/src/client/stackerdb.rs | 6 +- stacks-signer/src/client/stacks_client.rs | 19 ++ stacks-signer/src/v0/signer.rs | 20 +- stackslib/src/net/api/mod.rs | 3 + stackslib/src/net/api/postblock_v3.rs | 174 ++++++++++++++++++ stackslib/src/net/mod.rs | 10 +- stackslib/src/net/relay.rs | 61 ++++-- stackslib/src/net/stackerdb/mod.rs | 2 +- testnet/stacks-node/src/nakamoto_node.rs | 2 + .../stacks-node/src/nakamoto_node/miner.rs | 91 ++++++--- .../src/nakamoto_node/sign_coordinator.rs | 147 +++++++++------ testnet/stacks-node/src/tests/signer/v0.rs | 8 +- 15 files changed, 478 insertions(+), 136 deletions(-) create mode 100644 stackslib/src/net/api/postblock_v3.rs diff --git a/libsigner/src/libsigner.rs b/libsigner/src/libsigner.rs index 43d8e5b687..0da4e68a8f 100644 --- a/libsigner/src/libsigner.rs +++ b/libsigner/src/libsigner.rs @@ -74,5 +74,5 @@ pub trait MessageSlotID: Sized + Eq + Hash + Debug + Copy { /// A trait for signer messages used in signer communciation pub trait SignerMessage: StacksMessageCodec { /// The contract identifier for the message slot in stacker db - fn msg_id(&self) -> T; + fn msg_id(&self) -> Option; } diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 4d32253f2e..f831aa9e99 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -64,12 +64,19 @@ define_u8_enum!( /// Enum representing the stackerdb message identifier: this is /// the contract index in the signers contracts (i.e., X in signers-0-X) MessageSlotID { - /// Block Proposal message from miners - BlockProposal = 0, /// Block Response message from signers BlockResponse = 1 }); +define_u8_enum!( +/// Enum representing the slots used by the miner +MinerSlotID { + /// Block proposal from the miner + BlockProposal = 0, + /// Block pushed from the miner + BlockPushed = 1 +}); + impl MessageSlotIDTrait for MessageSlotID { fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) @@ -80,7 +87,7 @@ impl MessageSlotIDTrait for MessageSlotID { } impl SignerMessageTrait for SignerMessage { - fn msg_id(&self) -> MessageSlotID { + fn msg_id(&self) -> Option { self.msg_id() } } @@ -91,7 +98,9 @@ SignerMessageTypePrefix { /// Block Proposal message from miners BlockProposal = 0, /// Block Response message from signers - BlockResponse = 1 + BlockResponse = 1, + /// Block Pushed message from miners + BlockPushed = 2 }); #[cfg_attr(test, mutants::skip)] @@ -133,67 +142,65 @@ impl From<&SignerMessage> for SignerMessageTypePrefix { match message { SignerMessage::BlockProposal(_) => SignerMessageTypePrefix::BlockProposal, SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, + SignerMessage::BlockPushed(_) => SignerMessageTypePrefix::BlockPushed, } } } /// The messages being sent through the stacker db contracts -#[derive(Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum SignerMessage { /// The block proposal from miners for signers to observe and sign BlockProposal(BlockProposal), /// The block response from signers for miners to observe BlockResponse(BlockResponse), -} - -impl Debug for SignerMessage { - #[cfg_attr(test, mutants::skip)] - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::BlockProposal(b) => Debug::fmt(b, f), - Self::BlockResponse(b) => Debug::fmt(b, f), - } - } + /// A block pushed from miners to the signers set + BlockPushed(NakamotoBlock), } impl SignerMessage { /// Helper function to determine the slot ID for the provided stacker-db writer id + /// Not every message has a `MessageSlotID`: messages from the miner do not + /// broadcast over `.signers-0-X` contracts. #[cfg_attr(test, mutants::skip)] - pub fn msg_id(&self) -> MessageSlotID { + pub fn msg_id(&self) -> Option { match self { - Self::BlockProposal(_) => MessageSlotID::BlockProposal, - Self::BlockResponse(_) => MessageSlotID::BlockResponse, + Self::BlockProposal(_) | Self::BlockPushed(_) => None, + Self::BlockResponse(_) => Some(MessageSlotID::BlockResponse), } } } impl StacksMessageCodec for SignerMessage { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(SignerMessageTypePrefix::from(self) as u8))?; + SignerMessageTypePrefix::from(self) + .to_u8() + .consensus_serialize(fd)?; match self { - SignerMessage::BlockProposal(block_proposal) => { - write_next(fd, block_proposal)?; - } - SignerMessage::BlockResponse(block_response) => { - write_next(fd, block_response)?; - } - }; + SignerMessage::BlockProposal(block_proposal) => block_proposal.consensus_serialize(fd), + SignerMessage::BlockResponse(block_response) => block_response.consensus_serialize(fd), + SignerMessage::BlockPushed(block) => block.consensus_serialize(fd), + }?; Ok(()) } #[cfg_attr(test, mutants::skip)] fn consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; + let type_prefix_byte = u8::consensus_deserialize(fd)?; let type_prefix = SignerMessageTypePrefix::try_from(type_prefix_byte)?; let message = match type_prefix { SignerMessageTypePrefix::BlockProposal => { - let block_proposal = read_next::(fd)?; + let block_proposal = StacksMessageCodec::consensus_deserialize(fd)?; SignerMessage::BlockProposal(block_proposal) } SignerMessageTypePrefix::BlockResponse => { - let block_response = read_next::(fd)?; + let block_response = StacksMessageCodec::consensus_deserialize(fd)?; SignerMessage::BlockResponse(block_response) } + SignerMessageTypePrefix::BlockPushed => { + let block = StacksMessageCodec::consensus_deserialize(fd)?; + SignerMessage::BlockPushed(block) + } }; Ok(message) } diff --git a/libsigner/src/v1/messages.rs b/libsigner/src/v1/messages.rs index 30bd4a5769..b412d9a66f 100644 --- a/libsigner/src/v1/messages.rs +++ b/libsigner/src/v1/messages.rs @@ -110,8 +110,8 @@ impl MessageSlotIDTrait for MessageSlotID { } impl SignerMessageTrait for SignerMessage { - fn msg_id(&self) -> MessageSlotID { - self.msg_id() + fn msg_id(&self) -> Option { + Some(self.msg_id()) } } diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index e5ccb5a89f..e0c6cb1621 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -94,7 +94,11 @@ impl StackerDB { &mut self, message: T, ) -> Result { - let msg_id = message.msg_id(); + let msg_id = message.msg_id().ok_or_else(|| { + ClientError::PutChunkRejected( + "Tried to send a SignerMessage which does not have a corresponding .signers slot identifier".into() + ) + })?; let message_bytes = message.serialize_to_vec(); self.send_message_bytes_with_retry(&msg_id, message_bytes) } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index d85f6d9b7a..0f330a2e96 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -35,7 +35,9 @@ use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::RPCPoxInfoData; use blockstack_lib::net::api::getsortition::{SortitionInfo, RPC_SORTITION_INFO_PATH}; use blockstack_lib::net::api::getstackers::GetStackersResponse; +use blockstack_lib::net::api::postblock::StacksBlockAcceptedData; use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; +use blockstack_lib::net::api::postblock_v3; use blockstack_lib::net::api::postfeerate::{FeeRateEstimateRequestBody, RPCFeeEstimateResponse}; use blockstack_lib::util_lib::boot::{boot_code_addr, boot_code_id}; use clarity::util::hash::to_hex; @@ -655,6 +657,23 @@ impl StacksClient { Ok(unsigned_tx) } + /// Try to post a completed nakamoto block to our connected stacks-node + /// Returns `true` if the block was accepted or `false` if the block + /// was rejected. + pub fn post_block(&self, block: &NakamotoBlock) -> Result { + let response = self + .stacks_node_client + .post(format!("{}{}", self.http_origin, postblock_v3::PATH)) + .header("Content-Type", "application/octet-stream") + .body(block.serialize_to_vec()) + .send()?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let post_block_resp = response.json::()?; + Ok(post_block_resp.accepted) + } + /// Helper function to submit a transaction to the Stacks mempool pub fn submit_transaction(&self, tx: &StacksTransaction) -> Result { let txid = tx.txid(); diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b1a11546f0..962c1f623b 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -21,9 +21,9 @@ use clarity::types::PrivateKey; use clarity::util::hash::MerkleHashFunc; use libsigner::v0::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMessage}; use libsigner::{BlockProposal, SignerEvent}; -use slog::{slog_debug, slog_error, slog_warn}; +use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; -use stacks_common::{debug, error, warn}; +use stacks_common::{debug, error, info, warn}; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; @@ -118,8 +118,20 @@ impl SignerTrait for Signer { messages.len(); ); for message in messages { - if let SignerMessage::BlockProposal(block_proposal) = message { - self.handle_block_proposal(stacks_client, block_proposal); + match message { + SignerMessage::BlockProposal(block_proposal) => { + self.handle_block_proposal(stacks_client, block_proposal); + } + SignerMessage::BlockPushed(b) => { + let block_push_result = stacks_client.post_block(&b); + info!( + "{self}: Got block pushed message"; + "block_id" => %b.block_id(), + "signer_sighash" => %b.header.signer_signature_hash(), + "push_result" => ?block_push_result, + ); + } + _ => {} } } } diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 34fa1ec4c3..d256c15b97 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -66,6 +66,8 @@ pub mod gettransaction_unconfirmed; pub mod liststackerdbreplicas; pub mod postblock; pub mod postblock_proposal; +#[warn(unused_imports)] +pub mod postblock_v3; pub mod postfeerate; pub mod postmempoolquery; pub mod postmicroblock; @@ -129,6 +131,7 @@ impl StacksHttp { self.register_rpc_endpoint(postblock_proposal::RPCBlockProposalRequestHandler::new( self.block_proposal_token.clone(), )); + self.register_rpc_endpoint(postblock_v3::RPCPostBlockRequestHandler::default()); self.register_rpc_endpoint(postfeerate::RPCPostFeeRateRequestHandler::new()); self.register_rpc_endpoint(postmempoolquery::RPCMempoolQueryRequestHandler::new()); self.register_rpc_endpoint(postmicroblock::RPCPostMicroblockRequestHandler::new()); diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs new file mode 100644 index 0000000000..8ce5a65dde --- /dev/null +++ b/stackslib/src/net/api/postblock_v3.rs @@ -0,0 +1,174 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use regex::{Captures, Regex}; +use stacks_common::codec::{Error as CodecError, StacksMessageCodec, MAX_PAYLOAD_LEN}; +use stacks_common::types::net::PeerHost; + +use super::postblock::StacksBlockAcceptedData; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::net::http::{ + parse_json, Error, HttpContentType, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, +}; +use crate::net::httpcore::{HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest}; +use crate::net::relay::Relayer; +use crate::net::{Error as NetError, NakamotoBlocksData, StacksMessageType, StacksNodeState}; + +pub static PATH: &'static str = "/v3/blocks/upload/"; + +#[derive(Clone, Default)] +pub struct RPCPostBlockRequestHandler { + pub block: Option, +} + +impl RPCPostBlockRequestHandler { + /// Decode a bare block from the body + fn parse_postblock_octets(mut body: &[u8]) -> Result { + let block = NakamotoBlock::consensus_deserialize(&mut body).map_err(|e| { + if let CodecError::DeserializeError(msg) = e { + Error::DecodeError(format!("Failed to deserialize posted transaction: {}", msg)) + } else { + e.into() + } + })?; + Ok(block) + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCPostBlockRequestHandler { + fn verb(&self) -> &'static str { + "POST" + } + + fn path_regex(&self) -> Regex { + Regex::new(&format!("^{PATH}$")).unwrap() + } + + fn metrics_identifier(&self) -> &str { + PATH + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + _captures: &Captures, + query: Option<&str>, + body: &[u8], + ) -> Result { + if preamble.get_content_length() == 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected non-zero-length body for PostBlock".to_string(), + )); + } + + if preamble.get_content_length() > MAX_PAYLOAD_LEN { + return Err(Error::DecodeError( + "Invalid Http request: PostBlock body is too big".to_string(), + )); + } + + if Some(HttpContentType::Bytes) != preamble.content_type || preamble.content_type.is_none() + { + return Err(Error::DecodeError( + "Invalid Http request: PostBlock takes application/octet-stream".to_string(), + )); + } + + let block = Self::parse_postblock_octets(body)?; + + self.block = Some(block); + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCPostBlockRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.block = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + // get out the request body + let block = self + .block + .take() + .ok_or(NetError::SendError("`block` not set".into()))?; + + let accepted = + node.with_node_state(|network, sortdb, chainstate, _mempool, _rpc_args| { + let mut handle_conn = sortdb.index_handle_at_tip(); + Relayer::process_new_nakamoto_block( + &network.burnchain, + &sortdb, + &mut handle_conn, + chainstate, + &block, + None, + ) + })?; + + let data_resp = StacksBlockAcceptedData { + accepted, + stacks_block_id: block.block_id(), + }; + + // should set to relay... + if data_resp.accepted { + node.set_relay_message(StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: vec![block], + })); + } + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&data_resp)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCPostBlockRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let accepted: StacksBlockAcceptedData = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(accepted)?) + } +} + +impl StacksHttpRequest { + /// Make a new post-block request + pub fn new_post_block_v3(host: PeerHost, block: &NakamotoBlock) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "POST".into(), + PATH.into(), + HttpRequestContents::new().payload_stacks(block), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index e8e52fd137..6875b336af 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1468,6 +1468,8 @@ pub struct NetworkResult { pub uploaded_transactions: Vec, /// blocks sent to us via the http server pub uploaded_blocks: Vec, + /// blocks sent to us via the http server + pub uploaded_nakamoto_blocks: Vec, /// microblocks sent to us by the http server pub uploaded_microblocks: Vec, /// chunks we received from the HTTP server @@ -1515,6 +1517,7 @@ impl NetworkResult { pushed_microblocks: HashMap::new(), pushed_nakamoto_blocks: HashMap::new(), uploaded_transactions: vec![], + uploaded_nakamoto_blocks: vec![], uploaded_blocks: vec![], uploaded_microblocks: vec![], uploaded_stackerdb_chunks: vec![], @@ -1638,8 +1641,8 @@ impl NetworkResult { } } - pub fn consume_http_uploads(&mut self, mut msgs: Vec) -> () { - for msg in msgs.drain(..) { + pub fn consume_http_uploads(&mut self, msgs: Vec) -> () { + for msg in msgs.into_iter() { match msg { StacksMessageType::Transaction(tx_data) => { self.uploaded_transactions.push(tx_data); @@ -1653,6 +1656,9 @@ impl NetworkResult { StacksMessageType::StackerDBPushChunk(chunk_data) => { self.uploaded_stackerdb_chunks.push(chunk_data); } + StacksMessageType::NakamotoBlocks(data) => { + self.uploaded_nakamoto_blocks.extend(data.blocks); + } _ => { // drop warn!("Dropping unknown HTTP message"); diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 11fa5f6364..3004f4d7c9 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1949,22 +1949,53 @@ impl Relayer { }; // process pushed Nakamoto blocks - let (mut pushed_blocks_and_relayers, bad_neighbors) = - match Self::process_pushed_nakamoto_blocks( - network_result, - burnchain, - sortdb, - chainstate, - coord_comms, - ) { - Ok(x) => x, - Err(e) => { - warn!("Failed to process pushed Nakamoto blocks: {:?}", &e); - (vec![], vec![]) - } - }; + let (pushed_blocks_and_relayers, bad_neighbors) = match Self::process_pushed_nakamoto_blocks( + network_result, + burnchain, + sortdb, + chainstate, + coord_comms, + ) { + Ok(x) => x, + Err(e) => { + warn!("Failed to process pushed Nakamoto blocks: {:?}", &e); + (vec![], vec![]) + } + }; - accepted_nakamoto_blocks_and_relayers.append(&mut pushed_blocks_and_relayers); + let mut http_uploaded_blocks = vec![]; + for block in network_result.uploaded_nakamoto_blocks.drain(..) { + let block_id = block.block_id(); + let have_block = chainstate + .nakamoto_blocks_db() + .has_nakamoto_block(&block_id) + .unwrap_or_else(|e| { + warn!( + "Failed to determine if we have Nakamoto block"; + "stacks_block_id" => %block_id, + "err" => ?e + ); + false + }); + if have_block { + debug!( + "Received http-uploaded nakamoto block"; + "stacks_block_id" => %block_id, + ); + http_uploaded_blocks.push(block); + } + } + if !http_uploaded_blocks.is_empty() { + coord_comms.inspect(|comm| { + comm.announce_new_stacks_block(); + }); + } + + accepted_nakamoto_blocks_and_relayers.extend(pushed_blocks_and_relayers); + accepted_nakamoto_blocks_and_relayers.push(AcceptedNakamotoBlocks { + relayers: vec![], + blocks: http_uploaded_blocks, + }); Ok((accepted_nakamoto_blocks_and_relayers, bad_neighbors)) } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 53aa2f3c22..754df3fba1 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -151,7 +151,7 @@ pub const STACKERDB_MAX_PAGE_COUNT: u32 = 2; pub const STACKERDB_SLOTS_FUNCTION: &str = "stackerdb-get-signer-slots"; pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; -pub const MINER_SLOT_COUNT: u32 = 1; +pub const MINER_SLOT_COUNT: u32 = 2; /// Final result of synchronizing state with a remote set of DB replicas #[derive(Clone)] diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index a556a41093..22ba5f2d7e 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -95,6 +95,8 @@ pub enum Error { BadVrfConstruction, CannotSelfSign, MiningFailure(ChainstateError), + /// The miner didn't accept their own block + AcceptFailure(ChainstateError), MinerSignatureError(&'static str), SignerSignatureError(String), /// A failure occurred while configuring the miner thread diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 421702bcfb..2bc93e5cf3 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -18,10 +18,13 @@ use std::thread; use std::thread::JoinHandle; use std::time::{Duration, Instant}; +use clarity::boot_util::boot_code_id; use clarity::vm::clarity::ClarityConnection; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use hashbrown::HashSet; -use libsigner::v1::messages::{MessageSlotID, SignerMessage}; +use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; +use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; +use libsigner::StackerDBSession; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; @@ -30,7 +33,7 @@ use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; -use stacks::chainstate::stacks::boot::RewardSet; +use stacks::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, @@ -56,9 +59,9 @@ use crate::run_loop::RegisteredKey; use crate::{neon_node, ChainTip}; #[cfg(test)] -lazy_static::lazy_static! { - pub static ref TEST_BROADCAST_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); -} +pub static TEST_BROADCAST_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(test)] +pub static TEST_SKIP_P2P_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); /// If the miner was interrupted while mining a block, how long should the /// miner thread sleep before trying again? @@ -258,7 +261,7 @@ impl BlockMinerThread { }; new_block.header.signer_signature = signer_signature; - if let Err(e) = self.broadcast(new_block.clone(), reward_set) { + if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { warn!("Error accepting own block: {e:?}. Will try mining again."); continue; } else { @@ -562,12 +565,12 @@ impl BlockMinerThread { let signer_chunks = stackerdbs .get_latest_chunks(&signers_contract_id, &slot_ids) .expect("FATAL: could not get latest chunks from stacker DB"); - let signer_messages: Vec<(u32, SignerMessage)> = slot_ids + let signer_messages: Vec<(u32, SignerMessageV1)> = slot_ids .iter() .zip(signer_chunks.into_iter()) .filter_map(|(slot_id, chunk)| { chunk.and_then(|chunk| { - read_next::(&mut &chunk[..]) + read_next::(&mut &chunk[..]) .ok() .map(|msg| (*slot_id, msg)) }) @@ -609,7 +612,7 @@ impl BlockMinerThread { let mut filtered_transactions: HashMap = HashMap::new(); for (_slot, signer_message) in signer_messages { match signer_message { - SignerMessage::Transactions(transactions) => { + SignerMessageV1::Transactions(transactions) => { NakamotoSigners::update_filtered_transactions( &mut filtered_transactions, &account_nonces, @@ -623,11 +626,40 @@ impl BlockMinerThread { Ok(filtered_transactions.into_values().collect()) } + fn broadcast_p2p( + sort_db: &SortitionDB, + chain_state: &mut StacksChainState, + block: &NakamotoBlock, + reward_set: RewardSet, + ) -> Result<(), ChainstateError> { + #[cfg(test)] + { + if *TEST_SKIP_P2P_BROADCAST.lock().unwrap() == Some(true) { + return Ok(()); + } + } + + let mut sortition_handle = sort_db.index_handle_at_ch(&block.header.consensus_hash)?; + let chainstate_config = chain_state.config(); + let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; + NakamotoChainState::accept_block( + &chainstate_config, + &block, + &mut sortition_handle, + &staging_tx, + headers_conn, + reward_set, + )?; + staging_tx.commit()?; + Ok(()) + } + fn broadcast( &self, block: NakamotoBlock, reward_set: RewardSet, - ) -> Result<(), ChainstateError> { + stackerdbs: &StackerDBs, + ) -> Result<(), NakamotoNodeError> { #[cfg(test)] { if *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { @@ -647,7 +679,6 @@ impl BlockMinerThread { } let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); - let chainstate_config = chain_state.config(); let sort_db = SortitionDB::open( &self.config.get_burn_db_file_path(), true, @@ -655,18 +686,32 @@ impl BlockMinerThread { ) .expect("FATAL: could not open sortition DB"); - let mut sortition_handle = sort_db.index_handle_at_ch(&block.header.consensus_hash)?; - let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; - NakamotoChainState::accept_block( - &chainstate_config, - &block, - &mut sortition_handle, - &staging_tx, - headers_conn, - reward_set, - )?; - staging_tx.commit()?; - Ok(()) + Self::broadcast_p2p(&sort_db, &mut chain_state, &block, reward_set) + .map_err(NakamotoNodeError::AcceptFailure)?; + + let Some(ref miner_privkey) = self.config.miner.mining_key else { + return Err(NakamotoNodeError::MinerConfigurationFailed( + "No mining key configured, cannot mine", + )); + }; + + let rpc_socket = self.config.node.get_rpc_loopback().ok_or_else(|| { + NakamotoNodeError::MinerConfigurationFailed("Failed to get RPC loopback socket") + })?; + let miners_contract_id = boot_code_id(MINERS_NAME, chain_state.mainnet); + let mut miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); + + SignCoordinator::send_miners_message( + miner_privkey, + &sort_db, + &self.burn_block, + &stackerdbs, + SignerMessageV0::BlockPushed(block), + MinerSlotID::BlockPushed, + chain_state.mainnet, + &mut miners_session, + ) + .map_err(NakamotoNodeError::SigningCoordinatorFailure) } /// Get the coinbase recipient address, if set in the config and if allowed in this epoch diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 7fcb5bb008..63f46a9557 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -18,7 +18,7 @@ use std::sync::mpsc::Receiver; use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; -use libsigner::v0::messages::{BlockResponse, SignerMessage as SignerMessageV0}; +use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0}; use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; @@ -337,19 +337,44 @@ impl SignCoordinator { tip: &BlockSnapshot, stackerdbs: &StackerDBs, message: M, + miner_slot_id: MinerSlotID, is_mainnet: bool, miners_session: &mut StackerDBSession, ) -> Result<(), String> { let mut miner_sk = StacksPrivateKey::from_slice(&message_key.to_bytes()).unwrap(); miner_sk.set_compress_public(true); + Self::send_miners_message( + &miner_sk, + sortdb, + tip, + stackerdbs, + message, + miner_slot_id, + is_mainnet, + miners_session, + ) + } + + pub fn send_miners_message( + miner_sk: &StacksPrivateKey, + sortdb: &SortitionDB, + tip: &BlockSnapshot, + stackerdbs: &StackerDBs, + message: M, + miner_slot_id: MinerSlotID, + is_mainnet: bool, + miners_session: &mut StackerDBSession, + ) -> Result<(), String> { let miner_pubkey = StacksPublicKey::from_private(&miner_sk); let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &miner_pubkey) .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? else { return Err("No slot for miner".into()); }; - // We only have one slot per miner - let slot_id = slot_range.start; + + let slot_id = slot_range + .start + .saturating_add(miner_slot_id.to_u8().into()); if !slot_range.contains(&slot_id) { return Err("Not enough slots for miner messages".into()); } @@ -420,6 +445,7 @@ impl SignCoordinator { burn_tip, &stackerdbs, nonce_req_msg.into(), + MinerSlotID::BlockProposal, self.is_mainnet, &mut self.miners_session, ) @@ -571,6 +597,9 @@ impl SignCoordinator { burn_tip, stackerdbs, msg.into(), + // TODO: note, in v1, we'll want to add a new slot, but for now, it just shares + // with the block proposal + MinerSlotID::BlockProposal, self.is_mainnet, &mut self.miners_session, ) { @@ -630,6 +659,7 @@ impl SignCoordinator { burn_tip, &stackerdbs, block_proposal_message, + MinerSlotID::BlockProposal, self.is_mainnet, &mut self.miners_session, ) @@ -709,70 +739,73 @@ impl SignCoordinator { ); for (message, slot_id) in messages.into_iter().zip(slot_ids) { - match message { + let (response_hash, signature) = match message { SignerMessageV0::BlockResponse(BlockResponse::Accepted(( response_hash, signature, - ))) => { - let block_sighash = block.header.signer_signature_hash(); - if block_sighash != response_hash { - warn!( - "Processed signature but didn't validate over the expected block. Returning error."; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, - "slot_id" => slot_id, - ); - continue; - } - debug!("SignCoordinator: Received valid signature from signer"; "slot_id" => slot_id, "signature" => %signature); - let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { - return Err(NakamotoNodeError::SignerSignatureError( - "Signer entry not found".into(), - )); - }; - let Ok(signer_pubkey) = - StacksPublicKey::from_slice(&signer_entry.signing_key) - else { - return Err(NakamotoNodeError::SignerSignatureError( - "Failed to parse signer public key".into(), - )); - }; - let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) - else { - warn!("Got invalid signature from a signer. Ignoring."); - continue; - }; - if !valid_sig { - warn!( - "Processed signature but didn't validate over the expected block. Ignoring"; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, - "slot_id" => slot_id, - ); - continue; - } - if !gathered_signatures.contains_key(&slot_id) { - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); - } - debug!("Signature Added to block"; - "block_signer_sighash" => %block_sighash, - "signer_pubkey" => signer_pubkey.to_hex(), - "signer_slot_id" => slot_id, - "signature" => %signature, - "signer_weight" => signer_entry.weight, - "total_weight_signed" => total_weight_signed, - ); - gathered_signatures.insert(slot_id, signature); - } + ))) => (response_hash, signature), SignerMessageV0::BlockResponse(BlockResponse::Rejected(_)) => { debug!("Received rejected block response. Ignoring."); + continue; } SignerMessageV0::BlockProposal(_) => { debug!("Received block proposal message. Ignoring."); + continue; + } + SignerMessageV0::BlockPushed(_) => { + debug!("Received block pushed message. Ignoring."); + continue; } + }; + let block_sighash = block.header.signer_signature_hash(); + if block_sighash != response_hash { + warn!( + "Processed signature but didn't validate over the expected block. Returning error."; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "slot_id" => slot_id, + ); + continue; + } + debug!("SignCoordinator: Received valid signature from signer"; "slot_id" => slot_id, "signature" => %signature); + let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { + return Err(NakamotoNodeError::SignerSignatureError( + "Signer entry not found".into(), + )); + }; + let Ok(signer_pubkey) = StacksPublicKey::from_slice(&signer_entry.signing_key) + else { + return Err(NakamotoNodeError::SignerSignatureError( + "Failed to parse signer public key".into(), + )); + }; + let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) else { + warn!("Got invalid signature from a signer. Ignoring."); + continue; + }; + if !valid_sig { + warn!( + "Processed signature but didn't validate over the expected block. Ignoring"; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "slot_id" => slot_id, + ); + continue; + } + if !gathered_signatures.contains_key(&slot_id) { + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); } + debug!("Signature Added to block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => total_weight_signed, + ); + gathered_signatures.insert(slot_id, signature); } // After gathering all signatures, return them if we've hit the threshold diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 29c9c001dd..61eafd41df 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -38,7 +38,7 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; use crate::tests::neon_integrations::next_block_and_wait; -use crate::BurnchainController; +use crate::{nakamoto_node, BurnchainController}; impl SignerTest { /// Run the test until the epoch 3 boundary @@ -241,6 +241,12 @@ fn miner_gather_signatures() { .with(EnvFilter::from_default_env()) .init(); + // Disable p2p broadcast of the nakamoto blocks, so that we rely + // on the signer's using StackerDB to get pushed blocks + *nakamoto_node::miner::TEST_SKIP_P2P_BROADCAST + .lock() + .unwrap() = Some(true); + info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let mut signer_test: SignerTest = SignerTest::new(num_signers); From 6caf16d1abf729b2de6fd0bdb78d0f7cc1fe1087 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 24 Jun 2024 14:09:15 -0500 Subject: [PATCH 0361/1400] test: add unit tests for POST /v3/blocks/upload --- stackslib/src/net/api/postblock_v3.rs | 31 ++-- stackslib/src/net/api/tests/mod.rs | 1 + stackslib/src/net/api/tests/postblock_v3.rs | 165 ++++++++++++++++++++ 3 files changed, 187 insertions(+), 10 deletions(-) create mode 100644 stackslib/src/net/api/tests/postblock_v3.rs diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index 8ce5a65dde..f7f595913a 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -20,10 +20,13 @@ use stacks_common::types::net::PeerHost; use super::postblock::StacksBlockAcceptedData; use crate::chainstate::nakamoto::NakamotoBlock; use crate::net::http::{ - parse_json, Error, HttpContentType, HttpRequest, HttpRequestContents, HttpRequestPreamble, - HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, + parse_json, Error, HttpContentType, HttpError, HttpRequest, HttpRequestContents, + HttpRequestPreamble, HttpResponse, HttpResponseContents, HttpResponsePayload, + HttpResponsePreamble, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, }; -use crate::net::httpcore::{HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest}; use crate::net::relay::Relayer; use crate::net::{Error as NetError, NakamotoBlocksData, StacksMessageType, StacksNodeState}; @@ -116,8 +119,8 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { .take() .ok_or(NetError::SendError("`block` not set".into()))?; - let accepted = - node.with_node_state(|network, sortdb, chainstate, _mempool, _rpc_args| { + let response = node + .with_node_state(|network, sortdb, chainstate, _mempool, _rpc_args| { let mut handle_conn = sortdb.index_handle_at_tip(); Relayer::process_new_nakamoto_block( &network.burnchain, @@ -127,11 +130,19 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { &block, None, ) - })?; - - let data_resp = StacksBlockAcceptedData { - accepted, - stacks_block_id: block.block_id(), + }) + .map_err(|e| { + StacksHttpResponse::new_error(&preamble, &HttpError::new(400, e.to_string())) + }); + + let data_resp = match response { + Ok(accepted) => StacksBlockAcceptedData { + accepted, + stacks_block_id: block.block_id(), + }, + Err(e) => { + return e.try_into_contents().map_err(NetError::from); + } }; // should set to relay... diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 52b6d7d94b..1adf9369fc 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -83,6 +83,7 @@ mod gettransaction_unconfirmed; mod liststackerdbreplicas; mod postblock; mod postblock_proposal; +mod postblock_v3; mod postfeerate; mod postmempoolquery; mod postmicroblock; diff --git a/stackslib/src/net/api/tests/postblock_v3.rs b/stackslib/src/net/api/tests/postblock_v3.rs new file mode 100644 index 0000000000..e68d334239 --- /dev/null +++ b/stackslib/src/net/api/tests/postblock_v3.rs @@ -0,0 +1,165 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName, Value}; +use stacks_common::types::chainstate::{ConsensusHash, StacksAddress, StacksPrivateKey}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::{Address, StacksEpochId}; + +use super::TestRPC; +use crate::chainstate::stacks::test::make_codec_test_nakamoto_block; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::core::BLOCK_LIMIT_MAINNET_21; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, +}; +use crate::net::test::TestEventObserver; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let miner_sk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4, 5, 6, 7, 8]); + let block = make_codec_test_nakamoto_block(StacksEpochId::Epoch30, &miner_sk); + let request = StacksHttpRequest::new_post_block_v3(addr.into(), &block); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postblock_v3::RPCPostBlockRequestHandler::default(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + assert_eq!(handler.block, Some(block.clone())); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, _contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.block.is_none()); + + // try to deal with an invalid block + let mut bad_block = block.clone(); + bad_block.txs.clear(); + + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let request = StacksHttpRequest::new_post_block_v3(addr.into(), &bad_block); + let bytes = request.try_serialize().unwrap(); + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = postblock_v3::RPCPostBlockRequestHandler::default(); + match http.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) { + Err(NetError::Http(Error::DecodeError(..))) => {} + _ => { + panic!("worked with bad block"); + } + } +} + +#[test] +fn handle_req_accepted() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let observer = TestEventObserver::new(); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &observer); + let (next_block, ..) = rpc_test.peer_1.single_block_tenure( + &rpc_test.privk1, + |_| {}, + |burn_ops| { + rpc_test.peer_2.next_burnchain_block(burn_ops.clone()); + }, + |_| true, + ); + let next_block_id = next_block.block_id(); + let mut requests = vec![]; + + // post the block + requests.push(StacksHttpRequest::new_post_block_v3( + addr.into(), + &next_block, + )); + + // idempotent + requests.push(StacksHttpRequest::new_post_block_v3( + addr.into(), + &next_block, + )); + + let mut responses = rpc_test.run(requests); + + let response = responses.remove(0); + info!( + "Response: {}", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, true); + assert_eq!(resp.stacks_block_id, next_block_id); + + let response = responses.remove(0); + info!( + "Response: {}", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + let resp = response.decode_stacks_block_accepted().unwrap(); + assert_eq!(resp.accepted, false); + assert_eq!(resp.stacks_block_id, next_block_id); +} + +#[test] +fn handle_req_unknown_burn_block() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let observer = TestEventObserver::new(); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &observer); + // test with a consensus hash not known yet to the peer + let (next_block, ..) = + rpc_test + .peer_1 + .single_block_tenure(&rpc_test.privk1, |_| {}, |_| {}, |_| true); + let next_block_id = next_block.block_id(); + let requests = vec![StacksHttpRequest::new_post_block_v3( + addr.into(), + &next_block, + )]; + + let mut responses = rpc_test.run(requests); + let response = responses.remove(0); + info!( + "Response: {}", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 400); +} From 46344791428b85f89962eafc42fa34a570caf7be Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 25 Jun 2024 20:47:26 +0300 Subject: [PATCH 0362/1400] update unit test getinfo api to include mock values for is_fully_synced --- docs/rpc/api/core-node/get-info.example.json | 3 +-- docs/rpc/api/core-node/get-info.schema.json | 18 +++++------------- stackslib/src/net/api/tests/getinfo.rs | 8 ++++---- 3 files changed, 10 insertions(+), 19 deletions(-) diff --git a/docs/rpc/api/core-node/get-info.example.json b/docs/rpc/api/core-node/get-info.example.json index 4876841497..afc42e6f68 100644 --- a/docs/rpc/api/core-node/get-info.example.json +++ b/docs/rpc/api/core-node/get-info.example.json @@ -12,6 +12,5 @@ "stacks_tip_consensus_hash": "17f76e597bab45646956f38dd39573085d72cbc0", "unanchored_tip": "0000000000000000000000000000000000000000000000000000000000000000", "exit_at_block_height": null, - "is_fully_synced": false, - "max_peer_height": 15 + "is_fully_synced": false } diff --git a/docs/rpc/api/core-node/get-info.schema.json b/docs/rpc/api/core-node/get-info.schema.json index 4190c67120..16b560ed5e 100644 --- a/docs/rpc/api/core-node/get-info.schema.json +++ b/docs/rpc/api/core-node/get-info.schema.json @@ -18,8 +18,7 @@ "stacks_tip_consensus_hash", "unanchored_tip", "exit_at_block_height", - "is_fully_synced", - "max_peer_height" + "is_fully_synced" ], "properties": { "peer_version": { @@ -74,16 +73,9 @@ "type": "integer", "description": "the block height at which the testnet network will be reset. not applicable for mainnet" }, - - "is_fully_synced": { - "type": "boolean", - "description": "indicates whether the node has fully synchronized with the network" - }, - "max_peer_height": { - "type": "integer", - "description": "the highest block height observed among all connected peers, indicating the most advanced state of the blockchain network known to the node" - } - - + "is_fully_synced": { + "type": "boolean", + "description": "indicates whether the node has fully synchronized with the network" + } } } diff --git a/stackslib/src/net/api/tests/getinfo.rs b/stackslib/src/net/api/tests/getinfo.rs index da1ca4ba19..7d8aeff01c 100644 --- a/stackslib/src/net/api/tests/getinfo.rs +++ b/stackslib/src/net/api/tests/getinfo.rs @@ -63,10 +63,10 @@ fn test_try_parse_request() { #[test] fn test_getinfo_compat() { - let old_getinfo_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null}"#; - let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d"}"#; - let getinfo_no_pubkey_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; - let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; + let old_getinfo_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false}"#; + let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d"}"#; + let getinfo_no_pubkey_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; + let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; // they all parse for json_obj in &[ From 9a5f30d1bcc7efc4ad42c3e9eb9f9dc122412818 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 26 Jun 2024 09:55:34 -0400 Subject: [PATCH 0363/1400] test: add test checking block handling at end of tenure See #4532 --- stackslib/src/net/api/postblock_proposal.rs | 21 ++++ testnet/stacks-node/src/tests/signer/mod.rs | 19 ++- testnet/stacks-node/src/tests/signer/v0.rs | 132 +++++++++++++++++++- testnet/stacks-node/src/tests/signer/v1.rs | 14 +-- 4 files changed, 171 insertions(+), 15 deletions(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index e25c80b9ea..901720ee81 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -63,6 +63,9 @@ use crate::net::{ }; use crate::util_lib::db::Error as DBError; +#[cfg(any(test, feature = "testing"))] +pub static TEST_VALIDATE_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); + // This enum is used to supply a `reason_code` for validation // rejection responses. This is serialized as an enum with string // type (in jsonschema terminology). @@ -360,6 +363,24 @@ impl NakamotoBlockProposal { }); } + #[cfg(any(test, feature = "testing"))] + { + if *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Block validation is stalled due to testing directive."; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + ); + while *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block validation is no longer stalled due to testing directive."; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + ); + } + } + info!( "Participant: validated anchored block"; "block_header_hash" => %computed_block_header_hash, diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 72db269423..794c00f05a 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -35,6 +35,7 @@ use std::thread; use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; +use clarity::vm::types::PrincipalData; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -42,6 +43,7 @@ use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::BlockValidateResponse; +use stacks::types::chainstate::StacksAddress; use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; @@ -77,6 +79,8 @@ pub struct RunningNodes { pub vrfs_submitted: Arc, pub commits_submitted: Arc, pub blocks_processed: Arc, + pub nakamoto_blocks_proposed: Arc, + pub nakamoto_blocks_mined: Arc, pub coord_channel: Arc>, pub conf: NeonConfig, } @@ -96,13 +100,20 @@ pub struct SignerTest { } impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest> { - fn new(num_signers: usize) -> Self { + fn new(num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>) -> Self { // Generate Signer Data let signer_stacks_private_keys = (0..num_signers) .map(|_| StacksPrivateKey::new()) .collect::>(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + + // Add initial balances to the config + for (address, amount) in initial_balances.iter() { + naka_conf + .add_initial_balance(PrincipalData::from(address.clone()).to_string(), *amount); + } + // So the combination is... one, two, three, four, five? That's the stupidest combination I've ever heard in my life! // That's the kind of thing an idiot would have on his luggage! let password = "12345"; @@ -550,7 +561,7 @@ fn setup_stx_btc_node( } naka_conf.initial_balances.append(&mut initial_balances); naka_conf.node.stacker = true; - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(5); for signer_set in 0..2 { for message_id in 0..SIGNER_SLOTS_PER_USER { @@ -583,6 +594,8 @@ fn setup_stx_btc_node( blocks_processed, naka_submitted_vrfs: vrfs_submitted, naka_submitted_commits: commits_submitted, + naka_proposed_blocks: naka_blocks_proposed, + naka_mined_blocks: naka_blocks_mined, .. } = run_loop.counters(); @@ -613,6 +626,8 @@ fn setup_stx_btc_node( vrfs_submitted: vrfs_submitted.0, commits_submitted: commits_submitted.0, blocks_processed: blocks_processed.0, + nakamoto_blocks_proposed: naka_blocks_proposed.0, + nakamoto_blocks_mined: naka_blocks_mined.0, coord_channel, conf: naka_conf, } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e5084c6f78..c1d6169f7d 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -17,6 +17,7 @@ use std::env; use std::sync::atomic::Ordering; use std::time::Duration; +use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{ BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, }; @@ -25,9 +26,10 @@ use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::codec::StacksMessageCodec; use stacks::libstackerdb::StackerDBChunkData; -use stacks::types::chainstate::StacksPrivateKey; +use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; +use stacks::types::chainstate::{StacksAddress, StacksPrivateKey}; use stacks::types::PublicKey; -use stacks::util::secp256k1::Secp256k1PublicKey; +use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::runloop::State; @@ -37,7 +39,8 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; -use crate::tests::neon_integrations::next_block_and_wait; +use crate::tests::neon_integrations::{get_chain_info, next_block_and_wait, submit_tx}; +use crate::tests::{self, make_stacks_transfer}; use crate::BurnchainController; impl SignerTest { @@ -198,7 +201,7 @@ fn block_proposal_rejection() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); signer_test.boot_to_epoch_3(); let short_timeout = Duration::from_secs(30); @@ -301,7 +304,7 @@ fn miner_gather_signatures() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); signer_test.boot_to_epoch_3(); let timeout = Duration::from_secs(30); @@ -353,7 +356,7 @@ fn mine_2_nakamoto_reward_cycles() { info!("------------------------- Test Setup -------------------------"); let nmb_reward_cycles = 2; let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(200); signer_test.boot_to_epoch_3(); let curr_reward_cycle = signer_test.get_current_reward_cycle(); @@ -381,3 +384,120 @@ fn mine_2_nakamoto_reward_cycles() { assert_eq!(current_burnchain_height, final_reward_cycle_height_boundary); signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test checks the behavior at the end of a tenure. Specifically: +/// - The miner will broadcast the last block of the tenure, even if the signing is +/// completed after the next burn block arrives +/// - The signers will not sign a block that arrives after the next burn block, but +/// will finish a signing process that was in progress when the next burn block arrived +fn end_of_tenure() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + // signer_test.mine_and_verify_confirmed_naka_block(Duration::from_secs(30), num_signers); + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + + TEST_VALIDATE_STALL.lock().unwrap().replace(true); + + let proposals_before = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal"); + loop { + let blocks_proposed = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + if blocks_proposed > proposals_before { + break; + } + std::thread::sleep(Duration::from_millis(100)); + } + + info!("Block proposed, verifying that it is not processed"); + + // Wait 10 seconds and verify that the block has not been processed + std::thread::sleep(Duration::from_secs(10)); + assert_eq!( + signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst), + blocks_before + ); + + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + + info!("Triggering a new block to be mined"); + + // Trigger the next block to be mined and commit submitted + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 10, + || { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!("Disabling the stall and waiting for the blocks to be processed"); + // Disable the stall and wait for the block to be processed + TEST_VALIDATE_STALL.lock().unwrap().replace(false); + loop { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + if blocks_mined > blocks_before + 1 { + break; + } + std::thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info(&signer_test.running_nodes.conf); + assert_eq!(info.stacks_tip_height, 30); + + signer_test.shutdown(); +} diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index eef3193fbb..ae5696e4ea 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -485,7 +485,7 @@ fn dkg() { info!("------------------------- Test Setup -------------------------"); let timeout = Duration::from_secs(200); - let mut signer_test = SignerTest::new(10); + let mut signer_test = SignerTest::new(10, vec![]); info!("Boot to epoch 3.0 reward calculation..."); boot_to_epoch_3_reward_set( &signer_test.running_nodes.conf, @@ -594,7 +594,7 @@ fn sign_request_rejected() { block2.header.tx_merkle_root = tx_merkle_root2; let timeout = Duration::from_secs(200); - let mut signer_test: SignerTest = SignerTest::new(10); + let mut signer_test: SignerTest = SignerTest::new(10, vec![]); let _key = signer_test.boot_to_epoch_3(timeout); info!("------------------------- Test Sign -------------------------"); @@ -689,7 +689,7 @@ fn delayed_dkg() { info!("------------------------- Test Setup -------------------------"); let timeout = Duration::from_secs(200); let num_signers = 3; - let mut signer_test = SignerTest::new(num_signers); + let mut signer_test = SignerTest::new(num_signers, vec![]); boot_to_epoch_3_reward_set_calculation_boundary( &signer_test.running_nodes.conf, &signer_test.running_nodes.blocks_processed, @@ -881,7 +881,7 @@ fn block_proposal() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(30); let short_timeout = Duration::from_secs(30); @@ -940,7 +940,7 @@ fn mine_2_nakamoto_reward_cycles() { info!("------------------------- Test Setup -------------------------"); let nmb_reward_cycles = 2; - let mut signer_test: SignerTest = SignerTest::new(5); + let mut signer_test: SignerTest = SignerTest::new(5, vec![]); let timeout = Duration::from_secs(200); let first_dkg = signer_test.boot_to_epoch_3(timeout); let curr_reward_cycle = signer_test.get_current_reward_cycle(); @@ -1015,7 +1015,7 @@ fn filter_bad_transactions() { info!("------------------------- Test Setup -------------------------"); // Advance to the prepare phase of a post epoch 3.0 reward cycle to force signers to look at the next signer transactions to compare against a proposed block - let mut signer_test: SignerTest = SignerTest::new(5); + let mut signer_test: SignerTest = SignerTest::new(5, vec![]); let timeout = Duration::from_secs(200); let current_signers_dkg = signer_test.boot_to_epoch_3(timeout); let next_signers_dkg = signer_test @@ -1103,7 +1103,7 @@ fn sign_after_signer_reboot() { info!("------------------------- Test Setup -------------------------"); let num_signers = 3; - let mut signer_test: SignerTest = SignerTest::new(num_signers); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(30); From 38c491cfd59ac55cd9d036fdc303d28f3c99d8fb Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 26 Jun 2024 21:57:19 +0300 Subject: [PATCH 0364/1400] add skip for timeout mutants - pr 4877 related --- stackslib/src/net/unsolicited.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index e444a4f633..34313827ff 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -705,6 +705,7 @@ impl PeerNetwork { } } + #[cfg_attr(test, mutants::skip)] /// Check the signature of a NakamotoBlock against its sortition's reward cycle. /// The reward cycle must be recent. pub(crate) fn check_nakamoto_block_signer_signature( @@ -793,6 +794,7 @@ impl PeerNetwork { return (Some(reward_set_sn_rc), can_process); } + #[cfg_attr(test, mutants::skip)] /// Determine if an unsolicited NakamotoBlockData message contains data we can potentially /// buffer. Returns whether or not the block can be buffered. pub(crate) fn is_nakamoto_block_bufferable( @@ -867,6 +869,7 @@ impl PeerNetwork { to_buffer } + #[cfg_attr(test, mutants::skip)] /// Handle an unsolicited NakamotoBlocksData message. /// /// Unlike Stacks epoch 2.x blocks, no change to the remote peer's inventory will take place. @@ -896,7 +899,8 @@ impl PeerNetwork { nakamoto_blocks, ) } - + + #[cfg_attr(test, mutants::skip)] /// Handle an unsolicited message, with either the intention of just processing it (in which /// case, `buffer` will be `false`), or with the intention of not only processing it, but also /// determining if it can be bufferred and retried later (in which case, `buffer` will be @@ -998,6 +1002,7 @@ impl PeerNetwork { } } + #[cfg_attr(test, mutants::skip)] /// Handle unsolicited messages propagated up to us from our ongoing ConversationP2Ps. /// Return messages that we couldn't handle here, but key them by neighbor, not event, so the /// relayer can do something useful with them. From e6d0177f2b8971a0c156b99ab0660bce9627ea0f Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 27 Jun 2024 16:42:30 +0300 Subject: [PATCH 0365/1400] fix cargo format --- stackslib/src/net/unsolicited.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 34313827ff..53b323862e 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -899,7 +899,7 @@ impl PeerNetwork { nakamoto_blocks, ) } - + #[cfg_attr(test, mutants::skip)] /// Handle an unsolicited message, with either the intention of just processing it (in which /// case, `buffer` will be `false`), or with the intention of not only processing it, but also From 5f73b83b9e47cc685aca1e4c465b60b27f3e178b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 27 Jun 2024 16:27:28 -0400 Subject: [PATCH 0366/1400] First pass focusing on warn and info level logging. Couple debugs Signed-off-by: Jacinta Ferrant --- stackslib/src/burnchains/affirmation.rs | 15 +- stackslib/src/burnchains/burnchain.rs | 35 ++- stackslib/src/burnchains/db.rs | 16 +- .../src/chainstate/burn/db/processing.rs | 6 +- stackslib/src/chainstate/burn/db/sortdb.rs | 22 +- stackslib/src/chainstate/burn/sortition.rs | 13 +- stackslib/src/chainstate/coordinator/mod.rs | 119 ++++---- .../chainstate/nakamoto/coordinator/mod.rs | 26 +- stackslib/src/chainstate/nakamoto/miner.rs | 16 +- stackslib/src/chainstate/nakamoto/mod.rs | 258 +++++++++++++----- stackslib/src/chainstate/nakamoto/tenure.rs | 12 +- stackslib/src/chainstate/stacks/db/blocks.rs | 64 +++-- stackslib/src/chainstate/stacks/miner.rs | 2 +- stackslib/src/net/download/epoch2x.rs | 16 +- .../stacks-node/src/nakamoto_node/miner.rs | 7 +- .../stacks-node/src/nakamoto_node/relayer.rs | 5 +- .../src/nakamoto_node/sign_coordinator.rs | 7 +- testnet/stacks-node/src/node.rs | 2 +- 18 files changed, 428 insertions(+), 213 deletions(-) diff --git a/stackslib/src/burnchains/affirmation.rs b/stackslib/src/burnchains/affirmation.rs index 1f43a7cd7a..fc7398c9ff 100644 --- a/stackslib/src/burnchains/affirmation.rs +++ b/stackslib/src/burnchains/affirmation.rs @@ -974,7 +974,8 @@ pub fn find_heaviest_block_commit( // found debug!( "PoX anchor block-commit {},{},{} has {} burnt, {} confs", - &opdata.txid, opdata.block_height, opdata.vtxindex, most_burnt, most_confs + &opdata.txid, opdata.block_height, opdata.vtxindex, most_burnt, most_confs; + "stacks_block_hash" => opdata.block_header_hash ); // sanity check -- there should be exactly as many confirmations on the suspected @@ -996,7 +997,9 @@ pub fn find_heaviest_block_commit( if *op_ancestor_height == ancestor_block && *op_ancestor_vtxindex == ancestor_vtxindex { - debug!("Block-commit {},{} descends from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, op_ancestor_height, op_ancestor_vtxindex); + debug!("Block-commit {},{} descends from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, op_ancestor_height, op_ancestor_vtxindex; + "stacks_block_hash" => opdata.block_header_hash + ); block_descendancy.push(true); if !found_conf { conf_count += 1; @@ -1004,11 +1007,15 @@ pub fn find_heaviest_block_commit( } burn_count += opdata.burn_fee; } else { - debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex); + debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex; + "stacks_block_hash" => opdata.block_header_hash + ); block_descendancy.push(false); } } else { - debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex); + debug!("Block-commit {},{} does NOT descend from likely PoX anchor block {},{}", opdata.block_height, opdata.vtxindex, ancestor_block, ancestor_vtxindex; + "stacks_block_hash" => opdata.block_header_hash + ); block_descendancy.push(false); } } diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 348a472a6f..4002c253ae 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -249,8 +249,11 @@ impl BurnchainStateTransition { for blocks_back in 0..(epoch_id.mining_commitment_window() - 1) { if parent_snapshot.block_height < (blocks_back as u64) { debug!("Mining commitment window shortened because block height is less than window size"; - "block_height" => %parent_snapshot.block_height, - "window_size" => %epoch_id.mining_commitment_window()); + "block_height" => %parent_snapshot.block_height, + "window_size" => %epoch_id.mining_commitment_window(), + "burn_block_hash" => %parent_snapshot.burn_header_hash, + "consensus_hash" => %parent_snapshot.consensus_hash + ); break; } let block_height = parent_snapshot.block_height - (blocks_back as u64); @@ -275,13 +278,17 @@ impl BurnchainStateTransition { "Block {} is in a reward phase with PoX. Miner commit window is {}: {:?}", parent_snapshot.block_height + 1, windowed_block_commits.len(), - &windowed_block_commits + &windowed_block_commits; + "burn_block_hash" => %parent_snapshot.burn_header_hash, + "consensus_hash" => %parent_snapshot.consensus_hash ); } else { // PoX reward-phase is not active, or we're starting a new epoch debug!( "Block {} is in a prepare phase, in the post-PoX sunset, or in an epoch transition, so no windowing will take place", - parent_snapshot.block_height + 1 + parent_snapshot.block_height + 1; + "burn_block_hash" => %parent_snapshot.burn_header_hash, + "consensus_hash" => %parent_snapshot.consensus_hash ); assert_eq!(windowed_block_commits.len(), 1); @@ -342,7 +349,8 @@ impl BurnchainStateTransition { for op in all_block_commits.values() { warn!( "REJECTED({}) block commit {} at {},{}: Committed to an already-consumed VRF key", - op.block_height, &op.txid, op.block_height, op.vtxindex + op.block_height, &op.txid, op.block_height, op.vtxindex; + "stacks_block_hash" => %op.block_header_hash ); } @@ -1001,7 +1009,8 @@ impl Burnchain { // duplicate warn!( "REJECTED({}) leader key register {} at {},{}: Duplicate VRF key", - data.block_height, &data.txid, data.block_height, data.vtxindex + data.block_height, &data.txid, data.block_height, data.vtxindex; + "consensus_hash" => %data.consensus_hash ); false } else { @@ -1071,7 +1080,7 @@ impl Burnchain { "prev_reward_cycle" => %prev_reward_cycle, "this_reward_cycle" => %this_reward_cycle, "block_height" => %block_height, - "cycle-length" => %burnchain.pox_constants.reward_cycle_length + "cycle_length" => %burnchain.pox_constants.reward_cycle_length, ); update_pox_affirmation_maps(burnchain_db, indexer, prev_reward_cycle, burnchain)?; } @@ -1312,7 +1321,8 @@ impl Burnchain { "Parsed block {} (epoch {}) in {}ms", burnchain_block.block_height(), cur_epoch.epoch_id, - parse_end.saturating_sub(parse_start) + parse_end.saturating_sub(parse_start); + "burn_block_hash" => %burnchain_block.block_hash() ); db_send @@ -1350,7 +1360,8 @@ impl Burnchain { debug!( "Inserted block {} in {}ms", burnchain_block.block_height(), - insert_end.saturating_sub(insert_start) + insert_end.saturating_sub(insert_start); + "burn_block_hash" => %burnchain_block.block_hash() ); } Ok(last_processed) @@ -1647,7 +1658,8 @@ impl Burnchain { "Parsed block {} (in epoch {}) in {}ms", burnchain_block.block_height(), cur_epoch.epoch_id, - parse_end.saturating_sub(parse_start) + parse_end.saturating_sub(parse_start); + "burn_block_hash" => %burnchain_block.block_hash() ); db_send @@ -1699,7 +1711,8 @@ impl Burnchain { debug!( "Inserted block {} in {}ms", burnchain_block.block_height(), - insert_end.saturating_sub(insert_start) + insert_end.saturating_sub(insert_start); + "burn_block_hash" => %burnchain_block.block_hash() ); } Ok(last_processed) diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 79e34b3539..d105115f36 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -404,12 +404,12 @@ impl<'a> BurnchainDBTransaction<'a> { match self.sql_tx.execute(sql, args) { Ok(_) => { info!( - "Set anchor block for reward cycle {} to {},{},{},{}", - target_reward_cycle, - &block_commit.burn_header_hash, - &block_commit.txid, - &block_commit.block_height, - &block_commit.vtxindex + "Setting anchor block for reward cycle {target_reward_cycle}."; + "burn_block_hash" => %block_commit.burn_header_hash, + "stacks_block_hash" => %block_commit.block_header_hash, + "block_commit_txid" => %block_commit.txid, + "block_commit_height" => block_commit.block_height, + "block_commit_vtxindex" => block_commit.vtxindex, ); Ok(()) } @@ -1419,7 +1419,9 @@ impl BurnchainDB { ) -> Result, BurnchainError> { let header = block.header(); debug!("Storing new burnchain block"; - "burn_header_hash" => %header.block_hash.to_string()); + "burn_block_hash" => %header.block_hash, + "block_height" => header.block_height + ); let mut blockstack_ops = self.get_blockstack_transactions(burnchain, indexer, block, &header, epoch_id); apply_blockstack_txs_safety_checks(header.block_height, &mut blockstack_ops); diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index b439a9512f..70f170a60c 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -48,7 +48,8 @@ impl<'a> SortitionHandleTx<'a> { op.check(burnchain, self).map_err(|e| { warn!( "REJECTED({}) leader key register {} at {},{}: {:?}", - op.block_height, &op.txid, op.block_height, op.vtxindex, &e + op.block_height, &op.txid, op.block_height, op.vtxindex, &e; + "consensus_hash" => %op.consensus_hash ); BurnchainError::OpError(e) }) @@ -63,7 +64,8 @@ impl<'a> SortitionHandleTx<'a> { op.vtxindex, op.parent_block_ptr, op.parent_vtxindex, - &e + &e; + "stacks_block_hash" => %op.block_header_hash ); BurnchainError::OpError(e) }) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 14011a2800..3b42a0af45 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -1618,8 +1618,10 @@ impl<'a> SortitionHandleTx<'a> { .map(|ix| { let recipient = reward_set.rewarded_addresses[ix as usize].clone(); info!("PoX recipient chosen"; - "recipient" => recipient.to_burnchain_repr(), - "block_height" => block_height); + "recipient" => recipient.to_burnchain_repr(), + "block_height" => block_height, + "anchor_stacks_block_hash" => &anchor_block, + ); (recipient, u16::try_from(ix).unwrap()) }) .collect(), @@ -1651,8 +1653,10 @@ impl<'a> SortitionHandleTx<'a> { let ix = u16::try_from(ix).unwrap(); let recipient = self.get_reward_set_entry(ix)?; info!("PoX recipient chosen"; - "recipient" => recipient.to_burnchain_repr(), - "block_height" => block_height); + "recipient" => recipient.to_burnchain_repr(), + "block_height" => block_height, + "stacks_block_hash" => %anchor_block + ); recipients.push((recipient, ix)); } Ok(Some(RewardSetInfo { @@ -5532,7 +5536,9 @@ impl<'a> SortitionHandleTx<'a> { BlockstackOperationType::LeaderKeyRegister(ref op) => { info!( "ACCEPTED({}) leader key register {} at {},{}", - op.block_height, &op.txid, op.block_height, op.vtxindex + op.block_height, &op.txid, op.block_height, op.vtxindex; + "consensus_hash" => %op.consensus_hash, + "burn_header_hash" => %op.burn_header_hash ); self.insert_leader_key(op, sort_id) } @@ -5540,7 +5546,8 @@ impl<'a> SortitionHandleTx<'a> { info!( "ACCEPTED({}) leader block commit {} at {},{}", op.block_height, &op.txid, op.block_height, op.vtxindex; - "apparent_sender" => %op.apparent_sender + "apparent_sender" => %op.apparent_sender, + "stacks_block_hash" => %op.block_header_hash ); self.insert_block_commit(op, sort_id) } @@ -5561,7 +5568,8 @@ impl<'a> SortitionHandleTx<'a> { BlockstackOperationType::PreStx(ref op) => { info!( "ACCEPTED({}) pre stack stx op {} at {},{}", - op.block_height, &op.txid, op.block_height, op.vtxindex + op.block_height, &op.txid, op.block_height, op.vtxindex; + "burn_header_hash" => %op.burn_header_hash ); // no need to store this op in the sortition db. Ok(()) diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs index 0c5e020a64..b0221f1439 100644 --- a/stackslib/src/chainstate/burn/sortition.rs +++ b/stackslib/src/chainstate/burn/sortition.rs @@ -680,9 +680,10 @@ impl BlockSnapshot { } if let Some(reject_winner_reason) = reject_winner_reason { - info!("SORTITION({}): WINNER REJECTED: {}", block_height, &reject_winner_reason; + info!("SORTITION({block_height}): WINNER REJECTED: {reject_winner_reason:?}"; "txid" => %winning_block.txid, - "block_hash" => %winning_block.block_header_hash); + "stacks_block_hash" => %winning_block.block_header_hash, + "burn_block_hash" => %winning_block.burn_header_hash); // N.B. can't use `make_snapshot_no_sortition()` helper here because then `sort_tx` // would be mutably borrowed twice. @@ -714,10 +715,10 @@ impl BlockSnapshot { my_pox_id, )?; - info!( - "SORTITION({}): WINNER IS {:?} (from {:?})", - block_height, &winning_block.block_header_hash, &winning_block.txid - ); + info!("SORTITION({block_height}): WINNER SELECTED"; + "txid" => %winning_block.txid, + "stacks_block_hash" => %winning_block.block_header_hash, + "burn_block_hash" => %winning_block.burn_header_hash); let miner_pk_hash = sort_tx .get_leader_key_at( diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index b2a1c8679f..d9bf2713cd 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -789,46 +789,50 @@ pub fn get_reward_cycle_info( ic.get_chosen_pox_anchor(burnchain_db_conn_opt, &parent_bhh, &burnchain.pox_constants) }?; - let reward_cycle_info = if let Some((consensus_hash, stacks_block_hash, txid)) = - reward_cycle_info - { - let anchor_block_known = StacksChainState::is_stacks_block_processed( - &chain_state.db(), - &consensus_hash, - &stacks_block_hash, - )?; - info!( - "PoX Anchor block selected"; - "cycle" => reward_cycle, - "consensus_hash" => %consensus_hash, - "block_hash" => %stacks_block_hash, - "block_id" => %StacksBlockId::new(&consensus_hash, &stacks_block_hash), - "is_known" => anchor_block_known, - "commit_txid" => %txid, - "cycle_burn_height" => burn_height - ); - let anchor_status = if anchor_block_known { - let block_id = StacksBlockId::new(&consensus_hash, &stacks_block_hash); - let reward_set = - provider.get_reward_set(burn_height, chain_state, burnchain, sort_db, &block_id)?; - PoxAnchorBlockStatus::SelectedAndKnown(stacks_block_hash, txid, reward_set) + let reward_cycle_info = + if let Some((consensus_hash, stacks_block_hash, txid)) = reward_cycle_info { + let anchor_block_known = StacksChainState::is_stacks_block_processed( + &chain_state.db(), + &consensus_hash, + &stacks_block_hash, + )?; + let stacks_block_id = StacksBlockId::new(&consensus_hash, &stacks_block_hash); + info!( + "PoX Anchor block selected"; + "cycle" => reward_cycle, + "consensus_hash" => %consensus_hash, + "stacks_block_hash" => %stacks_block_hash, + "stacks_block_id" => %stacks_block_id, + "is_known" => anchor_block_known, + "commit_txid" => %txid, + "cycle_burn_height" => burn_height + ); + let anchor_status = if anchor_block_known { + let reward_set = provider.get_reward_set( + burn_height, + chain_state, + burnchain, + sort_db, + &stacks_block_id, + )?; + PoxAnchorBlockStatus::SelectedAndKnown(stacks_block_hash, txid, reward_set) + } else { + PoxAnchorBlockStatus::SelectedAndUnknown(stacks_block_hash, txid) + }; + RewardCycleInfo { + reward_cycle, + anchor_status, + } } else { - PoxAnchorBlockStatus::SelectedAndUnknown(stacks_block_hash, txid) + info!( + "PoX anchor block NOT chosen for reward cycle {} at burn height {}", + reward_cycle, burn_height + ); + RewardCycleInfo { + reward_cycle, + anchor_status: PoxAnchorBlockStatus::NotSelected, + } }; - RewardCycleInfo { - reward_cycle, - anchor_status, - } - } else { - info!( - "PoX anchor block NOT chosen for reward cycle {} at burn height {}", - reward_cycle, burn_height - ); - RewardCycleInfo { - reward_cycle, - anchor_status: PoxAnchorBlockStatus::NotSelected, - } - }; // cache the reward cycle info as of the first sortition in the prepare phase, so that // the first Nakamoto epoch can go find it later. Subsequent Nakamoto epochs will use the @@ -2348,13 +2352,13 @@ impl< if self.config.require_affirmed_anchor_blocks { // missing this anchor block -- cannot proceed until we have it info!( - "Burnchain block processing stops due to missing affirmed anchor block {}", + "Burnchain block processing stops due to missing affirmed anchor stacks block hash {}", &missing_anchor_block ); return Ok(Some(missing_anchor_block)); } else { // this and descendant sortitions might already exist - info!("Burnchain block processing will continue in spite of missing affirmed anchor block {}", &missing_anchor_block); + info!("Burnchain block processing will continue in spite of missing affirmed anchor stacks block hash {}", &missing_anchor_block); } } } @@ -2609,7 +2613,7 @@ impl< self.check_missing_anchor_block(&header, &canonical_affirmation_map, rc_info)? { info!( - "Burnchain block processing stops due to missing affirmed anchor block {}", + "Burnchain block processing stops due to missing affirmed anchor stacks block hash {}", &missing_anchor_block ); return Ok(Some(missing_anchor_block)); @@ -2797,7 +2801,7 @@ impl< self.process_new_pox_anchor(pox_anchor, already_processed_burn_blocks)? { info!( - "Burnchain block processing stops due to missing affirmed anchor block {}", + "Burnchain block processing stops due to missing affirmed anchor stacks block hash {}", &expected_anchor_block_hash ); return Ok(Some(expected_anchor_block_hash)); @@ -2953,6 +2957,9 @@ impl< "attachments_count" => attachments_instances.len(), "index_block_hash" => %block_receipt.header.index_block_hash(), "stacks_height" => block_receipt.header.stacks_block_height, + "burn_height" => block_receipt.header.burn_header_height, + "burn_block_hash" => %block_receipt.header.burn_header_hash, + "consensus_hash" => %block_receipt.header.consensus_hash, ); if let Some(atlas_db) = atlas_db { for new_attachment in attachments_instances.into_iter() { @@ -3133,12 +3140,29 @@ impl< == &AffirmationMapEntry::PoxAnchorBlockPresent { // yup, we're expecting this - debug!("Discovered an old anchor block: {} (height {}, rc {}) with heaviest affirmation map {}", pox_anchor, commit.block_height, reward_cycle, &heaviest_am); - info!("Discovered an old anchor block: {}", pox_anchor); + debug!("Discovered an old anchor block: {}", pox_anchor; + "height" => commit.block_height, + "burn_block_hash" => %commit.burn_header_hash, + "stacks_block_hash" => %commit.block_header_hash, + "reward_cycle" => reward_cycle, + "heaviest_affirmation_map" => %heaviest_am + ); + info!("Discovered an old anchor block: {}", pox_anchor; + "height" => commit.block_height, + "burn_block_hash" => %commit.burn_header_hash, + "stacks_block_hash" => %commit.block_header_hash, + "reward_cycle" => reward_cycle + ); return Ok(Some(pox_anchor.clone())); } else { // nope -- can ignore - debug!("Discovered unaffirmed old anchor block: {} (height {}, rc {}) with heaviest affirmation map {}", pox_anchor, commit.block_height, reward_cycle, &heaviest_am); + debug!("Discovered unaffirmed old anchor block: {}", pox_anchor; + "height" => commit.block_height, + "burn_block_hash" => %commit.burn_header_hash, + "stacks_block_hash" => %commit.block_header_hash, + "reward_cycle" => reward_cycle, + "heaviest_affirmation_map" => %heaviest_am + ); return Ok(None); } } else { @@ -3417,7 +3441,10 @@ impl< info!( "Reprocessing with anchor block information, starting at block height: {}", - prep_end.block_height + prep_end.block_height; + "consensus_hash" => %prep_end.consensus_hash, + "burn_block_hash" => %prep_end.burn_header_hash, + "stacks_block_height" => prep_end.stacks_block_height ); let mut pox_id = self.sortition_db.get_pox_id(sortition_id)?; pox_id.extend_with_present_block(); diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 1443fe4350..9793b062f8 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -225,6 +225,9 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { debug_log, "PoX reward set loaded from written block state"; "reward_set_block_id" => %reward_set_block.index_block_hash(), + "burn_block_hash" => %reward_set_block.burn_header_hash, + "stacks_block_height" => reward_set_block.stacks_block_height, + "burn_header_height" => reward_set_block.burn_header_height, ); if reward_set.signers.is_none() { @@ -338,8 +341,8 @@ pub fn get_nakamoto_reward_cycle_info( "block_id" => %block_id, "consensus_hash" => %anchor_block_header.consensus_hash, "burn_height" => anchor_block_header.burn_header_height, - "anchor_chain_tip" => %anchor_block_header.index_block_hash(), - "anchor_chain_tip_height" => %anchor_block_header.burn_header_height, + "stacks_block_height" => anchor_block_header.stacks_block_height, + "burn_block_hash" => %anchor_block_header.burn_header_hash ); return Ok(Some(rc_info)); @@ -540,7 +543,10 @@ pub fn load_nakamoto_reward_set( )?; debug!( "Stacks anchor block (ch {}) {} cycle {} is processed", - &anchor_block_header.consensus_hash, &block_id, reward_cycle + &anchor_block_header.consensus_hash, &block_id, reward_cycle; + "anchor.consensus_hash" => %anchor_block_header.consensus_hash, + "anchor.burn_header_hash" => %anchor_block_header.burn_header_hash, + "anchor.burn_block_height" => anchor_block_header.burn_header_height ); let anchor_status = PoxAnchorBlockStatus::SelectedAndKnown(stacks_block_hash, txid, reward_set); @@ -855,9 +861,11 @@ impl< .expect("Could not find a stacks epoch."); if let Err(e) = estimator.notify_block(&block_receipt, &stacks_epoch.block_limit) { warn!("FeeEstimator failed to process block receipt"; - "stacks_block" => %block_hash, - "stacks_height" => %block_receipt.header.stacks_block_height, - "error" => %e); + "stacks_block_hash" => %block_hash, + "stacks_block_height" => %block_receipt.header.stacks_block_height, + "burn_block_hash" => %block_receipt.header.burn_header_hash, + "error" => %e + ); } } @@ -1030,17 +1038,13 @@ impl< .block_height_to_reward_cycle(header.block_height) .unwrap_or(u64::MAX); - debug!( - "Process burn block {} reward cycle {} in {}", - header.block_height, reward_cycle, &self.burnchain.working_dir, - ); - info!( "Process burn block {} reward cycle {} in {}", header.block_height, reward_cycle, &self.burnchain.working_dir; "in_prepare_phase" => self.burnchain.is_in_prepare_phase(header.block_height), "is_rc_start" => self.burnchain.is_reward_cycle_start(header.block_height), "is_prior_in_prepare_phase" => self.burnchain.is_in_prepare_phase(header.block_height.saturating_sub(2)), + "burn_block_hash" => %header.block_hash, ); // calculate paid rewards during this burnchain block if we announce diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 74cc9a2d72..3c4990de4d 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -236,7 +236,10 @@ impl NakamotoBlockBuilder { SortitionDB::get_block_snapshot_consensus(&burn_dbconn, &self.header.consensus_hash)? else { warn!("Could not find sortition snapshot for burn block that elected the miner"; - "consensus_hash" => %self.header.consensus_hash); + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(Error::NoSuchBlockError); }; let Some(tenure_block_commit) = SortitionDB::get_block_commit( @@ -246,8 +249,11 @@ impl NakamotoBlockBuilder { )? else { warn!("Could not find winning block commit for burn block that elected the miner"; - "consensus_hash" => %self.header.consensus_hash, - "winning_txid" => %tenure_election_sn.winning_block_txid); + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id(), + "winning_txid" => %tenure_election_sn.winning_block_txid + ); return Err(Error::NoSuchBlockError); }; @@ -560,8 +566,8 @@ impl NakamotoBlockBuilder { info!( "Miner: mined Nakamoto block"; - "block_hash" => %block.header.block_hash(), - "block_id" => %block.header.block_id(), + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), "height" => block.header.chain_length, "tx_count" => block.txs.len(), "parent_block_id" => %block.header.parent_block_id, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index bfa92f3198..2aa2d29d62 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -851,13 +851,21 @@ impl NakamotoBlock { } let Some(tc_payload) = self.try_get_tenure_change_payload() else { - warn!("Invalid block -- tx at index 0 is not a tenure tx",); + warn!("Invalid block -- tx at index 0 is not a tenure tx"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); }; if tc_payload.cause != TenureChangeCause::Extended { // not a tenure-extend, and can't be valid since all other tenure-change types require // a coinbase (which is not present) - warn!("Invalid block -- tenure tx cause is not an extension"); + warn!("Invalid block -- tenure tx cause is not an extension"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); } @@ -866,7 +874,10 @@ impl NakamotoBlock { warn!( "Invalid block -- discontiguous"; "previous_tenure_end" => %tc_payload.previous_tenure_end, - "parent_block_id" => %self.header.parent_block_id + "parent_block_id" => %self.header.parent_block_id, + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(()); } @@ -880,6 +891,8 @@ impl NakamotoBlock { "tenure_consensus_hash" => %tc_payload.tenure_consensus_hash, "prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(()); } @@ -935,14 +948,21 @@ impl NakamotoBlock { warn!( "Invalid block -- have {} coinbases and {} tenure txs", coinbase_positions.len(), - tenure_change_positions.len() + tenure_change_positions.len(); + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(()); } if coinbase_positions.len() == 1 && tenure_change_positions.len() == 0 { // coinbase unaccompanied by a tenure change - warn!("Invalid block -- have coinbase without tenure change"); + warn!("Invalid block -- have coinbase without tenure change"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); } @@ -953,7 +973,10 @@ impl NakamotoBlock { // wrong position warn!( "Invalid block -- tenure change positions = {:?}, expected [0]", - &tenure_change_positions, + &tenure_change_positions; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(()); } @@ -962,13 +985,21 @@ impl NakamotoBlock { let TransactionPayload::TenureChange(tc_payload) = &self.txs[0].payload else { // this transaction is not a tenure change // (should be unreachable) - warn!("Invalid block -- first transaction is not a tenure change"); + warn!("Invalid block -- first transaction is not a tenure change"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); }; if tc_payload.cause.expects_sortition() { // not valid - warn!("Invalid block -- no coinbase, but tenure change expects sortition"); + warn!("Invalid block -- no coinbase, but tenure change expects sortition"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); } @@ -982,24 +1013,39 @@ impl NakamotoBlock { if coinbase_positions[0] != coinbase_idx && tenure_change_positions[0] != tc_idx { // invalid -- expect exactly one sortition-induced tenure change and exactly one coinbase expected, // and the tenure change must be the first transaction and the coinbase must be the second transaction - warn!("Invalid block -- coinbase and/or tenure change txs are in the wrong position -- ({:?}, {:?}) != [{}], [{}]", &coinbase_positions, &tenure_change_positions, coinbase_idx, tc_idx); + warn!("Invalid block -- coinbase and/or tenure change txs are in the wrong position -- ({:?}, {:?}) != [{}], [{}]", &coinbase_positions, &tenure_change_positions, coinbase_idx, tc_idx; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); } let Some(tc_payload) = self.try_get_tenure_change_payload() else { - warn!("Invalid block -- tx at index 0 is not a tenure tx",); + warn!("Invalid block -- tx at index 0 is not a tenure tx"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); }; if !tc_payload.cause.expects_sortition() { // the only tenure change allowed in a block with a coinbase is a sortition-triggered // tenure change - warn!("Invalid block -- tenure change does not expect a sortition"); + warn!("Invalid block -- tenure change does not expect a sortition"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); } if tc_payload.previous_tenure_end != self.header.parent_block_id { // discontinuous warn!( "Invalid block -- discontiguous -- {} != {}", - &tc_payload.previous_tenure_end, &self.header.parent_block_id + &tc_payload.previous_tenure_end, &self.header.parent_block_id; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(()); } @@ -1010,13 +1056,20 @@ impl NakamotoBlock { // this transaction is not a coinbase (but this should be unreachable) warn!( "Invalid block -- tx index {} is not a coinbase", - coinbase_idx + coinbase_idx; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(()); }; if vrf_proof_opt.is_none() { // not a Nakamoto coinbase - warn!("Invalid block -- no VRF proof in coinbase"); + warn!("Invalid block -- no VRF proof in coinbase"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return Err(()); } @@ -1041,7 +1094,9 @@ impl NakamotoBlock { )?; if !block_commit.new_seed.is_from_proof(&parent_vrf_proof) { warn!("Invalid Nakamoto block-commit: seed does not match parent VRF proof"; - "block_id" => %self.block_id(), + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.block_id(), "commit_seed" => %block_commit.new_seed, "proof_seed" => %VRFSeed::from_proof(&parent_vrf_proof), "parent_vrf_proof" => %parent_vrf_proof.to_hex(), @@ -1063,8 +1118,9 @@ impl NakamotoBlock { let recovered_miner_pubk = self.header.recover_miner_pk().ok_or_else(|| { warn!( "Nakamoto Stacks block downloaded with unrecoverable miner public key"; - "block_hash" => %self.header.block_hash(), - "block_id" => %self.header.block_id(), + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return ChainstateError::InvalidStacksBlock("Unrecoverable miner public key".into()); })?; @@ -1082,8 +1138,9 @@ impl NakamotoBlock { if &recovered_miner_hash160 != miner_pubkey_hash160 { warn!( "Nakamoto Stacks block signature mismatch: {recovered_miner_hash160} != {miner_pubkey_hash160} from leader-key"; - "block_hash" => %self.header.block_hash(), - "block_id" => %self.header.block_id(), + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(ChainstateError::InvalidStacksBlock( "Invalid miner signature".into(), @@ -1110,8 +1167,9 @@ impl NakamotoBlock { if tc_payload.pubkey_hash != recovered_miner_hash160 { warn!( "Invalid tenure-change transaction -- bad miner pubkey hash160"; - "block_hash" => %self.header.block_hash(), - "block_id" => %self.header.block_id(), + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id(), "pubkey_hash" => %tc_payload.pubkey_hash, "recovered_miner_hash160" => %recovered_miner_hash160 ); @@ -1125,9 +1183,9 @@ impl NakamotoBlock { if tc_payload.tenure_consensus_hash != self.header.consensus_hash { warn!( "Invalid tenure-change transaction -- bad consensus hash"; - "block_hash" => %self.header.block_hash(), - "block_id" => %self.header.block_id(), "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id(), "tc_payload.tenure_consensus_hash" => %tc_payload.tenure_consensus_hash ); return Err(ChainstateError::InvalidStacksBlock( @@ -1173,9 +1231,11 @@ impl NakamotoBlock { if !valid { warn!("Invalid Nakamoto block: leader VRF key did not produce a valid proof"; - "block_id" => %self.block_id(), - "leader_public_key" => %leader_vrf_key.to_hex(), - "sortition_hash" => %sortition_hash + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id(), + "leader_public_key" => %leader_vrf_key.to_hex(), + "sortition_hash" => %sortition_hash ); return Err(ChainstateError::InvalidStacksBlock( "Invalid Nakamoto block: leader VRF key did not produce a valid proof".into(), @@ -1210,8 +1270,10 @@ impl NakamotoBlock { // this block's consensus hash must match the sortition that selected it if tenure_burn_chain_tip.consensus_hash != self.header.consensus_hash { warn!("Invalid Nakamoto block: consensus hash does not match sortition"; - "consensus_hash" => %self.header.consensus_hash, - "sortition.consensus_hash" => %tenure_burn_chain_tip.consensus_hash + "sortition.consensus_hash" => %tenure_burn_chain_tip.consensus_hash, + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id(), ); return Err(ChainstateError::InvalidStacksBlock( "Invalid Nakamoto block: invalid consensus hash".into(), @@ -1222,8 +1284,11 @@ impl NakamotoBlock { if let Some(expected_burn) = expected_burn { if self.header.burn_spent != expected_burn { warn!("Invalid Nakamoto block header: invalid total burns"; - "header.burn_spent" => self.header.burn_spent, - "expected_burn" => expected_burn, + "header.burn_spent" => self.header.burn_spent, + "expected_burn" => expected_burn, + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() ); return Err(ChainstateError::InvalidStacksBlock( "Invalid Nakamoto block: invalid total burns".into(), @@ -1298,7 +1363,11 @@ impl NakamotoBlock { } } else if valid_tenure_start.is_err() { // bad tenure change - warn!("Not a well-formed tenure-start block"); + warn!("Not a well-formed tenure-start block"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return false; } let valid_tenure_extend = self.is_wellformed_tenure_extend_block(); @@ -1308,7 +1377,11 @@ impl NakamotoBlock { } } else if valid_tenure_extend.is_err() { // bad tenure extend - warn!("Not a well-formed tenure-extend block"); + warn!("Not a well-formed tenure-extend block"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id() + ); return false; } if !StacksBlock::validate_transactions_static_epoch(&self.txs, epoch_id) { @@ -1437,7 +1510,8 @@ impl NakamotoChainState { debug!("Process staging Nakamoto block"; "consensus_hash" => %next_ready_block.header.consensus_hash, - "block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), "burn_block_hash" => %next_ready_block_snapshot.burn_header_hash ); @@ -1463,7 +1537,8 @@ impl NakamotoChainState { "Cannot process Nakamoto block: could not load reward set that elected the block"; "err" => ?e, "consensus_hash" => %next_ready_block.header.consensus_hash, - "block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id, ); ChainstateError::NoSuchBlockError @@ -1477,7 +1552,8 @@ impl NakamotoChainState { // no parent; cannot process yet debug!("Cannot process Nakamoto block: missing parent header"; "consensus_hash" => %next_ready_block.header.consensus_hash, - "block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); return Ok(None); @@ -1494,7 +1570,10 @@ impl NakamotoChainState { let msg = "Discontinuous Nakamoto Stacks block"; warn!("{}", &msg; "child parent_block_id" => %next_ready_block.header.parent_block_id, - "expected parent_block_id" => %parent_block_id + "expected parent_block_id" => %parent_block_id, + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id() ); let staging_block_tx = stacks_chain_state.staging_db_tx_begin()?; staging_block_tx.set_block_orphaned(&block_id)?; @@ -1518,8 +1597,8 @@ impl NakamotoChainState { warn!( "Cannot process Nakamoto block: could not find parent block's burnchain view"; "consensus_hash" => %next_ready_block.header.consensus_hash, - "block_hash" => %next_ready_block.header.block_hash(), - "block_id" => %next_ready_block.block_id(), + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) @@ -1530,19 +1609,19 @@ impl NakamotoChainState { warn!( "Cannot process Nakamoto block: could not find parent block's burnchain view"; "consensus_hash" => %next_ready_block.header.consensus_hash, - "block_hash" => %next_ready_block.header.block_hash(), - "block_id" => %next_ready_block.block_id(), + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; if connected_sort_id != parent_burn_view_sn.sortition_id { warn!( - "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view"; - "consensus_hash" => %next_ready_block.header.consensus_hash, - "block_hash" => %next_ready_block.header.block_hash(), - "block_id" => %next_ready_block.block_id(), - "parent_block_id" => %next_ready_block.header.parent_block_id + "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), + "parent_block_id" => %next_ready_block.header.parent_block_id ); return Err(ChainstateError::InvalidStacksBlock( "Does not connect to burn view of parent block ID".into(), @@ -1555,8 +1634,8 @@ impl NakamotoChainState { warn!( "Cannot process Nakamoto block: parent block does not have a burnchain view and current block has no tenure tx"; "consensus_hash" => %next_ready_block.header.consensus_hash, - "block_hash" => %next_ready_block.header.block_hash(), - "block_id" => %next_ready_block.block_id(), + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) @@ -1574,7 +1653,8 @@ impl NakamotoChainState { warn!( "Cannot process Nakamoto block: failed to find Sortition ID associated with burnchain view"; "consensus_hash" => %next_ready_block.header.consensus_hash, - "block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_hash" => %next_ready_block.header.block_hash(), + "stacks_block_id" => %next_ready_block.header.block_id(), "burn_view_consensus_hash" => %burnchain_view, ); return Ok(None); @@ -1649,7 +1729,8 @@ impl NakamotoChainState { "Failed to append {}/{}: {:?}", &next_ready_block.header.consensus_hash, &next_ready_block.header.block_hash(), - &e + &e; + "stacks_block_id" => %next_ready_block.header.block_id() ); // as a separate transaction, mark this block as processed and orphaned. @@ -1854,7 +1935,7 @@ impl NakamotoChainState { warn!( "Invalid Nakamoto block, could not validate on burnchain"; "consensus_hash" => %consensus_hash, - "block_hash" => %block_hash, + "stacks_block_hash" => %block_hash, "error" => ?e ); @@ -1991,7 +2072,7 @@ impl NakamotoChainState { config.chain_id, ) { warn!("Unacceptable Nakamoto block; will not store"; - "block_id" => %block.block_id(), + "stacks_block_id" => %block.block_id(), "error" => ?e ); return Ok(false); @@ -1999,7 +2080,7 @@ impl NakamotoChainState { if let Err(e) = block.header.verify_signer_signatures(&reward_set) { warn!("Received block, but the signer signatures are invalid"; - "block_id" => %block.block_id(), + "stacks_block_id" => %block.block_id(), "error" => ?e, ); return Err(e); @@ -2270,7 +2351,9 @@ impl NakamotoChainState { .ok_or(ChainstateError::NoSuchBlockError) .map_err(|e| { warn!("Nakamoto block has no parent"; - "block consensus_hash" => %consensus_hash); + "consensus_hash" => %consensus_hash, + "parent_consensus_hash" => %parent_sn.consensus_hash + ); e })?; @@ -2355,7 +2438,11 @@ impl NakamotoChainState { SortitionDB::get_block_snapshot_consensus(sortdb_conn, &block.header.consensus_hash)? .ok_or(ChainstateError::NoSuchBlockError) .map_err(|e| { - warn!("No block-commit for block"; "block_id" => %block.block_id()); + warn!("No block-commit for block"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); e })?; @@ -2363,7 +2450,11 @@ impl NakamotoChainState { get_block_commit_by_txid(sortdb_conn, &sn.sortition_id, &sn.winning_block_txid)? .ok_or(ChainstateError::NoSuchBlockError) .map_err(|e| { - warn!("No block-commit for block"; "block_id" => %block.block_id()); + warn!("No block-commit for block"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); e })?; @@ -2562,7 +2653,7 @@ impl NakamotoChainState { warn!( "Failed to fetch parent block's total tx fees"; "parent_block_id" => %parent_hash, - "block_id" => %index_block_hash, + "stacks_block_id" => %index_block_hash, ); ChainstateError::NoSuchBlockError })? @@ -3118,7 +3209,9 @@ impl NakamotoChainState { || { warn!( "Parent of Nakamoto block is not in block headers DB yet"; - "block_hash" => %block.header.block_hash(), + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), "parent_block_hash" => %parent_block_hash, "parent_block_id" => %parent_block_id ); @@ -3130,17 +3223,21 @@ impl NakamotoChainState { let expected_burn_opt = Self::get_expected_burns(burn_dbconn, chainstate_tx, block) .map_err(|e| { warn!("Unacceptable Nakamoto block: could not load expected burns (unable to find its paired sortition)"; - "block_id" => %block.block_id(), - "parent_block_id" => %block.header.parent_block_id, - "error" => e.to_string(), + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.block_id(), + "parent_block_id" => %block.header.parent_block_id, + "error" => e.to_string(), ); ChainstateError::InvalidStacksBlock("Invalid Nakamoto block: could not find sortition burns".into()) })?; let Some(expected_burn) = expected_burn_opt else { warn!("Unacceptable Nakamoto block: unable to find parent block's burns"; - "block_id" => %block.block_id(), - "parent_block_id" => %block.header.parent_block_id, + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.block_id(), + "parent_block_id" => %block.header.parent_block_id, ); return Err(ChainstateError::InvalidStacksBlock( "Invalid Nakamoto block: could not find sortition burns".into(), @@ -3166,9 +3263,12 @@ impl NakamotoChainState { )? .ok_or_else(|| { warn!("Invalid Nakamoto block: has no block-commit in its sortition"; - "block_id" => %block.header.block_id(), - "sortition_id" => %tenure_block_snapshot.sortition_id, - "block_commit_txid" => %tenure_block_snapshot.winning_block_txid); + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "sortition_id" => %tenure_block_snapshot.sortition_id, + "block_commit_txid" => %tenure_block_snapshot.winning_block_txid + ); ChainstateError::NoSuchBlockError })?; @@ -3181,19 +3281,21 @@ impl NakamotoChainState { Self::get_nakamoto_tenure_start_block_header(chainstate_tx.tx(), &parent_ch)? .ok_or_else(|| { warn!("Invalid Nakamoto block: no start-tenure block for parent"; - "parent_consensus_hash" => %parent_ch, - "block_id" => %block.header.block_id()); - + "parent_consensus_hash" => %parent_ch, + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); ChainstateError::NoSuchBlockError })?; if parent_tenure_start_header.index_block_hash() != tenure_block_commit.last_tenure_id() { warn!("Invalid Nakamoto block: its tenure's block-commit's block ID hash does not match its parent tenure's start block"; - "block_id" => %block.header.block_id(), - "parent_consensus_hash" => %parent_ch, - "parent_tenure_start_block_id" => %parent_tenure_start_header.index_block_hash(), - "block_commit.last_tenure_id" => %tenure_block_commit.last_tenure_id()); + "parent_consensus_hash" => %parent_ch, + "parent_tenure_start_block_id" => %parent_tenure_start_header.index_block_hash(), + "block_commit.last_tenure_id" => %tenure_block_commit.last_tenure_id() + ); return Err(ChainstateError::NoSuchBlockError); } @@ -3670,7 +3772,11 @@ impl StacksMessageCodec for NakamotoBlock { // all transactions are unique if !StacksBlock::validate_transactions_unique(&txs) { - warn!("Invalid block: Found duplicate transaction"; "block_hash" => header.block_hash()); + warn!("Invalid block: Found duplicate transaction"; + "consensus_hash" => %header.consensus_hash, + "stacks_block_hash" => %header.block_hash(), + "stacks_block_id" => %header.block_id() + ); return Err(CodecError::DeserializeError( "Invalid block: found duplicate transaction".to_string(), )); @@ -3683,7 +3789,11 @@ impl StacksMessageCodec for NakamotoBlock { let tx_merkle_root: Sha512Trunc256Sum = merkle_tree.root(); if tx_merkle_root != header.tx_merkle_root { - warn!("Invalid block: Tx Merkle root mismatch"; "block_hash" => header.block_hash()); + warn!("Invalid block: Tx Merkle root mismatch"; + "consensus_hash" => %header.consensus_hash, + "stacks_block_hash" => %header.block_hash(), + "stacks_block_id" => %header.block_id() + ); return Err(CodecError::DeserializeError( "Invalid block: tx Merkle root mismatch".to_string(), )); diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 198143d1a9..cdd4f5c1c3 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -1031,8 +1031,10 @@ impl NakamotoChainState { warn!("While processing tenure change, failed to look up parent tenure"; "parent_coinbase_height" => parent_coinbase_height, "parent_block_id" => %block.header.parent_block_id, - "block_hash" => %block.header.block_hash(), - "block_consensus_hash" => %block.header.consensus_hash); + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); ChainstateError::NoSuchBlockError })?; // fetch the parent tenure fees by reading the total tx fees from this block's @@ -1045,8 +1047,10 @@ impl NakamotoChainState { )?.ok_or_else(|| { warn!("While processing tenure change, failed to look up parent block's total tx fees"; "parent_block_id" => %block.header.parent_block_id, - "block_hash" => %block.header.block_hash(), - "block_consensus_hash" => %block.header.consensus_hash); + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); ChainstateError::NoSuchBlockError })? } else { diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 971351b1d5..91cb417d33 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -2423,17 +2423,14 @@ impl StacksChainState { } }; + let stacks_block_id = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &anchored_block_hash); if !block.processed { if !has_stored_block { if accept { debug!( "Accept block {}/{} as {}", - consensus_hash, - anchored_block_hash, - StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &anchored_block_hash - ) + consensus_hash, anchored_block_hash, stacks_block_id ); } else { info!("Reject block {}/{}", consensus_hash, anchored_block_hash); @@ -2441,17 +2438,13 @@ impl StacksChainState { } else { debug!( "Already stored block {}/{} ({})", - consensus_hash, - anchored_block_hash, - StacksBlockHeader::make_index_block_hash(&consensus_hash, &anchored_block_hash) + consensus_hash, anchored_block_hash, stacks_block_id ); } } else { debug!( "Already processed block {}/{} ({})", - consensus_hash, - anchored_block_hash, - StacksBlockHeader::make_index_block_hash(&consensus_hash, &anchored_block_hash) + consensus_hash, anchored_block_hash, stacks_block_id ); } @@ -4086,7 +4079,10 @@ impl StacksChainState { let mut current_epoch = stacks_parent_epoch; while current_epoch != sortition_epoch.epoch_id { applied = true; - info!("Applying epoch transition"; "new_epoch_id" => %sortition_epoch.epoch_id, "old_epoch_id" => %current_epoch); + info!("Applying epoch transition"; + "new_epoch_id" => %sortition_epoch.epoch_id, + "old_epoch_id" => %current_epoch + ); // this assertion failing means that the _parent_ block was invalid: this is bad and should panic. assert!(current_epoch < sortition_epoch.epoch_id, "The SortitionDB believes the epoch is earlier than this Stacks block's parent: sortition db epoch = {}, current epoch = {}", sortition_epoch.epoch_id, current_epoch); // time for special cases: @@ -4199,7 +4195,14 @@ impl StacksChainState { "burn_block" => %burn_header_hash, "contract_call_ecode" => %resp.data); } else { - debug!("Processed StackStx burnchain op"; "amount_ustx" => stacked_ustx, "num_cycles" => num_cycles, "burn_block_height" => block_height, "sender" => %sender, "reward_addr" => %reward_addr, "txid" => %txid); + debug!("Processed StackStx burnchain op"; + "amount_ustx" => stacked_ustx, + "num_cycles" => num_cycles, + "burn_block_height" => block_height, + "sender" => %sender, + "reward_addr" => %reward_addr, + "txid" => %txid + ); } let mut execution_cost = clarity_tx.cost_so_far(); execution_cost @@ -4232,7 +4235,8 @@ impl StacksChainState { info!("StackStx burn op processing error."; "error" => %format!("{:?}", e), "txid" => %txid, - "burn_block" => %burn_header_hash); + "burn_block_hash" => %burn_header_hash + ); } }; } @@ -4318,9 +4322,10 @@ impl StacksChainState { } Err(e) => { info!("TransferStx burn op processing error."; - "error" => ?e, - "txid" => %txid, - "burn_block" => %burn_header_hash); + "error" => ?e, + "txid" => %txid, + "burn_block_hash" => %burn_header_hash + ); None } } @@ -4394,13 +4399,22 @@ impl StacksChainState { if let Value::Response(ref resp) = value { if !resp.committed { info!("DelegateStx burn op rejected by PoX contract."; - "txid" => %txid, - "burn_block" => %burn_header_hash, - "contract_call_ecode" => %resp.data); + "txid" => %txid, + "burn_block_hash" => %burn_header_hash, + "contract_call_ecode" => %resp.data); } else { let reward_addr_fmt = format!("{:?}", reward_addr); let delegate_to_fmt = format!("{:?}", delegate_to); - info!("Processed DelegateStx burnchain op"; "resp" => %resp.data, "amount_ustx" => delegated_ustx, "delegate_to" => delegate_to_fmt, "until_burn_height" => until_burn_height, "burn_block_height" => block_height, "sender" => %sender, "reward_addr" => reward_addr_fmt, "txid" => %txid); + info!("Processed DelegateStx burnchain op"; + "resp" => %resp.data, + "amount_ustx" => delegated_ustx, + "delegate_to" => delegate_to_fmt, + "until_burn_height" => until_burn_height, + "burn_block_height" => block_height, + "sender" => %sender, + "reward_addr" => reward_addr_fmt, + "txid" => %txid + ); } let mut execution_cost = clarity_tx.cost_so_far(); execution_cost @@ -4433,7 +4447,7 @@ impl StacksChainState { info!("DelegateStx burn op processing error."; "error" => %format!("{:?}", e), "txid" => %txid, - "burn_block" => %burn_header_hash); + "burn_header_hash" => %burn_header_hash); } }; } @@ -4492,7 +4506,7 @@ impl StacksChainState { if !resp.committed { info!("VoteForAggregateKey burn op rejected by signers-voting contract."; "txid" => %txid, - "burn_block" => %burn_header_hash, + "burn_block_hash" => %burn_header_hash, "contract_call_ecode" => %resp.data); } else { let aggregate_key_fmt = format!("{:?}", aggregate_key.to_hex()); @@ -4541,7 +4555,7 @@ impl StacksChainState { info!("VoteForAggregateKey burn op processing error."; "error" => %format!("{:?}", e), "txid" => %txid, - "burn_block" => %burn_header_hash); + "burn_block_hash" => %burn_header_hash); } }; } diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 93ffdda2b5..41ae4a2646 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2515,7 +2515,7 @@ impl StacksBlockBuilder { info!( "Miner: mined anchored block"; - "block_hash" => %block.block_hash(), + "stacks_block_hash" => %block.block_hash(), "height" => block.header.total_work.work, "tx_count" => block.txs.len(), "parent_stacks_block_hash" => %block.header.parent_block, diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs index f5b4b44a3a..c57d9d19bc 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -522,7 +522,9 @@ impl BlockDownloader { self.broken_neighbors.push(block_key.neighbor.clone()); } Err(e) => { - info!("Error decoding response from remote neighbor {:?} (at {}): {:?}", &block_key.neighbor, &block_key.data_url, &e); + info!("Error decoding response from remote neighbor {:?} (at {}): {:?}", &block_key.neighbor, &block_key.data_url, &e; + "consensus_hash" => %block_key.consensus_hash + ); self.broken_peers.push(event_id); self.broken_neighbors.push(block_key.neighbor.clone()); } @@ -626,7 +628,9 @@ impl BlockDownloader { Ok(microblocks) => { if microblocks.len() == 0 { // we wouldn't have asked for a 0-length stream - info!("Got unexpected zero-length microblock stream from {:?} ({:?})", &block_key.neighbor, &block_key.data_url); + info!("Got unexpected zero-length microblock stream from {:?} ({:?})", &block_key.neighbor, &block_key.data_url; + "consensus_hash" => %block_key.consensus_hash + ); self.broken_peers.push(event_id); self.broken_neighbors.push(block_key.neighbor.clone()); } else { @@ -644,7 +648,9 @@ impl BlockDownloader { Err(net_error::NotFoundError) => { // remote peer didn't have the microblock, even though their blockinv said // they did. - info!("Remote neighbor {:?} ({:?}) does not have microblock stream indexed at {}", &block_key.neighbor, &block_key.data_url, &block_key.index_block_hash); + info!("Remote neighbor {:?} ({:?}) does not have microblock stream indexed at {}", &block_key.neighbor, &block_key.data_url, &block_key.index_block_hash; + "consensus_hash" => %block_key.consensus_hash + ); // the fact that we asked this peer means that it's block inv indicated // it was present, so the absence is the mark of a broken peer. @@ -654,7 +660,9 @@ impl BlockDownloader { // talk to them for a while. } Err(e) => { - info!("Error decoding response from remote neighbor {:?} (at {}): {:?}", &block_key.neighbor, &block_key.data_url, &e); + info!("Error decoding response from remote neighbor {:?} (at {}): {:?}", &block_key.neighbor, &block_key.data_url, &e; + "consensus_hash" => %block_key.consensus_hash + ); self.broken_peers.push(event_id); self.broken_neighbors.push(block_key.neighbor.clone()); } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 421702bcfb..76fd2aad5e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -265,7 +265,7 @@ impl BlockMinerThread { info!( "Miner: Block signed by signer set and broadcasted"; "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_hash" => %new_block.header.block_hash(), + "stacks_block_hash" => %new_block.header.block_hash(), "stacks_block_id" => %new_block.header.block_id(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, @@ -633,8 +633,10 @@ impl BlockMinerThread { if *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { // Do an extra check just so we don't log EVERY time. warn!("Broadcasting is stalled due to testing directive."; - "block_id" => %block.block_id(), + "stacks_block_id" => %block.block_id(), + "stacks_block_hash" => %block.header.block_hash(), "height" => block.header.chain_length, + "consensus_hash" => %block.header.consensus_hash ); while *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { std::thread::sleep(std::time::Duration::from_millis(10)); @@ -642,6 +644,7 @@ impl BlockMinerThread { info!("Broadcasting is no longer stalled due to testing directive."; "block_id" => %block.block_id(), "height" => block.header.chain_length, + "consensus_hash" => %block.header.consensus_hash ); } } diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 21a993e899..92e6a78b8a 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -891,8 +891,9 @@ impl RelayerThread { ) else { warn!("Failure getting the first block of tenure in order to assemble block commit"; - "tenure_consensus_hash" => %chain_tip_header.consensus_hash, - "tip_block_hash" => %chain_tip_header.anchored_header.block_hash()); + "tenure_consensus_hash" => %chain_tip_header.consensus_hash, + "tip_stacks_block_hash" => %chain_tip_header.anchored_header.block_hash() + ); return None; }; diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 7fcb5bb008..7f001a7d09 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -763,6 +763,8 @@ impl SignCoordinator { "signature" => %signature, "signer_weight" => signer_entry.weight, "total_weight_signed" => total_weight_signed, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() ); gathered_signatures.insert(slot_id, signature); } @@ -777,7 +779,10 @@ impl SignCoordinator { // After gathering all signatures, return them if we've hit the threshold if total_weight_signed >= self.weight_threshold { - info!("SignCoordinator: Received enough signatures. Continuing."); + info!("SignCoordinator: Received enough signatures. Continuing."; + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); return Ok(gathered_signatures.values().cloned().collect()); } } diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 2c78b4c187..4918f6f948 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -905,7 +905,7 @@ impl Node { if let Some(estimator) = fee_estimator.as_mut() { if let Err(e) = estimator.notify_block(&processed_block, &stacks_epoch.block_limit) { warn!("FeeEstimator failed to process block receipt"; - "stacks_block" => %processed_block.header.anchored_header.block_hash(), + "stacks_block_hash" => %processed_block.header.anchored_header.block_hash(), "stacks_height" => %processed_block.header.stacks_block_height, "error" => %e); } From d13419323f6794e4cd7add2011837aafee115a7b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 28 Jun 2024 14:01:46 -0400 Subject: [PATCH 0367/1400] feat: make miner retry when signature collection times out --- .../stacks-node/src/nakamoto_node/miner.rs | 8 +- testnet/stacks-node/src/tests/signer/mod.rs | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 90 ++++++++++++++++++- 3 files changed, 92 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 421702bcfb..edd26243d2 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -248,12 +248,8 @@ impl BlockMinerThread { ) { Ok(x) => x, Err(e) => { - error!( - "Unrecoverable error while gathering signatures: {e:?}. Ending tenure." - ); - return Err(NakamotoNodeError::MiningFailure( - ChainstateError::MinerAborted, - )); + error!("Error while gathering signatures: {e:?}. Will try mining again."); + continue; } }; diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 794c00f05a..0ac82f4365 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -118,6 +118,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + + // Stall block validation so the signers will not be able to sign. + TEST_VALIDATE_STALL.lock().unwrap().replace(true); + + let proposals_before = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + // submit a tx so that the miner will mine a block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal"); + loop { + let blocks_proposed = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + if blocks_proposed > proposals_before { + break; + } + std::thread::sleep(Duration::from_millis(100)); + } + + info!("Block proposed, verifying that it is not processed"); + + // Wait 20 seconds to be sure that the timeout has occurred + std::thread::sleep(Duration::from_secs(20)); + assert_eq!( + signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst), + blocks_before + ); + + // Disable the stall and wait for the block to be processed on retry + info!("Disable the stall and wait for the block to be processed"); + TEST_VALIDATE_STALL.lock().unwrap().replace(false); + loop { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + if blocks_mined > blocks_before { + break; + } + std::thread::sleep(Duration::from_millis(100)); + } + + signer_test.shutdown(); +} From cbce8d22a688dac9a6bdffc8dcbe05877b358dd0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 28 Jun 2024 11:26:49 -0500 Subject: [PATCH 0368/1400] fix: block commit pox-descendant check in nakamoto --- stacks-common/src/types/mod.rs | 16 ++++ stackslib/src/burnchains/tests/mod.rs | 5 +- .../burn/operations/leader_block_commit.rs | 7 +- .../chainstate/nakamoto/coordinator/tests.rs | 82 +++++++++++++++++++ stackslib/src/net/mod.rs | 8 ++ 5 files changed, 113 insertions(+), 5 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index fbb4ded443..5af696d30d 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -157,6 +157,22 @@ impl StacksEpochId { } } + /// Whether or not this epoch interprets block commit OPs block hash field + /// as a new block hash or the StacksBlockId of a new tenure's parent tenure. + pub fn block_commits_to_parent(&self) -> bool { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => false, + StacksEpochId::Epoch30 => true, + } + } + /// Does this epoch support unlocking PoX contributors that miss a slot? /// /// Epoch 2.0 - 2.05 didn't support this feature, but they weren't epoch-guarded on it. Instead, diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 10e83605b3..31e29c0b26 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -161,10 +161,7 @@ impl TestMiner { } pub fn last_block_commit(&self) -> Option { - match self.block_commits.len() { - 0 => None, - x => Some(self.block_commits[x - 1].clone()), - } + self.block_commits.last().cloned() } pub fn block_commit_at(&self, idx: usize) -> Option { diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 753d2c51ad..cea03d4435 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -692,7 +692,12 @@ impl LeaderBlockCommitOp { // Now, we are checking the reward sets match, and if they don't, // whether or not pox descendant is necessary - let descended_from_anchor = tx.descended_from(parent_block_height, &reward_set_info.anchor_block) + // first, if we're in a nakamoto epoch, any block commit building directly off of the anchor block + // is descendant + let directly_descended_from_anchor = epoch_id.block_commits_to_parent() + && self.block_header_hash == reward_set_info.anchor_block; + let descended_from_anchor = directly_descended_from_anchor || tx + .descended_from(parent_block_height, &reward_set_info.anchor_block) .map_err(|e| { error!("Failed to check whether parent (height={}) is descendent of anchor block={}: {}", parent_block_height, &reward_set_info.anchor_block, e); diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 4f91181713..c2645e22ad 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -40,6 +40,7 @@ use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::operations::{BlockstackOperationType, LeaderBlockCommitOp}; use crate::chainstate::coordinator::tests::{p2pkh_from, pox_addr_from}; +use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; use crate::chainstate::nakamoto::test_signers::TestSigners; @@ -696,6 +697,87 @@ impl<'a> TestPeer<'a> { } } +#[test] +// Test the block commit descendant check in nakamoto +// - create a 12 address PoX reward set +// - make a normal block commit, assert that the bitvec must contain 1s for those addresses +// - make a burn block commit, assert that the bitvec must contain 0s for those addresses +fn block_descendant() { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key)); + + let num_stackers: u32 = 4; + let mut signing_key_seed = num_stackers.to_be_bytes().to_vec(); + signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); + let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); + let test_stackers = (0..num_stackers) + .map(|index| TestStacker { + signer_private_key: signing_key.clone(), + stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), + amount: u64::MAX as u128 - 10000, + pox_address: Some(PoxAddress::Standard( + StacksAddress::new( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + Hash160::from_data(&index.to_be_bytes()), + ), + Some(AddressHashMode::SerializeP2PKH), + )), + }) + .collect::>(); + let test_signers = TestSigners::new(vec![signing_key]); + let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 28; + + let mut boot_plan = NakamotoBootPlan::new(function_name!()) + .with_test_stackers(test_stackers.clone()) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key); + boot_plan.pox_constants = pox_constants; + + let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); + let mut blocks = vec![]; + let pox_constants = peer.sortdb().pox_constants.clone(); + let first_burn_height = peer.sortdb().first_block_height; + + // mine until we're at the start of the prepare reward phase (so we *know* + // that the reward set contains entries) + loop { + let (block, burn_height, ..) = + peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); + blocks.push(block); + + if pox_constants.is_in_prepare_phase(first_burn_height, burn_height + 1) { + info!("At prepare phase start"; "burn_height" => burn_height); + break; + } + } + + // mine until right before the end of the prepare phase + loop { + let (burn_height, ..) = peer.mine_empty_tenure(); + if pox_constants.is_reward_cycle_start(first_burn_height, burn_height + 3) { + info!("At prepare phase end"; "burn_height" => burn_height); + break; + } + } + + // this should get chosen as the anchor block. + let (naka_anchor_block, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); + + // make the index=0 block empty, because it doesn't get a descendancy check + // so, if this has a tenure mined, the direct parent check won't occur + peer.mine_empty_tenure(); + + // this would be where things go haywire. this tenure's parent will be the anchor block. + let (first_reward_block, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); + + assert_eq!(first_reward_block.header.parent_block_id, naka_anchor_block.block_id()); +} + #[test] // Test PoX Reward and Punish treatment in nakamoto // - create a 12 address PoX reward set diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 94c061dc5d..9a99cabe64 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3429,6 +3429,14 @@ pub mod test { self.next_burnchain_block(vec![]) } + pub fn mine_empty_tenure(&mut self) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let (burn_ops, ..) = self.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let result = self.next_burnchain_block(burn_ops); + // remove the last block commit so that the testpeer doesn't try to build off of this tenure + self.miner.block_commits.pop(); + result + } + pub fn mempool(&mut self) -> &mut MemPoolDB { self.mempool.as_mut().unwrap() } From 9eee4525e3b8f3fd4303640f14aba0a9f4b8c4f5 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 28 Jun 2024 14:26:50 -0400 Subject: [PATCH 0369/1400] test: fix block reward check in `check_block_info_rewards` --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 8570b0aeff..f541b842b8 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6424,15 +6424,17 @@ fn check_block_info_rewards() { vec![&clarity::vm::Value::UInt(last_nakamoto_block)], ); let tuple0 = result0.expect_tuple().unwrap().data_map; - assert_eq!( + assert!( tuple0 .get("block-reward") .unwrap() .clone() .expect_optional() .unwrap() - .unwrap(), - Value::UInt(3061200000) + .unwrap() + .expect_u128() + .unwrap() + > 0 ); let result1 = call_read_only( From 4ad8e912f6b1a6ead00b028f8aeadd58b49bd6ee Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 28 Jun 2024 14:44:45 -0500 Subject: [PATCH 0370/1400] test: signer client test should use a signer-sent message --- stacks-signer/src/client/stackerdb.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index e0c6cb1621..499bdddd5f 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -234,7 +234,7 @@ mod tests { use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; - use libsigner::v0::messages::SignerMessage; + use libsigner::v0::messages::{BlockRejection, BlockResponse, RejectCode, SignerMessage}; use libsigner::BlockProposal; use rand::{thread_rng, RngCore}; @@ -276,12 +276,12 @@ mod tests { }; block.header.tx_merkle_root = tx_merkle_root; - let block_proposal = BlockProposal { - block, - burn_height: thread_rng().next_u64(), - reward_cycle: thread_rng().next_u64(), + let block_reject = BlockRejection { + reason: "Did not like it".into(), + reason_code: RejectCode::RejectedInPriorRound, + signer_signature_hash: block.header.signer_signature_hash(), }; - let signer_message = SignerMessage::BlockProposal(block_proposal); + let signer_message = SignerMessage::BlockResponse(BlockResponse::Rejected(block_reject)); let ack = StackerDBChunkAckData { accepted: true, reason: None, @@ -289,12 +289,13 @@ mod tests { code: None, }; let mock_server = mock_server_from_config(&config); - let h = spawn(move || stackerdb.send_message_with_retry(signer_message)); + debug!("Spawning msg sender"); + let h = spawn(move || stackerdb.send_message_with_retry(signer_message).unwrap()); let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); let payload = serde_json::to_string(&ack).expect("Failed to serialize ack"); response_bytes.extend(payload.as_bytes()); std::thread::sleep(Duration::from_millis(500)); write_response(mock_server, response_bytes.as_slice()); - assert_eq!(ack, h.join().unwrap().unwrap()); + assert_eq!(ack, h.join().unwrap()); } } From 08e554f9c38efb53c999bf27f122b6f09346ddd0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 28 Jun 2024 15:02:54 -0500 Subject: [PATCH 0371/1400] cargo fmt --- stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index c2645e22ad..8358a9b622 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -775,7 +775,10 @@ fn block_descendant() { // this would be where things go haywire. this tenure's parent will be the anchor block. let (first_reward_block, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); - assert_eq!(first_reward_block.header.parent_block_id, naka_anchor_block.block_id()); + assert_eq!( + first_reward_block.header.parent_block_id, + naka_anchor_block.block_id() + ); } #[test] From b67b62916fd208cc4eb33ce4408dc2a0b4d451d9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 17:57:54 -0400 Subject: [PATCH 0372/1400] chore: strip duplicate peer epoch ID --- clarity/src/vm/database/clarity_db.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 7c2dce309a..cdf411fc3e 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -25,9 +25,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, }; -use stacks_common::types::{ - Address, StacksEpoch as GenericStacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, -}; +use stacks_common::types::{Address, StacksEpoch as GenericStacksEpoch, StacksEpochId}; use stacks_common::util::hash::{to_hex, Hash160, Sha256Sum, Sha512Trunc256Sum}; use super::clarity_store::SpecialCaseHandler; @@ -328,7 +326,7 @@ impl BurnStateDB for NullBurnStateDB { start_height: 0, end_height: u64::MAX, block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, + network_epoch: 0, }) } fn get_stacks_epoch_by_epoch_id(&self, _epoch_id: &StacksEpochId) -> Option { From fe27f9282c31e18da80ef29393ee145e52f49692 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 17:58:12 -0400 Subject: [PATCH 0373/1400] chore: strip duplicate peer version ID --- clarity/src/vm/docs/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 0c660d7e67..cfaccd0ec9 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2751,7 +2751,7 @@ mod test { BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, }; - use stacks_common::types::{Address, StacksEpochId, PEER_VERSION_EPOCH_2_1}; + use stacks_common::types::{Address, StacksEpochId}; use stacks_common::util::hash::hex_bytes; use super::{get_input_type_string, make_all_api_reference, make_json_api_reference}; @@ -2905,7 +2905,7 @@ mod test { start_height: 0, end_height: u64::MAX, block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_1, + network_epoch: 0x06, }) } fn get_burn_start_height(&self) -> u32 { From ed78166e91a90963e797710d8d42defb01edab8a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 17:58:28 -0400 Subject: [PATCH 0374/1400] chore: strip duplicate peer version ID --- clarity/src/vm/test_util/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index 605180d005..d885a62e8d 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -7,7 +7,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, VRFSeed, }; -use stacks_common::types::{StacksEpochId, PEER_VERSION_EPOCH_2_0}; +use stacks_common::types::StacksEpochId; use crate::vm::ast::ASTRules; use crate::vm::costs::ExecutionCost; @@ -257,7 +257,7 @@ impl BurnStateDB for UnitTestBurnStateDB { start_height: 0, end_height: u64::MAX, block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, + network_epoch: 0, }) } From 1cf1048d62b766a9414e4c9f81ba78a4476241fa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 17:58:42 -0400 Subject: [PATCH 0375/1400] chore: DRY up peer versions --- stacks-common/src/types/mod.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index fbb4ded443..db55eb97ba 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -58,11 +58,6 @@ pub trait Address: Clone + fmt::Debug + fmt::Display { fn is_burn(&self) -> bool; } -pub const PEER_VERSION_EPOCH_1_0: u8 = 0x00; -pub const PEER_VERSION_EPOCH_2_0: u8 = 0x00; -pub const PEER_VERSION_EPOCH_2_05: u8 = 0x05; -pub const PEER_VERSION_EPOCH_2_1: u8 = 0x06; - // sliding burnchain window over which a miner's past block-commit payouts will be used to weight // its current block-commit in a sortition. // This is the value used in epoch 2.x From 2ecf14d5d88f9bc4da73740eb8553af432bf9363 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 17:59:22 -0400 Subject: [PATCH 0376/1400] fix: only advance canonical nakamoto fork in the sortition DB if it's actually higher than the memoized fork, and also, add get_nakamoto_tip_block_id() to SortitionHandle to facilitate querying the canonical Nakamoto tip --- stackslib/src/chainstate/burn/db/sortdb.rs | 32 ++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 14011a2800..2914739fea 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -1072,6 +1072,14 @@ pub trait SortitionHandle { /// Returns Err(..) on DB errors fn get_nakamoto_tip(&self) -> Result, db_error>; + /// Get the block ID of the highest-processed Nakamoto block on this history. + fn get_nakamoto_tip_block_id(&self) -> Result, db_error> { + let Some((ch, bhh, _)) = self.get_nakamoto_tip()? else { + return Ok(None); + }; + Ok(Some(StacksBlockId::new(&ch, &bhh))) + } + /// is the given block a descendant of `potential_ancestor`? /// * block_at_burn_height: the burn height of the sortition that chose the stacks block to check /// * potential_ancestor: the stacks block hash of the potential ancestor @@ -1808,6 +1816,30 @@ impl<'a> SortitionHandleTx<'a> { if cur_epoch.epoch_id >= StacksEpochId::Epoch30 { // Nakamoto blocks are always processed in order since the chain can't fork + // arbitrarily. + // However, two kinds of forks can happen: + // * one of a set of malleableized siblings can be confirmed (but they all have the + // same sighash and same height). + // * a late tenure-change can be processed. + // As a result, only update the canonical Nakamoto tip if the given block is higher + // than the existing tip for this sortiton. + let current_sortition_tip : Option<(ConsensusHash, BlockHeaderHash, u64)> = self.query_row_and_then( + "SELECT consensus_hash,block_hash,block_height FROM stacks_chain_tips WHERE sortition_id = ?1 ORDER BY block_height DESC LIMIT 1", + rusqlite::params![&burn_tip.sortition_id], + |row| Ok((row.get_unwrap(0), row.get_unwrap(1), (u64::try_from(row.get_unwrap::<_, i64>(2)).expect("FATAL: block height too high")))) + ).optional()?; + + if let Some((cur_ch, cur_bhh, cur_height)) = current_sortition_tip { + if cur_height >= stacks_block_height { + debug!("Will NOT replace canonical Stacks tip {}/{} ({}) height {} with {}/{} ({}) height {}", + &cur_ch, &cur_bhh, &StacksBlockId::new(&cur_ch, &cur_bhh), &cur_height, consensus_hash, stacks_block_hash, &StacksBlockId::new(consensus_hash, stacks_block_hash), stacks_block_height); + return Ok(()); + } else { + debug!("Will replace canonical Stacks tip {}/{} ({}) height {} with {}/{} ({}) height {}", + &cur_ch, &cur_bhh, &StacksBlockId::new(&cur_ch, &cur_bhh), &cur_height, consensus_hash, stacks_block_hash, &StacksBlockId::new(consensus_hash, stacks_block_hash), stacks_block_height); + } + } + self.update_canonical_stacks_tip( &burn_tip.sortition_id, consensus_hash, From af9d5d71cd09ed5872ca6e7efe01f19c9a04596d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:00:08 -0400 Subject: [PATCH 0377/1400] fix: remove sortition DB-specific functions from IndexDBConn --- stackslib/src/chainstate/coordinator/mod.rs | 29 +++++++++++++-------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index b2a1c8679f..f99776c843 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -66,7 +66,7 @@ use crate::chainstate::stacks::events::{ StacksBlockEventData, StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin, }; use crate::chainstate::stacks::index::marf::MARFOpenOpts; -use crate::chainstate::stacks::index::MarfTrieId; +use crate::chainstate::stacks::index::{Error as IndexError, MarfTrieId}; use crate::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use crate::chainstate::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, TransactionPayload, @@ -252,6 +252,7 @@ pub enum Error { NoSortitions, FailedToProcessSortition(BurnchainError), DBError(DBError), + IndexError(IndexError), NotPrepareEndBlock, NotPoXAnchorBlock, NotInPreparePhase, @@ -278,6 +279,12 @@ impl From for Error { } } +impl From for Error { + fn from(o: IndexError) -> Error { + Error::IndexError(o) + } +} + pub trait RewardSetProvider { fn get_reward_set( &self, @@ -3296,11 +3303,11 @@ impl< // update cost estimator if let Some(ref mut estimator) = self.cost_estimator { - let stacks_epoch = self - .sortition_db - .index_conn() - .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) - .expect("Could not find a stacks epoch."); + let stacks_epoch = SortitionDB::get_stacks_epoch_by_epoch_id( + self.sortition_db.conn(), + &block_receipt.evaluated_epoch, + )? + .expect("Could not find a stacks epoch."); estimator.notify_block( &block_receipt.tx_receipts, &stacks_epoch.block_limit, @@ -3310,11 +3317,11 @@ impl< // update fee estimator if let Some(ref mut estimator) = self.fee_estimator { - let stacks_epoch = self - .sortition_db - .index_conn() - .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) - .expect("Could not find a stacks epoch."); + let stacks_epoch = SortitionDB::get_stacks_epoch_by_epoch_id( + self.sortition_db.conn(), + &block_receipt.evaluated_epoch, + )? + .expect("Could not find a stacks epoch."); if let Err(e) = estimator.notify_block(&block_receipt, &stacks_epoch.block_limit) { From 4fd033f2837fab774c512d4c2f67be4bc164d9fd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:00:36 -0400 Subject: [PATCH 0378/1400] fix: query reward set off of a Stacks tip via the MARF, which for the chains coordinator requires querying it from the canonical fork memoized in the sortition DB. The reason I think this is safe here (and only here) is because forks can't happen arbitrarily. The memoized tip at the end of the prepare phase is practically guaranteed to point to the same reward set on all nodes, since the reward set is calculated at the start of the prepare phase. --- .../chainstate/nakamoto/coordinator/mod.rs | 96 +++++++++++++++---- 1 file changed, 78 insertions(+), 18 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 1443fe4350..3732df86c8 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -30,7 +30,9 @@ use stacks_common::types::{StacksEpoch, StacksEpochId}; use crate::burnchains::db::{BurnchainBlockData, BurnchainDB, BurnchainHeaderReader}; use crate::burnchains::{Burnchain, BurnchainBlockHeader}; -use crate::chainstate::burn::db::sortdb::{get_ancestor_sort_id, SortitionDB, SortitionHandleConn}; +use crate::chainstate::burn::db::sortdb::{ + get_ancestor_sort_id, SortitionDB, SortitionHandle, SortitionHandleConn, +}; use crate::chainstate::burn::operations::leader_block_commit::RewardSetInfo; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::comm::{ @@ -44,7 +46,9 @@ use crate::chainstate::coordinator::{ }; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::boot::{RewardSet, SIGNERS_NAME}; -use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; +use crate::chainstate::stacks::db::{ + StacksBlockHeaderTypes, StacksChainState, StacksDBConn, StacksHeaderInfo, +}; use crate::chainstate::stacks::index::marf::MarfConnection; use crate::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use crate::chainstate::stacks::Error as ChainstateError; @@ -149,11 +153,13 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { sort_handle: &SortitionHandleConn, block_id: &StacksBlockId, ) -> Result { + let ro_index = chainstate.state_index.reopen_readonly()?; + let headers_db = HeadersDBConn(StacksDBConn::new(&ro_index, ())); let Some(coinbase_height_of_calculation) = chainstate .clarity_state .eval_read_only( block_id, - &HeadersDBConn(chainstate.state_index.sqlite_conn()), + &headers_db, sort_handle, &boot_code_id(SIGNERS_NAME, chainstate.mainnet), &format!("(map-get? cycle-set-height u{})", cycle), @@ -186,7 +192,7 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { debug_log: bool, ) -> Result { let Some(reward_set_block) = NakamotoChainState::get_header_by_coinbase_height( - &mut chainstate.index_tx_begin()?, + &mut chainstate.index_tx_begin(), block_id, coinbase_height_of_calculation, )? @@ -292,6 +298,7 @@ pub fn get_nakamoto_reward_cycle_info( sortition_tip: &SortitionId, burnchain: &Burnchain, chain_state: &mut StacksChainState, + stacks_tip: &StacksBlockId, sort_db: &mut SortitionDB, provider: &U, ) -> Result, Error> { @@ -310,6 +317,7 @@ pub fn get_nakamoto_reward_cycle_info( .expect("FATAL: no reward cycle for burn height"); debug!("Processing reward set for Nakamoto reward cycle"; + "stacks_tip" => %stacks_tip, "burn_height" => burn_height, "reward_cycle" => reward_cycle, "reward_cycle_length" => burnchain.pox_constants.reward_cycle_length, @@ -320,6 +328,7 @@ pub fn get_nakamoto_reward_cycle_info( sortition_tip, burnchain, chain_state, + stacks_tip, sort_db, provider, )? @@ -360,6 +369,7 @@ pub fn load_nakamoto_reward_set( sortition_tip: &SortitionId, burnchain: &Burnchain, chain_state: &mut StacksChainState, + stacks_tip: &StacksBlockId, sort_db: &SortitionDB, provider: &U, ) -> Result, Error> { @@ -475,7 +485,8 @@ pub fn load_nakamoto_reward_set( } match NakamotoChainState::get_nakamoto_tenure_start_block_header( - chain_state.db(), + &mut chain_state.index_conn(), + stacks_tip, &sn.consensus_hash, ) { Ok(Some(x)) => return Some(Ok(x)), @@ -556,10 +567,12 @@ pub fn load_nakamoto_reward_set( /// * we're guaranteed to have an anchor block /// * we pre-compute the reward set at the start of the prepare phase, so we only need to load it /// up here at the start of the reward phase. +/// `stacks_tip` is the tip that the caller is going to build a block on. pub fn get_nakamoto_next_recipients( sortition_tip: &BlockSnapshot, sort_db: &mut SortitionDB, chain_state: &mut StacksChainState, + stacks_tip: &StacksBlockId, burnchain: &Burnchain, ) -> Result, Error> { let reward_cycle_info = @@ -571,6 +584,7 @@ pub fn get_nakamoto_next_recipients( &sortition_tip.sortition_id, burnchain, chain_state, + stacks_tip, sort_db, &OnChainRewardSetProvider::new(), )? @@ -637,7 +651,17 @@ impl< .block_height_to_reward_cycle(epoch3.start_height) .expect("FATAL: epoch3 block height has no reward cycle"); - // only proceed if we have processed the _anchor block_ for this reward cycle + // NOTE(safety): this is not guaranteed to be the canonical best Stacks tip. + // However, it's safe to use here because we're only interested in loading up the first + // Nakamoto reward set, which uses the epoch2 anchor block selection algorithm. There will + // only be one such reward set in epoch2 rules, since it's tied to a specific block-commit + // (note that this is not true for reward sets generated in Nakamoto prepare phases). + let (local_best_stacks_ch, local_best_stacks_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortition_db.conn())?; + let local_best_stacks_tip = + StacksBlockId::new(&local_best_stacks_ch, &local_best_stacks_bhh); + + // only proceed if we have processed the _anchor block_ for this reward cycle. let Some((rc_info, _)) = load_nakamoto_reward_set( self.burnchain .pox_reward_cycle(canonical_sn.block_height) @@ -645,6 +669,7 @@ impl< &canonical_sn.sortition_id, &self.burnchain, &mut self.chain_state_db, + &local_best_stacks_tip, &self.sortition_db, &OnChainRewardSetProvider::new(), )? @@ -834,11 +859,11 @@ impl< // update cost estimator if let Some(ref mut estimator) = self.cost_estimator { - let stacks_epoch = self - .sortition_db - .index_conn() - .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) - .expect("Could not find a stacks epoch."); + let stacks_epoch = SortitionDB::get_stacks_epoch_by_epoch_id( + self.sortition_db.conn(), + &block_receipt.evaluated_epoch, + )? + .expect("Could not find a stacks epoch."); estimator.notify_block( &block_receipt.tx_receipts, &stacks_epoch.block_limit, @@ -848,11 +873,11 @@ impl< // update fee estimator if let Some(ref mut estimator) = self.fee_estimator { - let stacks_epoch = self - .sortition_db - .index_conn() - .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) - .expect("Could not find a stacks epoch."); + let stacks_epoch = SortitionDB::get_stacks_epoch_by_epoch_id( + self.sortition_db.conn(), + &block_receipt.evaluated_epoch, + )? + .expect("Could not find a stacks epoch."); if let Err(e) = estimator.notify_block(&block_receipt, &stacks_epoch.block_limit) { warn!("FeeEstimator failed to process block receipt"; "stacks_block" => %block_hash, @@ -892,6 +917,9 @@ impl< &canonical_sortition_tip, )? .ok_or(DBError::NotFoundError)?; + + // check and see if *this block* or one if its ancestors has processed the reward + // cycle data let Some((rc_info, _)) = load_nakamoto_reward_set( self.burnchain .pox_reward_cycle(canonical_sn.block_height) @@ -899,6 +927,7 @@ impl< &canonical_sn.sortition_id, &self.burnchain, &mut self.chain_state_db, + &canonical_stacks_block_id, &self.sortition_db, &OnChainRewardSetProvider::new(), )? @@ -914,7 +943,8 @@ impl< continue; } - // This is the first Stacks block in the prepare phase for the next reward cycle. + // This is the first Stacks block in the prepare phase for the next reward cycle, + // as determined by the history tipped at `canonical_stacks_block_id`. // Pause here and process the next sortitions debug!("Process next reward cycle's sortitions"); self.handle_new_nakamoto_burnchain_block()?; @@ -929,6 +959,7 @@ impl< fn get_nakamoto_reward_cycle_info( &mut self, block_height: u64, + stacks_tip: &StacksBlockId, ) -> Result, Error> { let sortition_tip_id = self .canonical_sortition_tip @@ -940,6 +971,7 @@ impl< sortition_tip_id, &self.burnchain, &mut self.chain_state_db, + stacks_tip, &mut self.sortition_db, &self.reward_set_provider, ) @@ -1057,7 +1089,34 @@ impl< let reward_cycle_info = if self.burnchain.is_reward_cycle_start(header.block_height) { // we're at the end of the prepare phase, so we'd better have obtained the reward // cycle info of we must block. - let reward_cycle_info = self.get_nakamoto_reward_cycle_info(header.block_height)?; + // NOTE(safety): the reason it's safe to use the local best stacks tip here is + // because as long as at least 30% of the signers are honest, there's no way there + // can be two or more distinct reward sets calculated for a reward cycle. Due to + // signature malleability, there can be multiple unconfirmed siblings at a given + // height H, but at height H+1, exactly one of those siblings will be canonical, + // and will remain canonical with respect to its tenure's Bitcoin fork forever. + // Here, we're loading a reward set calculated between H and H+99 from H+100, where + // H is the start of the prepare phase. So if we get any reward set from our + // canonical tip, it's guaranteed to be te canonical one. + let canonical_sortition_tip = self.canonical_sortition_tip.clone().unwrap_or( + // should be unreachable + SortitionDB::get_canonical_burn_chain_tip(&self.sortition_db.conn())? + .sortition_id, + ); + + let Some(local_best_nakamoto_tip) = self + .sortition_db + .index_handle(&canonical_sortition_tip) + .get_nakamoto_tip_block_id()? + else { + debug!("No Nakamoto blocks processed yet, so no reward cycle known for this next reward cycle"); + return Ok(false); + }; + + let reward_cycle_info = self.get_nakamoto_reward_cycle_info( + header.block_height, + &local_best_nakamoto_tip, + )?; if let Some(rc_info) = reward_cycle_info.as_ref() { // in nakamoto, if we have any reward cycle info at all, it will be known. // otherwise, we may have to process some more Stacks blocks @@ -1068,6 +1127,7 @@ impl< } else { // have to block -- we don't have the reward cycle information debug!("Do not yet have PoX anchor block for next reward cycle -- no anchor block found"; + "local_best_nakamoto_tip" => %local_best_nakamoto_tip, "next_reward_cycle" => self.burnchain.block_height_to_reward_cycle(header.block_height), "block_height" => header.block_height); return Ok(false); From f1db79e384d7242ed172e6702312157b646ecb38 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:02:28 -0400 Subject: [PATCH 0379/1400] chore: remove tests that cannot be run without the MARF (and test them elsewhere), and use malleablized blocks in the 10-sortitions-with-10-blocks-and-extensions test --- .../chainstate/nakamoto/coordinator/tests.rs | 361 +++++------------- 1 file changed, 100 insertions(+), 261 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 4f91181713..8ff14f4146 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -45,14 +45,14 @@ use crate::chainstate::nakamoto::signer_set::NakamotoSigners; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::pox_4_tests::{get_stacking_minimum, get_tip}; use crate::chainstate::stacks::boot::signers_tests::{readonly_call, readonly_call_with_sortdb}; use crate::chainstate::stacks::boot::test::{ - key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, - make_signers_vote_for_aggregate_public_key, make_signers_vote_for_aggregate_public_key_value, - with_sortdb, + key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, with_sortdb, }; use crate::chainstate::stacks::boot::{MINERS_NAME, SIGNERS_NAME}; use crate::chainstate::stacks::db::{MinerPaymentTxFees, StacksAccount, StacksChainState}; @@ -67,9 +67,23 @@ use crate::net::relay::Relayer; use crate::net::stackerdb::StackerDBConfig; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; use crate::net::tests::NakamotoBootPlan; +use crate::stacks_common::codec::StacksMessageCodec; use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::{query_rows, u64_to_sql}; use crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; +impl<'a> NakamotoStagingBlocksConnRef<'a> { + pub fn get_blocks_at_height(&self, height: u64) -> Vec { + let sql = "SELECT data FROM nakamoto_staging_blocks WHERE height = ?1"; + let args = rusqlite::params![&u64_to_sql(height).unwrap()]; + let serialized_blocks: Vec> = query_rows(self, sql, args).unwrap(); + serialized_blocks + .into_iter() + .map(|blk_bytes| NakamotoBlock::consensus_deserialize(&mut &blk_bytes[..]).unwrap()) + .collect() + } +} + /// Bring a TestPeer into the Nakamoto Epoch fn advance_to_nakamoto( peer: &mut TestPeer, @@ -85,6 +99,8 @@ fn advance_to_nakamoto( &vec![StacksPublicKey::from_private(&private_key)], ) .unwrap(); + let default_pox_addr = + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); let mut tip = None; for sortition_height in 0..11 { @@ -94,16 +110,19 @@ fn advance_to_nakamoto( test_stackers .iter() .map(|test_stacker| { - let pox_addr = test_stacker.pox_address.clone().unwrap_or_else(|| { - PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()) - }); + let pox_addr = test_stacker + .pox_addr + .clone() + .unwrap_or(default_pox_addr.clone()); + let max_amount = test_stacker.max_amount.clone().unwrap_or(u128::MAX); + let signature = make_signer_key_signature( &pox_addr, &test_stacker.signer_private_key, 6, &Pox4SignatureTopic::StackStx, 12_u128, - u128::MAX, + max_amount, 1, ); let signing_key = @@ -117,22 +136,11 @@ fn advance_to_nakamoto( &signing_key, 34, Some(signature), - u128::MAX, + max_amount, 1, ) }) .collect() - } else if sortition_height == 8 { - with_sortdb(peer, |chainstate, sortdb| { - make_all_signers_vote_for_aggregate_key( - chainstate, - sortdb, - &tip.unwrap(), - test_signers, - test_stackers, - 7, - ) - }) } else { vec![] }; @@ -142,94 +150,8 @@ fn advance_to_nakamoto( // peer is at the start of cycle 8 } -pub fn make_all_signers_vote_for_aggregate_key( - chainstate: &mut StacksChainState, - sortdb: &SortitionDB, - tip: &StacksBlockId, - test_signers: &mut TestSigners, - test_stackers: &[TestStacker], - cycle_id: u128, -) -> Vec { - info!("Trigger signers vote for cycle {}", cycle_id); - - // Check if we already have an aggregate key for this cycle - if chainstate - .get_aggregate_public_key_pox_4(sortdb, tip, cycle_id as u64) - .unwrap() - .is_some() - { - debug!("Aggregate key already set for cycle {}", cycle_id); - return vec![]; - } - - // Generate a new aggregate key - test_signers.generate_aggregate_key(cycle_id as u64); - - let signers_res = readonly_call_with_sortdb( - chainstate, - sortdb, - tip, - SIGNERS_NAME.into(), - "get-signers".into(), - vec![Value::UInt(cycle_id)], - ); - - // If the signers are not set yet, then we're not ready to vote yet. - let signer_vec = match signers_res.expect_optional().unwrap() { - Some(signer_vec) => signer_vec.expect_list().unwrap(), - None => { - debug!("No signers set for cycle {}", cycle_id); - return vec![]; - } - }; - - let mut signers_to_index = HashMap::new(); - for (index, value) in signer_vec.into_iter().enumerate() { - let tuple = value.expect_tuple().unwrap(); - let signer = tuple - .get_owned("signer") - .unwrap() - .expect_principal() - .unwrap(); - let insert_res = signers_to_index.insert(signer, index); - assert!(insert_res.is_none(), "Duplicate signer in signers list"); - } - - // Build a map of the signers, their private keys, and their index - let mut signers = HashMap::new(); - for test_stacker in test_stackers { - let addr = key_to_stacks_addr(&test_stacker.signer_private_key); - let principal = PrincipalData::from(addr); - signers.insert( - addr, - ( - test_stacker.signer_private_key, - signers_to_index[&principal], - ), - ); - } - - // Vote for the aggregate key for each signer - info!("Trigger votes for cycle {}", cycle_id); - signers - .iter() - .map(|(addr, (signer_key, index))| { - let account = get_account(chainstate, sortdb, &addr); - make_signers_vote_for_aggregate_public_key_value( - signer_key, - account.nonce, - *index as u128, - Value::buff_from(test_signers.aggregate_public_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"), - 0, - cycle_id, - ) - }) - .collect() -} - /// Make a peer and transition it into the Nakamoto epoch. -/// The node needs to be stacking and it needs to vote for an aggregate key; +/// The node needs to be stacking. /// otherwise, Nakamoto can't activate. pub fn boot_nakamoto<'a>( test_name: &str, @@ -238,7 +160,6 @@ pub fn boot_nakamoto<'a>( test_stackers: &[TestStacker], observer: Option<&'a TestEventObserver>, ) -> TestPeer<'a> { - let aggregate_public_key = test_signers.aggregate_public_key.clone(); let mut peer_config = TestPeerConfig::new(test_name, 0, 0); let private_key = peer_config.private_key.clone(); let addr = StacksAddress::from_public_keys( @@ -253,7 +174,6 @@ pub fn boot_nakamoto<'a>( // first 25 blocks are boot-up // reward cycle 6 instantiates pox-3 // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation - peer_config.aggregate_public_key = Some(aggregate_public_key.clone()); peer_config .stacker_dbs .push(boot_code_id(MINERS_NAME, false)); @@ -397,6 +317,7 @@ fn replay_reward_cycle( let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); let mut sort_handle = sortdb.index_handle(&sort_tip); + let nakamoto_tip = sort_handle.get_nakamoto_tip_block_id().unwrap().unwrap(); let mut blocks_to_process = stacks_blocks.to_vec(); blocks_to_process.shuffle(&mut thread_rng()); @@ -409,8 +330,10 @@ fn replay_reward_cycle( &sortdb, &mut sort_handle, &mut node.chainstate, + &nakamoto_tip, &block, None, + NakamotoBlockObtainMethod::Pushed, ) .unwrap_or(false); if accepted { @@ -714,13 +637,14 @@ fn pox_treatment() { signer_private_key: signing_key.clone(), stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), amount: u64::MAX as u128 - 10000, - pox_address: Some(PoxAddress::Standard( + pox_addr: Some(PoxAddress::Standard( StacksAddress::new( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, Hash160::from_data(&index.to_be_bytes()), ), Some(AddressHashMode::SerializeP2PKH), )), + max_amount: None, }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); @@ -756,7 +680,7 @@ fn pox_treatment() { let mut expected_reward_set = vec![]; for stacker in test_stackers.iter() { - let pox_addr = stacker.pox_address.as_ref().unwrap(); + let pox_addr = stacker.pox_addr.as_ref().unwrap(); (0..3).for_each(|_| expected_reward_set.push(pox_addr.clone())); } expected_reward_set.sort_by_key(|addr| addr.to_burnchain_repr()); @@ -945,8 +869,7 @@ fn pox_treatment() { /// There are 11 epoch2 blocks and 2 nakamto tenure with 10 nakamoto blocks each /// Tests: /// * get_header_by_coinbase_height -/// * get_parent_vrf_proof -/// * get_highest_nakamoto_tenure +/// * get_ongoing_tenure /// * check_first_nakamoto_tenure /// * check_valid_consensus_hash /// * check_nakamoto_tenure @@ -981,11 +904,12 @@ fn test_nakamoto_chainstate_getters() { let sort_handle = sort_db.index_handle(&sort_tip.sortition_id); // no tenures yet - assert!( - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_handle,) - .unwrap() - .is_none() - ); + assert!(NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_handle.get_nakamoto_tip_block_id().unwrap().unwrap() + ) + .unwrap() + .is_none()); // sortition-existence-check works assert_eq!( @@ -1114,13 +1038,14 @@ fn test_nakamoto_chainstate_getters() { let mut sort_tx = sort_db.tx_handle_begin(&sort_tip.sortition_id).unwrap(); // we now have a tenure, and it confirms the last epoch2 block - let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_tx) - .unwrap() - .unwrap(); + let highest_tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_tx.get_nakamoto_tip_block_id().unwrap().unwrap(), + ) + .unwrap() + .unwrap(); assert_eq!(highest_tenure.coinbase_height, 12); assert_eq!(highest_tenure.num_blocks_confirmed, 1); - assert_eq!(highest_tenure.tenure_index, 1); assert_eq!(highest_tenure.tenure_id_consensus_hash, consensus_hash); assert_eq!(highest_tenure.burn_view_consensus_hash, consensus_hash); @@ -1142,8 +1067,7 @@ fn test_nakamoto_chainstate_getters() { .unwrap() .is_some()); assert!(NakamotoChainState::check_tenure_continuity( - chainstate.db(), - &sort_tx, + &mut chainstate.index_conn(), &blocks[0].header.consensus_hash, &blocks[1].header, ) @@ -1170,7 +1094,7 @@ fn test_nakamoto_chainstate_getters() { // this should return the previous tenure assert_eq!( NakamotoChainState::check_nakamoto_tenure( - chainstate.db(), + &mut chainstate.index_conn(), &mut sort_tx, &blocks[0].header, &tenure_change_payload, @@ -1203,7 +1127,7 @@ fn test_nakamoto_chainstate_getters() { // check works (this would be the first tenure) assert_eq!( NakamotoChainState::check_nakamoto_tenure( - chainstate.db(), + &mut chainstate.index_conn(), &mut sort_tx, &blocks[0].header, &tenure_change_payload, @@ -1227,7 +1151,6 @@ fn test_nakamoto_chainstate_getters() { chainstate.db(), &blocks[0].header, 12, - 1, &tenure_change_payload, ) .unwrap(); @@ -1260,16 +1183,6 @@ fn test_nakamoto_chainstate_getters() { .miner .make_nakamoto_coinbase(None, next_vrf_proof.clone()); - // parent VRF proof check - let parent_vrf_proof = NakamotoChainState::get_parent_vrf_proof( - &peer.stacks_node.as_ref().unwrap().chainstate.db(), - peer.sortdb.as_ref().unwrap().conn(), - &next_consensus_hash, - &txid, - ) - .unwrap(); - assert_eq!(parent_vrf_proof, vrf_proof); - // make the second tenure's blocks let blocks_and_sizes = peer.make_nakamoto_tenure( next_tenure_change_tx.clone(), @@ -1314,13 +1227,14 @@ fn test_nakamoto_chainstate_getters() { let mut sort_tx = sort_db.tx_handle_begin(&sort_tip.sortition_id).unwrap(); // we now have a new highest tenure - let highest_tenure = - NakamotoChainState::get_highest_nakamoto_tenure(chainstate.db(), &sort_tx) - .unwrap() - .unwrap(); + let highest_tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_tx.get_nakamoto_tip_block_id().unwrap().unwrap(), + ) + .unwrap() + .unwrap(); assert_eq!(highest_tenure.coinbase_height, 13); assert_eq!(highest_tenure.num_blocks_confirmed, 10); - assert_eq!(highest_tenure.tenure_index, 2); assert_eq!(highest_tenure.tenure_id_consensus_hash, next_consensus_hash); assert_eq!(highest_tenure.prev_tenure_id_consensus_hash, consensus_hash); assert_eq!(highest_tenure.burn_view_consensus_hash, next_consensus_hash); @@ -1339,15 +1253,13 @@ fn test_nakamoto_chainstate_getters() { .unwrap() .is_none()); assert!(NakamotoChainState::check_tenure_continuity( - chainstate.db(), - &sort_tx, + &mut chainstate.index_conn(), &new_blocks[0].header.consensus_hash, &new_blocks[1].header, ) .unwrap()); assert!(!NakamotoChainState::check_tenure_continuity( - chainstate.db(), - &sort_tx, + &mut chainstate.index_conn(), &blocks[0].header.consensus_hash, &new_blocks[1].header, ) @@ -1410,7 +1322,7 @@ fn test_nakamoto_chainstate_getters() { assert_eq!( NakamotoChainState::check_nakamoto_tenure( - chainstate.db(), + &mut chainstate.index_conn(), &mut sort_tx, &new_blocks[0].header, &tenure_change_payload, @@ -1424,7 +1336,7 @@ fn test_nakamoto_chainstate_getters() { // checks on older confired tenures return the prev tenure assert_eq!( NakamotoChainState::check_nakamoto_tenure( - chainstate.db(), + &mut chainstate.index_conn(), &mut sort_tx, &blocks[0].header, &old_tenure_change_payload, @@ -1448,7 +1360,6 @@ fn test_nakamoto_chainstate_getters() { chainstate.db(), &new_blocks[0].header, 13, - 2, &tenure_change_payload, ) .unwrap(); @@ -1514,33 +1425,6 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a let num_blocks: usize = (thread_rng().gen::() % 10) + 1; let block_height = peer.get_burn_block_height(); - // If we are in the prepare phase, check if we need to generate - // aggregate key votes - let txs = if peer.config.burnchain.is_in_prepare_phase(block_height) { - let cycle_id = peer - .config - .burnchain - .block_height_to_reward_cycle(block_height) - .unwrap(); - let next_cycle_id = cycle_id as u128 + 1; - - with_sortdb(&mut peer, |chainstate, sortdb| { - if let Some(tip) = all_blocks.last() { - make_all_signers_vote_for_aggregate_key( - chainstate, - sortdb, - &tip.block_id(), - &mut test_signers, - &test_stackers, - next_cycle_id, - ) - } else { - vec![] - } - }) - } else { - vec![] - }; // do a stx transfer in each block to a given recipient let recipient_addr = @@ -1550,13 +1434,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a coinbase_tx, &mut test_signers, |miner, chainstate, sortdb, blocks_so_far| { - // Include the aggregate key voting transactions in the first block. - let mut txs = if blocks_so_far.is_empty() { - txs.clone() - } else { - vec![] - }; - + let mut txs = vec![]; if blocks_so_far.len() < num_blocks { debug!("\n\nProduce block {}\n\n", all_blocks.len()); @@ -1737,18 +1615,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a assert_eq!(matured_reward.parent_miner.coinbase, 1000_000_000); } - if i == 8 { - // epoch2 - assert_eq!( - matured_reward.parent_miner.tx_fees, - MinerPaymentTxFees::Epoch2 { - // The signers voting transaction is paying a fee of 1 uSTX - // currently, but this may change to pay 0. - anchored: 1, - streamed: 0, - } - ); - } else if i < 11 { + if i < 11 { // epoch2 assert_eq!( matured_reward.parent_miner.tx_fees, @@ -1782,18 +1649,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a } else { assert_eq!(miner_reward.coinbase, 1000_000_000); } - if i == 7 { - // epoch2 - assert_eq!( - miner_reward.tx_fees, - MinerPaymentTxFees::Epoch2 { - // The signers voting transaction is paying a fee of 1 uSTX - // currently, but this may change to pay 0. - anchored: 1, - streamed: 0, - } - ); - } else if i < 10 { + if i < 10 { // epoch2 assert_eq!( miner_reward.tx_fees, @@ -1958,9 +1814,13 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = NakamotoChainState::get_ongoing_nakamoto_tenure( - chainstate.db(), - &sort_db.index_handle_at_tip(), + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(), ) .unwrap() .unwrap(); @@ -1974,7 +1834,6 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> assert!(tip.consensus_hash == sort_tip.consensus_hash); assert_eq!(highest_tenure.coinbase_height, 12); assert_eq!(highest_tenure.cause, TenureChangeCause::BlockFound); - assert_eq!(highest_tenure.tenure_index, 1); assert_eq!(highest_tenure.num_blocks_confirmed, 1); // extend first tenure @@ -2052,9 +1911,13 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = NakamotoChainState::get_ongoing_nakamoto_tenure( - chainstate.db(), - &sort_db.index_handle_at_tip(), + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(), ) .unwrap() .unwrap(); @@ -2068,7 +1931,6 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> assert!(tip.consensus_hash != sort_tip.consensus_hash); assert_eq!(highest_tenure.coinbase_height, 12); assert_eq!(highest_tenure.cause, TenureChangeCause::Extended); - assert_eq!(highest_tenure.tenure_index, 2); assert_eq!(highest_tenure.num_blocks_confirmed, 10); // second tenure @@ -2149,9 +2011,13 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = NakamotoChainState::get_ongoing_nakamoto_tenure( - chainstate.db(), - &sort_db.index_handle_at_tip(), + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(), ) .unwrap() .unwrap(); @@ -2165,7 +2031,6 @@ pub fn simple_nakamoto_coordinator_2_tenures_3_sortitions<'a>() -> TestPeer<'a> assert!(tip.consensus_hash == sort_tip.consensus_hash); assert_eq!(highest_tenure.coinbase_height, 13); assert_eq!(highest_tenure.cause, TenureChangeCause::BlockFound); - assert_eq!(highest_tenure.tenure_index, 3); assert_eq!(highest_tenure.num_blocks_confirmed, 20); // replay the blocks and sortitions in random order, and verify that we still reach the chain @@ -2212,7 +2077,12 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe &vec![StacksPublicKey::from_private(&private_key)], ) .unwrap(); - let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); + + // make enough signers and signing keys so we can create a block and a malleablized block that + // are both valid + let (mut test_signers, test_stackers) = TestStacker::multi_signing_set(&[ + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, + ]); let mut peer = boot_nakamoto( function_name!(), vec![(addr.into(), 100_000_000)], @@ -2246,33 +2116,6 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe debug!("Next burnchain block: {}", &consensus_hash); let block_height = peer.get_burn_block_height(); - // If we are in the prepare phase, check if we need to generate - // aggregate key votes - let txs = if peer.config.burnchain.is_in_prepare_phase(block_height) { - let cycle_id = peer - .config - .burnchain - .block_height_to_reward_cycle(block_height) - .unwrap(); - let next_cycle_id = cycle_id as u128 + 1; - - with_sortdb(&mut peer, |chainstate, sortdb| { - if let Some(tip) = all_blocks.last() { - make_all_signers_vote_for_aggregate_key( - chainstate, - sortdb, - &tip.block_id(), - &mut test_signers, - &test_stackers, - next_cycle_id, - ) - } else { - vec![] - } - }) - } else { - vec![] - }; // do a stx transfer in each block to a given recipient let recipient_addr = @@ -2283,12 +2126,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe &mut test_signers, |miner, chainstate, sortdb, blocks_so_far| { if blocks_so_far.len() < 10 { - // Include the aggregate key voting transactions in the first block. - let mut txs = if blocks_so_far.is_empty() { - txs.clone() - } else { - vec![] - }; + let mut txs = vec![]; debug!("\n\nProduce block {}\n\n", blocks_so_far.len()); @@ -2352,9 +2190,13 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = NakamotoChainState::get_ongoing_nakamoto_tenure( - chainstate.db(), - &sort_db.index_handle_at_tip(), + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(), ) .unwrap() .unwrap(); @@ -2373,7 +2215,6 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe assert!(last_block.header.consensus_hash == sort_tip.consensus_hash); assert_eq!(highest_tenure.coinbase_height, 12 + i); assert_eq!(highest_tenure.cause, TenureChangeCause::Extended); - assert_eq!(highest_tenure.tenure_index, 10 * (i + 1)); assert_eq!( highest_tenure.num_blocks_confirmed, (blocks.len() as u32) - 1 @@ -2403,9 +2244,6 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe rc_blocks.push(all_blocks.clone()); rc_burn_ops.push(all_burn_ops.clone()); - all_burn_ops.clear(); - all_blocks.clear(); - // in nakamoto, tx fees are rewarded by the next tenure, so the // scheduled rewards come 1 tenure after the coinbase reward matures let miner = p2pkh_from(&stx_miner_key); @@ -2520,6 +2358,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe ); peer.check_nakamoto_migration(); + peer.check_malleablized_blocks(all_blocks, 2); return peer; } From d7e14b88ef49d086ec961127caf9a5e752d8c864 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:03:00 -0400 Subject: [PATCH 0380/1400] chore: API sync --- stackslib/src/chainstate/nakamoto/miner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 74cc9a2d72..d7412f808e 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -319,7 +319,7 @@ impl NakamotoBlockBuilder { let parent_block_id = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash); let parent_coinbase_height = - NakamotoChainState::get_coinbase_height(chainstate.db(), &parent_block_id) + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &parent_block_id) .ok() .flatten() .unwrap_or(0); From e5642e5f2b7bfed6641e71f8f689449d44b9b383 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:03:22 -0400 Subject: [PATCH 0381/1400] feat: MARF the Nakamoto chain state. Any query that is not indexed specifically by a block ID must be carried out relative to a designated chain tip. Adds support for MARF'ing all the data for all affected methods, and rewrites all affected methods to use it. --- stackslib/src/chainstate/nakamoto/mod.rs | 1116 ++++++++++++++++++---- 1 file changed, 906 insertions(+), 210 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index bfa92f3198..f281e71795 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -45,7 +45,9 @@ use stacks_common::types::chainstate::{ StacksPrivateKey, StacksPublicKey, TrieHash, VRFSeed, }; use stacks_common::types::{PrivateKey, StacksEpochId}; -use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::hash::{ + hex_bytes, to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum, +}; use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; @@ -66,7 +68,7 @@ use super::stacks::boot::{ use super::stacks::db::accounts::MinerReward; use super::stacks::db::{ ChainstateTx, ClarityTx, MinerPaymentSchedule, MinerPaymentTxFees, MinerRewardInfo, - StacksBlockHeaderTypes, StacksDBTx, StacksEpochReceipt, StacksHeaderInfo, + StacksBlockHeaderTypes, StacksEpochReceipt, StacksHeaderInfo, }; use super::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use super::stacks::{ @@ -80,11 +82,17 @@ use crate::chainstate::burn::{BlockSnapshot, SortitionHash}; use crate::chainstate::coordinator::{BlockEventDispatcher, Error, OnChainRewardSetProvider}; use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; -use crate::chainstate::nakamoto::tenure::{NAKAMOTO_TENURES_SCHEMA_1, NAKAMOTO_TENURES_SCHEMA_2}; +use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; +use crate::chainstate::nakamoto::tenure::{ + NakamotoTenureId, NAKAMOTO_TENURES_SCHEMA_1, NAKAMOTO_TENURES_SCHEMA_2, + NAKAMOTO_TENURES_SCHEMA_3, +}; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::{POX_4_NAME, SIGNERS_UPDATE_STATE}; use crate::chainstate::stacks::db::blocks::DummyEventDispatcher; -use crate::chainstate::stacks::db::{DBConfig as ChainstateConfig, StacksChainState}; +use crate::chainstate::stacks::db::{ + DBConfig as ChainstateConfig, StacksChainState, StacksDBConn, StacksDBTx, +}; use crate::chainstate::stacks::index::marf::MarfConnection; use crate::chainstate::stacks::{ TenureChangeCause, MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, @@ -239,6 +247,462 @@ lazy_static! { ADD COLUMN burn_view TEXT; "#.into(), ]; + + pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_3: Vec = vec![ + NAKAMOTO_TENURES_SCHEMA_3.into(), + r#" + UPDATE db_config SET version = "6"; + "#.into(), + // Add a `height_in_tenure` field to the block header row, so we know how high this block is + // within its tenure. This is needed to process malleablized Nakamoto blocks with the same + // height, as well as accidental forks that can arise from slow miners. + // + // + // + // No default value is needed because at the time of this writing, this table is actually empty. + r#" + ALTER TABLE nakamoto_block_headers + ADD COLUMN height_in_tenure; + "#.into(), + ]; +} + +/// MARF key/value pairs +pub mod nakamoto_keys { + use super::{hex_bytes, to_hex, ConsensusHash, NakamotoTenureId, StacksBlockId}; + + pub fn ongoing_tenure_id() -> &'static str { + "nakamoto::tenures::ongoing_tenure_id" + } + + pub fn ongoing_tenure_coinbase_height(coinbase_height: u64) -> String { + format!( + "nakamoto::tenures::ongoing_tenure_coinbase_height::{}", + coinbase_height + ) + } + + pub fn block_found_tenure_id(tenure_id_consensus_hash: &ConsensusHash) -> String { + format!( + "nakamoto::tenures::block_found_tenure_id::{}", + tenure_id_consensus_hash + ) + } + + pub fn highest_block_in_tenure(tenure_id_consensus_hash: &ConsensusHash) -> String { + format!( + "nakamoto::tenures::highest_block_in_tenure::{}", + tenure_id_consensus_hash + ) + } + + pub fn coinbase_height(ch: &ConsensusHash) -> String { + format!("nakamoto::headers::coinbase_height::{}", ch) + } + + pub fn tenure_start_block_id(ch: &ConsensusHash) -> String { + format!("nakamoto::headers::tenure_start_block_id::{}", ch) + } + + pub fn finished_tenure_consensus_hash(ch: &ConsensusHash) -> String { + format!("nakamoto::tenures::finished_tenure_consensus_hash::{}", ch) + } + + pub fn parent_tenure_consensus_hash(ch: &ConsensusHash) -> String { + format!("nakamoto::tenures::parent_tenure_consensus_hash::{}", ch) + } + + pub fn make_block_id_value(id: &StacksBlockId) -> String { + format!("{}", id) + } + + pub fn make_consensus_hash_value(ch: &ConsensusHash) -> String { + format!("{}", ch) + } + + pub fn make_u64_value(value: u64) -> String { + to_hex(&value.to_be_bytes()) + } + + pub fn make_bool_value(value: bool) -> String { + to_hex(&[if value { 1 } else { 0 }]) + } + + pub fn make_tenure_id_value(value: &NakamotoTenureId) -> String { + format!("{}{}", &value.burn_view_consensus_hash, &value.block_id) + } + + pub fn parse_consensus_hash(value: &str) -> Option { + ConsensusHash::from_hex(value).ok() + } + + pub fn parse_block_id(value: &str) -> Option { + StacksBlockId::from_hex(value).ok() + } + + pub fn parse_u64(value: &str) -> Option { + let bytes = hex_bytes(value).ok()?; + if bytes.len() != 8 { + return None; + } + let mut bytes_u64 = [0u8; 8]; + bytes_u64[0..8].copy_from_slice(&bytes[0..8]); + Some(u64::from_be_bytes(bytes_u64)) + } + + pub fn parse_bool(value: &str) -> Option { + let bytes = hex_bytes(value).ok()?; + if bytes.len() != 1 { + return None; + } + Some(bytes[0] != 0) + } + + pub fn parse_tenure_id_value(value: &str) -> Option { + let bytes = hex_bytes(value).ok()?; + if bytes.len() != 52 { + // ConsensusHash is 20 bytes + // StacksBlockId is 32 bytes + return None; + } + let mut ch_bytes = [0u8; 20]; + let mut block_id_bytes = [0u8; 32]; + ch_bytes[0..20].copy_from_slice(&bytes[0..20]); + block_id_bytes[0..32].copy_from_slice(&bytes[20..52]); + + let id = NakamotoTenureId { + burn_view_consensus_hash: ConsensusHash(ch_bytes), + block_id: StacksBlockId(block_id_bytes), + }; + Some(id) + } +} + +/// Trait for common MARF getters between StacksDBConn and StacksDBTx +pub trait StacksHandle { + /// Get the Nakamoto tenure-start block ID for a given coinbase height. + fn get_nakamoto_block_id_at_coinbase_height( + &mut self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError>; + + /// Get the tenure start block ID for a given tenure. + fn get_tenure_start_block_id( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError>; + + /// Get the coinbase height of a given tenure + fn get_coinbase_height( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError>; + + /// Get the pointer to this block's ongoing tenure -- be it a BlockFound or Extension tenure + fn get_ongoing_tenure_id( + &mut self, + tip: &StacksBlockId, + ) -> Result, DBError>; + + /// Get the highest block in a given tenure + fn get_highest_block_id_in_tenure( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError>; + + /// Get the pointer to the given consensus hash's block-found tenure ID in this Stacks history + fn get_block_found_tenure_id( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError>; + + /// Is a tenure completely processed? + /// Returns Some(true) if there exists both a tenure-start block ID _and_ a tenure-finished key + /// for the given tenure ID consensus hash + /// Returns Some(false) if there exists a tenure-start block ID for this tenure ID consensus + /// hash, but no tenure-finished key. + /// Returns None if there is no tenure-start block ID for this tenure ID consensus hash (means + /// that there's no such tenure). + fn is_tenure_finished( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError>; + + /// Get the parent tenure ID of a given tenure + fn get_parent_tenure_consensus_hash( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError>; + + /// Get the inner sqlite connection + fn sqlite(&self) -> &Connection; +} + +impl StacksHandle for StacksDBConn<'_> { + fn get_nakamoto_block_id_at_coinbase_height( + &mut self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), + )? + .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten()) + } + + fn get_tenure_start_block_id( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::tenure_start_block_id(tenure_id_consensus_hash), + )? + .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten()) + } + + fn get_coinbase_height( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::coinbase_height(tenure_id_consensus_hash), + )? + .map(|height_str| nakamoto_keys::parse_u64(&height_str)) + .flatten()) + } + + fn get_ongoing_tenure_id( + &mut self, + tip: &StacksBlockId, + ) -> Result, DBError> { + Ok(self + .get_indexed(tip, nakamoto_keys::ongoing_tenure_id())? + .map(|id_str| nakamoto_keys::parse_tenure_id_value(&id_str)) + .flatten()) + } + + fn get_highest_block_id_in_tenure( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::highest_block_in_tenure(tenure_id_consensus_hash), + )? + .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten()) + } + + fn get_block_found_tenure_id( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::block_found_tenure_id(tenure_id_consensus_hash), + )? + .map(|id_str| nakamoto_keys::parse_tenure_id_value(&id_str)) + .flatten()) + } + + fn is_tenure_finished( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + if self + .get_tenure_start_block_id(tip, tenure_id_consensus_hash)? + .is_none() + { + // tenure not started + return Ok(None); + } + if self + .get_indexed( + tip, + &nakamoto_keys::finished_tenure_consensus_hash(tenure_id_consensus_hash), + )? + .is_none() + { + // tenure has started, but is not done yet + return Ok(Some(false)); + } + + // tenure started and finished + Ok(Some(true)) + } + + fn get_parent_tenure_consensus_hash( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::parent_tenure_consensus_hash(tenure_id_consensus_hash), + )? + .map(|ch_str| nakamoto_keys::parse_consensus_hash(&ch_str)) + .flatten()) + } + + fn sqlite(&self) -> &Connection { + self.conn() + } +} + +impl StacksHandle for StacksDBTx<'_> { + fn get_nakamoto_block_id_at_coinbase_height( + &mut self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), + )? + .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten()) + } + + fn get_tenure_start_block_id( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::tenure_start_block_id(tenure_id_consensus_hash), + )? + .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten()) + } + + fn get_coinbase_height( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::coinbase_height(tenure_id_consensus_hash), + )? + .map(|height_str| nakamoto_keys::parse_u64(&height_str)) + .flatten()) + } + + fn get_ongoing_tenure_id( + &mut self, + tip: &StacksBlockId, + ) -> Result, DBError> { + Ok(self + .get_indexed(tip, nakamoto_keys::ongoing_tenure_id())? + .map(|id_str| nakamoto_keys::parse_tenure_id_value(&id_str)) + .flatten()) + } + + fn get_highest_block_id_in_tenure( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::highest_block_in_tenure(tenure_id_consensus_hash), + )? + .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten()) + } + + fn get_block_found_tenure_id( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::block_found_tenure_id(tenure_id_consensus_hash), + )? + .map(|id_str| nakamoto_keys::parse_tenure_id_value(&id_str)) + .flatten()) + } + + fn is_tenure_finished( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + if self + .get_tenure_start_block_id(tip, tenure_id_consensus_hash)? + .is_none() + { + // tenure not started + return Ok(None); + } + if self + .get_indexed( + tip, + &nakamoto_keys::finished_tenure_consensus_hash(tenure_id_consensus_hash), + )? + .is_none() + { + // tenure has started, but is not done yet + return Ok(Some(false)); + } + + // tenure started and finished + Ok(Some(true)) + } + + fn get_parent_tenure_consensus_hash( + &mut self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::parent_tenure_consensus_hash(tenure_id_consensus_hash), + )? + .map(|ch_str| nakamoto_keys::parse_consensus_hash(&ch_str)) + .flatten()) + } + + fn sqlite(&self) -> &Connection { + self.tx().deref() + } +} + +impl<'a> ChainstateTx<'a> { + pub fn as_tx(&mut self) -> &mut StacksDBTx<'a> { + &mut self.tx + } } /// Matured miner reward schedules @@ -508,8 +972,12 @@ impl NakamotoBlockHeader { } pub fn block_hash(&self) -> BlockHeaderHash { - BlockHeaderHash::from_serializer(self) - .expect("BUG: failed to serialize block header hash struct") + // same as sighash -- we don't commit to signatures + BlockHeaderHash( + self.signer_signature_hash_inner() + .expect("BUG: failed to serialize block header hash struct") + .0, + ) } pub fn block_id(&self) -> StacksBlockId { @@ -538,7 +1006,10 @@ impl NakamotoBlockHeader { /// - At least the minimum number of signatures (based on total signer weight /// and a 70% threshold) /// - Order of signatures is maintained vs signer set - pub fn verify_signer_signatures(&self, reward_set: &RewardSet) -> Result<(), ChainstateError> { + /// + /// Returns the signing weight on success. + /// Returns ChainstateError::InvalidStacksBlock on error + pub fn verify_signer_signatures(&self, reward_set: &RewardSet) -> Result { let message = self.signer_signature_hash(); let Some(signers) = &reward_set.signers else { return Err(ChainstateError::InvalidStacksBlock( @@ -600,12 +1071,12 @@ impl NakamotoBlockHeader { if total_weight_signed < threshold { return Err(ChainstateError::InvalidStacksBlock(format!( - "Not enough signatures. Needed at least {} but got {}", - threshold, total_weight_signed + "Not enough signatures. Needed at least {} but got {} (out of {})", + threshold, total_weight_signed, total_weight, ))); } - return Ok(()); + return Ok(total_weight_signed); } /// Compute the threshold for the minimum number of signers (by weight) required @@ -1025,16 +1496,18 @@ impl NakamotoBlock { /// Verify that the VRF seed of this block's block-commit is the hash of the parent tenure's /// VRF seed. - pub fn validate_vrf_seed( + pub fn validate_vrf_seed( &self, sortdb_conn: &Connection, - chainstate_conn: &Connection, + chainstate_conn: &mut STH, block_commit: &LeaderBlockCommitOp, ) -> Result<(), ChainstateError> { // the block-commit from the miner who created this coinbase must have a VRF seed that // is the hash of the parent tenure's VRF proof. + // Do the query relative to the parent block ID, since this block may not be processed yet. let parent_vrf_proof = NakamotoChainState::get_parent_vrf_proof( chainstate_conn, + &self.header.parent_block_id, sortdb_conn, &self.header.consensus_hash, &block_commit.txid, @@ -1411,9 +1884,8 @@ impl NakamotoChainState { dispatcher_opt: Option<&'a T>, ) -> Result, ChainstateError> { let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); - let sortition_handle = sort_db.index_handle(canonical_sortition_tip); - let Some((next_ready_block, block_size)) = nakamoto_blocks_db - .next_ready_nakamoto_block(stacks_chain_state.db(), &sortition_handle)? + let Some((next_ready_block, block_size)) = + nakamoto_blocks_db.next_ready_nakamoto_block(stacks_chain_state.db())? else { // no more blocks test_debug!("No more Nakamoto blocks to process"); @@ -1884,56 +2356,67 @@ impl NakamotoChainState { Ok(()) } - /// Insert a Nakamoto block into the staging blocks DB - pub(crate) fn store_block( + /// Insert a Nakamoto block into the staging blocks DB. + /// We only store a block in the following cases: + /// + /// * No block with this block's sighash exists in the DB + /// * A block with this block's sighash exists, AND + /// * this block represents more signing power + /// + /// If neither of the above is true, then this is a no-op. + pub(crate) fn store_block_if_better( staging_db_tx: &NakamotoStagingBlocksTx, block: &NakamotoBlock, burn_attachable: bool, - ) -> Result<(), ChainstateError> { + signing_weight: u32, + obtain_method: NakamotoBlockObtainMethod, + ) -> Result { let block_id = block.block_id(); - let Ok(tenure_start) = block.is_wellformed_tenure_start_block() else { - return Err(ChainstateError::InvalidStacksBlock( - "Tried to store a tenure-start block that is not well-formed".into(), - )); - }; + let sighash = block.header.signer_signature_hash(); - staging_db_tx.execute( - "INSERT INTO nakamoto_staging_blocks ( - block_hash, - consensus_hash, - parent_block_id, - is_tenure_start, - burn_attachable, - orphaned, - processed, - - height, - index_block_hash, - download_time, - arrival_time, - processed_time, - data - ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)", - params![ - &block.header.block_hash(), - &block.header.consensus_hash, - &block.header.parent_block_id, - &tenure_start, - if burn_attachable { 1 } else { 0 }, - 0, - 0, - u64_to_sql(block.header.chain_length)?, - &block_id, - 0, - 0, - 0, - block.serialize_to_vec(), - ], - )?; - if burn_attachable { - staging_db_tx.set_burn_block_processed(&block.header.consensus_hash)?; + // case 1 -- no block with this sighash exists. + if staging_db_tx.try_store_block_with_new_sighash( + block, + burn_attachable, + signing_weight, + obtain_method, + )? { + debug!("Stored block with new sighash"; + "block_id" => %block_id, + "sighash" => %sighash); + return Ok(true); } - Ok(()) + + // case 2 -- the block exists. Consider replacing it, but only if its + // signing weight is higher. + let (existing_block_id, _processed, orphaned, existing_signing_weight) = staging_db_tx.conn().get_block_processed_and_signed_weight(&block.header.consensus_hash, &sighash)? + .ok_or_else(|| { + // this should be unreachable -- there's no record of this block + error!("Could not store block {} ({}) with sighash {} -- no record of its processed status or signing weight!", &block_id, &block.header.consensus_hash, &sighash); + ChainstateError::NoSuchBlockError + })?; + + if orphaned { + // nothing to do + debug!("Will not store alternative copy of block {} ({}) with sighash {}, since a block with the same sighash was orphaned", &block_id, &block.header.consensus_hash, &sighash); + return Ok(false); + } + + let ret = if existing_signing_weight < signing_weight { + staging_db_tx.replace_block(block, signing_weight, obtain_method)?; + debug!("Replaced block"; + "existing_block_id" => %existing_block_id, + "block_id" => %block_id, + "sighash" => %sighash, + "existing_signing_weight" => existing_signing_weight, + "signing_weight" => signing_weight); + true + } else { + debug!("Will not store alternative copy of block {} ({}) with sighash {}, since it has less signing power", &block_id, &block.header.consensus_hash, &sighash); + false + }; + + return Ok(ret); } /// Accept a Nakamoto block into the staging blocks DB. @@ -1951,6 +2434,7 @@ impl NakamotoChainState { staging_db_tx: &NakamotoStagingBlocksTx, headers_conn: &Connection, reward_set: RewardSet, + obtain_method: NakamotoBlockObtainMethod, ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); // do nothing if we already have this block @@ -1997,22 +2481,35 @@ impl NakamotoChainState { return Ok(false); }; - if let Err(e) = block.header.verify_signer_signatures(&reward_set) { - warn!("Received block, but the signer signatures are invalid"; - "block_id" => %block.block_id(), - "error" => ?e, - ); - return Err(e); - } + let signing_weight = match block.header.verify_signer_signatures(&reward_set) { + Ok(x) => x, + Err(e) => { + warn!("Received block, but the signer signatures are invalid"; + "block_id" => %block.block_id(), + "error" => ?e, + ); + return Err(e); + } + }; // if we pass all the tests, then along the way, we will have verified (in // Self::validate_nakamoto_block_burnchain) that the consensus hash of this block is on the // same sortition history as `db_handle` (and thus it must be burn_attachable) let burn_attachable = true; - Self::store_block(staging_db_tx, block, burn_attachable)?; - test_debug!("Stored Nakamoto block {}", &block.block_id()); - Ok(true) + let ret = Self::store_block_if_better( + staging_db_tx, + block, + burn_attachable, + signing_weight, + obtain_method, + )?; + if ret { + test_debug!("Stored Nakamoto block {}", &block.block_id()); + } else { + test_debug!("Did NOT store Nakamoto block {}", &block.block_id()); + } + Ok(ret) } /// Return the total ExecutionCost consumed during the tenure up to and including @@ -2052,52 +2549,33 @@ impl NakamotoChainState { tip_index_hash: &StacksBlockId, coinbase_height: u64, ) -> Result, ChainstateError> { - // query for block header info at the tenure-height, then check if in fork - let qry = "SELECT DISTINCT tenure_id_consensus_hash AS consensus_hash FROM nakamoto_tenures WHERE coinbase_height = ?1"; - - let candidate_chs: Vec = - query_rows(tx.tx(), qry, &[u64_to_sql(coinbase_height)?])?; - - if candidate_chs.len() == 0 { - // no nakamoto_tenures at that tenure height, check if there's a stack block header where - // block_height = coinbase_height - let Some(ancestor_at_height) = tx - .get_ancestor_block_hash(coinbase_height, tip_index_hash)? - .map(|ancestor| Self::get_block_header(tx.tx(), &ancestor)) - .transpose()? - .flatten() - else { - warn!("No such epoch2 ancestor"; - "coinbase_height" => coinbase_height, - "tip_index_hash" => %tip_index_hash, - ); - return Ok(None); - }; - // only return if it is an epoch-2 block, because that's - // the only case where block_height can be interpreted as - // tenure height. - if ancestor_at_height.is_epoch_2_block() { - return Ok(Some(ancestor_at_height)); - } else { - return Ok(None); - } + // nakamoto block? + if let Some(block_id) = + tx.get_nakamoto_block_id_at_coinbase_height(tip_index_hash, coinbase_height)? + { + return Self::get_block_header_nakamoto(tx.sqlite(), &block_id); } - for candidate_ch in candidate_chs.into_iter() { - let Some(candidate) = Self::get_block_header_by_consensus_hash(tx, &candidate_ch)? - else { - continue; - }; - let Ok(Some(ancestor_at_height)) = - tx.get_ancestor_block_hash(candidate.stacks_block_height, tip_index_hash) - else { - // if there's an error or no result, this candidate doesn't match, so try next candidate - continue; - }; - if ancestor_at_height == candidate.index_block_hash() { - return Ok(Some(candidate)); - } + // epcoh2 block? + let Some(ancestor_at_height) = tx + .get_ancestor_block_hash(coinbase_height, tip_index_hash)? + .map(|ancestor| Self::get_block_header(tx.tx(), &ancestor)) + .transpose()? + .flatten() + else { + warn!("No such epoch2 ancestor"; + "coinbase_height" => coinbase_height, + "tip_index_hash" => %tip_index_hash, + ); + return Ok(None); + }; + // only return if it is an epoch-2 block, because that's + // the only case where block_height can be interpreted as + // tenure height. + if ancestor_at_height.is_epoch_2_block() { + return Ok(Some(ancestor_at_height)); } + Ok(None) } @@ -2168,6 +2646,20 @@ impl NakamotoChainState { Ok(result.is_some()) } + /// Does an epoch2 block header exist? + pub fn has_block_header_epoch2( + chainstate_conn: &Connection, + index_block_hash: &StacksBlockId, + ) -> Result { + let sql = "SELECT 1 FROM block_headers WHERE index_block_hash = ?1"; + let result: Option = + query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { + "FATAL: multiple rows for the same block hash".to_string() + })?; + + Ok(result.is_some()) + } + /// Load the canonical Stacks block header (either epoch-2 rules or Nakamoto) /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. pub fn get_canonical_block_header( @@ -2183,55 +2675,108 @@ impl NakamotoChainState { } /// Get the tenure-start block header of a given consensus hash. - /// It might be an epoch 2.x block header - pub fn get_block_header_by_consensus_hash( - chainstate_conn: &Connection, + /// For Nakamoto blocks, this is the first block in the tenure identified by the consensus + /// hash. + /// For epoch2 blocks, this is simply the block whose winning sortition happened in the + /// sortition identified by the consensus hash. + /// + /// `tip_block_id` is the chain tip from which to perform the query. + pub fn get_tenure_start_block_header( + chainstate_conn: &mut STH, + tip_block_id: &StacksBlockId, consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { - let nakamoto_header_info = - Self::get_nakamoto_tenure_start_block_header(chainstate_conn, consensus_hash)?; - if nakamoto_header_info.is_some() { - return Ok(nakamoto_header_info); + // nakamoto? + if let Some(hdr) = Self::get_nakamoto_tenure_start_block_header( + chainstate_conn, + tip_block_id, + consensus_hash, + )? { + return Ok(Some(hdr)); } - // parent might be epoch 2 + // epoch2? let epoch2_header_info = StacksChainState::get_stacks_block_header_info_by_consensus_hash( - chainstate_conn, + chainstate_conn.sqlite(), consensus_hash, )?; Ok(epoch2_header_info) } + /// Get the first block header in a Nakamoto tenure + pub fn get_nakamoto_tenure_start_block_header( + chainstate_conn: &mut STH, + tip_block_id: &StacksBlockId, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let Some(block_id) = + chainstate_conn.get_tenure_start_block_id(tip_block_id, consensus_hash)? + else { + return Ok(None); + }; + Self::get_block_header_nakamoto(chainstate_conn.sqlite(), &block_id) + } + + /// Get the highest block in the given tenure + /// TODO: unit test + pub fn get_highest_block_header_in_tenure( + chainstate_conn: &mut STH, + tip_block_id: &StacksBlockId, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let Some(block_id) = + chainstate_conn.get_highest_block_id_in_tenure(tip_block_id, consensus_hash)? + else { + return Ok(None); + }; + Self::get_block_header_nakamoto(chainstate_conn.sqlite(), &block_id) + } + /// Get the VRF proof for a Stacks block. - /// This works for either Nakamoto or epoch 2.x - pub fn get_block_vrf_proof( - chainstate_conn: &Connection, + /// For Nakamoto blocks, this is the VRF proof contained in the coinbase of the tenure-start + /// block of the given tenure identified by the consensus hash. + /// + /// For epoch 2.x blocks, this is the VRF proof in the block header itself, whose sortition is + /// identified by the consensus hash. + pub fn get_block_vrf_proof( + chainstate_conn: &mut STH, + tip_block_id: &StacksBlockId, consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { - let Some(start_header) = NakamotoChainState::get_block_header_by_consensus_hash( + let Some(start_header) = NakamotoChainState::get_tenure_start_block_header( chainstate_conn, + tip_block_id, consensus_hash, )? else { + warn!("No tenure-start block"; + "consensus_hash" => %consensus_hash, + "tip_block_id" => %tip_block_id); return Ok(None); }; let vrf_proof = match start_header.anchored_header { StacksBlockHeaderTypes::Epoch2(epoch2_header) => Some(epoch2_header.proof), StacksBlockHeaderTypes::Nakamoto(..) => { - NakamotoChainState::get_nakamoto_tenure_vrf_proof(chainstate_conn, consensus_hash)? + NakamotoChainState::get_nakamoto_tenure_vrf_proof( + chainstate_conn.sqlite(), + &start_header.index_block_hash(), + )? } }; Ok(vrf_proof) } - /// Get the VRF proof of the parent tenure (either Nakamoto or epoch 2.x) of the block + /// Get the VRF proof of the parent tenure (either Nakamoto or epoch 2.x) of the tenure /// identified by the given consensus hash. - /// The parent must already have been processed. + /// The parent tenure's tenure-start block must already have been processed. /// - /// `consensus_hash` identifies the child block. - /// `block_commit_txid` identifies the child block's tenure's block-commit tx + /// `tip_block_id` identifies the tip of the chain history to search. It can be the child + /// block's block ID, or any descendant. + /// `consensus_hash` identifies the child block's tenure. + /// `block_commit_txid` identifies the child block's tenure's block-commit tx, which in turn + /// contains the hash of the start-block of the tenure prior to the child's tenure. /// /// Returns the proof of this block's parent tenure on success. /// @@ -2240,8 +2785,9 @@ impl NakamotoChainState { /// /// Returns NoSuchBlockError if the block header for `consensus_hash` does not exist, or if the /// parent block header info does not exist (i.e. the chainstate DB is missing something) - pub fn get_parent_vrf_proof( - chainstate_conn: &Connection, + pub fn get_parent_vrf_proof( + chainstate_conn: &mut STH, + tip_block_id: &StacksBlockId, sortdb_conn: &Connection, consensus_hash: &ConsensusHash, block_commit_txid: &Txid, @@ -2266,10 +2812,11 @@ impl NakamotoChainState { )?; let parent_vrf_proof = - Self::get_block_vrf_proof(chainstate_conn, &parent_sn.consensus_hash)? + Self::get_block_vrf_proof(chainstate_conn, tip_block_id, &parent_sn.consensus_hash)? .ok_or(ChainstateError::NoSuchBlockError) .map_err(|e| { - warn!("Nakamoto block has no parent"; + warn!("Could not find parent VRF proof"; + "tip_block_id" => %tip_block_id, "block consensus_hash" => %consensus_hash); e })?; @@ -2320,21 +2867,25 @@ impl NakamotoChainState { /// Get the VRF proof for a Nakamoto block, if it exists. /// Returns None if the Nakamoto block's VRF proof is not found (e.g. because there is no - /// Nakamoto block) - pub fn get_nakamoto_tenure_vrf_proof( + /// Nakamoto block, or becuase this isn't a tenure-start block) + pub(crate) fn get_nakamoto_tenure_vrf_proof( chainstate_conn: &Connection, - consensus_hash: &ConsensusHash, + tenure_start_block_id: &StacksBlockId, ) -> Result, ChainstateError> { - let sql = "SELECT vrf_proof FROM nakamoto_block_headers WHERE consensus_hash = ?1 AND tenure_changed = 1"; - let args: &[&dyn ToSql] = &[consensus_hash]; + let sql = r#"SELECT IFNULL(vrf_proof,"") FROM nakamoto_block_headers WHERE index_block_hash = ?1"#; + let args: &[&dyn ToSql] = &[tenure_start_block_id]; let proof_bytes: Option = query_row(chainstate_conn, sql, args)?; if let Some(bytes) = proof_bytes { + if bytes.len() == 0 { + // no VRF proof + return Ok(None); + } let proof = VRFProof::from_hex(&bytes) .ok_or(DBError::Corruption) .map_err(|e| { warn!("Failed to load VRF proof: could not decode"; "vrf_proof" => %bytes, - "consensus_hash" => %consensus_hash + "tenure_start_block_id" => %tenure_start_block_id, ); e })?; @@ -2344,9 +2895,39 @@ impl NakamotoChainState { } } - /// Verify that a nakamoto block's block-commit's VRF seed is consistent with the VRF proof - fn check_block_commit_vrf_seed( - chainstate_conn: &Connection, + /// Return the coinbase height of `block` if it was a nakamoto block, or the + /// Stacks block height of `block` if it was an epoch-2 block + /// + /// In Stacks 2.x, the coinbase height and block height are the + /// same. A miner's tenure in Stacks 2.x is entirely encompassed + /// in the single Bitcoin-anchored Stacks block they produce, as + /// well as the microblock stream they append to it. But in Nakamoto, + /// the coinbase height and block height are decoupled. + pub fn get_coinbase_height( + chainstate_conn: &mut STH, + block: &StacksBlockId, + ) -> Result, ChainstateError> { + // nakamoto header? + if let Some(hdr) = Self::get_block_header_nakamoto(chainstate_conn.sqlite(), block)? { + return Ok(chainstate_conn.get_coinbase_height(block, &hdr.consensus_hash)?); + } + + // epoch2 header + let epoch_2_qry = "SELECT block_height FROM block_headers WHERE index_block_hash = ?1"; + let opt_height: Option = chainstate_conn + .sqlite() + .query_row(epoch_2_qry, &[block], |row| row.get(0)) + .optional()?; + opt_height + .map(u64::try_from) + .transpose() + .map_err(|_| ChainstateError::DBError(DBError::ParseError)) + } + + /// Verify that a nakamoto block's block-commit's VRF seed is consistent with the VRF proof. + /// Specifically, it must be the hash of the parent tenure's VRF proof. + pub(crate) fn check_block_commit_vrf_seed( + chainstate_conn: &mut STH, sortdb_conn: &Connection, block: &NakamotoBlock, ) -> Result<(), ChainstateError> { @@ -2367,6 +2948,7 @@ impl NakamotoChainState { e })?; + // N.B. passing block.block_id() here means that we'll look into the parent tenure block.validate_vrf_seed(sortdb_conn, chainstate_conn, &block_commit) } @@ -2382,6 +2964,7 @@ impl NakamotoChainState { block_cost: &ExecutionCost, total_tenure_cost: &ExecutionCost, tenure_changed: bool, + height_in_tenure: u32, tenure_tx_fees: u128, ) -> Result<(), ChainstateError> { assert_eq!(tip_info.stacks_block_height, header.chain_length,); @@ -2440,6 +3023,7 @@ impl NakamotoChainState { if tenure_changed { &1i64 } else { &0i64 }, &vrf_proof_bytes.as_ref(), &header.pox_treatment, + &height_in_tenure, tip_info.burn_view.as_ref().ok_or_else(|| { error!( "Attempted to store nakamoto block header information without burnchain view"; @@ -2453,27 +3037,36 @@ impl NakamotoChainState { chainstate_tx.execute( "INSERT INTO nakamoto_block_headers - (block_height, index_root, consensus_hash, - burn_header_hash, burn_header_height, - burn_header_timestamp, block_size, - - header_type, - version, chain_length, burn_spent, - miner_signature, signer_signature, tx_merkle_root, state_index_root, - timestamp, - - block_hash, - index_block_hash, - cost, - total_tenure_cost, - tenure_tx_fees, - parent_block_id, - tenure_changed, - vrf_proof, - signer_bitvec, - burn_view - ) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24, ?25, ?26)", + (block_height, + index_root, + consensus_hash, + burn_header_hash, + burn_header_height, + burn_header_timestamp, + block_size, + + header_type, + version, + chain_length, + burn_spent, + miner_signature, + signer_signature, + tx_merkle_root, + state_index_root, + timestamp, + + block_hash, + index_block_hash, + cost, + total_tenure_cost, + tenure_tx_fees, + parent_block_id, + tenure_changed, + vrf_proof, + signer_bitvec, + height_in_tenure, + burn_view) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24, ?25, ?26, ?27)", args )?; @@ -2486,7 +3079,7 @@ impl NakamotoChainState { headers_tx: &mut StacksDBTx, parent_tip: &StacksBlockHeaderTypes, parent_consensus_hash: &ConsensusHash, - new_tip: &NakamotoBlockHeader, + new_block: &NakamotoBlock, new_vrf_proof: Option<&VRFProof>, new_burn_header_hash: &BurnchainHeaderHash, new_burnchain_height: u32, @@ -2502,9 +3095,11 @@ impl NakamotoChainState { burn_delegate_stx_ops: Vec, burn_vote_for_aggregate_key_ops: Vec, new_tenure: bool, + coinbase_height: u64, block_fees: u128, burn_view: &ConsensusHash, ) -> Result { + let new_tip = &new_block.header; if new_tip.parent_block_id != StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { @@ -2535,10 +3130,92 @@ impl NakamotoChainState { let new_block_hash = new_tip.block_hash(); let index_block_hash = new_tip.block_id(); + let mut marf_keys = vec![]; + let mut marf_values = vec![]; + + if new_tenure { + // make the coinbase height point to this tenure-start block + marf_keys.push(nakamoto_keys::ongoing_tenure_coinbase_height( + coinbase_height, + )); + marf_values.push(nakamoto_keys::make_block_id_value(&new_tip.block_id())); + + // point this tenure to its start block + marf_keys.push(nakamoto_keys::tenure_start_block_id( + &new_tip.consensus_hash, + )); + marf_values.push(nakamoto_keys::make_block_id_value(&new_tip.block_id())); + + // record coinbase height of this tenure + marf_keys.push(nakamoto_keys::coinbase_height(&new_tip.consensus_hash)); + marf_values.push(nakamoto_keys::make_u64_value(coinbase_height)); + + // record that this previous tenure is done + let Some(tenure_change_tx) = new_block.get_tenure_change_tx_payload() else { + // should be unreachable + error!( + "Block {} is a tenure-change block, but does not contain a tenure-change tx", + new_tip.block_id() + ); + return Err(ChainstateError::InvalidStacksBlock( + "Tenure-change block does not have a tenure-change tx".into(), + )); + }; + + marf_keys.push(nakamoto_keys::finished_tenure_consensus_hash( + &tenure_change_tx.prev_tenure_consensus_hash, + )); + marf_values.push(nakamoto_keys::make_bool_value(true)); + + // record parent tenure linkage + marf_keys.push(nakamoto_keys::parent_tenure_consensus_hash( + &tenure_change_tx.tenure_consensus_hash, + )); + marf_values.push(nakamoto_keys::make_consensus_hash_value( + &tenure_change_tx.prev_tenure_consensus_hash, + )); + + // record last block-found tenure + let block_found_tenure_id = NakamotoTenureId { + burn_view_consensus_hash: tenure_change_tx.burn_view_consensus_hash.clone(), + block_id: new_tip.block_id(), + }; + + marf_keys.push(nakamoto_keys::block_found_tenure_id( + &tenure_change_tx.tenure_consensus_hash, + )); + marf_values.push(nakamoto_keys::make_tenure_id_value(&block_found_tenure_id)); + } + + if let Some(tenure_tx) = new_block.get_tenure_tx_payload() { + // either a block-found or a tenure-extend, but we have a new tenure ID in this fork + let tenure_id = NakamotoTenureId { + burn_view_consensus_hash: tenure_tx.burn_view_consensus_hash.clone(), + block_id: new_tip.block_id(), + }; + + marf_keys.push(nakamoto_keys::ongoing_tenure_id().to_string()); + marf_values.push(nakamoto_keys::make_tenure_id_value(&tenure_id)); + } + + // record the highest block in this tenure + marf_keys.push(nakamoto_keys::highest_block_in_tenure( + &new_block.header.consensus_hash, + )); + marf_values.push(nakamoto_keys::make_block_id_value(&new_tip.block_id())); + + for (key, value) in marf_keys.iter().zip(marf_values.iter()) { + debug!("Set Nakamoto MARF pair '{}' = '{}'", &key, &value); + } + // store each indexed field test_debug!("Headers index_put_begin {parent_hash}-{index_block_hash}"); - let root_hash = - headers_tx.put_indexed_all(&parent_hash, &index_block_hash, &vec![], &vec![])?; + let root_hash = headers_tx.put_indexed_all( + &parent_hash, + &index_block_hash, + &marf_keys, + &marf_values, + )?; test_debug!("Headers index_indexed_all finished {parent_hash}-{index_block_hash}"); let new_tip_info = StacksHeaderInfo { @@ -2568,6 +3245,24 @@ impl NakamotoChainState { })? }; + let height_in_tenure = if new_tenure { + 1 + } else { + let parent_height_in_tenure = + Self::get_nakamoto_tenure_length(headers_tx.sqlite(), &parent_hash)?; + if parent_height_in_tenure == 0 { + // means that there's no parent -- every tenure stored in the DB has length of at least 1 + warn!("Failed to fetch parent block's tenure height"; + "parent_block_id" => %parent_hash, + "block_id" => %index_block_hash, + ); + return Err(ChainstateError::NoSuchBlockError); + } + parent_height_in_tenure.checked_add(1).ok_or_else(|| { + ChainstateError::InvalidStacksBlock("Tenure height exceeds maximum".into()) + })? + }; + Self::insert_stacks_block_header( headers_tx.deref_mut(), &new_tip_info, @@ -2576,6 +3271,7 @@ impl NakamotoChainState { anchor_block_cost, total_tenure_cost, new_tenure, + height_in_tenure, tenure_fees, )?; if let Some(block_reward) = block_reward { @@ -2730,15 +3426,34 @@ impl NakamotoChainState { None }; - // TODO: only need to do this if this is a tenure-start block let (stacking_burn_ops, transfer_burn_ops, delegate_burn_ops, vote_for_agg_key_ops) = - StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops( - chainstate_tx, - &parent_index_hash, - sortition_dbconn.sqlite_conn(), - &burn_header_hash, - burn_header_height.into(), - )?; + if new_tenure { + StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops( + chainstate_tx, + &parent_index_hash, + sortition_dbconn.sqlite_conn(), + &burn_header_hash, + burn_header_height.into(), + )? + } else { + (vec![], vec![], vec![], vec![]) + }; + + // Nakamoto must load block cost from parent if this block isn't a tenure change. + // If this is a tenure-extend, then the execution cost is reset. + let initial_cost = if new_tenure || tenure_extend { + ExecutionCost::zero() + } else { + let parent_cost_total = + Self::get_total_tenure_cost_at(chainstate_tx.as_tx(), &parent_index_hash)? + .ok_or_else(|| { + ChainstateError::InvalidStacksBlock(format!( + "Failed to load total tenure cost from parent. parent_stacks_block_id = {}", + &parent_index_hash + )) + })?; + parent_cost_total + }; let mut clarity_tx = StacksChainState::chainstate_block_begin( chainstate_tx, @@ -2757,7 +3472,6 @@ impl NakamotoChainState { Self::calculate_matured_miner_rewards( &mut clarity_tx, sortition_dbconn.sqlite_conn(), - // coinbase_height + 1, coinbase_height, matured_rewards_schedule, ) @@ -2765,22 +3479,6 @@ impl NakamotoChainState { .transpose()? .flatten(); - // Nakamoto must load block cost from parent if this block isn't a tenure change. - // If this is a tenure-extend, then the execution cost is reset. - let initial_cost = if new_tenure || tenure_extend { - ExecutionCost::zero() - } else { - let parent_cost_total = - Self::get_total_tenure_cost_at(&chainstate_tx.deref().deref(), &parent_index_hash)? - .ok_or_else(|| { - ChainstateError::InvalidStacksBlock(format!( - "Failed to load total tenure cost from parent. parent_stacks_block_id = {}", - &parent_index_hash - )) - })?; - parent_cost_total - }; - clarity_tx.reset_cost(initial_cost); // is this stacks block the first of a new epoch? @@ -3091,12 +3789,7 @@ impl NakamotoChainState { })?; // this block is mined in the ongoing tenure. if !new_tenure - && !Self::check_tenure_continuity( - chainstate_tx, - burn_dbconn, - &parent_ch, - &block.header, - )? + && !Self::check_tenure_continuity(chainstate_tx.as_tx(), &parent_ch, &block.header)? { // this block is not part of the ongoing tenure; it's invalid return Err(ChainstateError::ExpectedTenureChange); @@ -3114,7 +3807,7 @@ impl NakamotoChainState { let parent_coinbase_height = if block.is_first_mined() { 0 } else { - Self::get_coinbase_height(chainstate_tx.deref(), &parent_block_id)?.ok_or_else( + Self::get_coinbase_height(chainstate_tx.as_tx(), &parent_block_id)?.ok_or_else( || { warn!( "Parent of Nakamoto block is not in block headers DB yet"; @@ -3177,15 +3870,17 @@ impl NakamotoChainState { // (note that we can't check this earlier, since we need the parent tenure to have been // processed) if new_tenure && parent_chain_tip.is_nakamoto_block() && !block.is_first_mined() { - let parent_tenure_start_header = - Self::get_nakamoto_tenure_start_block_header(chainstate_tx.tx(), &parent_ch)? - .ok_or_else(|| { - warn!("Invalid Nakamoto block: no start-tenure block for parent"; - "parent_consensus_hash" => %parent_ch, - "block_id" => %block.header.block_id()); - - ChainstateError::NoSuchBlockError - })?; + let parent_tenure_start_header = Self::get_nakamoto_tenure_start_block_header( + chainstate_tx.as_tx(), + &parent_block_id, + &parent_ch, + )? + .ok_or_else(|| { + warn!("Invalid Nakamoto block: no start-tenure block for parent"; + "parent_consensus_hash" => %parent_ch, + "block_id" => %block.header.block_id()); + ChainstateError::NoSuchBlockError + })?; if parent_tenure_start_header.index_block_hash() != tenure_block_commit.last_tenure_id() { @@ -3203,7 +3898,7 @@ impl NakamotoChainState { // only need to do this once per tenure // get the resulting vrf proof bytes let vrf_proof_opt = if new_tenure { - Self::check_block_commit_vrf_seed(chainstate_tx.deref(), burn_dbconn, block)?; + Self::check_block_commit_vrf_seed(chainstate_tx.as_tx(), burn_dbconn, block)?; Some( block .get_vrf_proof() @@ -3406,7 +4101,7 @@ impl NakamotoChainState { &mut chainstate_tx.tx, &parent_chain_tip.anchored_header, &parent_chain_tip.consensus_hash, - &block.header, + &block, vrf_proof_opt, chain_tip_burn_header_hash, chain_tip_burn_header_height, @@ -3422,6 +4117,7 @@ impl NakamotoChainState { burn_delegate_stx_ops, burn_vote_for_aggregate_key_ops, new_tenure, + coinbase_height, block_fees, burnchain_view, ) From b29a93bc26028d2b5733478f2dfab0f205d4d1f0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:04:11 -0400 Subject: [PATCH 0382/1400] chore: debug --- stackslib/src/chainstate/nakamoto/signer_set.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index b6c0aefaa1..9b4d2927b8 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -239,6 +239,7 @@ impl NakamotoSigners { let reward_set = StacksChainState::make_reward_set(threshold, reward_slots, StacksEpochId::Epoch30); + test_debug!("Reward set for cycle {}: {:?}", &reward_cycle, &reward_set); let stackerdb_list = if participation == 0 { vec![] } else { From 17824179a7915630164c1f946511598b98db56d4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:04:52 -0400 Subject: [PATCH 0383/1400] chore: fix nakamoto staging blocks table to record signing weight and obtain method for blocks, and rewrite all methods to use the MARF for queries that are not specific to a block ID --- .../src/chainstate/nakamoto/staging_blocks.rs | 351 ++++++++++++++---- 1 file changed, 282 insertions(+), 69 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 0702a89070..fb51c6d870 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -14,15 +14,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::fs; use std::ops::{Deref, DerefMut}; use std::path::PathBuf; +use std::{fmt, fs}; use lazy_static::lazy_static; use rusqlite::blob::Blob; use rusqlite::types::{FromSql, FromSqlError}; use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql, NO_PARAMS}; use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; +use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; @@ -33,10 +34,24 @@ use crate::chainstate::stacks::index::marf::MarfConnection; use crate::chainstate::stacks::{Error as ChainstateError, StacksBlock, StacksBlockHeader}; use crate::stacks_common::codec::StacksMessageCodec; use crate::util_lib::db::{ - query_int, query_row, query_row_panic, query_rows, sqlite_open, tx_begin_immediate, u64_to_sql, - DBConn, Error as DBError, FromRow, + query_int, query_row, query_row_columns, query_row_panic, query_rows, sqlite_open, + tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, }; +/// The means by which a block is obtained. +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum NakamotoBlockObtainMethod { + Downloaded, + Pushed, + Mined, +} + +impl fmt::Display for NakamotoBlockObtainMethod { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[ r#" -- Table for staging nakamoto blocks @@ -70,7 +85,7 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[ -- when this block was processed processed_time INT NOT NULL, - -- block data + -- block data, including its header data BLOB NOT NULL, PRIMARY KEY(block_hash,consensus_hash) @@ -80,6 +95,59 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[ r#"CREATE INDEX nakamoto_staging_blocks_by_tenure_start_block ON nakamoto_staging_blocks(is_tenure_start,consensus_hash);"#, ]; +pub const NAKAMOTO_STAGING_DB_SCHEMA_2: &'static [&'static str] = &[ + r#" + DROP TABLE nakamoto_staging_blocks; + "#, + r#" + -- Table for staging nakamoto blocks + CREATE TABLE nakamoto_staging_blocks ( + -- SHA512/256 hash of this block (derived value from `data`) + block_hash TEXT NOT NULL, + -- The consensus hash of the burnchain block that selected this block's miner's block-commit. + -- This identifies the tenure to which this block belongs. + consensus_hash TEXT NOT NULL, + -- the parent index_block_hash + parent_block_id TEXT NOT NULL, + -- whether or not this is the first block in its tenure + is_tenure_start BOOL NOT NULL, + + -- has the burnchain block with this block's `consensus_hash` been processed? + burn_attachable INT NOT NULL, + -- has this block been processed? + processed INT NOT NULL, + -- set to 1 if this block can never be attached + orphaned INT NOT NULL, + + -- block height + height INT NOT NULL, + + -- used internally -- this is the StacksBlockId of this block's consensus hash and block hash + -- (derived value from `data`) + index_block_hash TEXT UNIQUE NOT NULL, + -- when this block was processed + processed_time INT NOT NULL, + -- how the block was obtained -- was it pushed? downloaded? uploaded? etc. + -- (encoded as text for forwards-compatibility) + obtain_method TEXT NOT NULL, + -- signing weight of this block + signing_weight INTEGER NOT NULL, + + -- block data, including its header + data BLOB NOT NULL, + + PRIMARY KEY(block_hash,consensus_hash) + );"#, + r#"CREATE INDEX nakamoto_staging_blocks_by_index_block_hash ON nakamoto_staging_blocks(index_block_hash);"#, + r#"CREATE INDEX nakamoto_staging_blocks_by_index_block_hash_and_consensus_hash ON nakamoto_staging_blocks(index_block_hash,consensus_hash);"#, + r#"CREATE INDEX nakamoto_staging_blocks_by_tenure_start_block ON nakamoto_staging_blocks(is_tenure_start,consensus_hash);"#, + r#"CREATE INDEX nakamoto_staging_blocks_by_burn_attachable ON nakamoto_staging_blocks(consensus_hash,burn_attachable);"#, + r#"CREATE TABLE db_version ( + version INTEGER NOT NULL + );"#, + r#"INSERT INTO db_version (version) VALUES (2)"#, +]; + pub struct NakamotoStagingBlocksConn(rusqlite::Connection); impl Deref for NakamotoStagingBlocksConn { @@ -160,43 +228,11 @@ impl NakamotoStagingBlocksConn { } impl<'a> NakamotoStagingBlocksConnRef<'a> { - /// Determine if there exists any unprocessed Nakamoto blocks + /// Determine if we have a particular block with the given index hash. /// Returns Ok(true) if so /// Returns Ok(false) if not - pub fn has_any_unprocessed_nakamoto_block(&self) -> Result { - let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE processed = 0 LIMIT 1"; - let res: Option = query_row(self, qry, NO_PARAMS)?; - Ok(res.is_some()) - } - - /// Determine whether or not we have processed at least one Nakamoto block in this sortition history. - /// NOTE: the relevant field queried from `nakamoto_staging_blocks` is updated by a separate - /// tx from block-processing, so it's imperative that the thread that calls this function is - /// the *same* thread as the one that processes blocks. - /// Returns Ok(true) if at least one block in `nakamoto_staging_blocks` has `processed = 1` - /// Returns Ok(false) if not /// Returns Err(..) on DB error - fn has_processed_nakamoto_block( - &self, - sortition_handle: &SH, - ) -> Result { - let Some((ch, bhh, _height)) = sortition_handle.get_nakamoto_tip()? else { - return Ok(false); - }; - - // this block must be a processed Nakamoto block - let ibh = StacksBlockId::new(&ch, &bhh); - let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE processed = 1 AND index_block_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[&ibh]; - let res: Option = query_row(self, qry, args)?; - Ok(res.is_some()) - } - - /// Determine if we have a particular block - /// Returns Ok(true) if so - /// Returns Ok(false) if not - /// Returns Err(..) on DB error - pub fn has_nakamoto_block( + pub fn has_nakamoto_block_with_index_hash( &self, index_block_hash: &StacksBlockId, ) -> Result { @@ -206,26 +242,31 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { Ok(res.is_some()) } - /// Get a staged Nakamoto tenure-start block - pub fn get_nakamoto_tenure_start_block( + /// Get the block ID, processed-status, orphan-status, and signing weight of the non-orphaned + /// block with the given consensus hash and sighash with the most amount of signatures. + /// There will be at most one such block. + /// + /// NOTE: for Nakamoto blocks, the sighash is the same as the block hash. + pub(crate) fn get_block_processed_and_signed_weight( &self, consensus_hash: &ConsensusHash, - ) -> Result, ChainstateError> { - let qry = "SELECT data FROM nakamoto_staging_blocks WHERE is_tenure_start = 1 AND consensus_hash = ?1"; - let args: &[&dyn ToSql] = &[consensus_hash]; - let data: Option> = query_row(self, qry, args)?; - let Some(block_bytes) = data else { - return Ok(None); - }; - let block = NakamotoBlock::consensus_deserialize(&mut block_bytes.as_slice())?; - if &block.header.consensus_hash != consensus_hash { - error!( - "Staging DB corruption: expected {}, got {}", - consensus_hash, block.header.consensus_hash - ); - return Err(DBError::Corruption.into()); + sighash: &Sha512Trunc256Sum, + ) -> Result, ChainstateError> { + let sql = "SELECT index_block_hash,processed,orphaned,signing_weight FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2 ORDER BY signing_weight DESC, index_block_hash LIMIT 1"; + let args = rusqlite::params![consensus_hash, sighash]; + + let mut stmt = self.deref().prepare(sql)?; + let mut qry = stmt.query(args)?; + + while let Some(row) = qry.next()? { + let block_id: StacksBlockId = row.get(0)?; + let processed: bool = row.get(1)?; + let orphaned: bool = row.get(2)?; + let signing_weight: u32 = row.get(3)?; + + return Ok(Some((block_id, processed, orphaned, signing_weight))); } - Ok(Some(block)) + Ok(None) } /// Get the rowid of a Nakamoto block @@ -289,10 +330,9 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { /// tx from block-processing, so it's imperative that the thread that calls this function is /// the *same* thread that goes to process blocks. /// Returns (the block, the size of the block) - pub(crate) fn next_ready_nakamoto_block( + pub(crate) fn next_ready_nakamoto_block( &self, header_conn: &Connection, - sortition_handle: &SH, ) -> Result, ChainstateError> { let query = "SELECT child.data FROM nakamoto_staging_blocks child JOIN nakamoto_staging_blocks parent ON child.parent_block_id = parent.index_block_hash @@ -318,18 +358,12 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { // This query can fail if the parent of `child` is not a Nakamoto block, which // is allowed -- a Nakamoto block can descend from an epoch2 block (but since // Nakamoto does not fork without a Bitcoin fork, it'll be the only such child - // within that Bitcoin forok). + // within that Bitcoin fork unless either signers screw up, or there are + // multiple malleablized copies of this first-ever block available). // - // So, if at least one Nakamoto block is processed in this Bitcoin fork, - // then the next ready block's parent *must* be a Nakamoto block. So - // if the below is true, then there are no ready blocks. - if self.has_processed_nakamoto_block(sortition_handle)? { - return Ok(None); - } - - // no nakamoto blocks processed yet, so the parent *must* be an epoch2 block! - // go find it. Note that while this is expensive, it only has to be done - // _once_, and it will only touch at most one reward cycle's worth of blocks. + // Regardless, this query usually returns zero rows. It will return one or + // more rows in the above case for an epoch2 parent, or when there are + // discontiguous Nakamoto blocks available for processing. let sql = "SELECT index_block_hash,parent_block_id FROM nakamoto_staging_blocks WHERE processed = 0 AND orphaned = 0 AND burn_attachable = 1 ORDER BY height ASC"; let mut stmt = self.deref().prepare(sql)?; let mut qry = stmt.query(NO_PARAMS)?; @@ -338,7 +372,9 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { let index_block_hash : StacksBlockId = row.get(0)?; let parent_block_id : StacksBlockId = row.get(1)?; - let Some(_parent_epoch2_block) = NakamotoChainState::get_block_header_epoch2(header_conn, &parent_block_id)? else { + // this naturally will ignore nakamoto blocks whose parent nakamoto blocks + // are not yet known -- they won't be epoch2 blocks either! + if !NakamotoChainState::has_block_header_epoch2(header_conn, &parent_block_id)? { continue; }; @@ -357,6 +393,27 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { } }) } + + /// Given a block ID, determine if it has children that have been processed and accepted + pub fn has_children(&self, index_block_hash: &StacksBlockId) -> Result { + let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE parent_block_id = ?1 AND processed = 1 AND orphaned = 0 LIMIT 1"; + let args = rusqlite::params![index_block_hash]; + let children_flags: Option = query_row(self, qry, args)?; + Ok(children_flags.is_some()) + } + + /// Given a consensus hash, determine if the burn block has been processed. + /// Because this is stored in a denormalized way, we'll want to do this whenever we store a + /// block (so we can set `burn_attachable` accordingly) + pub fn is_burn_block_processed( + &self, + consensus_hash: &ConsensusHash, + ) -> Result { + let sql = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND burn_attachable = 1"; + let args = rusqlite::params![consensus_hash]; + let res: Option = query_row(self, sql, args)?; + Ok(res.is_some()) + } } impl<'a> NakamotoStagingBlocksTx<'a> { @@ -407,6 +464,114 @@ impl<'a> NakamotoStagingBlocksTx<'a> { Ok(()) } + + /// Store a block into the staging DB. + pub(crate) fn store_block( + &self, + block: &NakamotoBlock, + burn_attachable: bool, + signing_weight: u32, + obtain_method: NakamotoBlockObtainMethod, + ) -> Result<(), ChainstateError> { + let Ok(tenure_start) = block.is_wellformed_tenure_start_block() else { + return Err(ChainstateError::InvalidStacksBlock( + "Tried to store a tenure-start block that is not well-formed".into(), + )); + }; + + let burn_attachable = if !burn_attachable { + // if it's burn_attachable before, it's burn_attachable always + self.conn() + .is_burn_block_processed(&block.header.consensus_hash)? + } else { + burn_attachable + }; + + self.execute( + "INSERT INTO nakamoto_staging_blocks ( + block_hash, + consensus_hash, + parent_block_id, + is_tenure_start, + burn_attachable, + orphaned, + processed, + + height, + index_block_hash, + processed_time, + obtain_method, + signing_weight, + + data + ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)", + params![ + &block.header.block_hash(), + &block.header.consensus_hash, + &block.header.parent_block_id, + &tenure_start, + if burn_attachable { 1 } else { 0 }, + 0, + 0, + u64_to_sql(block.header.chain_length)?, + &block.block_id(), + 0, + obtain_method.to_string(), + signing_weight, + block.serialize_to_vec(), + ], + )?; + if burn_attachable { + self.set_burn_block_processed(&block.header.consensus_hash)?; + } + Ok(()) + } + + /// Do we have a block with the given sighash? + /// NOTE: the block hash and sighash are the same for Nakamoto blocks + pub(crate) fn has_nakamoto_block_with_sighash( + &self, + consensus_hash: &ConsensusHash, + sighash: &Sha512Trunc256Sum, + ) -> Result { + let qry = + "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2"; + let args = rusqlite::params![consensus_hash, sighash]; + let present: Option = query_row(self, qry, args)?; + Ok(present.is_some()) + } + + /// Store a block into the staging DB if its sighash has never been seen before. + /// NOTE: the block hash and sighash are the same for Nakamoto blocks, so this is equivalent to + /// storing a new block. + /// Return true if stored; false if not. + pub(crate) fn try_store_block_with_new_sighash( + &self, + block: &NakamotoBlock, + burn_attachable: bool, + signing_weight: u32, + obtain_method: NakamotoBlockObtainMethod, + ) -> Result { + let sighash = block.header.signer_signature_hash(); + if self.has_nakamoto_block_with_sighash(&block.header.consensus_hash, &sighash)? { + return Ok(false); + } + self.store_block(block, burn_attachable, signing_weight, obtain_method)?; + Ok(true) + } + + /// Replace an already-stored block with the given sighash with a newer copy with more signing + /// power. Arguments will not be validated; the caller must do this. + pub(crate) fn replace_block( + &self, + block: &NakamotoBlock, + signing_weight: u32, + obtain_method: NakamotoBlockObtainMethod, + ) -> Result<(), ChainstateError> { + self.execute("UPDATE nakamoto_staging_blocks SET data = ?1, signing_weight = ?2, obtain_method = ?3 WHERE consensus_hash = ?4 AND block_hash = ?5", + params![&block.serialize_to_vec(), &signing_weight, &obtain_method.to_string(), &block.header.consensus_hash, &block.header.block_hash()])?; + Ok(()) + } } impl StacksChainState { @@ -464,6 +629,48 @@ impl StacksChainState { Self::static_get_nakamoto_staging_blocks_path(PathBuf::from(self.root_path.as_str())) } + /// Get the database version + pub fn get_nakamoto_staging_blocks_db_version( + conn: &Connection, + ) -> Result { + let qry = "SELECT version FROM db_version ORDER BY version DESC LIMIT 1"; + let args = NO_PARAMS; + let version: Option = match query_row(&conn, qry, args) { + Ok(x) => x, + Err(e) => { + debug!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); + return Ok(1); + } + }; + + match version { + Some(ver_i64) => { + let ver = u32::try_from(ver_i64) + .map_err(|_e| ChainstateError::DBError(DBError::Corruption))?; + Ok(ver) + } + None => { + debug!("No version present in Nakamoto staging blocks DB; defaulting to 1"); + Ok(1) + } + } + } + + /// Perform migrations + pub fn migrate_nakamoto_staging_blocks(conn: &Connection) -> Result<(), ChainstateError> { + let mut version = Self::get_nakamoto_staging_blocks_db_version(conn)?; + if version < 2 { + debug!("Migrate Nakamoto staging blocks DB to schema 2"); + for cmd in NAKAMOTO_STAGING_DB_SCHEMA_2.iter() { + conn.execute(cmd, NO_PARAMS)?; + } + version = Self::get_nakamoto_staging_blocks_db_version(conn)?; + assert_eq!(version, 2, "Nakamoto staging DB migration failure"); + debug!("Migrated Nakamoto staging blocks DB to schema 2"); + } + Ok(()) + } + /// Open and set up a DB for nakamoto staging blocks. /// If it doesn't exist, then instantiate it if `readwrite` is true. pub fn open_nakamoto_staging_blocks( @@ -490,7 +697,13 @@ impl StacksChainState { for cmd in NAKAMOTO_STAGING_DB_SCHEMA_1.iter() { conn.execute(cmd, NO_PARAMS)?; } + for cmd in NAKAMOTO_STAGING_DB_SCHEMA_2.iter() { + conn.execute(cmd, NO_PARAMS)?; + } + } else if readwrite { + Self::migrate_nakamoto_staging_blocks(&conn)?; } + Ok(NakamotoStagingBlocksConn(conn)) } } From fdd9039b54e60b1bf16f8c9e7da359f6ca7fe164 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:05:37 -0400 Subject: [PATCH 0384/1400] chore: MARF the nakamoto tenures table, and remove the `tenure_index` field and change its primary key. A tenure is recorded for a given block with a tenure change. --- stackslib/src/chainstate/nakamoto/tenure.rs | 433 +++++++++----------- 1 file changed, 184 insertions(+), 249 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 198143d1a9..cdb3511737 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -96,7 +96,7 @@ use crate::chainstate::burn::{BlockSnapshot, SortitionHash}; use crate::chainstate::coordinator::{BlockEventDispatcher, Error}; use crate::chainstate::nakamoto::{ MaturedMinerPaymentSchedules, MaturedMinerRewards, NakamotoBlock, NakamotoBlockHeader, - NakamotoChainState, + NakamotoChainState, StacksHandle, }; use crate::chainstate::stacks::db::accounts::MinerReward; use crate::chainstate::stacks::db::{ @@ -197,6 +197,56 @@ pub static NAKAMOTO_TENURES_SCHEMA_2: &'static str = r#" CREATE INDEX nakamoto_tenures_by_parent ON nakamoto_tenures(tenure_id_consensus_hash,prev_tenure_id_consensus_hash); "#; +pub static NAKAMOTO_TENURES_SCHEMA_3: &'static str = r#" + -- Drop the nakamoto_tenures table if it exists + DROP TABLE IF EXISTS nakamoto_tenures; + + -- This table records each tenure-change, be it a BlockFound or Extended tenure. + CREATE TABLE nakamoto_tenures ( + -- consensus hash of start-tenure block (i.e. the consensus hash of the sortition in which the miner's block-commit + -- was mined) + tenure_id_consensus_hash TEXT NOT NULL, + -- consensus hash of the previous tenure's start-tenure block + prev_tenure_id_consensus_hash TEXT NOT NULL, + -- consensus hash of the last-processed sortition + burn_view_consensus_hash TEXT NOT NULL, + -- whether or not this tenure was triggered by a sortition (as opposed to a tenure-extension). + -- this is equal to the `cause` field in a TenureChange + cause INTEGER NOT NULL, + -- block hash of start-tenure block + block_hash TEXT NOT NULL, + -- block ID of this start block (this is the StacksBlockId of the above tenure_id_consensus_hash and block_hash) + block_id TEXT NOT NULL, + -- this field is the total number of _sortition-induced_ tenures in the chain history (including this tenure), + -- as of the _end_ of this block. A tenure can contain multiple TenureChanges; if so, then this + -- is the height of the _sortition-induced_ TenureChange that created it. + coinbase_height INTEGER NOT NULL, + -- number of blocks this tenure. + -- * for tenure-changes induced by sortitions, this is the number of blocks in the previous tenure + -- * for tenure-changes induced by extension, this is the number of blocks in the current tenure so far. + num_blocks_confirmed INTEGER NOT NULL, + + -- key each tenure by its tenure-start block, and the burn view (since the tenure can span multiple sortitions, and thus + -- there can be multiple burn_view_consensus_hash values per block_id) + PRIMARY KEY(burn_view_consensus_hash,block_id) + ); + CREATE INDEX nakamoto_tenures_by_block_id ON nakamoto_tenures(block_id); + CREATE INDEX nakamoto_tenures_by_tenure_id ON nakamoto_tenures(tenure_id_consensus_hash); + CREATE INDEX nakamoto_tenures_by_block_and_consensus_hashes ON nakamoto_tenures(tenure_id_consensus_hash,block_hash); + CREATE INDEX nakamoto_tenures_by_burn_view_consensus_hash ON nakamoto_tenures(burn_view_consensus_hash); + CREATE INDEX nakamoto_tenures_by_parent ON nakamoto_tenures(tenure_id_consensus_hash,prev_tenure_id_consensus_hash); +"#; + +/// Primary key into nakamoto_tenures. +/// Used for MARF lookups +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct NakamotoTenureId { + /// last sortition in this tenure + pub burn_view_consensus_hash: ConsensusHash, + /// start block ID of this tenure + pub block_id: StacksBlockId, +} + #[derive(Debug, Clone, PartialEq)] pub struct NakamotoTenure { /// consensus hash of start-tenure block @@ -212,11 +262,8 @@ pub struct NakamotoTenure { pub block_hash: BlockHeaderHash, /// block ID of this start block pub block_id: StacksBlockId, - /// number of sortition-tenures so far, including this one. - /// This is, equivalently, the number of coinbases emitted so far. + /// coinbase height of this tenure pub coinbase_height: u64, - /// number of tenure-change transactions so far, including this one - pub tenure_index: u64, /// number of blocks this tenure confirms pub num_blocks_confirmed: u32, } @@ -231,13 +278,8 @@ impl FromRow for NakamotoTenure { let block_hash = row.get("block_hash")?; let block_id = row.get("block_id")?; let coinbase_height_i64: i64 = row.get("coinbase_height")?; - let coinbase_height = coinbase_height_i64 - .try_into() - .map_err(|_| DBError::ParseError)?; - let tenure_index_i64: i64 = row.get("tenure_index")?; - let tenure_index = tenure_index_i64 - .try_into() - .map_err(|_| DBError::ParseError)?; + let coinbase_height = + u64::try_from(coinbase_height_i64).map_err(|_| DBError::ParseError)?; let num_blocks_confirmed: u32 = row.get("num_blocks_confirmed")?; Ok(NakamotoTenure { tenure_id_consensus_hash, @@ -247,7 +289,6 @@ impl FromRow for NakamotoTenure { block_hash, block_id, coinbase_height, - tenure_index, num_blocks_confirmed, }) } @@ -388,67 +429,28 @@ impl NakamotoChainState { Ok(matured_miner_rewards_opt) } - /// Return the coinbase height of `block` if it was a nakamoto block, or the - /// Stacks block height of `block` if it was an epoch-2 block - /// - /// In Stacks 2.x, the coinbase height and block height are the - /// same. A miner's tenure in Stacks 2.x is entirely encompassed - /// in the single Bitcoin-anchored Stacks block they produce, as - /// well as the microblock stream they append to it. But in Nakamoto, - /// the coinbase height and block height are decoupled. - pub fn get_coinbase_height( - chainstate_conn: &Connection, - block: &StacksBlockId, - ) -> Result, ChainstateError> { - let sql = "SELECT * FROM nakamoto_block_headers WHERE index_block_hash = ?1"; - let result: Option = - query_row_panic(chainstate_conn, sql, &[&block], || { - "FATAL: multiple rows for the same block hash".to_string() - })?; - if let Some(nak_hdr) = result { - let nak_qry = "SELECT coinbase_height FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let opt_height: Option = chainstate_conn - .query_row(nak_qry, &[&nak_hdr.consensus_hash], |row| row.get(0)) - .optional()?; - if let Some(height) = opt_height { - return Ok(Some( - u64::try_from(height).map_err(|_| DBError::ParseError)?, - )); - } else { - // should be unreachable - return Err(DBError::NotFoundError.into()); - } - } - - let epoch_2_qry = "SELECT block_height FROM block_headers WHERE index_block_hash = ?1"; - let opt_height: Option = chainstate_conn - .query_row(epoch_2_qry, &[block], |row| row.get(0)) - .optional()?; - opt_height - .map(u64::try_from) - .transpose() - .map_err(|_| ChainstateError::DBError(DBError::ParseError)) - } - /// Determine if a tenure has been fully processed. - pub fn has_processed_nakamoto_tenure( - conn: &Connection, + /// That is, we've processed both its tenure-start block, and we've processed a tenure-change that + /// claims this tenure as its parent tenure. + /// + /// If we haven't processed a tenure-start block for this tenure, then return false. + pub fn has_processed_nakamoto_tenure( + conn: &mut STH, + tip_block_id: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result { - // a tenure will have been processed if any of its children have been processed - let sql = "SELECT 1 FROM nakamoto_tenures WHERE prev_tenure_id_consensus_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[tenure_id_consensus_hash]; - let found: Option = query_row(conn, sql, args)?; - Ok(found.is_some()) + Ok(conn + .is_tenure_finished(tip_block_id, tenure_id_consensus_hash)? + .unwrap_or(false)) } /// Insert a nakamoto tenure. + /// `block_header` is the header of the block containing `tenure`. /// No validation will be done. pub(crate) fn insert_nakamoto_tenure( tx: &Connection, block_header: &NakamotoBlockHeader, coinbase_height: u64, - tenure_index: u64, tenure: &TenureChangePayload, ) -> Result<(), ChainstateError> { // NOTE: this is checked with check_nakamoto_tenure() @@ -461,15 +463,14 @@ impl NakamotoChainState { &block_header.block_hash(), &block_header.block_id(), &u64_to_sql(coinbase_height)?, - &u64_to_sql(tenure_index)?, &tenure.previous_tenure_blocks, ]; tx.execute( "INSERT INTO nakamoto_tenures (tenure_id_consensus_hash, prev_tenure_id_consensus_hash, burn_view_consensus_hash, cause, - block_hash, block_id, coinbase_height, tenure_index, num_blocks_confirmed) + block_hash, block_id, coinbase_height, num_blocks_confirmed) VALUES - (?1,?2,?3,?4,?5,?6,?7,?8,?9)", + (?1,?2,?3,?4,?5,?6,?7,?8)", args, )?; @@ -490,176 +491,76 @@ impl NakamotoChainState { Ok(()) } - /// Get the first block header in a Nakamoto tenure - pub fn get_nakamoto_tenure_start_block_header( - chainstate_conn: &Connection, - consensus_hash: &ConsensusHash, - ) -> Result, ChainstateError> { - let sql = "SELECT * FROM nakamoto_block_headers WHERE consensus_hash = ?1 ORDER BY block_height ASC LIMIT 1"; - query_row_panic(chainstate_conn, sql, &[&consensus_hash], || { - "FATAL: multiple rows for the same consensus hash".to_string() - }) - .map_err(ChainstateError::DBError) - } - /// Get the consensus hash of the parent tenure /// Used by the p2p code. /// Don't use in consensus code. - pub fn get_nakamoto_parent_tenure_id_consensus_hash( - chainstate_conn: &Connection, + pub fn get_nakamoto_parent_tenure_id_consensus_hash( + chainstate_conn: &mut STH, + tip_block_id: &StacksBlockId, consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { - let sql = "SELECT prev_tenure_id_consensus_hash AS consensus_hash FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = &[consensus_hash]; - query_row(chainstate_conn, sql, args).map_err(ChainstateError::DBError) + Ok(chainstate_conn.get_parent_tenure_consensus_hash(tip_block_id, consensus_hash)?) } - /// Get the last block header in a Nakamoto tenure - pub fn get_nakamoto_tenure_finish_block_header( - chainstate_conn: &Connection, - consensus_hash: &ConsensusHash, - ) -> Result, ChainstateError> { - let sql = "SELECT * FROM nakamoto_block_headers WHERE consensus_hash = ?1 ORDER BY block_height DESC LIMIT 1"; - query_row_panic(chainstate_conn, sql, &[&consensus_hash], || { - "FATAL: multiple rows for the same consensus hash".to_string() - }) - .map_err(ChainstateError::DBError) - } - - /// Get the number of blocks in a tenure. + /// Get the number of blocks in a tenure, given a block ID. /// Only works for Nakamoto blocks, not Stacks epoch2 blocks. - /// Returns 0 if the consensus hash is not found. + /// Returns 0 if there are no blocks in this tenure pub fn get_nakamoto_tenure_length( chainstate_conn: &Connection, - consensus_hash: &ConsensusHash, + block_id: &StacksBlockId, ) -> Result { - let sql = "SELECT IFNULL(COUNT(block_hash),0) FROM nakamoto_block_headers WHERE consensus_hash = ?1"; - let count_i64 = query_int(chainstate_conn, sql, &[&consensus_hash])?; - let count: u32 = count_i64 - .try_into() - .expect("FATAL: too many blocks in tenure"); - Ok(count) - } - - /// Get the highest coinbase height processed. - /// Returns Ok(Some(coinbase_height)) if we have processed at least one tenure - /// Returns Ok(None) if we have not yet processed a Nakamoto tenure - /// Returns Err(..) on database errors - pub fn get_highest_nakamoto_coinbase_height( - conn: &Connection, - max: u64, - ) -> Result, ChainstateError> { - match conn - .query_row( - "SELECT IFNULL(MAX(coinbase_height), 0) FROM nakamoto_tenures WHERE coinbase_height < ?1", - &[&u64_to_sql(max)?], - |row| Ok(u64::from_row(row).expect("Expected u64 in database")), - ) - .optional()? - { - Some(0) => { - // this never happens, so it's None - Ok(None) + // at least one block in this tenure + let sql = "SELECT height_in_tenure FROM nakamoto_block_headers WHERE index_block_hash = ?1"; + let count = match query_int(chainstate_conn, sql, &[block_id]) { + Ok(count_i64) => { + let count: u32 = count_i64 + .try_into() + .expect("FATAL: too many blocks in tenure"); + count } - Some(height_i64) => { - Ok(Some( - height_i64.try_into().map_err(|_| DBError::ParseError)?, - )) + Err(DBError::NotFoundError) => 0, + Err(e) => { + return Err(e.into()); } - None => Ok(None), - } + }; + Ok(count) } - /// Get the nakamoto tenure by id - pub fn get_nakamoto_tenure_change_by_tenure_id( + /// Get a Nakamoto tenure change by its ID + pub fn get_nakamoto_tenure_change( headers_conn: &Connection, - tenure_consensus_hash: &ConsensusHash, + tenure_id: &NakamotoTenureId, ) -> Result, ChainstateError> { - let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = &[&tenure_consensus_hash]; - let tenure_opt: Option = query_row(headers_conn, sql, args)?; - Ok(tenure_opt) + let sql = + "SELECT * FROM nakamoto_tenures WHERE burn_view_consensus_hash = ?1 AND block_id = ?2"; + let args = rusqlite::params![&tenure_id.burn_view_consensus_hash, &tenure_id.block_id]; + Ok(query_row(headers_conn, sql, args)?) } - /// Get the nakamoto tenure by burn view - pub fn get_nakamoto_tenure_change_by_burn_view( - headers_conn: &Connection, - burn_view: &ConsensusHash, + /// Get the tenure-change most recently processed in the history tipped by the given block. + /// This can be a block-found or an extended tenure change. + pub fn get_ongoing_tenure( + headers_conn: &mut STH, + tip_block_id: &StacksBlockId, ) -> Result, ChainstateError> { - let sql = "SELECT * FROM nakamoto_tenures WHERE burn_view_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let args = rusqlite::params![burn_view]; - let tenure_opt: Option = query_row(headers_conn, sql, args)?; - Ok(tenure_opt) + let Some(tenure_id) = headers_conn.get_ongoing_tenure_id(tip_block_id)? else { + return Ok(None); + }; + Self::get_nakamoto_tenure_change(headers_conn.sqlite(), &tenure_id) } - /// Get a nakamoto tenure-change by its tenure ID consensus hash. - /// Get the highest such record. It will be the last-processed BlockFound tenure - /// for the given sortition consensus hash. - pub fn get_highest_nakamoto_tenure_change_by_tenure_id( - headers_conn: &Connection, + /// Get the block-found tenure-change for a given tenure ID consensus hash + pub fn get_block_found_tenure( + headers_conn: &mut STH, + tip_block_id: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { - let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 AND cause = ?2 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = &[ - tenure_id_consensus_hash, - &TenureChangeCause::BlockFound.as_u8(), - ]; - let tenure_opt: Option = query_row(headers_conn, sql, args)?; - Ok(tenure_opt) - } - - /// Get the highest non-empty processed tenure-change on the canonical sortition history. - /// It will be a BlockFound tenure. - pub fn get_highest_nakamoto_tenure( - headers_conn: &Connection, - sortdb_conn: &SH, - ) -> Result, ChainstateError> { - // NOTE: we do a *search* here in case the canonical Stacks pointer stored on the canonical - // sortition gets invalidated through a reorg. - let mut cursor = SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &sortdb_conn.tip())? - .ok_or(ChainstateError::NoSuchBlockError)?; - - // if there's been no activity for more than 2*reward_cycle_length sortitions, then the - // chain is dead anyway - for _ in 0..(2 * sortdb_conn.pox_constants().reward_cycle_length) { - if let Some(tenure) = Self::get_highest_nakamoto_tenure_change_by_tenure_id( - headers_conn, - &cursor.consensus_hash, - )? { - return Ok(Some(tenure)); - } - cursor = - SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &cursor.parent_sortition_id)? - .ok_or(ChainstateError::NoSuchBlockError)?; - } - Ok(None) - } - - /// Get the ongoing tenure (i.e. last tenure-change tx record) from the sortition pointed to by - /// sortdb_conn. - /// It will be a BlockFound or an Extension tenure. - pub fn get_ongoing_nakamoto_tenure( - headers_conn: &Connection, - sortdb_conn: &SH, - ) -> Result, ChainstateError> { - // NOTE: we do a *search* here in case the canonical Stacks pointer stored on the canonical - // sortition gets invalidated through a reorg. - let mut cursor = SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &sortdb_conn.tip())? - .ok_or(ChainstateError::NoSuchBlockError)?; - - // if there's been no activity for more than 2*reward_cycle_length sortitions, then the - // chain is dead anyway - for _ in 0..(2 * sortdb_conn.pox_constants().reward_cycle_length) { - if let Some(tenure) = - Self::get_nakamoto_tenure_change_by_burn_view(headers_conn, &cursor.consensus_hash)? - { - return Ok(Some(tenure)); - } - cursor = - SortitionDB::get_block_snapshot(sortdb_conn.sqlite(), &cursor.parent_sortition_id)? - .ok_or(ChainstateError::NoSuchBlockError)?; - } - Ok(None) + let Some(tenure_id) = + headers_conn.get_block_found_tenure_id(tip_block_id, tenure_id_consensus_hash)? + else { + return Ok(None); + }; + Self::get_nakamoto_tenure_change(headers_conn.sqlite(), &tenure_id) } /// Verify that a tenure change tx is a valid first-ever tenure change. It must connect to an @@ -683,19 +584,26 @@ impl NakamotoChainState { let Some(parent_header) = Self::get_block_header(headers_conn, &tenure_payload.previous_tenure_end)? else { - warn!("Invalid tenure-change: no parent epoch2 header"; + warn!("Invalid tenure-change from epoch2: no parent epoch2 header"; "consensus_hash" => %tenure_payload.tenure_consensus_hash, "previous_tenure_end" => %tenure_payload.previous_tenure_end ); return Ok(None); }; if tenure_payload.previous_tenure_blocks != 1 { - warn!("Invalid tenure-change: expected 1 previous tenure block"; + warn!("Invalid tenure-change from epoch2: expected 1 previous tenure block"; "consensus_hash" => %tenure_payload.tenure_consensus_hash, "previous_tenure_blocks" => %tenure_payload.previous_tenure_blocks ); return Ok(None); } + if tenure_payload.prev_tenure_consensus_hash != parent_header.consensus_hash { + warn!("Invalid tenure-change from epoch2: parent tenure consensus hash mismatch"; + "prev_tenure_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash, + "parent_header.consensus_hash" => %parent_header.consensus_hash + ); + return Ok(None); + } let Some(epoch2_header_info) = parent_header.anchored_header.as_stacks_epoch2() else { warn!("Invalid tenure-change: parent header is not epoch2"; "consensus_hash" => %tenure_payload.tenure_consensus_hash, @@ -716,8 +624,6 @@ impl NakamotoChainState { &epoch2_header_info.block_hash(), ), coinbase_height: epoch2_header_info.total_work.work, - // NOTE: first Nakamoto tenure and tenure index will have height 1 - tenure_index: 0, num_blocks_confirmed: 1, }; Ok(Some(last_epoch2_tenure)) @@ -756,11 +662,14 @@ impl NakamotoChainState { /// * previous_tenure_blocks /// * cause /// + /// `block_header` is the block header of a tenure-change block, which includes + /// `tenure_payload` as its first transaction. + /// /// Returns Ok(Some(processed-tenure)) on success /// Returns Ok(None) if the tenure change is invalid /// Returns Err(..) on DB error - pub(crate) fn check_nakamoto_tenure( - headers_conn: &Connection, + pub(crate) fn check_nakamoto_tenure( + headers_conn: &mut STH, sort_handle: &mut SH, block_header: &NakamotoBlockHeader, tenure_payload: &TenureChangePayload, @@ -774,7 +683,16 @@ impl NakamotoChainState { return Ok(None); } - // all consensus hashes must be on the canonical fork, if they're not the first-ever + // this tenure_payload must point to the parent block + if tenure_payload.previous_tenure_end != block_header.parent_block_id { + warn!("Invalid tenure-change: does not confirm parent block"; + "previous_tenure_end" => %tenure_payload.previous_tenure_end, + "parent_block_id" => %block_header.parent_block_id + ); + return Ok(None); + } + + // all consensus hashes must be on the canonical burnchain fork, if they're not the first-ever let Some(tenure_sn) = Self::check_valid_consensus_hash(sort_handle, &tenure_payload.tenure_consensus_hash)? else { @@ -790,7 +708,7 @@ impl NakamotoChainState { // tenure_sn must be no more recent than sortition_sn if tenure_sn.block_height > sortition_sn.block_height { - warn!("Invalid tenure-change: tenure snapshot comes sortition snapshot"; "tenure_consensus_hash" => %tenure_payload.tenure_consensus_hash, "burn_view_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash); + warn!("Invalid tenure-change: tenure snapshot comes before sortition snapshot"; "tenure_consensus_hash" => %tenure_payload.tenure_consensus_hash, "burn_view_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash); return Ok(None); } @@ -843,20 +761,34 @@ impl NakamotoChainState { return Ok(None); } - // Note in the extend case, this will actually return the current tenure, not the parent as prev_tenure_consensus_hash will be the same as tenure_consensus_hash - let Some(tenure) = Self::get_nakamoto_tenure_change_by_tenure_id( - headers_conn, - &tenure_payload.prev_tenure_consensus_hash, - )? + // What tenure are we building off of? This is the tenure in which the parent block + // resides. Note that if this block is a tenure-extend block, then parent_block_id and + // this block reside in the same tenure (but this block will insert a tenure-extend record + // into the tenure-changes table). + let Some(parent_tenure) = + Self::get_ongoing_tenure(headers_conn, &block_header.parent_block_id)? else { // not building off of a previous Nakamoto tenure. This is the first tenure change. It should point to an epoch // 2.x block. - return Self::check_first_nakamoto_tenure_change(headers_conn, tenure_payload); + return Self::check_first_nakamoto_tenure_change(headers_conn.sqlite(), tenure_payload); }; // validate cause match tenure_payload.cause { - TenureChangeCause::BlockFound => {} + TenureChangeCause::BlockFound => { + // this tenure_payload's prev_consensus_hash must match the parent block tenure's + // tenure_consensus_hash -- i.e. this tenure must be distinct from the parent + // block's tenure + if parent_tenure.tenure_id_consensus_hash + != tenure_payload.prev_tenure_consensus_hash + { + warn!("Invalid tenure-change: tenure block-found does not confirm parent block's tenure"; + "parent_tenure.tenure_consensus_hash" => %parent_tenure.tenure_id_consensus_hash, + "prev_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash + ); + return Ok(None); + } + } TenureChangeCause::Extended => { // tenure extensions don't begin a new tenure (since the miner isn't changing), so // the tenure consensus hash must be the same as the previous tenure consensus hash @@ -875,10 +807,9 @@ impl NakamotoChainState { // If there is a succession of tenure-extensions for a given tenure, then the reported tenure // length must report the number of blocks since the last _sortition-induced_ tenure // change. - let tenure_len = Self::get_nakamoto_tenure_length( - headers_conn, - &tenure_payload.prev_tenure_consensus_hash, - )?; + let tenure_len = + Self::get_nakamoto_tenure_length(headers_conn.sqlite(), &block_header.parent_block_id)?; + if tenure_len != tenure_payload.previous_tenure_blocks { // invalid -- does not report the correct number of blocks in the past tenure warn!("Invalid tenure-change: wrong number of blocks"; @@ -890,7 +821,7 @@ impl NakamotoChainState { return Ok(None); } - Ok(Some(tenure)) + Ok(Some(parent_tenure)) } /// Advance the tenures table with a validated block's tenure data. @@ -923,53 +854,57 @@ impl NakamotoChainState { } }; - let Some(processed_tenure) = - Self::check_nakamoto_tenure(headers_tx, handle, &block.header, tenure_payload)? - else { + if Self::check_nakamoto_tenure(headers_tx, handle, &block.header, tenure_payload)?.is_none() + { return Err(ChainstateError::InvalidStacksTransaction( "Invalid tenure tx".into(), false, )); }; - Self::insert_nakamoto_tenure( - headers_tx, - &block.header, - coinbase_height, - processed_tenure - .tenure_index - .checked_add(1) - .expect("too many tenure-changes"), - tenure_payload, - )?; + Self::insert_nakamoto_tenure(headers_tx, &block.header, coinbase_height, tenure_payload)?; return Ok(coinbase_height); } /// Check that this block is in the same tenure as its parent, and that this tenure is the /// highest-seen tenure. Use this to check blocks that do _not_ have BlockFound tenure-changes. /// + /// `parent_ch` is the tenure ID consensus hash of the given block's parent. + /// /// Returns Ok(bool) to indicate whether or not this block is in the same tenure as its parent. /// Returns Err(..) on DB error - pub(crate) fn check_tenure_continuity( - headers_conn: &Connection, - sortdb_conn: &SH, + pub(crate) fn check_tenure_continuity( + headers_conn: &mut STH, parent_ch: &ConsensusHash, block_header: &NakamotoBlockHeader, ) -> Result { // block must have the same consensus hash as its parent if block_header.is_first_mined() || parent_ch != &block_header.consensus_hash { + test_debug!("Block is discontinuous with tenure: either first-mined or has a different tenure ID"; + "parent_ch" => %parent_ch, + "block_header.consensus_hash" => %block_header.consensus_hash, + "is_first_mined()" => block_header.is_first_mined(), + ); return Ok(false); } // block must be in the same tenure as the highest-processed tenure. - let Some(highest_tenure) = Self::get_highest_nakamoto_tenure(headers_conn, sortdb_conn)? + let Some(highest_tenure) = + Self::get_ongoing_tenure(headers_conn, &block_header.parent_block_id)? else { // no tenures yet, so definitely not continuous + test_debug!("Block is discontinuous with tenure: no ongoing tenure"; + "block_header.parent_block_id" => %block_header.parent_block_id, + ); return Ok(false); }; if &highest_tenure.tenure_id_consensus_hash != parent_ch { // this block is not in the highest-known tenure, so it can't be continuous + test_debug!("Block is discontinuous with tenure: parent is not in current tenure"; + "parent_ch" => %parent_ch, + "highest_tenure.tenure_id_consensus_hash" => %highest_tenure.tenure_id_consensus_hash, + ); return Ok(false); } From 8b6bbf905df6df16d9c9e6c54033170d2b5d94f2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:06:17 -0400 Subject: [PATCH 0385/1400] chore: strip out MARF-dependent tests and move them to a separate test battery in TestPeer --- .../src/chainstate/nakamoto/tests/mod.rs | 940 ++++++++---------- 1 file changed, 438 insertions(+), 502 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index bf1a09fc17..88f28ec60b 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -62,7 +62,9 @@ use crate::chainstate::coordinator::tests::{ use crate::chainstate::nakamoto::coordinator::tests::boot_nakamoto; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; -use crate::chainstate::nakamoto::staging_blocks::NakamotoStagingBlocksConnRef; +use crate::chainstate::nakamoto::staging_blocks::{ + NakamotoBlockObtainMethod, NakamotoStagingBlocksConnRef, +}; use crate::chainstate::nakamoto::tenure::NakamotoTenure; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::node::TestStacker; @@ -92,88 +94,6 @@ use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::Error as db_error; use crate::util_lib::strings::StacksString; -/// WSTS aggregate public keys are not shipping immediately in Nakamoto, but there is still a lot -/// of test coverage for it. The code here is preserved to keep these tests working until WSTS's -/// coordinator implementaiton is ready. -impl NakamotoChainState { - /// Get the aggregate public key for the given block from the signers-voting contract - pub(crate) fn load_aggregate_public_key( - sortdb: &SortitionDB, - sort_handle: &SH, - chainstate: &mut StacksChainState, - for_burn_block_height: u64, - at_block_id: &StacksBlockId, - warn_if_not_found: bool, - ) -> Result { - // Get the current reward cycle - let Some(rc) = sort_handle.pox_constants().block_height_to_reward_cycle( - sort_handle.first_burn_block_height(), - for_burn_block_height, - ) else { - // This should be unreachable, but we'll return an error just in case. - let msg = format!( - "BUG: Failed to determine reward cycle of burn block height: {}.", - for_burn_block_height - ); - warn!("{msg}"); - return Err(ChainstateError::InvalidStacksBlock(msg)); - }; - - test_debug!( - "get-approved-aggregate-key at block {}, cycle {}", - at_block_id, - rc - ); - match chainstate.get_aggregate_public_key_pox_4(sortdb, at_block_id, rc)? { - Some(key) => Ok(key), - None => { - // this can happen for a whole host of reasons - if warn_if_not_found { - warn!( - "Failed to get aggregate public key"; - "block_id" => %at_block_id, - "reward_cycle" => rc, - ); - } - Err(ChainstateError::InvalidStacksBlock( - "Failed to get aggregate public key".into(), - )) - } - } - } - - /// Get the aggregate public key for a block. - /// TODO: The block at which the aggregate public key is queried needs to be better defined. - /// See https://github.com/stacks-network/stacks-core/issues/4109 - pub fn get_aggregate_public_key( - chainstate: &mut StacksChainState, - sortdb: &SortitionDB, - sort_handle: &SH, - block: &NakamotoBlock, - ) -> Result { - let block_sn = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &block.header.consensus_hash)? - .ok_or(ChainstateError::DBError(db_error::NotFoundError))?; - let aggregate_key_block_header = - Self::get_canonical_block_header(chainstate.db(), sortdb)?.unwrap(); - let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), block_sn.block_height)? - .ok_or(ChainstateError::InvalidStacksBlock( - "Failed to get epoch ID".into(), - ))? - .epoch_id; - - let aggregate_public_key = Self::load_aggregate_public_key( - sortdb, - sort_handle, - chainstate, - block_sn.block_height, - &aggregate_key_block_header.index_block_hash(), - epoch_id >= StacksEpochId::Epoch30, - )?; - Ok(aggregate_public_key) - } -} - impl<'a> NakamotoStagingBlocksConnRef<'a> { pub fn get_all_blocks_in_tenure( &self, @@ -645,47 +565,7 @@ pub fn test_nakamoto_first_tenure_block_syntactic_validation() { ); } -struct MockSortitionHandle { - nakamoto_tip: (ConsensusHash, BlockHeaderHash, u64), -} - -impl MockSortitionHandle { - pub fn new(consensus_hash: ConsensusHash, bhh: BlockHeaderHash, height: u64) -> Self { - Self { - nakamoto_tip: (consensus_hash, bhh, height), - } - } -} - -impl SortitionHandle for MockSortitionHandle { - fn get_block_snapshot_by_height( - &mut self, - block_height: u64, - ) -> Result, db_error> { - unimplemented!() - } - - fn first_burn_block_height(&self) -> u64 { - unimplemented!() - } - - fn pox_constants(&self) -> &PoxConstants { - unimplemented!() - } - - fn sqlite(&self) -> &Connection { - unimplemented!() - } - - fn tip(&self) -> SortitionId { - unimplemented!() - } - - fn get_nakamoto_tip(&self) -> Result, db_error> { - Ok(Some(self.nakamoto_tip.clone())) - } -} - +/// Tests for non-MARF'ed block storage #[test] pub fn test_load_store_update_nakamoto_blocks() { let test_name = function_name!(); @@ -777,7 +657,7 @@ pub fn test_load_store_update_nakamoto_blocks() { let tenure_change_payload = TenureChangePayload { tenure_consensus_hash: ConsensusHash([0x04; 20]), // same as in nakamoto header - prev_tenure_consensus_hash: ConsensusHash([0x01; 20]), + prev_tenure_consensus_hash: epoch2_consensus_hash.clone(), burn_view_consensus_hash: ConsensusHash([0x04; 20]), previous_tenure_end: epoch2_parent_block_id.clone(), previous_tenure_blocks: 1, @@ -812,6 +692,14 @@ pub fn test_load_store_update_nakamoto_blocks() { stx_transfer_tx_3.chain_id = 0x80000000; stx_transfer_tx_3.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut stx_transfer_tx_4 = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + TransactionPayload::TokenTransfer(recipient_addr.into(), 125, TokenTransferMemo([0u8; 34])), + ); + stx_transfer_tx_4.chain_id = 0x80000000; + stx_transfer_tx_4.anchor_mode = TransactionAnchorMode::OnChainOnly; + let nakamoto_txs = vec![tenure_change_tx.clone(), coinbase_tx.clone()]; let nakamoto_tx_merkle_root = { let txid_vecs = nakamoto_txs @@ -842,6 +730,16 @@ pub fn test_load_store_update_nakamoto_blocks() { MerkleTree::::new(&txid_vecs).root() }; + let nakamoto_txs_4 = vec![stx_transfer_tx_4.clone()]; + let nakamoto_tx_merkle_root_4 = { + let txid_vecs = nakamoto_txs_4 + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + MerkleTree::::new(&txid_vecs).root() + }; + let header_signatures = vec![ MessageSignature::from_bytes(&[0x01; 65]).unwrap(), MessageSignature::from_bytes(&[0x02; 65]).unwrap(), @@ -963,9 +861,85 @@ pub fn test_load_store_update_nakamoto_blocks() { let nakamoto_block_3 = NakamotoBlock { header: nakamoto_header_3.clone(), - txs: nakamoto_txs_3, + txs: nakamoto_txs_3.clone(), }; + // third nakamoto block, but with a higher signing weight + let nakamoto_header_3_weight_2 = NakamotoBlockHeader { + version: 1, + chain_length: 459, + burn_spent: 128, + consensus_hash: tenure_change_payload.tenure_consensus_hash.clone(), + parent_block_id: nakamoto_header_2.block_id(), + tx_merkle_root: nakamoto_tx_merkle_root_3, + state_index_root: TrieHash([0x07; 32]), + timestamp: 9, + miner_signature: MessageSignature::empty(), + signer_signature: vec![MessageSignature::from_bytes(&[0x01; 65]).unwrap()], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let nakamoto_header_info_3_weight_2 = StacksHeaderInfo { + anchored_header: StacksBlockHeaderTypes::Nakamoto(nakamoto_header_3_weight_2.clone()), + microblock_tail: None, + stacks_block_height: nakamoto_header_2.chain_length, + index_root: TrieHash([0x67; 32]), + consensus_hash: nakamoto_header_2.consensus_hash.clone(), + burn_header_hash: BurnchainHeaderHash([0x88; 32]), + burn_header_height: 200, + burn_header_timestamp: 1001, + anchored_block_size: 123, + burn_view: Some(nakamoto_header_3.consensus_hash), + }; + + let nakamoto_block_3_weight_2 = NakamotoBlock { + header: nakamoto_header_3_weight_2.clone(), + txs: nakamoto_txs_3.clone(), + }; + + // fourth nakamoto block -- confirms nakamoto_block_3_weight_2 + let nakamoto_header_4 = NakamotoBlockHeader { + version: 1, + chain_length: 460, + burn_spent: 128, + consensus_hash: tenure_change_payload.tenure_consensus_hash.clone(), + parent_block_id: nakamoto_header_3_weight_2.block_id(), + tx_merkle_root: nakamoto_tx_merkle_root_4, + state_index_root: TrieHash([0x71; 32]), + timestamp: 10, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let nakamoto_header_info_4 = StacksHeaderInfo { + anchored_header: StacksBlockHeaderTypes::Nakamoto(nakamoto_header_4.clone()), + microblock_tail: None, + stacks_block_height: nakamoto_header_4.chain_length, + index_root: TrieHash([0x71; 32]), + consensus_hash: nakamoto_header_3_weight_2.consensus_hash.clone(), + burn_header_hash: BurnchainHeaderHash([0x88; 32]), + burn_header_height: 200, + burn_header_timestamp: 1001, + anchored_block_size: 123, + burn_view: Some(nakamoto_header_4.consensus_hash), + }; + + let nakamoto_block_4 = NakamotoBlock { + header: nakamoto_header_4.clone(), + txs: nakamoto_txs_4.clone(), + }; + + // nakamoto block 3 only differs in signers + assert_eq!( + nakamoto_block_3.block_id(), + nakamoto_block_3_weight_2.block_id() + ); + assert_eq!( + nakamoto_block_3.header.signer_signature_hash(), + nakamoto_block_3_weight_2.header.signer_signature_hash() + ); + let mut total_nakamoto_execution_cost = nakamoto_execution_cost.clone(); total_nakamoto_execution_cost .add(&nakamoto_execution_cost_2) @@ -979,7 +953,6 @@ pub fn test_load_store_update_nakamoto_blocks() { block_hash: nakamoto_block.header.block_hash(), block_id: nakamoto_block.header.block_id(), coinbase_height: epoch2_header.total_work.work + 1, - tenure_index: 1, num_blocks_confirmed: 1, }; @@ -1000,17 +973,14 @@ pub fn test_load_store_update_nakamoto_blocks() { // tenure length doesn't apply to epoch2 blocks assert_eq!( - NakamotoChainState::get_nakamoto_tenure_length(&tx, &epoch2_header_info.consensus_hash) - .unwrap(), + NakamotoChainState::get_nakamoto_tenure_length( + &tx, + &epoch2_header_info.index_block_hash() + ) + .unwrap(), 0 ); - // no tenure rows - assert_eq!( - NakamotoChainState::get_highest_nakamoto_coinbase_height(&tx, i64::MAX as u64).unwrap(), - None - ); - // but, this upcoming tenure-change payload should be the first-ever tenure-change payload! assert!(NakamotoChainState::check_first_nakamoto_tenure_change( &tx, @@ -1021,46 +991,25 @@ pub fn test_load_store_update_nakamoto_blocks() { // no tenure yet, so zero blocks assert_eq!( - NakamotoChainState::get_nakamoto_tenure_length( - &tx, - &nakamoto_block.header.consensus_hash - ) - .unwrap(), + NakamotoChainState::get_nakamoto_tenure_length(&tx, &nakamoto_block.header.block_id(),) + .unwrap(), 0 ); - // no tenure rows - assert_eq!( - NakamotoChainState::get_highest_nakamoto_coinbase_height(&tx, i64::MAX as u64).unwrap(), - None - ); - // add the tenure for these blocks NakamotoChainState::insert_nakamoto_tenure( &tx, &nakamoto_header, epoch2_header.total_work.work + 1, - 1, &tenure_change_payload, ) .unwrap(); // no blocks yet, so zero blocks assert_eq!( - NakamotoChainState::get_nakamoto_tenure_length( - &tx, - &nakamoto_block.header.consensus_hash - ) - .unwrap(), - 0 - ); - - // have a tenure - assert_eq!( - NakamotoChainState::get_highest_nakamoto_coinbase_height(&tx, i64::MAX as u64) - .unwrap() + NakamotoChainState::get_nakamoto_tenure_length(&tx, &nakamoto_block.header.block_id(),) .unwrap(), - epoch2_header.total_work.work + 1 + 0 ); // this succeeds now @@ -1072,27 +1021,24 @@ pub fn test_load_store_update_nakamoto_blocks() { &nakamoto_execution_cost, &nakamoto_execution_cost, true, + 1, 300, ) .unwrap(); - NakamotoChainState::store_block(&staging_tx, &nakamoto_block, false).unwrap(); + NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block, + false, + 1, + NakamotoBlockObtainMethod::Downloaded, + ) + .unwrap(); // tenure has one block assert_eq!( - NakamotoChainState::get_nakamoto_tenure_length( - &tx, - &nakamoto_block.header.consensus_hash - ) - .unwrap(), - 1 - ); - - // same tenure - assert_eq!( - NakamotoChainState::get_highest_nakamoto_coinbase_height(&tx, i64::MAX as u64) - .unwrap() + NakamotoChainState::get_nakamoto_tenure_length(&tx, &nakamoto_block.header.block_id(),) .unwrap(), - epoch2_header.total_work.work + 1 + 1 ); // this succeeds now @@ -1104,32 +1050,94 @@ pub fn test_load_store_update_nakamoto_blocks() { &nakamoto_execution_cost, &total_nakamoto_execution_cost, false, + 2, 400, ) .unwrap(); - NakamotoChainState::store_block(&staging_tx, &nakamoto_block_2, false).unwrap(); + NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_2, + false, + 1, + NakamotoBlockObtainMethod::Downloaded, + ) + .unwrap(); // tenure has two blocks assert_eq!( NakamotoChainState::get_nakamoto_tenure_length( &tx, - &nakamoto_block.header.consensus_hash + &nakamoto_block_2.header.block_id(), ) .unwrap(), 2 ); + assert_eq!( + NakamotoChainState::get_nakamoto_tenure_length(&tx, &nakamoto_block.header.block_id(),) + .unwrap(), + 1 + ); - // same tenure + // store, but do not process, a block + NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_3, + false, + 1, + NakamotoBlockObtainMethod::Downloaded, + ) + .unwrap(); assert_eq!( - NakamotoChainState::get_highest_nakamoto_coinbase_height(&tx, i64::MAX as u64) + staging_tx + .conn() + .get_nakamoto_block(&nakamoto_header_3.block_id()) + .unwrap() + .unwrap() + .0, + nakamoto_block_3 + ); + assert_eq!( + staging_tx + .conn() + .get_block_processed_and_signed_weight( + &nakamoto_header_3.consensus_hash, + &nakamoto_header_3.signer_signature_hash() + ) .unwrap() .unwrap(), - epoch2_header.total_work.work + 1 + (nakamoto_header_3.block_id(), false, false, 1) ); - // store, but do not process, a block - NakamotoChainState::store_block(&staging_tx, &nakamoto_block_3, false).unwrap(); + // store, but do not process, the same block with a heavier weight + NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_3_weight_2, + false, + 2, + NakamotoBlockObtainMethod::Downloaded, + ) + .unwrap(); + assert_eq!( + staging_tx + .conn() + .get_nakamoto_block(&nakamoto_header_3_weight_2.block_id()) + .unwrap() + .unwrap() + .0, + nakamoto_block_3_weight_2 + ); + assert_eq!( + staging_tx + .conn() + .get_block_processed_and_signed_weight( + &nakamoto_header_3.consensus_hash, + &nakamoto_header_3.signer_signature_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header_3_weight_2.block_id(), false, false, 2) + ); staging_tx.commit().unwrap(); tx.commit().unwrap(); @@ -1178,6 +1186,19 @@ pub fn test_load_store_update_nakamoto_blocks() { (true, false) ); + // however, in the staging DB, this block is not yet marked as processed + assert_eq!( + chainstate + .nakamoto_blocks_db() + .get_block_processed_and_signed_weight( + &nakamoto_header.consensus_hash, + &nakamoto_header.signer_signature_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header.block_id(), false, false, 1) + ); + // same goes for block 2 assert_eq!( NakamotoChainState::get_nakamoto_block_status( @@ -1190,20 +1211,43 @@ pub fn test_load_store_update_nakamoto_blocks() { .unwrap(), (true, false) ); + assert_eq!( + chainstate + .nakamoto_blocks_db() + .get_block_processed_and_signed_weight( + &nakamoto_header.consensus_hash, + &nakamoto_header_2.signer_signature_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header_2.block_id(), false, false, 1) + ); // block 3 has only been stored, but no header has been added assert_eq!( NakamotoChainState::get_nakamoto_block_status( chainstate.nakamoto_blocks_db(), chainstate.db(), - &nakamoto_header_3.consensus_hash, - &nakamoto_header_3.block_hash() + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.block_hash() ) .unwrap() .unwrap(), (false, false) ); + assert_eq!( + chainstate + .nakamoto_blocks_db() + .get_block_processed_and_signed_weight( + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.signer_signature_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header_3_weight_2.block_id(), false, false, 2) + ); + // this method doesn't return data for epoch2 assert_eq!( NakamotoChainState::get_nakamoto_block_status( @@ -1216,41 +1260,224 @@ pub fn test_load_store_update_nakamoto_blocks() { None ); - // set nakamoto block processed + // set nakamoto block processed, and store a sibling if it's the chain tip { let (tx, staging_tx) = chainstate.headers_and_staging_tx_begin().unwrap(); + staging_tx - .set_block_processed(&nakamoto_header_3.block_id()) + .set_block_processed(&nakamoto_header_3_weight_2.block_id()) .unwrap(); assert_eq!( NakamotoChainState::get_nakamoto_block_status( staging_tx.conn(), &tx, - &nakamoto_header_3.consensus_hash, - &nakamoto_header_3.block_hash() + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.block_hash() ) .unwrap() .unwrap(), (true, false) ); + assert_eq!( + staging_tx + .conn() + .get_block_processed_and_signed_weight( + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.signer_signature_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header_3_weight_2.block_id(), true, false, 2) + ); + + // store a sibling with more weight, even though this block has been processed. + // This is allowed because we don't commit to signatures. + NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_3, + false, + 3, + NakamotoBlockObtainMethod::Downloaded, + ) + .unwrap(); + assert_eq!( + staging_tx + .conn() + .get_nakamoto_block(&nakamoto_header_3.block_id()) + .unwrap() + .unwrap() + .0, + nakamoto_block_3 + ); + assert_eq!( + staging_tx + .conn() + .get_block_processed_and_signed_weight( + &nakamoto_header_3.consensus_hash, + &nakamoto_header_3.signer_signature_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header_3.block_id(), true, false, 3) + ); + } + + // set nakamoto block processed, and store a processed children, and verify that we'll still + // accept siblings with higher signing power. + { + let (tx, staging_tx) = chainstate.headers_and_staging_tx_begin().unwrap(); + + // set block 3 weight 2 processed + staging_tx + .set_block_processed(&nakamoto_header_3_weight_2.block_id()) + .unwrap(); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + staging_tx.conn(), + &tx, + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.block_hash() + ) + .unwrap() + .unwrap(), + (true, false) + ); + assert_eq!( + staging_tx + .conn() + .get_block_processed_and_signed_weight( + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.signer_signature_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header_3_weight_2.block_id(), true, false, 2) + ); + + // store block 4, which descends from block 3 weight 2 + NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_4, + false, + 1, + NakamotoBlockObtainMethod::Downloaded, + ) + .unwrap(); + assert_eq!( + staging_tx + .conn() + .get_nakamoto_block(&nakamoto_header_4.block_id()) + .unwrap() + .unwrap() + .0, + nakamoto_block_4 + ); + + // set block 4 processed + staging_tx + .set_block_processed(&nakamoto_header_4.block_id()) + .unwrap(); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + staging_tx.conn(), + &tx, + &nakamoto_header_4.consensus_hash, + &nakamoto_header_4.block_hash() + ) + .unwrap() + .unwrap(), + (true, false) + ); + + NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_3, + false, + 3, + NakamotoBlockObtainMethod::Downloaded, + ) + .unwrap(); + assert_eq!( + staging_tx + .conn() + .get_nakamoto_block(&nakamoto_header_3.block_id()) + .unwrap() + .unwrap() + .0, + nakamoto_block_3 + ); } // set nakamoto block orphaned { let (tx, staging_tx) = chainstate.headers_and_staging_tx_begin().unwrap(); staging_tx - .set_block_orphaned(&nakamoto_header.block_id()) + .set_block_orphaned(&nakamoto_header_3_weight_2.block_id()) .unwrap(); assert_eq!( NakamotoChainState::get_nakamoto_block_status( staging_tx.conn(), &tx, - &nakamoto_header.consensus_hash, - &nakamoto_header.block_hash() + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.block_hash() ) .unwrap() .unwrap(), (true, true) ); + assert_eq!( + staging_tx + .conn() + .get_block_processed_and_signed_weight( + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.signer_signature_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header_3_weight_2.block_id(), true, true, 2) + ); + + // can't re-store it, even if its signing power is better + assert!(!NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_3_weight_2, + false, + 3, + NakamotoBlockObtainMethod::Downloaded + ) + .unwrap()); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + staging_tx.conn(), + &tx, + &nakamoto_header_3_weight_2.consensus_hash, + &nakamoto_header_3_weight_2.block_hash() + ) + .unwrap() + .unwrap(), + (true, true) + ); + + // can't store a sibling with the same sighash either, since if a block with the given sighash is orphaned, then + // it doesn't matter how many signers it has + assert!(!NakamotoChainState::store_block_if_better( + &staging_tx, + &nakamoto_block_3, + false, + 3, + NakamotoBlockObtainMethod::Downloaded + ) + .unwrap()); + assert_eq!( + staging_tx + .conn() + .get_block_processed_and_signed_weight( + &nakamoto_header_3.consensus_hash, + &nakamoto_header_3.signer_signature_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header_3_weight_2.block_id(), true, true, 2) + ); } // orphan nakamoto block by parent { @@ -1269,48 +1496,19 @@ pub fn test_load_store_update_nakamoto_blocks() { .unwrap(), (false, true) ); + assert_eq!( + staging_tx + .conn() + .get_block_processed_and_signed_weight( + &nakamoto_header.consensus_hash, + &nakamoto_header.signer_signature_hash() + ) + .unwrap() + .unwrap(), + (nakamoto_header.block_id(), false, true, 1) + ); } - // check start/finish - assert_eq!( - NakamotoChainState::get_nakamoto_tenure_start_block_header( - chainstate.db(), - &nakamoto_header.consensus_hash - ) - .unwrap() - .unwrap(), - nakamoto_header_info - ); - assert_eq!( - NakamotoChainState::get_nakamoto_tenure_finish_block_header( - chainstate.db(), - &nakamoto_header.consensus_hash - ) - .unwrap() - .unwrap(), - nakamoto_header_info_2 - ); - - // can query the tenure-start and epoch2 headers by consensus hash - assert_eq!( - NakamotoChainState::get_block_header_by_consensus_hash( - chainstate.db(), - &nakamoto_header.consensus_hash - ) - .unwrap() - .unwrap(), - nakamoto_header_info - ); - assert_eq!( - NakamotoChainState::get_block_header_by_consensus_hash( - chainstate.db(), - &epoch2_consensus_hash - ) - .unwrap() - .unwrap(), - epoch2_header_info - ); - // can query the tenure-start and epoch2 headers by block ID assert_eq!( NakamotoChainState::get_block_header(chainstate.db(), &nakamoto_header.block_id()) @@ -1334,29 +1532,6 @@ pub fn test_load_store_update_nakamoto_blocks() { epoch2_header_info ); - // can get tenure height of nakamoto blocks and epoch2 blocks - assert_eq!( - NakamotoChainState::get_coinbase_height(chainstate.db(), &nakamoto_header.block_id()) - .unwrap() - .unwrap(), - epoch2_header_info.anchored_header.height() + 1 - ); - assert_eq!( - NakamotoChainState::get_coinbase_height(chainstate.db(), &nakamoto_header_2.block_id()) - .unwrap() - .unwrap(), - epoch2_header_info.anchored_header.height() + 1 - ); - assert_eq!( - NakamotoChainState::get_coinbase_height( - chainstate.db(), - &epoch2_header_info.index_block_hash() - ) - .unwrap() - .unwrap(), - epoch2_header_info.anchored_header.height() - ); - // can get total tenure cost for nakamoto blocks, but not epoch2 blocks assert_eq!( NakamotoChainState::get_total_tenure_cost_at(chainstate.db(), &nakamoto_header.block_id()) @@ -1410,33 +1585,22 @@ pub fn test_load_store_update_nakamoto_blocks() { None ); - // can get block VRF proof for both nakamoto and epoch2 blocks - assert_eq!( - NakamotoChainState::get_block_vrf_proof(chainstate.db(), &nakamoto_header.consensus_hash) - .unwrap() - .unwrap(), - nakamoto_proof - ); - assert_eq!( - NakamotoChainState::get_block_vrf_proof(chainstate.db(), &epoch2_consensus_hash) - .unwrap() - .unwrap(), - epoch2_proof - ); - // can get nakamoto VRF proof only for nakamoto blocks assert_eq!( NakamotoChainState::get_nakamoto_tenure_vrf_proof( chainstate.db(), - &nakamoto_header.consensus_hash + &nakamoto_header.block_id(), ) .unwrap() .unwrap(), nakamoto_proof ); assert_eq!( - NakamotoChainState::get_nakamoto_tenure_vrf_proof(chainstate.db(), &epoch2_consensus_hash) - .unwrap(), + NakamotoChainState::get_nakamoto_tenure_vrf_proof( + chainstate.db(), + &epoch2_header_info.index_block_hash() + ) + .unwrap(), None ); @@ -1445,16 +1609,8 @@ pub fn test_load_store_update_nakamoto_blocks() { { let (tx, staging_tx) = chainstate.headers_and_staging_tx_begin().unwrap(); let staging_conn = staging_tx.conn(); - let sh = MockSortitionHandle::new( - nakamoto_block_2.header.consensus_hash.clone(), - nakamoto_block_2.header.block_hash(), - nakamoto_block_2.header.chain_length, - ); - assert_eq!( - staging_conn.next_ready_nakamoto_block(&tx, &sh).unwrap(), - None - ); + assert_eq!(staging_conn.next_ready_nakamoto_block(&tx).unwrap(), None); // set parent epoch2 block processed staging_tx @@ -1462,10 +1618,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .unwrap(); // but it's not enough -- child's consensus hash needs to be burn_processable - assert_eq!( - staging_conn.next_ready_nakamoto_block(&tx, &sh).unwrap(), - None - ); + assert_eq!(staging_conn.next_ready_nakamoto_block(&tx).unwrap(), None); // set burn processed staging_tx @@ -1475,7 +1628,7 @@ pub fn test_load_store_update_nakamoto_blocks() { // this works now assert_eq!( staging_conn - .next_ready_nakamoto_block(&tx, &sh) + .next_ready_nakamoto_block(&tx) .unwrap() .unwrap() .0, @@ -1490,7 +1643,7 @@ pub fn test_load_store_update_nakamoto_blocks() { // next nakamoto block assert_eq!( staging_conn - .next_ready_nakamoto_block(&tx, &sh) + .next_ready_nakamoto_block(&tx) .unwrap() .unwrap() .0, @@ -1712,223 +1865,6 @@ fn test_nakamoto_block_static_verification() { .is_err()); } -/// Mock block arrivals -fn make_fork_run_with_arrivals( - sort_db: &mut SortitionDB, - start_snapshot: &BlockSnapshot, - length: u64, - bit_pattern: u8, -) -> Vec { - let mut last_snapshot = start_snapshot.clone(); - let mut new_snapshots = vec![]; - for i in last_snapshot.block_height..(last_snapshot.block_height + length) { - let snapshot = BlockSnapshot { - accumulated_coinbase_ustx: 0, - pox_valid: true, - block_height: last_snapshot.block_height + 1, - burn_header_timestamp: get_epoch_time_secs(), - burn_header_hash: BurnchainHeaderHash([(i as u8) | bit_pattern; 32]), - sortition_id: SortitionId([(i as u8) | bit_pattern; 32]), - parent_sortition_id: last_snapshot.sortition_id.clone(), - parent_burn_header_hash: last_snapshot.burn_header_hash.clone(), - consensus_hash: ConsensusHash([((i + 1) as u8) | bit_pattern; 20]), - ops_hash: OpsHash([(i as u8) | bit_pattern; 32]), - total_burn: 0, - sortition: true, - sortition_hash: SortitionHash([(i as u8) | bit_pattern; 32]), - winning_block_txid: Txid([(i as u8) | bit_pattern; 32]), - winning_stacks_block_hash: BlockHeaderHash([(i as u8) | bit_pattern; 32]), - index_root: TrieHash([0u8; 32]), - num_sortitions: last_snapshot.num_sortitions + 1, - stacks_block_accepted: false, - stacks_block_height: 0, - arrival_index: 0, - canonical_stacks_tip_height: last_snapshot.canonical_stacks_tip_height + 10, - canonical_stacks_tip_hash: BlockHeaderHash([((i + 1) as u8) | bit_pattern; 32]), - canonical_stacks_tip_consensus_hash: ConsensusHash([((i + 1) as u8) | bit_pattern; 20]), - miner_pk_hash: None, - }; - new_snapshots.push(snapshot.clone()); - { - let mut tx = SortitionHandleTx::begin(sort_db, &last_snapshot.sortition_id).unwrap(); - let _index_root = tx - .append_chain_tip_snapshot( - &last_snapshot, - &snapshot, - &vec![], - &vec![], - None, - None, - None, - ) - .unwrap(); - tx.test_update_canonical_stacks_tip( - &snapshot.sortition_id, - &snapshot.canonical_stacks_tip_consensus_hash, - &snapshot.canonical_stacks_tip_hash, - snapshot.canonical_stacks_tip_height, - ) - .unwrap(); - tx.commit().unwrap(); - } - last_snapshot = SortitionDB::get_block_snapshot(sort_db.conn(), &snapshot.sortition_id) - .unwrap() - .unwrap(); - } - new_snapshots -} - -/// Tests that getting the highest nakamoto tenure works in the presence of forks -#[test] -pub fn test_get_highest_nakamoto_tenure() { - let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); - let mut peer = boot_nakamoto( - function_name!(), - vec![], - &mut test_signers, - &test_stackers, - None, - ); - - // extract chainstate and sortdb -- we don't need the peer anymore - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let sort_db = peer.sortdb.as_mut().unwrap(); - - // seed a single fork of tenures - let last_snapshot = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - - // mock block arrivals - let snapshots = make_fork_run_with_arrivals(sort_db, &last_snapshot, 5, 0); - - let mut last_header: Option = None; - let mut last_tenure_change: Option = None; - let mut all_headers = vec![]; - let mut all_tenure_changes = vec![]; - for (i, sn) in snapshots.iter().enumerate() { - let block_header = NakamotoBlockHeader { - version: 0, - chain_length: sn.canonical_stacks_tip_height, - burn_spent: i as u64, - consensus_hash: sn.consensus_hash.clone(), - parent_block_id: last_header - .as_ref() - .map(|hdr| hdr.block_id()) - .unwrap_or(FIRST_STACKS_BLOCK_ID.clone()), - tx_merkle_root: Sha512Trunc256Sum([0x00; 32]), - state_index_root: TrieHash([0x00; 32]), - timestamp: get_epoch_time_secs(), - miner_signature: MessageSignature::empty(), - signer_signature: vec![], - pox_treatment: BitVec::zeros(1).unwrap(), - }; - let tenure_change = TenureChangePayload { - tenure_consensus_hash: sn.consensus_hash.clone(), - prev_tenure_consensus_hash: last_tenure_change - .as_ref() - .map(|tc| tc.tenure_consensus_hash.clone()) - .unwrap_or(last_snapshot.consensus_hash.clone()), - burn_view_consensus_hash: sn.consensus_hash.clone(), - previous_tenure_end: block_header.block_id(), - previous_tenure_blocks: 10, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0x00; 20]), - }; - - let tx = chainstate.db_tx_begin().unwrap(); - NakamotoChainState::insert_nakamoto_tenure( - &tx, - &block_header, - 1 + i as u64, - 1 + i as u64, - &tenure_change, - ) - .unwrap(); - tx.commit().unwrap(); - - all_headers.push(block_header.clone()); - all_tenure_changes.push(tenure_change.clone()); - - last_header = Some(block_header); - last_tenure_change = Some(tenure_change); - } - - // highest tenure should be the last one we inserted - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let (stacks_ch, stacks_bhh, stacks_height) = - SortitionDB::get_canonical_stacks_chain_tip_hash_and_height(sort_db.conn()).unwrap(); - debug!("tip = {:?}", &tip); - debug!( - "stacks tip = {},{},{}", - &stacks_ch, &stacks_bhh, stacks_height - ); - let highest_tenure = NakamotoChainState::get_highest_nakamoto_tenure( - chainstate.db(), - &sort_db.index_handle_at_tip(), - ) - .unwrap() - .unwrap(); - - let last_tenure_change = last_tenure_change.unwrap(); - let last_header = last_header.unwrap(); - assert_eq!( - highest_tenure.tenure_id_consensus_hash, - last_tenure_change.tenure_consensus_hash - ); - assert_eq!( - highest_tenure.prev_tenure_id_consensus_hash, - last_tenure_change.prev_tenure_consensus_hash - ); - assert_eq!( - highest_tenure.burn_view_consensus_hash, - last_tenure_change.burn_view_consensus_hash - ); - assert_eq!(highest_tenure.cause, last_tenure_change.cause); - assert_eq!(highest_tenure.block_hash, last_header.block_hash()); - assert_eq!(highest_tenure.block_id, last_header.block_id()); - assert_eq!(highest_tenure.coinbase_height, 5); - assert_eq!(highest_tenure.tenure_index, 5); - assert_eq!(highest_tenure.num_blocks_confirmed, 10); - - // uh oh, a bitcoin fork! - let last_snapshot = snapshots[2].clone(); - let snapshots = make_fork_run(sort_db, &last_snapshot, 7, 0x80); - - let new_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - debug!("tip = {:?}", &new_tip); - debug!( - "stacks tip = {},{},{}", - &stacks_ch, &stacks_bhh, stacks_height - ); - - // new tip doesn't include the last two tenures - let highest_tenure = NakamotoChainState::get_highest_nakamoto_tenure( - chainstate.db(), - &sort_db.index_handle_at_tip(), - ) - .unwrap() - .unwrap(); - let last_tenure_change = &all_tenure_changes[2]; - let last_header = &all_headers[2]; - assert_eq!( - highest_tenure.tenure_id_consensus_hash, - last_tenure_change.tenure_consensus_hash - ); - assert_eq!( - highest_tenure.prev_tenure_id_consensus_hash, - last_tenure_change.prev_tenure_consensus_hash - ); - assert_eq!( - highest_tenure.burn_view_consensus_hash, - last_tenure_change.burn_view_consensus_hash - ); - assert_eq!(highest_tenure.cause, last_tenure_change.cause); - assert_eq!(highest_tenure.block_hash, last_header.block_hash()); - assert_eq!(highest_tenure.block_id, last_header.block_id()); - assert_eq!(highest_tenure.coinbase_height, 3); - assert_eq!(highest_tenure.tenure_index, 3); - assert_eq!(highest_tenure.num_blocks_confirmed, 10); -} - /// Test that we can generate a .miners stackerdb config. /// The config must be stable across sortitions -- if a miner is given slot i, then it continues /// to have slot i in subsequent sortitions. From 60c87648222d2f87038f3575e2cacc9436313841 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:06:45 -0400 Subject: [PATCH 0386/1400] feat: malleablize blocks when processing them in each TestPeer-driven test, so we can simulate the presence of multiple (!!) nakamoto forks per Bitcoin fork and verify that the node still behaves correctly. Also, add torture tests for MARF'ed methods, to be performed for each processed block. --- .../src/chainstate/nakamoto/tests/node.rs | 960 +++++++++++++++++- 1 file changed, 909 insertions(+), 51 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 1995bb2d0c..d1c294bf41 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -32,6 +32,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, }; use stacks_common::util::hash::Hash160; +use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; use wsts::curve::point::Point; @@ -53,6 +54,7 @@ use crate::chainstate::nakamoto::coordinator::{ get_nakamoto_next_recipients, load_nakamoto_reward_set, }; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; @@ -75,10 +77,16 @@ use crate::util_lib::db::Error as db_error; #[derive(Debug, Clone)] pub struct TestStacker { + /// Key used to send stacking transactions pub stacker_private_key: StacksPrivateKey, + /// Signer key for this stacker pub signer_private_key: StacksPrivateKey, + /// amount of uSTX stacked pub amount: u128, - pub pox_address: Option, + /// PoX address to stack to (defaults to a fixed PoX address if not given) + pub pox_addr: Option, + /// Maximum amount to stack (defaults to u128::MAX) + pub max_amount: Option, } impl TestStacker { @@ -92,7 +100,8 @@ impl TestStacker { stacker_private_key, signer_private_key, amount: 1_000_000_000_000_000_000, - pox_address: None, + pox_addr: None, + max_amount: None, } } @@ -112,13 +121,56 @@ impl TestStacker { signer_private_key: signing_key.clone(), stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), amount: Self::DEFAULT_STACKER_AMOUNT, - pox_address: None, + pox_addr: None, + max_amount: None, }) .collect::>(); let test_signers = TestSigners::new(vec![signing_key]); (test_signers, stackers) } + + /// make a set of stackers who will share a set of keys and stack with + /// `Self::DEFAULT_STACKER_AMOUNT` + /// + /// `key_distribution.len()` stackers will be created + /// `key_distribution[i]` is the ID of key that the ith stacker will use. + /// The ID is opaque -- it's used as a seed to generate the key. + /// Each set of stackers with the same key ID will be given its own PoX address + pub fn multi_signing_set(key_distribution: &[u8]) -> (TestSigners, Vec) { + let stackers = key_distribution + .iter() + .enumerate() + .map(|(index, key_seed)| { + let signing_key = StacksPrivateKey::from_seed(&[*key_seed]); + let pox_key = StacksPrivateKey::from_seed(&[*key_seed, *key_seed]); + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&pox_key)); + let pox_addr = + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); + + TestStacker { + signer_private_key: signing_key.clone(), + stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), + amount: Self::DEFAULT_STACKER_AMOUNT, + pox_addr: Some(pox_addr), + max_amount: Some(u128::MAX - u128::try_from(index).unwrap()), + } + }) + .collect::>(); + + // N.B. the .to_hex() is needed because Secp256k1PrivateKey does not implement Hash + let unique_signers: HashSet<_> = stackers + .iter() + .map(|st| st.signer_private_key.to_hex()) + .collect(); + let test_signers = TestSigners::new( + unique_signers + .into_iter() + .map(|sk_hex| Secp256k1PrivateKey::from_hex(&sk_hex).unwrap()) + .collect(), + ); + (test_signers, stackers) + } } impl TestBurnchainBlock { @@ -294,7 +346,8 @@ impl TestStacksNode { .unwrap() .unwrap(); let vrf_proof = NakamotoChainState::get_block_vrf_proof( - self.chainstate.db(), + &mut self.chainstate.index_conn(), + &parent_block.index_block_hash(), &parent_block.consensus_hash, ) .unwrap() @@ -458,13 +511,22 @@ impl TestStacksNode { // building atop nakamoto let tenure_len = NakamotoChainState::get_nakamoto_tenure_length( self.chainstate.db(), - &hdr.consensus_hash, + &hdr.index_block_hash(), ) .unwrap(); - debug!("Tenure length of {} is {}", &hdr.consensus_hash, tenure_len); + debug!( + "Tenure length of Nakamoto tenure {} is {}; tipped at {}", + &hdr.consensus_hash, + tenure_len, + &hdr.index_block_hash() + ); (hdr.index_block_hash(), hdr.consensus_hash, tenure_len) } else { // building atop epoch2 + debug!( + "Tenure length of epoch2 tenure {} is {}; tipped at {}", + &parent_block_snapshot.consensus_hash, 1, &last_tenure_id + ); ( last_tenure_id, parent_block_snapshot.consensus_hash.clone(), @@ -483,6 +545,8 @@ impl TestStacksNode { pubkey_hash: miner.nakamoto_miner_hash160(), }; + test_debug!("TenureChangePayload: {:?}", &tenure_change_payload); + let block_commit_op = self.make_nakamoto_tenure_commitment( sortdb, burn_block, @@ -505,9 +569,15 @@ impl TestStacksNode { /// /// The first block will contain a coinbase, if coinbase is Some(..) /// Process the blocks via the chains coordinator as we produce them. + /// + /// Returns a list of + /// * the block + /// * its size + /// * its execution cost + /// * a list of malleablized blocks with the same sighash pub fn make_nakamoto_tenure_blocks<'a, S, F, G>( chainstate: &mut StacksChainState, - sortdb: &SortitionDB, + sortdb: &mut SortitionDB, miner: &mut TestMiner, signers: &mut TestSigners, tenure_id_consensus_hash: &ConsensusHash, @@ -525,7 +595,7 @@ impl TestStacksNode { mut miner_setup: S, mut block_builder: F, mut after_block: G, - ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> + ) -> Vec<(NakamotoBlock, u64, ExecutionCost, Vec)> where S: FnMut(&mut NakamotoBlockBuilder), F: FnMut( @@ -537,6 +607,7 @@ impl TestStacksNode { G: FnMut(&mut NakamotoBlock) -> bool, { let mut blocks = vec![]; + let mut all_malleablized_blocks = vec![]; let mut block_count = 0; loop { let mut txs = vec![]; @@ -553,13 +624,61 @@ impl TestStacksNode { break; } - let parent_tip_opt = - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb).unwrap(); + // there may be a tenure-extend here. Go find it if so + let mut parent_id_opt = None; + for tx in txs.iter() { + if let TransactionPayload::TenureChange(payload) = &tx.payload { + parent_id_opt = Some(payload.previous_tenure_end.clone()); + } + } + + let parent_tip_opt = if let Some(parent_id) = parent_id_opt { + if let Some(nakamoto_parent) = + NakamotoChainState::get_block_header(chainstate.db(), &parent_id).unwrap() + { + debug!( + "Use parent tip identified by produced TenureChange ({})", + &parent_id + ); + Some(nakamoto_parent) + } else { + warn!("Produced Tenure change transaction does not point to a real block"); + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + } + } else if let Some(tenure_change) = tenure_change.as_ref() { + // make sure parent tip is consistent with a tenure change + if let TransactionPayload::TenureChange(payload) = &tenure_change.payload { + if let Some(nakamoto_parent) = NakamotoChainState::get_block_header( + chainstate.db(), + &payload.previous_tenure_end, + ) + .unwrap() + { + debug!( + "Use parent tip identified by given TenureChange ({})", + &payload.previous_tenure_end + ); + Some(nakamoto_parent) + } else { + debug!("Use parent tip identified by canonical tip pointer (no parent block {})", &payload.previous_tenure_end); + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + } + } else { + panic!("Tenure change transaction does not have a TenureChange payload"); + } + } else { + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb).unwrap() + }; + let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); debug!( - "Build Nakamoto block in tenure {} sortition {}", - &tenure_id_consensus_hash, &burn_tip.consensus_hash + "Build Nakamoto block in tenure {} sortition {} parent_tip {:?}", + &tenure_id_consensus_hash, + &burn_tip.consensus_hash, + &parent_tip_opt.clone().map(|blk| blk.index_block_hash()) ); // make a block @@ -621,6 +740,7 @@ impl TestStacksNode { &sort_tip_sn.sortition_id, &miner.burnchain, chainstate, + &nakamoto_block.header.parent_block_id, sortdb, &OnChainRewardSetProvider::new(), ) @@ -658,46 +778,106 @@ impl TestStacksNode { let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); let mut sort_handle = sortdb.index_handle(&sort_tip); - info!("Processing the new nakamoto block"); - let accepted = match Relayer::process_new_nakamoto_block( - &miner.burnchain, - sortdb, - &mut sort_handle, - chainstate, - &nakamoto_block, - None, - ) { - Ok(accepted) => accepted, - Err(e) => { - error!( - "Failed to process nakamoto block: {:?}\n{:?}", - &e, &nakamoto_block + let stacks_tip = sort_handle.get_nakamoto_tip_block_id().unwrap().unwrap(); + + let mut block_to_store = nakamoto_block.clone(); + let mut processed_blocks = vec![]; + let mut malleablized_blocks = vec![]; + loop { + // don't process if we don't have enough signatures + if let Err(e) = block_to_store.header.verify_signer_signatures(&reward_set) { + info!( + "Will stop processing malleablized blocks for {}: {:?}", + &block_id, &e ); - panic!(); + break; + } + if block_to_store.block_id() == block_id { + info!("Processing the new nakamoto block {}", &block_id); + } else { + info!( + "Processing the new malleablized nakamoto block {}, original is {}", + &block_to_store.block_id(), + &block_id + ); + malleablized_blocks.push(block_to_store.clone()); } - }; - if accepted { - test_debug!("Accepted Nakamoto block {}", &block_id); - coord.handle_new_nakamoto_stacks_block().unwrap(); - // confirm that the chain tip advanced - let stacks_chain_tip = - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + let accepted = match Relayer::process_new_nakamoto_block( + &miner.burnchain, + sortdb, + &mut sort_handle, + chainstate, + &stacks_tip, + &block_to_store, + None, + NakamotoBlockObtainMethod::Pushed, + ) { + Ok(accepted) => accepted, + Err(e) => { + error!( + "Failed to process nakamoto block: {:?}\n{:?}", + &e, &nakamoto_block + ); + panic!(); + } + }; + if accepted { + test_debug!("Accepted Nakamoto block {}", &block_to_store.block_id()); + coord.handle_new_nakamoto_stacks_block().unwrap(); + processed_blocks.push(block_to_store.clone()); + + if block_to_store.block_id() == block_id { + // confirm that the chain tip advanced + let stacks_chain_tip = NakamotoChainState::get_canonical_block_header( + chainstate.db(), + &sortdb, + ) .unwrap() .unwrap(); - let nakamoto_chain_tip = stacks_chain_tip - .anchored_header - .as_stacks_nakamoto() - .expect("FATAL: chain tip is not a Nakamoto block"); - assert_eq!(nakamoto_chain_tip, &nakamoto_block.header); - } else { - test_debug!("Did NOT accept Nakamoto block {}", &block_id); + let nakamoto_chain_tip = stacks_chain_tip + .anchored_header + .as_stacks_nakamoto() + .expect("FATAL: chain tip is not a Nakamoto block"); + assert_eq!(nakamoto_chain_tip, &nakamoto_block.header); + } + } else { + test_debug!( + "Did NOT accept Nakamoto block {}", + &block_to_store.block_id() + ); + break; + } + + let num_sigs = block_to_store.header.signer_signature.len(); + + // force this block to have a different sighash, in addition to different + // signatures, so that both blocks are valid at a consensus level + block_to_store.header.version += 1; + block_to_store.header.signer_signature.clear(); + + miner.sign_nakamoto_block(&mut block_to_store); + signers.sign_block_with_reward_set(&mut block_to_store, &reward_set); + + while block_to_store.header.signer_signature.len() >= num_sigs { + block_to_store.header.signer_signature.pop(); + } } + for processed_block in processed_blocks { + debug!("Begin check Nakamoto block {}", &processed_block.block_id()); + TestPeer::check_processed_nakamoto_block(sortdb, chainstate, &processed_block); + debug!("End check Nakamoto block {}", &processed_block.block_id()); + } blocks.push((nakamoto_block, size, cost)); + all_malleablized_blocks.push(malleablized_blocks); block_count += 1; } blocks + .into_iter() + .zip(all_malleablized_blocks.into_iter()) + .map(|((blk, sz, cost), mals)| (blk, sz, cost, mals)) + .collect() } pub fn make_nakamoto_block_from_txs( @@ -968,6 +1148,7 @@ impl<'a> TestPeer<'a> { &tip, &mut sortdb, &mut stacks_node.chainstate, + &tenure_change_payload.previous_tenure_end, &self.config.burnchain, ) { Ok(recipients) => { @@ -1046,13 +1227,16 @@ impl<'a> TestPeer<'a> { pub fn try_process_block(&mut self, block: &NakamotoBlock) -> Result { let mut sort_handle = self.sortdb.as_ref().unwrap().index_handle_at_tip(); + let stacks_tip = sort_handle.get_nakamoto_tip_block_id().unwrap().unwrap(); let accepted = Relayer::process_new_nakamoto_block( &self.config.burnchain, self.sortdb.as_ref().unwrap(), &mut sort_handle, &mut self.stacks_node.as_mut().unwrap().chainstate, + &stacks_tip, block, None, + NakamotoBlockObtainMethod::Pushed, )?; if !accepted { return Ok(false); @@ -1131,14 +1315,14 @@ impl<'a> TestPeer<'a> { { let cycle = self.get_reward_cycle(); let mut stacks_node = self.stacks_node.take().unwrap(); - let sortdb = self.sortdb.take().unwrap(); + let mut sortdb = self.sortdb.take().unwrap(); // Ensure the signers are setup for the current cycle signers.generate_aggregate_key(cycle); let blocks = TestStacksNode::make_nakamoto_tenure_blocks( &mut stacks_node.chainstate, - &sortdb, + &mut sortdb, &mut self.miner, signers, &tenure_change @@ -1157,14 +1341,30 @@ impl<'a> TestPeer<'a> { let just_blocks = blocks .clone() .into_iter() - .map(|(block, _, _)| block) + .map(|(block, _, _, _)| block) .collect(); + stacks_node.add_nakamoto_tenure_blocks(just_blocks); + let mut malleablized_blocks: Vec = blocks + .clone() + .into_iter() + .map(|(_, _, _, malleablized)| malleablized) + .flatten() + .collect(); + + self.malleablized_blocks.append(&mut malleablized_blocks); + + let block_data = blocks + .clone() + .into_iter() + .map(|(blk, sz, cost, _)| (blk, sz, cost)) + .collect(); + self.stacks_node = Some(stacks_node); self.sortdb = Some(sortdb); - blocks + block_data } /// Produce and process a Nakamoto tenure extension. @@ -1187,7 +1387,7 @@ impl<'a> TestPeer<'a> { ) -> Vec, { let mut stacks_node = self.stacks_node.take().unwrap(); - let sortdb = self.sortdb.take().unwrap(); + let mut sortdb = self.sortdb.take().unwrap(); let tenure_extend_payload = if let TransactionPayload::TenureChange(ref tc) = &tenure_extend_tx.payload { @@ -1212,7 +1412,7 @@ impl<'a> TestPeer<'a> { let blocks = TestStacksNode::make_nakamoto_tenure_blocks( &mut stacks_node.chainstate, - &sortdb, + &mut sortdb, &mut self.miner, signers, &tenure_extend_tx @@ -1231,28 +1431,44 @@ impl<'a> TestPeer<'a> { let just_blocks = blocks .clone() .into_iter() - .map(|(block, _, _)| block) + .map(|(block, _, _, _)| block) .collect(); + stacks_node.add_nakamoto_extended_blocks(just_blocks); + let mut malleablized_blocks: Vec = blocks + .clone() + .into_iter() + .map(|(_, _, _, malleablized)| malleablized) + .flatten() + .collect(); + + self.malleablized_blocks.append(&mut malleablized_blocks); + + let block_data = blocks + .clone() + .into_iter() + .map(|(blk, sz, cost, _)| (blk, sz, cost)) + .collect(); + self.stacks_node = Some(stacks_node); self.sortdb = Some(sortdb); - blocks + block_data } /// Accept a new Nakamoto tenure via the relayer, and then try to process them. pub fn process_nakamoto_tenure(&mut self, blocks: Vec) { debug!("Peer will process {} Nakamoto blocks", blocks.len()); - let sortdb = self.sortdb.take().unwrap(); + let mut sortdb = self.sortdb.take().unwrap(); let mut node = self.stacks_node.take().unwrap(); let tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); - let mut sort_handle = sortdb.index_handle(&tip); node.add_nakamoto_tenure_blocks(blocks.clone()); for block in blocks.into_iter() { + let mut sort_handle = sortdb.index_handle(&tip); let block_id = block.block_id(); debug!("Process Nakamoto block {} ({:?}", &block_id, &block.header); let accepted = Relayer::process_new_nakamoto_block( @@ -1260,13 +1476,19 @@ impl<'a> TestPeer<'a> { &sortdb, &mut sort_handle, &mut node.chainstate, + &self.network.stacks_tip.block_id(), &block, None, + NakamotoBlockObtainMethod::Pushed, ) .unwrap(); if accepted { test_debug!("Accepted Nakamoto block {}", &block_id); self.coord.handle_new_nakamoto_stacks_block().unwrap(); + + debug!("Begin check Nakamoto block {}", &block.block_id()); + TestPeer::check_processed_nakamoto_block(&mut sortdb, &mut node.chainstate, &block); + debug!("Eegin check Nakamoto block {}", &block.block_id()); } else { test_debug!("Did NOT accept Nakamoto block {}", &block_id); } @@ -1275,4 +1497,640 @@ impl<'a> TestPeer<'a> { self.sortdb = Some(sortdb); self.stacks_node = Some(node); } + + /// Get the tenure-start block of the parent tenure of `tenure_id_consensus_hash` + fn get_parent_tenure_start_header( + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + tip_block_id: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> StacksHeaderInfo { + let Ok(Some(tenure_start_header)) = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &tip_block_id, + tenure_id_consensus_hash, + ) else { + panic!( + "No tenure-start block header for tenure {}", + tenure_id_consensus_hash + ); + }; + + let Ok(Some((tenure_start_block, _))) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&tenure_start_header.index_block_hash()) + else { + panic!( + "Unable to load tenure-start block {}", + &tenure_start_header.index_block_hash() + ); + }; + + let Some(tenure_start_tx) = tenure_start_block.get_tenure_change_tx_payload() else { + panic!( + "Tenure-start block {} has no tenure-change tx", + &tenure_start_header.index_block_hash() + ); + }; + + let prev_tenure_consensus_hash = &tenure_start_tx.prev_tenure_consensus_hash; + + // get the tenure-start block of the last tenure + let Ok(Some(prev_tenure_start_header)) = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &tip_block_id, + prev_tenure_consensus_hash, + ) else { + panic!( + "No tenure-start block header for tenure {}", + tenure_id_consensus_hash + ); + }; + + prev_tenure_start_header + } + + /// Get the block-commit for a tenure. It corresponds to the tenure-start block of + /// its parent tenure. + fn get_tenure_block_commit( + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + tip_block_id: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> LeaderBlockCommitOp { + let prev_tenure_start_header = Self::get_parent_tenure_start_header( + sortdb, + chainstate, + tip_block_id, + tenure_id_consensus_hash, + ); + let block_hash = BlockHeaderHash(prev_tenure_start_header.index_block_hash().0); + let Ok(Some(block_commit)) = SortitionDB::get_block_commit_for_stacks_block( + sortdb.conn(), + tenure_id_consensus_hash, + &block_hash, + ) else { + panic!( + "No block-commit for tenure {}: parent tenure-start was {} {:?}", + tenure_id_consensus_hash, + &prev_tenure_start_header.index_block_hash(), + &prev_tenure_start_header + ); + }; + block_commit + } + + /// Load up all blocks from the given block back to the last tenure-change block-found tx + fn load_nakamoto_tenure( + chainstate: &StacksChainState, + tip_block_id: &StacksBlockId, + ) -> Vec { + // count up the number of blocks between `tip_block_id` and its ancestral tenure-change + let mut ancestors = vec![]; + let mut cursor = tip_block_id.clone(); + loop { + let block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&cursor) + .unwrap() + .unwrap() + .0; + cursor = block.header.parent_block_id.clone(); + let is_tenure_change = block.get_tenure_change_tx_payload().is_some(); + ancestors.push(block); + + if is_tenure_change { + break; + } + } + ancestors + } + + /// Check various properties of the chainstate regarding this nakamoto block. + /// Tests: + /// * get_coinbase_height + /// * get_tenure_start_block_header + /// * get_nakamoto_tenure_start_block_header + /// * get_block_vrf_proof + /// * get_nakamoto_tenure_vrf_proof + /// * get_parent_vrf_proof + /// * validate_vrf_seed + /// * check_block_commit_vrf_seed + /// * get_nakamoto_parent_tenure_id_consensus_hash + /// * get_ongoing_tenure + /// * get_block_found_tenure + /// * get_nakamoto_tenure_length + /// * has_processed_nakamoto_tenure + /// * check_nakamoto_tenure + /// * check_tenure_continuity + pub fn check_processed_nakamoto_block( + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + block: &NakamotoBlock, + ) { + let Ok(Some(parent_block_header)) = + NakamotoChainState::get_block_header(chainstate.db(), &block.header.parent_block_id) + else { + panic!("No parent block for {:?}", &block); + }; + + // get_coinbase_height + // Verify that it only increases if the given block has a tenure-change block-found + // transaction + let block_coinbase_height = NakamotoChainState::get_coinbase_height( + &mut chainstate.index_conn(), + &block.block_id(), + ) + .unwrap() + .unwrap(); + let parent_coinbase_height = NakamotoChainState::get_coinbase_height( + &mut chainstate.index_conn(), + &block.header.parent_block_id, + ) + .unwrap() + .unwrap(); + + if let Some(tenure_tx) = block.get_tenure_change_tx_payload() { + // crosses a tenure block-found boundary + assert_eq!(parent_coinbase_height + 1, block_coinbase_height); + } else { + assert_eq!(parent_coinbase_height, block_coinbase_height); + } + + // get_tenure_start_block_header + // Verify that each Nakamoto block's tenure-start header is defined + let Ok(Some(tenure_start_header)) = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash, + ) else { + panic!("No tenure-start block header for {:?}", &block); + }; + + // get_nakamoto_tenure_start_block_header + // Verify that if this tenure_start_header is a Nakamoto block, then we can load it. + // Otherwise, we shouldn't be able to load it + if tenure_start_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + assert_eq!( + tenure_start_header, + NakamotoChainState::get_nakamoto_tenure_start_block_header( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash + ) + .unwrap() + .unwrap() + ); + } else { + assert!(NakamotoChainState::get_nakamoto_tenure_start_block_header( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash + ) + .unwrap() + .is_none()); + } + + // only blocks with a tenure-change block-found transaction are tenure-start blocks + if block.get_tenure_change_tx_payload().is_some() { + assert_eq!( + &block.header, + tenure_start_header + .anchored_header + .as_stacks_nakamoto() + .unwrap() + ); + } else { + assert_ne!( + &block.header, + tenure_start_header + .anchored_header + .as_stacks_nakamoto() + .unwrap() + ); + } + + // get_block_vrf_proof + // Verify that a VRF proof is defined for each tenure + let Ok(Some(vrf_proof)) = NakamotoChainState::get_block_vrf_proof( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash, + ) else { + panic!( + "No VRF proof defined for tenure {}", + &block.header.consensus_hash + ); + }; + + // get_nakamoto_tenure_vrf_proof + // if this is the tenure-start block, then the block VRF proof must be the VRF proof stored in the headers + // DB fo it. Otherwise, there must not be a VRF proof for this block. + if block.get_tenure_change_tx_payload().is_some() { + let Ok(Some(block_vrf_proof)) = NakamotoChainState::get_nakamoto_tenure_vrf_proof( + chainstate.db(), + &block.block_id(), + ) else { + panic!( + "No VRF proof stored for tenure-start block {}: {:?}", + &block.block_id(), + &block + ); + }; + assert_eq!(block_vrf_proof, vrf_proof); + } else { + // this block has no VRF proof defined + assert!(NakamotoChainState::get_nakamoto_tenure_vrf_proof( + chainstate.db(), + &block.block_id() + ) + .unwrap() + .is_none()); + } + + // get_parent_vrf_proof + // The parent VRF proof needs to be the same as the VRF proof for the parent tenure + let parent_tenure_start = Self::get_parent_tenure_start_header( + sortdb, + chainstate, + &block.block_id(), + &block.header.consensus_hash, + ); + let tenure_block_commit = Self::get_tenure_block_commit( + sortdb, + chainstate, + &block.block_id(), + &block.header.consensus_hash, + ); + let parent_vrf_proof = NakamotoChainState::get_parent_vrf_proof( + &mut chainstate.index_conn(), + &block.block_id(), + &sortdb.conn(), + &block.header.consensus_hash, + &tenure_block_commit.txid, + ) + .unwrap(); + + if let Ok(Some(expected_parent_vrf_proof)) = + NakamotoChainState::get_nakamoto_tenure_vrf_proof( + chainstate.db(), + &parent_tenure_start.index_block_hash(), + ) + { + assert_eq!(expected_parent_vrf_proof, parent_vrf_proof); + } else if parent_tenure_start + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + panic!( + "No VRF proof stored for parent Nakamoto tenure-start block {}: {:?}", + &parent_tenure_start.index_block_hash(), + &parent_tenure_start + ); + }; + + // get_nakamoto_parent_tenure_id_consensus_hash + // The parent tenure start header must have the parent tenure's consensus hash. + assert_eq!( + NakamotoChainState::get_nakamoto_parent_tenure_id_consensus_hash( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash + ) + .unwrap() + .unwrap(), + parent_tenure_start.consensus_hash + ); + + // get_ongoing_tenure + // changes when we cross _any_ boundary + if let Some(tenure_tx) = block.get_tenure_tx_payload() { + if parent_block_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + // crosses a tenure block-found or extend boundary + assert_ne!( + NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &block.block_id() + ) + .unwrap() + .unwrap(), + NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &parent_block_header.index_block_hash() + ) + .unwrap() + .unwrap() + ); + } else { + assert!(NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &parent_block_header.index_block_hash() + ) + .unwrap() + .is_none()); + } + } else { + if parent_block_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + assert_eq!( + NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &block.block_id() + ) + .unwrap() + .unwrap(), + NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &parent_block_header.index_block_hash() + ) + .unwrap() + .unwrap() + ); + } else { + assert!(NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &parent_block_header.index_block_hash() + ) + .unwrap() + .is_none()); + } + } + + // get_block_found_tenure + // changes when we cross a tenure-change block-found boundary + if let Some(tenure_tx) = block.get_tenure_change_tx_payload() { + if parent_block_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + // crosses a tenure block-found or extend boundary + assert_ne!( + NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash + ) + .unwrap() + .unwrap(), + NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &parent_block_header.consensus_hash + ) + .unwrap() + .unwrap() + ); + } else { + assert!(NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &parent_block_header.consensus_hash + ) + .unwrap() + .is_none()); + } + } else { + if parent_block_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + assert_eq!( + NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash + ) + .unwrap() + .unwrap(), + NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &parent_block_header.consensus_hash + ) + .unwrap() + .unwrap() + ); + } else { + assert!(NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &parent_block_header.consensus_hash + ) + .unwrap() + .is_none()); + } + } + + // get_nakamoto_tenure_length + // compare the DB to the block's ancestors + let ancestors = Self::load_nakamoto_tenure(chainstate, &block.block_id()); + assert!(ancestors.len() > 0); + assert_eq!( + ancestors.len(), + NakamotoChainState::get_nakamoto_tenure_length(chainstate.db(), &block.block_id()) + .unwrap() as usize + ); + + // has_processed_nakamoto_tenure + // this tenure is unprocessed as of this block. + // the parent tenure is already processed. + assert!(!NakamotoChainState::has_processed_nakamoto_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash + ) + .unwrap()); + if parent_tenure_start + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + // MARF stores parent tenure info for Nakamoto + assert!(NakamotoChainState::has_processed_nakamoto_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &parent_tenure_start.consensus_hash + ) + .unwrap()); + } else { + // MARF does NOT store parent tenure info for epoch2 + assert!(!NakamotoChainState::has_processed_nakamoto_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &parent_tenure_start.consensus_hash + ) + .unwrap()); + } + + // validate_vrf_seed + // Check against the tenure block-commit + assert!(block + .validate_vrf_seed( + sortdb.conn(), + &mut chainstate.index_conn(), + &tenure_block_commit + ) + .is_ok()); + let mut bad_commit = tenure_block_commit.clone(); + bad_commit.new_seed = VRFSeed([0xff; 32]); + assert!(block + .validate_vrf_seed(sortdb.conn(), &mut chainstate.index_conn(), &bad_commit) + .is_err()); + + // check_block_commit_vrf_seed + assert!(NakamotoChainState::check_block_commit_vrf_seed( + &mut chainstate.index_conn(), + sortdb.conn(), + &block + ) + .is_ok()); + + if let Some(tenure_tx) = block.get_tenure_tx_payload() { + if let Some(expected_tenure) = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &block.header.parent_block_id, + ) + .unwrap() + { + // this block connects to its parent's tenure + assert_eq!( + expected_tenure, + NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + tenure_tx + ) + .unwrap() + .unwrap() + ); + } else { + // this block connects to the last epoch 2.x tenure + assert_eq!( + NakamotoChainState::check_first_nakamoto_tenure_change( + chainstate.db(), + tenure_tx + ) + .unwrap() + .unwrap(), + NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + tenure_tx + ) + .unwrap() + .unwrap() + ); + } + + if tenure_tx.cause == TenureChangeCause::BlockFound { + // block-founds are always in new tenures + assert!(!NakamotoChainState::check_tenure_continuity( + &mut chainstate.index_conn(), + &parent_block_header.consensus_hash, + &block.header + ) + .unwrap()); + } else { + // extends are in the same tenure as their parents + assert!(NakamotoChainState::check_tenure_continuity( + &mut chainstate.index_conn(), + &parent_block_header.consensus_hash, + &block.header + ) + .unwrap()); + } + + // get a valid but too-old consensus hash + let prev_tenure_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &tenure_tx.prev_tenure_consensus_hash, + ) + .unwrap() + .unwrap(); + let invalid_tenure_sn = + SortitionDB::get_block_snapshot(sortdb.conn(), &prev_tenure_sn.parent_sortition_id) + .unwrap() + .unwrap(); + + // this fails if we change any tenure-identifying fields + let mut bad_tenure_tx = tenure_tx.clone(); + bad_tenure_tx.tenure_consensus_hash = invalid_tenure_sn.consensus_hash.clone(); + assert!(NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + &bad_tenure_tx + ) + .unwrap() + .is_none()); + + let mut bad_tenure_tx = tenure_tx.clone(); + bad_tenure_tx.prev_tenure_consensus_hash = invalid_tenure_sn.consensus_hash.clone(); + assert!(NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + &bad_tenure_tx + ) + .unwrap() + .is_none()); + + let mut bad_tenure_tx = tenure_tx.clone(); + bad_tenure_tx.burn_view_consensus_hash = invalid_tenure_sn.consensus_hash.clone(); + assert!(NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + &bad_tenure_tx + ) + .unwrap() + .is_none()); + + let mut bad_tenure_tx = tenure_tx.clone(); + bad_tenure_tx.previous_tenure_end = + StacksBlockId(prev_tenure_sn.winning_stacks_block_hash.clone().0); + assert!(NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + &bad_tenure_tx + ) + .unwrap() + .is_none()); + + let mut bad_tenure_tx = tenure_tx.clone(); + bad_tenure_tx.previous_tenure_blocks = u32::MAX; + assert!(NakamotoChainState::check_nakamoto_tenure( + &mut chainstate.index_conn(), + &mut sortdb.index_handle_at_tip(), + &block.header, + &bad_tenure_tx + ) + .unwrap() + .is_none()); + } else { + assert!(NakamotoChainState::check_tenure_continuity( + &mut chainstate.index_conn(), + &parent_block_header.consensus_hash, + &block.header + ) + .unwrap()); + } + } } From 7ce5771b1b57075e594d06316dec91613cece75c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:07:37 -0400 Subject: [PATCH 0387/1400] chore: API sync --- stackslib/src/chainstate/stacks/boot/mod.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 1be4fa9385..18706743e9 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -51,7 +51,7 @@ use crate::burnchains::bitcoin::address::BitcoinAddress; use crate::burnchains::{Address, Burnchain, PoxConstants}; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; -use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::db::{StacksChainState, StacksDBConn}; use crate::chainstate::stacks::index::marf::MarfConnection; use crate::chainstate::stacks::Error; use crate::clarity_vm::clarity::{ClarityConnection, ClarityTransactionConnection}; @@ -597,11 +597,12 @@ impl StacksChainState { code: &str, ) -> Result { let iconn = sortdb.index_handle_at_block(self, stacks_block_id)?; - let dbconn = self.state_index.sqlite_conn(); + let ro_index = self.state_index.reopen_readonly()?; + let headers_db = HeadersDBConn(StacksDBConn::new(&ro_index, ())); self.clarity_state .eval_read_only( &stacks_block_id, - &HeadersDBConn(dbconn), + &headers_db, &iconn, &boot::boot_code_id(boot_contract_name, self.mainnet), code, From c09c87e501398330050443f01258763eca6285d2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:07:47 -0400 Subject: [PATCH 0388/1400] chore: API sync --- stackslib/src/chainstate/stacks/boot/pox_2_tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index b5f1859355..7ae25d00f6 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -53,7 +53,7 @@ use crate::chainstate::stacks::boot::{ POX_3_NAME, }; use crate::chainstate::stacks::db::{ - MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, + MinerPaymentSchedule, StacksChainState, StacksDBConn, StacksHeaderInfo, MINER_REWARD_MATURITY, }; use crate::chainstate::stacks::events::TransactionOrigin; use crate::chainstate::stacks::index::marf::MarfConnection; @@ -666,7 +666,7 @@ where F: FnOnce(&mut ClarityDatabase) -> R, { with_sortdb(peer, |ref mut c, ref sortdb| { - let headers_db = HeadersDBConn(c.state_index.sqlite_conn()); + let headers_db = HeadersDBConn(StacksDBConn::new(&c.state_index, ())); let burn_db = sortdb.index_handle_at_tip(); let mut read_only_clar = c .clarity_state From fc126adf2d89abb5c14d004e981f1bfbeebfe2ec Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:07:57 -0400 Subject: [PATCH 0389/1400] chore: API sync --- stackslib/src/chainstate/stacks/db/accounts.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index affc0bb9d8..00e1764818 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -1131,7 +1131,7 @@ mod test { block_reward.block_hash = new_tip.anchored_header.block_hash(); block_reward.consensus_hash = new_tip.consensus_hash.clone(); - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); let tip = StacksChainState::advance_tip( &mut tx, parent_header_info @@ -1187,7 +1187,7 @@ mod test { ); { - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); let ancestor_0 = StacksChainState::get_tip_ancestor( &mut tx, &StacksHeaderInfo::regtest_genesis(), @@ -1204,7 +1204,7 @@ mod test { ); { - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); let ancestor_0 = StacksChainState::get_tip_ancestor(&mut tx, &parent_tip, 0).unwrap(); let ancestor_1 = StacksChainState::get_tip_ancestor(&mut tx, &parent_tip, 1).unwrap(); @@ -1217,7 +1217,7 @@ mod test { let tip = advance_tip(&mut chainstate, &parent_tip, &mut tip_reward); { - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); let ancestor_2 = StacksChainState::get_tip_ancestor(&mut tx, &tip, 2).unwrap(); let ancestor_1 = StacksChainState::get_tip_ancestor(&mut tx, &tip, 1).unwrap(); let ancestor_0 = StacksChainState::get_tip_ancestor(&mut tx, &tip, 0).unwrap(); @@ -1263,7 +1263,7 @@ mod test { let tip = advance_tip(&mut chainstate, &parent_tip, &mut tip_reward); { - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); let payments_0 = StacksChainState::get_scheduled_block_rewards_in_fork_at_height(&mut tx, &tip, 0) .unwrap(); @@ -1313,7 +1313,7 @@ mod test { let tip = advance_tip(&mut chainstate, &parent_tip, &mut tip_reward); { - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); let payments_0 = StacksChainState::get_scheduled_block_rewards_in_fork_at_height(&mut tx, &tip, 0) .unwrap(); From 194ae0435b8c7285e02b16cbf5192a669e88add9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:08:10 -0400 Subject: [PATCH 0390/1400] chore: API sync and DB migrations --- stackslib/src/chainstate/stacks/db/mod.rs | 46 +++++++++++++++++------ 1 file changed, 35 insertions(+), 11 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index d9b6d47775..91db57905d 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -54,6 +54,7 @@ use crate::chainstate::burn::{ConsensusHash, ConsensusHashExtensions}; use crate::chainstate::nakamoto::{ HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, NAKAMOTO_CHAINSTATE_SCHEMA_2, + NAKAMOTO_CHAINSTATE_SCHEMA_3, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -318,10 +319,16 @@ impl DBConfig { self.version == "3" || self.version == "4" || self.version == "5" } StacksEpochId::Epoch25 => { - self.version == "3" || self.version == "4" || self.version == "5" + self.version == "3" + || self.version == "4" + || self.version == "5" + || self.version == "6" } StacksEpochId::Epoch30 => { - self.version == "3" || self.version == "4" || self.version == "5" + self.version == "3" + || self.version == "4" + || self.version == "5" + || self.version == "6" } } } @@ -370,6 +377,16 @@ impl StacksBlockHeaderTypes { _ => None, } } + + /// Get the sighash of a block. + /// * In Nakamoto blocks, this is the hash signers must sign + /// * In epoch 2, this is simply the blocok hash (there are no signers) + pub fn sighash(&self) -> Sha512Trunc256Sum { + match &self { + StacksBlockHeaderTypes::Nakamoto(ref x) => x.signer_signature_hash(), + StacksBlockHeaderTypes::Epoch2(ref x) => Sha512Trunc256Sum(x.block_hash().0), + } + } } impl StacksHeaderInfo { @@ -696,7 +713,7 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &'static str = "5"; +pub const CHAINSTATE_VERSION: &'static str = "6"; const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[ "PRAGMA foreign_keys = ON;", @@ -1114,6 +1131,13 @@ impl StacksChainState { tx.execute_batch(cmd)?; } } + "5" => { + // migrate to nakamoto 3 + info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { + tx.execute_batch(cmd)?; + } + } _ => { error!( "Invalid chain state database: expected version = {}, got {}", @@ -1662,7 +1686,7 @@ impl StacksChainState { { // add a block header entry for the boot code - let mut tx = chainstate.index_tx_begin()?; + let mut tx = chainstate.index_tx_begin(); let parent_hash = StacksBlockId::sentinel(); let first_index_hash = StacksBlockHeader::make_index_block_hash( &FIRST_BURNCHAIN_CONSENSUS_HASH, @@ -1881,12 +1905,12 @@ impl StacksChainState { /// Begin a transaction against the (indexed) stacks chainstate DB. /// Does not create a Clarity instance. - pub fn index_tx_begin<'a>(&'a mut self) -> Result, Error> { - Ok(StacksDBTx::new(&mut self.state_index, ())) + pub fn index_tx_begin<'a>(&'a mut self) -> StacksDBTx<'a> { + StacksDBTx::new(&mut self.state_index, ()) } - pub fn index_conn<'a>(&'a self) -> Result, Error> { - Ok(StacksDBConn::new(&self.state_index, ())) + pub fn index_conn<'a>(&'a self) -> StacksDBConn<'a> { + StacksDBConn::new(&self.state_index, ()) } /// Begin a transaction against the underlying DB @@ -1922,7 +1946,7 @@ impl StacksChainState { ) -> Value { let result = self.clarity_state.eval_read_only( parent_id_bhh, - &HeadersDBConn(self.state_index.sqlite_conn()), + &HeadersDBConn(StacksDBConn::new(&self.state_index, ())), burn_dbconn, contract, code, @@ -1941,7 +1965,7 @@ impl StacksChainState { ) -> Result { self.clarity_state.eval_read_only( parent_id_bhh, - &HeadersDBConn(self.state_index.sqlite_conn()), + &HeadersDBConn(StacksDBConn::new(&self.state_index, ())), burn_dbconn, contract, code, @@ -1960,7 +1984,7 @@ impl StacksChainState { function: &str, args: &[Value], ) -> Result { - let headers_db = HeadersDBConn(self.state_index.sqlite_conn()); + let headers_db = HeadersDBConn(StacksDBConn::new(&self.state_index, ())); let mut conn = self.clarity_state.read_only_connection_checked( parent_id_bhh, &headers_db, From 6e4996358332a3574669a97feab5bc4fdabdfa94 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:08:21 -0400 Subject: [PATCH 0391/1400] chore: API sync --- stackslib/src/chainstate/stacks/db/unconfirmed.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index f986503ec6..52afaceb66 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -232,7 +232,8 @@ impl UnconfirmedState { mblocks.len() ); - let headers_db = HeadersDBConn(chainstate.db()); + let stacks_dbconn = chainstate.index_conn(); + let headers_db = HeadersDBConn(stacks_dbconn); let burn_block_hash = headers_db .get_burn_header_hash_for_block(&self.confirmed_chain_tip) .expect("BUG: unable to get burn block hash based on chain tip"); @@ -260,14 +261,13 @@ impl UnconfirmedState { if mblocks.len() > 0 { let cur_cost = self.cost_so_far.clone(); - let headers_db_conn = HeadersDBConn(chainstate.db()); // NOTE: we *must* commit the clarity_tx now that it's begun. // Otherwise, microblock miners can leave the MARF in a partially-initialized state, // leading to a node crash. let mut clarity_tx = StacksChainState::chainstate_begin_unconfirmed( db_config, - &headers_db_conn, + &headers_db, &mut self.clarity_inst, burn_dbconn, &self.confirmed_chain_tip, From 99c5fc91af52c58e03de925fc7da93fe71618985 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:08:31 -0400 Subject: [PATCH 0392/1400] chore: API sync --- stackslib/src/chainstate/stacks/tests/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index ded4835c01..cda74cb46d 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -1005,7 +1005,7 @@ pub fn get_all_mining_rewards( block_height: u64, ) -> Vec> { let mut ret = vec![]; - let mut tx = chainstate.index_tx_begin().unwrap(); + let mut tx = chainstate.index_tx_begin(); for i in 0..block_height { let block_rewards = From a3ce626a981976af69fc53d4119f752950dfdf9f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:08:44 -0400 Subject: [PATCH 0393/1400] chore: close #4900 by relying on the MARF for querying the tenure-start block in the clarity VM --- stackslib/src/clarity_vm/database/mod.rs | 191 +++++++++++++++-------- 1 file changed, 128 insertions(+), 63 deletions(-) diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 8fffc76d7d..8bcbfa33ab 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -24,22 +24,89 @@ use crate::chainstate::burn::db::sortdb::{ get_ancestor_sort_id, get_ancestor_sort_id_tx, SortitionDB, SortitionDBConn, SortitionHandle, SortitionHandleConn, SortitionHandleTx, }; -use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::nakamoto::{nakamoto_keys, NakamotoChainState, StacksHandle}; use crate::chainstate::stacks::boot::PoxStartCycleInfo; use crate::chainstate::stacks::db::accounts::MinerReward; use crate::chainstate::stacks::db::{ - ChainstateTx, MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, + ChainstateTx, MinerPaymentSchedule, StacksChainState, StacksDBConn, StacksDBTx, + StacksHeaderInfo, }; use crate::chainstate::stacks::index::marf::{MarfConnection, MARF}; use crate::chainstate::stacks::index::{ClarityMarfTrieId, MarfTrieId, TrieMerkleProof}; use crate::chainstate::stacks::Error as ChainstateError; use crate::clarity_vm::special::handle_contract_call_special_cases; use crate::core::{StacksEpoch, StacksEpochId}; -use crate::util_lib::db::{DBConn, FromColumn, FromRow}; +use crate::util_lib::db::{DBConn, Error as DBError, FromColumn, FromRow}; pub mod marf; -pub struct HeadersDBConn<'a>(pub &'a Connection); +pub trait GetTenureStartId { + fn get_tenure_block_id( + &self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError>; + fn conn(&self) -> &Connection; +} + +impl GetTenureStartId for StacksDBConn<'_> { + fn get_tenure_block_id( + &self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed( + tip, + &nakamoto_keys::tenure_start_block_id(tenure_id_consensus_hash), + )? + .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten() + .map(|block_id| TenureBlockId::from(block_id))) + } + + fn conn(&self) -> &Connection { + self.sqlite() + } +} + +impl GetTenureStartId for StacksDBTx<'_> { + fn get_tenure_block_id( + &self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + Ok(self + .get_indexed_ref( + tip, + &nakamoto_keys::tenure_start_block_id(tenure_id_consensus_hash), + )? + .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) + .flatten() + .map(|block_id| TenureBlockId::from(block_id))) + } + + fn conn(&self) -> &Connection { + self.sqlite() + } +} + +impl GetTenureStartId for MARF { + fn get_tenure_block_id( + &self, + tip: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, + ) -> Result, DBError> { + let dbconn = StacksDBConn::new(self, ()); + dbconn.get_tenure_block_id(tip, tenure_id_consensus_hash) + } + + fn conn(&self) -> &Connection { + self.sqlite_conn() + } +} + +pub struct HeadersDBConn<'a>(pub StacksDBConn<'a>); impl<'a> HeadersDB for HeadersDBConn<'a> { fn get_stacks_block_header_hash_for_block( @@ -48,7 +115,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { epoch: &StacksEpochId, ) -> Option { get_stacks_header_column_from_table( - self.0, + self.0.conn(), id_bhh, "block_hash", &|r| { @@ -62,7 +129,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { &self, id_bhh: &StacksBlockId, ) -> Option { - get_stacks_header_column(self.0, id_bhh, "burn_header_hash", |r| { + get_stacks_header_column(self.0.conn(), id_bhh, "burn_header_hash", |r| { BurnchainHeaderHash::from_row(r).expect("FATAL: malformed burn_header_hash") }) } @@ -73,7 +140,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { epoch: &StacksEpochId, ) -> Option { get_stacks_header_column_from_table( - self.0, + self.0.conn(), id_bhh, "consensus_hash", &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), @@ -88,14 +155,14 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { ) -> Option { if let Some(epoch) = epoch_opt { get_stacks_header_column_from_table( - self.0, + self.0.conn(), id_bhh, "burn_header_timestamp", &|r| u64::from_row(r).expect("FATAL: malformed burn_header_timestamp"), epoch.uses_nakamoto_blocks(), ) } else { - get_stacks_header_column(self.0, id_bhh, "burn_header_timestamp", |r| { + get_stacks_header_column(self.0.conn(), id_bhh, "burn_header_timestamp", |r| { u64::from_row(r).expect("FATAL: malformed burn_header_timestamp") }) } @@ -103,7 +170,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { fn get_stacks_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { get_stacks_header_column_from_table( - self.0, + self.0.conn(), id_bhh, "timestamp", &|r| u64::from_row(r).expect("FATAL: malformed timestamp"), @@ -112,7 +179,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { - get_stacks_header_column(self.0, id_bhh, "burn_header_height", |r| { + get_stacks_header_column(self.0.conn(), id_bhh, "burn_header_height", |r| { u64::from_row(r) .expect("FATAL: malformed burn_header_height") .try_into() @@ -125,14 +192,14 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, Some(epoch)); + let tenure_id_bhh = get_first_block_in_tenure(&self.0, id_bhh, Some(epoch)); let (column_name, nakamoto) = if epoch.uses_nakamoto_blocks() { ("vrf_proof", true) } else { ("proof", false) }; get_stacks_header_column_from_table( - self.0, + self.0.conn(), &tenure_id_bhh.0, column_name, &|r| { @@ -148,8 +215,8 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, Some(epoch)); - get_miner_column(self.0, &tenure_id_bhh, "address", |r| { + let tenure_id_bhh = get_first_block_in_tenure(&self.0, id_bhh, Some(epoch)); + get_miner_column(self.0.conn(), &tenure_id_bhh, "address", |r| { let s: String = r.get_unwrap("address"); let addr = StacksAddress::from_string(&s).expect("FATAL: malformed address"); addr @@ -161,10 +228,13 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, Some(epoch)); - get_miner_column(self.0, &tenure_id_bhh, "burnchain_sortition_burn", |r| { - u64::from_row(r).expect("FATAL: malformed sortition burn") - }) + let tenure_id_bhh = get_first_block_in_tenure(&self.0, id_bhh, Some(epoch)); + get_miner_column( + self.0.conn(), + &tenure_id_bhh, + "burnchain_sortition_burn", + |r| u64::from_row(r).expect("FATAL: malformed sortition burn"), + ) .map(|x| x.into()) } @@ -173,10 +243,13 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, Some(epoch)); - get_miner_column(self.0, &tenure_id_bhh, "burnchain_commit_burn", |r| { - u64::from_row(r).expect("FATAL: malformed commit burn") - }) + let tenure_id_bhh = get_first_block_in_tenure(&self.0, id_bhh, Some(epoch)); + get_miner_column( + self.0.conn(), + &tenure_id_bhh, + "burnchain_commit_burn", + |r| u64::from_row(r).expect("FATAL: malformed commit burn"), + ) .map(|x| x.into()) } @@ -185,8 +258,8 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.0, id_bhh, Some(epoch)); - get_matured_reward(self.0, &tenure_id_bhh, epoch).map(|x| x.total().into()) + let tenure_id_bhh = get_first_block_in_tenure(&self.0, id_bhh, Some(epoch)); + get_matured_reward(&self.0, &tenure_id_bhh, epoch).map(|x| x.total().into()) } } @@ -274,7 +347,7 @@ impl<'a> HeadersDB for ChainstateTx<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, Some(epoch)); + let tenure_id_bhh = get_first_block_in_tenure(self.deref(), id_bhh, Some(epoch)); let (column_name, nakamoto) = if epoch.uses_nakamoto_blocks() { ("vrf_proof", true) } else { @@ -297,7 +370,7 @@ impl<'a> HeadersDB for ChainstateTx<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, Some(epoch)); + let tenure_id_bhh = get_first_block_in_tenure(self.deref(), id_bhh, Some(epoch)); get_miner_column(self.deref().deref(), &tenure_id_bhh, "address", |r| { let s: String = r.get_unwrap("address"); let addr = StacksAddress::from_string(&s).expect("FATAL: malformed address"); @@ -310,7 +383,7 @@ impl<'a> HeadersDB for ChainstateTx<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, Some(epoch)); + let tenure_id_bhh = get_first_block_in_tenure(self.deref(), id_bhh, Some(epoch)); get_miner_column( self.deref().deref(), &tenure_id_bhh, @@ -325,7 +398,7 @@ impl<'a> HeadersDB for ChainstateTx<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, Some(epoch)); + let tenure_id_bhh = get_first_block_in_tenure(self.deref(), id_bhh, Some(epoch)); get_miner_column( self.deref().deref(), &tenure_id_bhh, @@ -340,8 +413,8 @@ impl<'a> HeadersDB for ChainstateTx<'a> { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.deref().deref(), id_bhh, Some(epoch)); - get_matured_reward(self.deref().deref(), &tenure_id_bhh, epoch).map(|x| x.total().into()) + let tenure_id_bhh = get_first_block_in_tenure(self.deref(), id_bhh, Some(epoch)); + get_matured_reward(self.deref(), &tenure_id_bhh, epoch).map(|x| x.total().into()) } } @@ -429,7 +502,7 @@ impl HeadersDB for MARF { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, Some(epoch)); + let tenure_id_bhh = get_first_block_in_tenure(self, id_bhh, Some(epoch)); let (column_name, nakamoto) = if epoch.uses_nakamoto_blocks() { ("vrf_proof", true) } else { @@ -452,7 +525,7 @@ impl HeadersDB for MARF { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, Some(epoch)); + let tenure_id_bhh = get_first_block_in_tenure(self, id_bhh, Some(epoch)); get_miner_column(self.sqlite_conn(), &tenure_id_bhh, "address", |r| { let s: String = r.get_unwrap("address"); let addr = StacksAddress::from_string(&s).expect("FATAL: malformed address"); @@ -465,7 +538,7 @@ impl HeadersDB for MARF { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, Some(epoch)); + let tenure_id_bhh = get_first_block_in_tenure(self, id_bhh, Some(epoch)); get_miner_column( self.sqlite_conn(), &tenure_id_bhh, @@ -480,7 +553,7 @@ impl HeadersDB for MARF { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, Some(epoch)); + let tenure_id_bhh = get_first_block_in_tenure(self, id_bhh, Some(epoch)); get_miner_column( self.sqlite_conn(), &tenure_id_bhh, @@ -495,8 +568,8 @@ impl HeadersDB for MARF { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option { - let tenure_id_bhh = get_first_block_in_tenure(self.sqlite_conn(), id_bhh, Some(epoch)); - get_matured_reward(self.sqlite_conn(), &tenure_id_bhh, epoch).map(|x| x.total().into()) + let tenure_id_bhh = get_first_block_in_tenure(self, id_bhh, Some(epoch)); + get_matured_reward(self, &tenure_id_bhh, epoch).map(|x| x.total().into()) } } @@ -548,8 +621,8 @@ where } } -fn get_first_block_in_tenure( - conn: &DBConn, +fn get_first_block_in_tenure( + conn: >S, id_bhh: &StacksBlockId, epoch_opt: Option<&StacksEpochId>, ) -> TenureBlockId { @@ -559,7 +632,7 @@ fn get_first_block_in_tenure( return id_bhh.clone().into(); } else { get_stacks_header_column_from_table( - conn, + conn.conn(), id_bhh, "consensus_hash", &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), @@ -569,7 +642,7 @@ fn get_first_block_in_tenure( } None => { if let Some(_) = get_stacks_header_column_from_table( - conn, + conn.conn(), id_bhh, "consensus_hash", &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), @@ -578,7 +651,7 @@ fn get_first_block_in_tenure( return id_bhh.clone().into(); } else { get_stacks_header_column_from_table( - conn, + conn.conn(), id_bhh, "consensus_hash", &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), @@ -587,26 +660,17 @@ fn get_first_block_in_tenure( } } }; + + // SAFETY: if we reach this point, then `id_bhh` is a Nakamoto block and has a well-defined + // tenure-start block ID. let ch = consensus_hash .expect("Unexpected SQL failure querying block header table for 'consensus_hash'"); - let args: &[&dyn ToSql] = &[&ch]; - conn.query_row( - " - SELECT index_block_hash - FROM nakamoto_block_headers - WHERE consensus_hash = ? - ORDER BY block_height ASC - LIMIT 1;", - args, - |x| { - Ok(StacksBlockId::from_column(x, "index_block_hash") - .expect("Bad index_block_hash in database") - .into()) - }, - ) - .unwrap_or_else(|_| { - panic!("Unexpected SQL failure querying block header table for 'index_block_hash'") - }) + let tenure_start_id: TenureBlockId = conn + .get_tenure_block_id(id_bhh, &ch) + .expect("FATAL: failed to query DB for tenure-start block") + .expect("FATAL: no tenure start block for Nakamoto block"); + + tenure_start_id } fn get_miner_column( @@ -636,8 +700,8 @@ where }) } -fn get_matured_reward( - conn: &DBConn, +fn get_matured_reward( + conn: >S, child_id_bhh: &TenureBlockId, epoch: &StacksEpochId, ) -> Option { @@ -647,6 +711,7 @@ fn get_matured_reward( "block_headers" }; let parent_id_bhh = conn + .conn() .query_row( &format!("SELECT parent_block_id FROM {table_name} WHERE index_block_hash = ?"), [child_id_bhh.0].iter(), @@ -660,7 +725,7 @@ fn get_matured_reward( if let Some(parent_id_bhh) = parent_id_bhh { let parent_tenure_id = get_first_block_in_tenure(conn, &parent_id_bhh, None); - StacksChainState::get_matured_miner_payment(conn, &parent_tenure_id, child_id_bhh) + StacksChainState::get_matured_miner_payment(conn.conn(), &parent_tenure_id, child_id_bhh) .expect("Unexpected SQL failure querying miner reward table") } else { None From 8e2c81ccf4031cba837f350398c531292897ff17 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:09:17 -0400 Subject: [PATCH 0394/1400] chore: better debugging --- stackslib/src/core/mempool.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 9eb9b7cf80..6d675b2948 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1751,20 +1751,26 @@ impl MemPoolDB { ) { Ordering::Less => { debug!( - "Mempool: unexecutable: drop tx {}:{} ({})", + "Mempool: unexecutable: drop tx {} {}:{} ({}) expected {},{}", + &candidate.txid, candidate.origin_address, candidate.origin_nonce, - candidate.fee_rate.unwrap_or_default() + candidate.fee_rate.unwrap_or_default(), + expected_origin_nonce, + expected_sponsor_nonce ); // This transaction cannot execute in this pass, just drop it continue; } Ordering::Greater => { debug!( - "Mempool: nonces too high, cached for later {}:{} ({})", + "Mempool: nonces too high, cached for later {} {}:{} ({}) expected {},{}", + &candidate.txid, candidate.origin_address, candidate.origin_nonce, - candidate.fee_rate.unwrap_or_default() + candidate.fee_rate.unwrap_or_default(), + expected_origin_nonce, + expected_sponsor_nonce ); // This transaction could become runnable in this pass, save it for later candidate_cache.push(candidate); @@ -2064,9 +2070,7 @@ impl MemPoolDB { return Ok(true); } - let headers_conn = &chainstate - .index_conn() - .map_err(|_e| db_error::Other("ChainstateError".to_string()))?; + let headers_conn = &chainstate.index_conn(); let height_of_first_with_second_tip = headers_conn.get_ancestor_block_height(&second_block, &first_block)?; let height_of_second_with_first_tip = From 87a676b3a8ccfefe61243cb04773b1ed9885bdef Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:09:28 -0400 Subject: [PATCH 0395/1400] chore: bump peer epoch --- stackslib/src/core/mod.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index b4a3d834f5..eb8533da7a 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -63,7 +63,7 @@ pub const PEER_VERSION_EPOCH_3_0: u8 = 0x0b; // this should be updated to the latest network epoch version supported by // this node. this will be checked by the `validate_epochs()` method. -pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_2_5 as u32; +pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_3_0 as u32; // set the fourth byte of the peer version pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH; @@ -193,7 +193,8 @@ pub const POX_V3_MAINNET_EARLY_UNLOCK_HEIGHT: u32 = pub const POX_V3_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = (BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT as u32) + 1; -// The threshold % of weighted votes on a block to approve it in Nakamoto +// The threshold of weighted votes on a block to approve it in Nakamoto. +// This is out of 10, so 7 means "70%". pub const NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD: u64 = 7; /// Burn block height at which the ASTRules::PrecheckSize becomes the default behavior on mainnet From 922152aab17159940d984c24c339150c55a1abf5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:09:43 -0400 Subject: [PATCH 0396/1400] chore: API sync --- stackslib/src/net/api/get_tenures_fork_info.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs index 778e4cbf68..8bcf32ce1d 100644 --- a/stackslib/src/net/api/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -152,6 +152,7 @@ impl TenureForkingInfo { sn: &BlockSnapshot, sortdb: &SortitionDB, chainstate: &StacksChainState, + tip_block_id: &StacksBlockId, ) -> Result { let first_block_mined = if !sn.sortition { None @@ -174,7 +175,8 @@ impl TenureForkingInfo { .map(|header| header.index_block_hash()) } else { NakamotoChainState::get_nakamoto_tenure_start_block_header( - chainstate.db(), + &mut chainstate.index_conn(), + tip_block_id, &sn.consensus_hash, )? .map(|header| header.index_block_hash()) @@ -206,7 +208,7 @@ impl RPCRequestHandler for GetTenuresForkInfo { _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let result = node.with_node_state(|_network, sortdb, chainstate, _mempool, _rpc_args| { + let result = node.with_node_state(|network, sortdb, chainstate, _mempool, _rpc_args| { let start_from = self .stop_sortition .clone() @@ -224,7 +226,10 @@ impl RPCRequestHandler for GetTenuresForkInfo { let mut cursor = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &start_from)? .ok_or_else(|| ChainError::NoSuchBlockError)?; results.push(TenureForkingInfo::from_snapshot( - &cursor, sortdb, chainstate, + &cursor, + sortdb, + chainstate, + &network.stacks_tip.block_id(), )?); let handle = sortdb.index_handle(&cursor.sortition_id); let mut depth = 0; @@ -236,7 +241,10 @@ impl RPCRequestHandler for GetTenuresForkInfo { cursor = handle .get_last_snapshot_with_sortition(cursor.block_height.saturating_sub(1))?; results.push(TenureForkingInfo::from_snapshot( - &cursor, sortdb, chainstate, + &cursor, + sortdb, + chainstate, + &network.stacks_tip.block_id(), )?); } From 1a60607f799bee1bd63403eea6a77da62ec6a6b5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:09:56 -0400 Subject: [PATCH 0397/1400] chore: API sync --- stackslib/src/net/api/getinfo.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index 344f9f8286..237205f63a 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -143,9 +143,9 @@ impl RPCPeerInfoData { server_version, network_id: network.local_peer.network_id, parent_network_id: network.local_peer.parent_network_id, - stacks_tip_height: network.stacks_tip.2, - stacks_tip: network.stacks_tip.1.clone(), - stacks_tip_consensus_hash: network.stacks_tip.0.clone(), + stacks_tip_height: network.stacks_tip.height, + stacks_tip: network.stacks_tip.block_hash.clone(), + stacks_tip_consensus_hash: network.stacks_tip.consensus_hash.clone(), unanchored_tip: unconfirmed_tip, unanchored_seq: unconfirmed_seq, exit_at_block_height: exit_at_block_height, From 06dc85c9cca9615cf6351b9619ed48b183133551 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:10:32 -0400 Subject: [PATCH 0398/1400] chore: API sync --- stackslib/src/net/api/gettenureinfo.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/api/gettenureinfo.rs b/stackslib/src/net/api/gettenureinfo.rs index e03b6317f4..44d67a4679 100644 --- a/stackslib/src/net/api/gettenureinfo.rs +++ b/stackslib/src/net/api/gettenureinfo.rs @@ -22,7 +22,7 @@ use serde::de::Error as de_Error; use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; use stacks_common::types::net::PeerHost; -use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; use {serde, serde_json}; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; @@ -116,15 +116,18 @@ impl RPCRequestHandler for RPCNakamotoTenureInfoRequestHandler { ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { let info = node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { RPCGetTenureInfo { - consensus_hash: network.stacks_tip.0.clone(), + consensus_hash: network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: network.tenure_start_block_id.clone(), - parent_consensus_hash: network.parent_stacks_tip.0.clone(), + parent_consensus_hash: network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &network.parent_stacks_tip.0, - &network.parent_stacks_tip.1, + &network.parent_stacks_tip.consensus_hash, + &network.parent_stacks_tip.block_hash, ), - tip_block_id: StacksBlockId::new(&network.stacks_tip.0, &network.stacks_tip.1), - tip_height: network.stacks_tip.2, + tip_block_id: StacksBlockId::new( + &network.stacks_tip.consensus_hash, + &network.stacks_tip.block_hash, + ), + tip_height: network.stacks_tip.height, reward_cycle: network .burnchain .block_height_to_reward_cycle(network.burnchain_tip.block_height) From 5ebb5540600d06d029fc3e17328d336d5a800529 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:10:49 -0400 Subject: [PATCH 0399/1400] chore: API sync -- use nakamoto tip from p2p state machine to query chainstate within a particular fork, and deduce when we're ready to process unconfirmed tenures by the absence of confirmed tenure downloaders --- .../nakamoto/download_state_machine.rs | 208 +++++++++++------- 1 file changed, 129 insertions(+), 79 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index c1f41fcd8a..77e3b30ab2 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -118,10 +118,12 @@ pub struct NakamotoDownloadStateMachine { tenure_start_blocks: HashMap, /// comms to remote neighbors pub(super) neighbor_rpc: NeighborRPC, + /// Nakamoto chain tip + nakamoto_tip: StacksBlockId, } impl NakamotoDownloadStateMachine { - pub fn new(nakamoto_start_height: u64) -> Self { + pub fn new(nakamoto_start_height: u64, nakamoto_tip: StacksBlockId) -> Self { Self { nakamoto_start_height, reward_cycle: 0, // will be calculated at runtime @@ -137,6 +139,7 @@ impl NakamotoDownloadStateMachine { unconfirmed_tenure_downloads: HashMap::new(), tenure_start_blocks: HashMap::new(), neighbor_rpc: NeighborRPC::new(), + nakamoto_tip, } } @@ -309,6 +312,7 @@ impl NakamotoDownloadStateMachine { nakamoto_start: u64, wanted_tenures: &mut [WantedTenure], chainstate: &StacksChainState, + stacks_tip: &StacksBlockId, ) -> Result<(), NetError> { for wt in wanted_tenures.iter_mut() { test_debug!("update_processed_wanted_tenures: consider {:?}", &wt); @@ -321,7 +325,8 @@ impl NakamotoDownloadStateMachine { continue; } if NakamotoChainState::has_processed_nakamoto_tenure( - chainstate.db(), + &mut chainstate.index_conn(), + stacks_tip, &wt.tenure_id_consensus_hash, )? { test_debug!("Tenure {} is now processed", &wt.tenure_id_consensus_hash); @@ -347,6 +352,7 @@ impl NakamotoDownloadStateMachine { self.nakamoto_start_height, prev_wanted_tenures, chainstate, + &self.nakamoto_tip, )?; } test_debug!("update_processed_wanted_tenures: update wanted_tenures"); @@ -354,6 +360,7 @@ impl NakamotoDownloadStateMachine { self.nakamoto_start_height, &mut self.wanted_tenures, chainstate, + &self.nakamoto_tip, ) } @@ -369,17 +376,33 @@ impl NakamotoDownloadStateMachine { /// Returns Err(..) on DB error. pub(crate) fn load_tenure_start_blocks( wanted_tenures: &[WantedTenure], - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, + tip_block_id: &StacksBlockId, tenure_start_blocks: &mut HashMap, ) -> Result<(), NetError> { for wt in wanted_tenures { - let Some(tenure_start_block) = chainstate - .nakamoto_blocks_db() - .get_nakamoto_tenure_start_block(&wt.tenure_id_consensus_hash)? + let Some(tenure_start_block_header) = + NakamotoChainState::get_nakamoto_tenure_start_block_header( + &mut chainstate.index_conn(), + tip_block_id, + &wt.tenure_id_consensus_hash, + )? else { test_debug!("No tenure-start block for {}", &wt.tenure_id_consensus_hash); continue; }; + let Some((tenure_start_block, _)) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&tenure_start_block_header.index_block_hash())? + else { + let msg = format!( + "Have header but no block for tenure-start of {} ({})", + &wt.tenure_id_consensus_hash, + &tenure_start_block_header.index_block_hash() + ); + error!("{}", &msg); + return Err(NetError::ChainstateError(msg)); + }; tenure_start_blocks.insert(tenure_start_block.block_id(), tenure_start_block); } Ok(()) @@ -388,11 +411,12 @@ impl NakamotoDownloadStateMachine { /// Update our local tenure start block data fn update_tenure_start_blocks( &mut self, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ) -> Result<(), NetError> { Self::load_tenure_start_blocks( &self.wanted_tenures, chainstate, + &self.nakamoto_tip, &mut self.tenure_start_blocks, ) } @@ -655,7 +679,9 @@ impl NakamotoDownloadStateMachine { // the prev_wanted_rc and at least one in the cur_wanted_rc let mut has_prev_rc_block = false; let mut has_cur_rc_block = false; + let mut available_considered = 0; for (_naddr, available) in tenure_block_ids.iter() { + available_considered += available.len(); for (_ch, tenure_info) in available.iter() { if tenure_info.start_reward_cycle == prev_wanted_rc || tenure_info.end_reward_cycle == prev_wanted_rc @@ -670,8 +696,9 @@ impl NakamotoDownloadStateMachine { } } - if (prev_wanted_rc >= first_nakamoto_rc && !has_prev_rc_block) - || (cur_wanted_rc >= first_nakamoto_rc && !has_cur_rc_block) + if available_considered > 0 + && ((prev_wanted_rc >= first_nakamoto_rc && !has_prev_rc_block) + || (cur_wanted_rc >= first_nakamoto_rc && !has_cur_rc_block)) { debug!( "tenure_block_ids stale: missing representation in reward cycles {} ({}) and {} ({})", @@ -732,7 +759,7 @@ impl NakamotoDownloadStateMachine { &mut self, network: &PeerNetwork, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ) -> Result<(), NetError> { let sort_tip = &network.burnchain_tip; let Some(invs) = network.inv_state_nakamoto.as_ref() else { @@ -983,9 +1010,13 @@ impl NakamotoDownloadStateMachine { fn count_available_tenure_neighbors( available: &HashMap>, ) -> usize { - available - .iter() - .fold(0, |count, (_ch, naddrs)| count.saturating_add(naddrs.len())) + let mut neighbors = HashSet::new(); + for (_, naddrs) in available.iter() { + for naddr in naddrs.iter() { + neighbors.insert(naddr); + } + } + neighbors.len() } /// This function examines the contents of `self.wanted_tenures` and @@ -1184,7 +1215,6 @@ impl NakamotoDownloadStateMachine { pox_constants: &PoxConstants, first_burn_height: u64, inventory_iter: impl Iterator, - blocks_db: NakamotoStagingBlocksConnRef, ) -> bool { if sort_tip.block_height < burnchain_height { test_debug!( @@ -1237,22 +1267,6 @@ impl NakamotoDownloadStateMachine { } } - // there are still tenures that have to be processed - if blocks_db - .has_any_unprocessed_nakamoto_block() - .map_err(|e| { - warn!( - "Failed to determine if there are unprocessed Nakamoto blocks: {:?}", - &e - ); - e - }) - .unwrap_or(true) - { - test_debug!("Still have stored but unprocessed Nakamoto blocks"); - return false; - } - true } @@ -1374,24 +1388,30 @@ impl NakamotoDownloadStateMachine { HashMap>, HashMap, ) { + test_debug!("Run unconfirmed tenure downloaders from highest-complete tenure {:?} to unconfirmed tenure {:?}", highest_complete_tenure, unconfirmed_tenure); + let addrs: Vec<_> = downloaders.keys().map(|addr| addr.clone()).collect(); let mut finished = vec![]; let mut unconfirmed_blocks = HashMap::new(); let mut highest_completed_tenure_downloaders = HashMap::new(); - // find the highest-processed block, and update all ongoing state-machines. - // Then, as faster state-machines linked to more up-to-date peers download newer blocks, - // other state-machines will automatically terminate once they reach the highest block this - // peer has now processed. - let highest_processed_block_id = - StacksBlockId::new(&network.stacks_tip.0, &network.stacks_tip.1); - let highest_processed_block_height = network.stacks_tip.2; - - for (_, downloader) in downloaders.iter_mut() { - downloader.set_highest_processed_block( - highest_processed_block_id.clone(), - highest_processed_block_height, + if network.stacks_tip.is_nakamoto { + // find the highest-processed block, and update all ongoing state-machines. + // Then, as faster state-machines linked to more up-to-date peers download newer blocks, + // other state-machines will automatically terminate once they reach the highest block this + // peer has now processed. + let highest_processed_block_id = StacksBlockId::new( + &network.stacks_tip.consensus_hash, + &network.stacks_tip.block_hash, ); + let highest_processed_block_height = network.stacks_tip.height; + + for (_, downloader) in downloaders.iter_mut() { + downloader.set_highest_processed_block( + highest_processed_block_id.clone(), + highest_processed_block_height, + ); + } } // send requests @@ -1438,42 +1458,62 @@ impl NakamotoDownloadStateMachine { }; test_debug!("Got response from {}", &naddr); - let Ok(blocks_opt) = downloader.handle_next_download_response( + let blocks_opt = match downloader.handle_next_download_response( response, sortdb, sort_tip, chainstate, &network.current_reward_sets, - ) else { - neighbor_rpc.add_dead(network, &naddr); - continue; + ) { + Ok(blocks_opt) => blocks_opt, + Err(NetError::StaleView) => { + continue; + } + Err(e) => { + debug!("Failed to handle next download response from unconfirmed downloader for {:?} in state {:?}: {:?}", &naddr, &downloader.state, &e); + neighbor_rpc.add_dead(network, &naddr); + continue; + } }; let Some(blocks) = blocks_opt else { continue; }; - if let Some(highest_complete_tenure_downloader) = downloader - .make_highest_complete_tenure_downloader( - highest_complete_tenure, - unconfirmed_tenure, - ) - .map_err(|e| { - warn!( - "Failed to make highest complete tenure downloader for {:?}: {:?}", - &downloader.unconfirmed_tenure_id(), - &e - ); - e - }) - .ok() + if downloader + .can_make_highest_complete_tenure_downloader(sortdb) + .unwrap_or(false) { - // don't start this unless the downloader is actually done (this should always be - // the case, but don't tempt fate with an assert!) - if downloader.is_done() { - highest_completed_tenure_downloaders - .insert(naddr.clone(), highest_complete_tenure_downloader); + if let Some(highest_complete_tenure_downloader) = downloader + .make_highest_complete_tenure_downloader( + highest_complete_tenure, + unconfirmed_tenure, + ) + .map_err(|e| { + warn!( + "Failed to make highest complete tenure downloader for {:?}: {:?}", + &downloader.unconfirmed_tenure_id(), + &e + ); + e + }) + .ok() + { + // don't start this unless the downloader is actually done (this should always be + // the case, but don't tempt fate with an assert!) + if downloader.is_done() { + test_debug!( + "Will fetch the highest complete tenure from {:?}", + &downloader.unconfirmed_tenure_id() + ); + highest_completed_tenure_downloaders + .insert(naddr.clone(), highest_complete_tenure_downloader); + } } + } else { + test_debug!( + "Will not make highest-complete tenure downloader (not a Nakamoto tenure)" + ); } unconfirmed_blocks.insert(naddr.clone(), blocks); @@ -1708,9 +1748,10 @@ impl NakamotoDownloadStateMachine { return HashMap::new(); }; test_debug!( - "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}", + "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}, state={}", burnchain_height, - network.burnchain_tip.block_height + network.burnchain_tip.block_height, + &self.state ); self.update_available_tenures( &invs.inventories, @@ -1735,10 +1776,6 @@ impl NakamotoDownloadStateMachine { return HashMap::new(); }; - debug!( - "tenure_downloads.is_empty: {}", - self.tenure_downloads.is_empty() - ); if self.tenure_downloads.is_empty() && Self::need_unconfirmed_tenures( self.nakamoto_start_height, @@ -1751,7 +1788,6 @@ impl NakamotoDownloadStateMachine { &sortdb.pox_constants, sortdb.first_block_height, invs.inventories.values(), - chainstate.nakamoto_blocks_db(), ) { debug!( @@ -1771,14 +1807,20 @@ impl NakamotoDownloadStateMachine { return new_blocks; } NakamotoDownloadState::Unconfirmed => { - let highest_processed_block_id = - StacksBlockId::new(&network.stacks_tip.0, &network.stacks_tip.1); + let highest_processed_block_id = StacksBlockId::new( + &network.stacks_tip.consensus_hash, + &network.stacks_tip.block_hash, + ); let new_blocks = self.download_unconfirmed_tenures( network, sortdb, chainstate, - Some(highest_processed_block_id), + if network.stacks_tip.is_nakamoto { + Some(highest_processed_block_id) + } else { + None + }, ); // keep borrow-checker happy by instantiang this ref again, now that `network` is @@ -1789,8 +1831,15 @@ impl NakamotoDownloadStateMachine { return HashMap::new(); }; - if self.tenure_downloads.is_empty() - && self.unconfirmed_tenure_downloads.is_empty() + if !self.tenure_downloads.is_empty() { + // need to go get this scheduled tenure + debug!( + "Transition from {} to {} -- need confirmed tenure still", + &self.state, + NakamotoDownloadState::Confirmed + ); + self.state = NakamotoDownloadState::Confirmed; + } else if self.unconfirmed_tenure_downloads.is_empty() && self.unconfirmed_tenure_download_schedule.is_empty() { if Self::need_unconfirmed_tenures( @@ -1804,7 +1853,6 @@ impl NakamotoDownloadStateMachine { &sortdb.pox_constants, sortdb.first_block_height, invs.inventories.values(), - chainstate.nakamoto_blocks_db(), ) { // do this again self.unconfirmed_tenure_download_schedule = @@ -1840,9 +1888,11 @@ impl NakamotoDownloadStateMachine { burnchain_height: u64, network: &mut PeerNetwork, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ibd: bool, ) -> Result>, NetError> { + self.nakamoto_tip = network.stacks_tip.block_id(); + test_debug!("Downloader: Nakamoto tip is {:?}", &self.nakamoto_tip); self.update_wanted_tenures(&network, sortdb, chainstate)?; self.update_processed_tenures(chainstate)?; let new_blocks = self.run_downloads(burnchain_height, network, sortdb, chainstate, ibd); From f919830724cc572ad37ce464c6ec324a5c4cfe2e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 18:11:39 -0400 Subject: [PATCH 0400/1400] chore: API sync --- stackslib/src/net/download/nakamoto/mod.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index b856afab44..dd440ac110 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -183,7 +183,8 @@ impl PeerNetwork { return; } let epoch = self.get_epoch_by_epoch_id(StacksEpochId::Epoch30); - let downloader = NakamotoDownloadStateMachine::new(epoch.start_height); + let downloader = + NakamotoDownloadStateMachine::new(epoch.start_height, self.stacks_tip.block_id()); self.block_downloader_nakamoto = Some(downloader); } @@ -192,7 +193,7 @@ impl PeerNetwork { &mut self, burnchain_height: u64, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ibd: bool, ) -> Result>, NetError> { if self.block_downloader_nakamoto.is_none() { @@ -214,7 +215,7 @@ impl PeerNetwork { &mut self, burnchain_height: u64, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ibd: bool, ) -> Result>, NetError> { if self.connection_opts.disable_block_download { From 8fca2585c0b84e13ae58fe139bc59bdec9c6fbb5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:46:16 -0400 Subject: [PATCH 0401/1400] fix: dead code --- stackslib/src/net/download/nakamoto/tenure.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 53563ab334..5e2e06c41a 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -177,9 +177,6 @@ impl TenureStartEnd { let bit = u16::try_from(i).expect("FATAL: more sortitions than u16::MAX"); if !invbits.get(bit).unwrap_or(false) { test_debug!("i={} bit not set", i); - /* - i += 1; - */ continue; } From 746ee8a44b3c164ec19e56c25f16d35a374f2e69 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:46:41 -0400 Subject: [PATCH 0402/1400] chore: log decode failures (indicates 404) --- .../download/nakamoto/tenure_downloader.rs | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 9f261929b5..c6e5ee0703 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -182,9 +182,11 @@ impl NakamotoTenureDownloader { end_signer_keys: RewardSet, ) -> Self { test_debug!( - "Instantiate downloader to {} for tenure {}", + "Instantiate downloader to {} for tenure {}: {}-{}", &naddr, - &tenure_id_consensus_hash + &tenure_id_consensus_hash, + &tenure_start_block_id, + &tenure_end_block_id, ); Self { tenure_id_consensus_hash, @@ -656,7 +658,10 @@ impl NakamotoTenureDownloader { "Got download response for tenure-start block {}", &_block_id ); - let block = response.decode_nakamoto_block()?; + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; self.try_accept_tenure_start_block(block)?; Ok(None) } @@ -666,7 +671,10 @@ impl NakamotoTenureDownloader { } NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { test_debug!("Got download response to tenure-end block {}", &_block_id); - let block = response.decode_nakamoto_block()?; + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; self.try_accept_tenure_end_block(&block)?; Ok(None) } @@ -675,7 +683,10 @@ impl NakamotoTenureDownloader { "Got download response for tenure blocks ending at {}", &_end_block_id ); - let blocks = response.decode_nakamoto_tenure()?; + let blocks = response.decode_nakamoto_tenure().map_err(|e| { + warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); + e + })?; self.try_accept_tenure_blocks(blocks) } NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), From 9df0155b4270431949a23eeb7eacb2f7e67207f1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:49:32 -0400 Subject: [PATCH 0403/1400] fix: a downloader set is empty if there are actually no downloaders, regardless of whether or not they're inflight --- .../nakamoto/tenure_downloader_set.rs | 23 ++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index f275e83d29..61c5f6ee77 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -193,9 +193,20 @@ impl NakamotoTenureDownloaderSet { .is_some() } - /// Determine if this downloader set is empty -- i.e. there's no in-flight requests. + /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. pub fn is_empty(&self) -> bool { - self.inflight() == 0 + let mut cnt = 0; + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.is_done() { + continue; + } + test_debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); + cnt += 1; + } + cnt == 0 } /// Try to resume processing a download state machine with a given peer. Since a peer is @@ -609,7 +620,13 @@ impl NakamotoTenureDownloaderSet { }; test_debug!("Got response from {}", &naddr); - let Ok(blocks_opt) = downloader.handle_next_download_response(response) else { + let Ok(blocks_opt) = downloader + .handle_next_download_response(response) + .map_err(|e| { + debug!("Failed to handle response from {}: {:?}", &naddr, &e); + e + }) + else { test_debug!("Failed to handle download response from {}", &naddr); neighbor_rpc.add_dead(network, &naddr); continue; From aba1587cb1d019ffa44316a308581b494fb89002 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:50:01 -0400 Subject: [PATCH 0404/1400] chore: log more failure modes, and validate the unconfirmed tenure against the remote node's reported sortitions from /v3/tenure/info (as opposed to our local tip sortitions, which can be _ahead_) --- .../nakamoto/tenure_downloader_unconfirmed.rs | 164 ++++++++++++++++-- 1 file changed, 149 insertions(+), 15 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index 2fd9599c5e..c01c10c00e 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -28,7 +28,7 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; @@ -78,8 +78,7 @@ pub enum NakamotoUnconfirmedDownloadState { /// The inner value is tenure-start block ID of the ongoing tenure. GetTenureStartBlock(StacksBlockId), /// Receiving unconfirmed tenure blocks. - /// The inner value is the _last_ block on the ongoing tenure. The ongoing tenure is fetched - /// from highest block to lowest block. + /// The inner value is the block ID of the next block to fetch. GetUnconfirmedTenureBlocks(StacksBlockId), /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). @@ -195,22 +194,45 @@ impl NakamotoUnconfirmedTenureDownloader { return Err(NetError::InvalidState); } + test_debug!("Got tenure info {:?}", remote_tenure_tip); + test_debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); + // authenticate consensus hashes against canonical chain history let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( sortdb.conn(), &remote_tenure_tip.consensus_hash, )? - .ok_or(NetError::DBError(DBError::NotFoundError))?; + .ok_or_else(|| { + debug!( + "No snapshot for tenure {}", + &remote_tenure_tip.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( sortdb.conn(), &remote_tenure_tip.parent_consensus_hash, )? - .ok_or(NetError::DBError(DBError::NotFoundError))?; + .ok_or_else(|| { + debug!( + "No snapshot for parent tenure {}", + &remote_tenure_tip.parent_consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; let ih = sortdb.index_handle(&local_sort_tip.sortition_id); let ancestor_local_tenure_sn = ih .get_block_snapshot_by_height(local_tenure_sn.block_height)? - .ok_or(NetError::DBError(DBError::NotFoundError))?; + .ok_or_else(|| { + debug!( + "No tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { // .consensus_hash is not on the canonical fork @@ -221,7 +243,15 @@ impl NakamotoUnconfirmedTenureDownloader { } let ancestor_parent_local_tenure_sn = ih .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? - .ok_or(NetError::DBError(DBError::NotFoundError.into()))?; + .ok_or_else(|| { + debug!( + "No parent tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError.into()) + })?; if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { // .parent_consensus_hash is not on the canonical fork @@ -245,18 +275,21 @@ impl NakamotoUnconfirmedTenureDownloader { if local_tenure_sn.winning_stacks_block_hash.0 != remote_tenure_tip.parent_tenure_start_block_id.0 { - warn!("Ongoing tenure does not commit to highest complete tenure's start block"; - "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.tenure_start_block_id, + debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; + "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); - return Err(NetError::InvalidMessage); + return Err(NetError::StaleView); } if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - // we've synchronize this tenure before, so don't get anymore blocks before it. + // we've synchronized this tenure before, so don't get anymore blocks before it. let highest_processed_block = chainstate .nakamoto_blocks_db() .get_nakamoto_block(highest_processed_block_id)? - .ok_or(NetError::DBError(DBError::NotFoundError))? + .ok_or_else(|| { + debug!("No such Nakamoto block {}", &highest_processed_block_id); + NetError::DBError(DBError::NotFoundError) + })? .0; let highest_processed_block_height = highest_processed_block.header.chain_length; @@ -323,13 +356,19 @@ impl NakamotoUnconfirmedTenureDownloader { if chainstate .nakamoto_blocks_db() - .has_nakamoto_block(&remote_tenure_tip.tenure_start_block_id.clone())? + .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? { // proceed to get unconfirmed blocks. We already have the tenure-start block. let unconfirmed_tenure_start_block = chainstate .nakamoto_blocks_db() .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or(NetError::DBError(DBError::NotFoundError))? + .ok_or_else(|| { + debug!( + "No such tenure-start Nakamoto block {}", + &remote_tenure_tip.tenure_start_block_id + ); + NetError::DBError(DBError::NotFoundError) + })? .0; self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( @@ -369,10 +408,12 @@ impl NakamotoUnconfirmedTenureDownloader { return Err(NetError::InvalidState); }; let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); return Err(NetError::InvalidState); }; let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); return Err(NetError::InvalidState); }; @@ -433,15 +474,18 @@ impl NakamotoUnconfirmedTenureDownloader { }; let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); return Err(NetError::InvalidState); }; let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); return Err(NetError::InvalidState); }; if tenure_blocks.is_empty() { // nothing to do + debug!("No tenure blocks obtained"); return Ok(None); } @@ -503,6 +547,8 @@ impl NakamotoUnconfirmedTenureDownloader { break; } + test_debug!("Got unconfirmed tenure block {}", &block.header.block_id()); + // NOTE: this field can get updated by the downloader while this state-machine is in // this state. if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { @@ -550,6 +596,8 @@ impl NakamotoUnconfirmedTenureDownloader { self.state = NakamotoUnconfirmedDownloadState::Done; let highest_processed_block_height = *self.highest_processed_block_height.as_ref().unwrap_or(&0); + + test_debug!("Finished receiving unconfirmed tenure"); return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { blocks .into_iter() @@ -573,6 +621,10 @@ impl NakamotoUnconfirmedTenureDownloader { }; let next_block_id = earliest_block.header.parent_block_id.clone(); + test_debug!( + "Will resume fetching unconfirmed tenure blocks starting at {}", + &next_block_id + ); self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); Ok(None) } @@ -605,6 +657,56 @@ impl NakamotoUnconfirmedTenureDownloader { )?) } + /// Determine if we can produce a highest-complete tenure request. + /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure + pub fn can_make_highest_complete_tenure_downloader( + &self, + sortdb: &SortitionDB, + ) -> Result { + let Some(tenure_tip) = &self.tenure_tip else { + return Ok(false); + }; + + let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &tenure_tip.parent_consensus_hash, + )? + else { + return Ok(false); + }; + + let Some(tip_sn) = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? + else { + return Ok(false); + }; + + let Some(parent_tenure) = + SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? + else { + return Ok(false); + }; + + let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? + else { + return Ok(false); + }; + + if parent_tenure.epoch_id < StacksEpochId::Epoch30 + || tip_tenure.epoch_id < StacksEpochId::Epoch30 + { + debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; + "start_tenure" => %tenure_tip.parent_consensus_hash, + "end_tenure" => %tenure_tip.consensus_hash, + "start_tenure_epoch" => %parent_tenure.epoch_id, + "end_tenure_epoch" => %tip_tenure.epoch_id + ); + return Ok(false); + } + + Ok(true) + } + /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get /// its tenure-start block. @@ -619,6 +721,31 @@ impl NakamotoUnconfirmedTenureDownloader { if self.state != NakamotoUnconfirmedDownloadState::Done { return Err(NetError::InvalidState); } + let Some(tenure_tip) = &self.tenure_tip else { + return Err(NetError::InvalidState); + }; + let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + + test_debug!( + "Create downloader for highest complete tenure {} known by {}", + &tenure_tip.parent_consensus_hash, + &self.naddr, + ); + let ntd = NakamotoTenureDownloader::new( + tenure_tip.parent_consensus_hash.clone(), + tenure_tip.parent_tenure_start_block_id.clone(), + tenure_tip.tenure_start_block_id.clone(), + self.naddr.clone(), + confirmed_signer_keys.clone(), + unconfirmed_signer_keys.clone(), + ); + + /* let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() else { return Err(NetError::InvalidState); @@ -643,6 +770,7 @@ impl NakamotoUnconfirmedTenureDownloader { unconfirmed_signer_keys.clone(), ) .with_tenure_end_block(unconfirmed_tenure_start_block.clone()); + */ Ok(ntd) } @@ -753,7 +881,13 @@ impl NakamotoUnconfirmedTenureDownloader { NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { test_debug!("Got unconfirmed tenure blocks response"); let blocks = response.decode_nakamoto_tenure()?; - self.try_accept_unconfirmed_tenure_blocks(blocks) + let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; + if accepted_opt.is_some() { + test_debug!("Got complete unconfirmed tenure blocks"); + } else { + test_debug!("Got partial unconfirmed tenure blocks"); + } + Ok(accepted_opt) } NakamotoUnconfirmedDownloadState::Done => { return Err(NetError::InvalidState); From cec61ea33ebb81670051ffd2ea384a6ea43b3f77 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:50:55 -0400 Subject: [PATCH 0405/1400] chore: trace --> test_debug --- stackslib/src/net/httpcore.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 78a2036ae3..0bb725bcc9 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -797,7 +797,7 @@ impl StacksHttpRecvStream { let mut decoded_buf = vec![0u8; CHUNK_BUF_LEN]; let (read_pass, consumed_pass) = match self.state.do_read(fd, &mut decoded_buf) { Ok((0, num_consumed)) => { - trace!( + test_debug!( "consume_data blocked on 0 decoded bytes ({} consumed)", num_consumed ); @@ -808,7 +808,7 @@ impl StacksHttpRecvStream { Err(e) => { if e.kind() == io::ErrorKind::WouldBlock || e.kind() == io::ErrorKind::TimedOut { - trace!("consume_data blocked on read error"); + test_debug!("consume_data blocked on read error"); blocked = true; (0, 0) } else { @@ -1466,7 +1466,7 @@ impl ProtocolFamily for StacksHttp { } None => { // need more data - trace!( + test_debug!( "did not read http response payload, but buffered {}", num_read ); From 6206ce91ce42f7d64b6052f35fe617b399fbf5b0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:51:25 -0400 Subject: [PATCH 0406/1400] chore: API sync with new MARF'ed chainstate --- stackslib/src/net/inv/nakamoto.rs | 45 ++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index ceee9c0b12..ea0e89fb21 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -17,11 +17,12 @@ use std::collections::{BTreeMap, HashMap}; use stacks_common::bitvec::BitVec; +use stacks_common::types::chainstate::StacksBlockId; use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_secs; use crate::burnchains::PoxConstants; -use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; @@ -76,18 +77,18 @@ impl InvTenureInfo { /// (i.e. it was a BlockFound tenure, not an Extension tenure) pub fn load( chainstate: &StacksChainState, - consensus_hash: &ConsensusHash, + tip_block_id: &StacksBlockId, + tenure_id_consensus_hash: &ConsensusHash, ) -> Result, NetError> { - Ok( - NakamotoChainState::get_highest_nakamoto_tenure_change_by_tenure_id( - chainstate.db(), - consensus_hash, - )? - .map(|tenure| Self { - tenure_id_consensus_hash: tenure.tenure_id_consensus_hash, - parent_tenure_id_consensus_hash: tenure.prev_tenure_id_consensus_hash, - }), - ) + Ok(NakamotoChainState::get_block_found_tenure( + &mut chainstate.index_conn(), + tip_block_id, + tenure_id_consensus_hash, + )? + .map(|tenure| Self { + tenure_id_consensus_hash: tenure.tenure_id_consensus_hash, + parent_tenure_id_consensus_hash: tenure.prev_tenure_id_consensus_hash, + })) } } @@ -114,13 +115,15 @@ impl InvGenerator { fn get_processed_tenure( &mut self, chainstate: &StacksChainState, + tip_block_id: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result, NetError> { if let Some(info_opt) = self.processed_tenures.get(&tenure_id_consensus_hash) { return Ok((*info_opt).clone()); }; // not cached so go load it - let loaded_info_opt = InvTenureInfo::load(chainstate, &tenure_id_consensus_hash)?; + let loaded_info_opt = + InvTenureInfo::load(chainstate, tip_block_id, &tenure_id_consensus_hash)?; self.processed_tenures .insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); Ok(loaded_info_opt) @@ -148,6 +151,11 @@ impl InvGenerator { reward_cycle: u64, ) -> Result, NetError> { let ih = sortdb.index_handle(&tip.sortition_id); + let Some(nakamoto_tip) = ih.get_nakamoto_tip_block_id()? else { + // no Nakamoto tip? no inventory + return Ok(vec![]); + }; + let reward_cycle_end_height = sortdb .pox_constants .reward_cycle_to_block_height(sortdb.first_block_height, reward_cycle + 1) @@ -163,7 +171,8 @@ impl InvGenerator { let mut cur_height = reward_cycle_end_tip.block_height; let mut cur_consensus_hash = reward_cycle_end_tip.consensus_hash; - let mut cur_tenure_opt = self.get_processed_tenure(chainstate, &cur_consensus_hash)?; + let mut cur_tenure_opt = + self.get_processed_tenure(chainstate, &nakamoto_tip, &cur_consensus_hash)?; // loop variables and invariants: // @@ -219,6 +228,7 @@ impl InvGenerator { tenure_status.push(true); cur_tenure_opt = self.get_processed_tenure( chainstate, + &nakamoto_tip, &cur_tenure_info.parent_tenure_id_consensus_hash, )?; } else { @@ -229,8 +239,11 @@ impl InvGenerator { // no active tenure during this sortition. Check the parent sortition to see if a // tenure begain there. tenure_status.push(false); - cur_tenure_opt = - self.get_processed_tenure(chainstate, &parent_sortition_consensus_hash)?; + cur_tenure_opt = self.get_processed_tenure( + chainstate, + &nakamoto_tip, + &parent_sortition_consensus_hash, + )?; } // next sortition From 393f2f038e70a253df160b225c9ef8f7325688b4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:51:42 -0400 Subject: [PATCH 0407/1400] chore: store and verify storage of malleablized blocks --- stackslib/src/net/mod.rs | 42 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 94c061dc5d..626701f8b7 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1450,6 +1450,8 @@ pub const DENY_MIN_BAN_DURATION: u64 = 2; /// Result of doing network work #[derive(Clone)] pub struct NetworkResult { + /// Stacks chain tip when we began this pass + pub stacks_tip: StacksBlockId, /// PoX ID as it was when we begin downloading blocks (set if we have downloaded new blocks) pub download_pox_id: Option, /// Network messages we received but did not handle @@ -1500,6 +1502,7 @@ pub struct NetworkResult { impl NetworkResult { pub fn new( + stacks_tip: StacksBlockId, num_state_machine_passes: u64, num_inv_sync_passes: u64, num_download_passes: u64, @@ -1509,6 +1512,7 @@ impl NetworkResult { stacker_db_configs: HashMap, ) -> NetworkResult { NetworkResult { + stacks_tip, unhandled_messages: HashMap::new(), download_pox_id: None, blocks: vec![], @@ -2241,6 +2245,8 @@ pub mod test { (), BitcoinIndexer, >, + /// list of malleablized blocks produced when mining. + pub malleablized_blocks: Vec, } impl<'a> TestPeer<'a> { @@ -2654,6 +2660,7 @@ pub mod test { chainstate_path: chainstate_path, coord: coord, indexer: Some(indexer), + malleablized_blocks: vec![], } } @@ -3437,6 +3444,10 @@ pub mod test { &mut self.stacks_node.as_mut().unwrap().chainstate } + pub fn chainstate_ref(&self) -> &StacksChainState { + &self.stacks_node.as_ref().unwrap().chainstate + } + pub fn sortdb(&mut self) -> &mut SortitionDB { self.sortdb.as_mut().unwrap() } @@ -4090,6 +4101,37 @@ pub mod test { self.sortdb = Some(sortdb); self.stacks_node = Some(node); } + + /// Verify that all malleablized blocks are duly processed + pub fn check_malleablized_blocks( + &self, + all_blocks: Vec, + expected_siblings: usize, + ) { + for block in all_blocks.iter() { + let sighash = block.header.signer_signature_hash(); + let siblings = self + .chainstate_ref() + .nakamoto_blocks_db() + .get_blocks_at_height(block.header.chain_length); + + debug!("Expect {} siblings: {:?}", expected_siblings, &siblings); + assert_eq!(siblings.len(), expected_siblings); + + for sibling in siblings { + let (processed, orphaned) = NakamotoChainState::get_nakamoto_block_status( + self.chainstate_ref().nakamoto_blocks_db(), + self.chainstate_ref().db(), + &sibling.header.consensus_hash, + &sibling.header.block_hash(), + ) + .unwrap() + .unwrap(); + assert!(processed); + assert!(!orphaned); + } + } + } } pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { From 6c34a0d7c2ee1841546557616cde7ac1e537d570 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:52:09 -0400 Subject: [PATCH 0408/1400] chore: only log HTTP convo status if it's not idle --- stackslib/src/net/neighbors/rpc.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/neighbors/rpc.rs b/stackslib/src/net/neighbors/rpc.rs index 3a5378803f..c75074222d 100644 --- a/stackslib/src/net/neighbors/rpc.rs +++ b/stackslib/src/net/neighbors/rpc.rs @@ -245,11 +245,13 @@ impl NeighborRPC { // see if we got any data let Some(http_response) = convo.try_get_response() else { - // still waiting - debug!( - "{:?}: HTTP event {} is still waiting for a response", - &network.local_peer, event_id - ); + if !convo.is_idle() { + // still waiting + debug!( + "{:?}: HTTP event {} is still waiting for a response", + &network.local_peer, event_id + ); + } return Ok(None); }; From 1ed1c30e2741e6bd7fa2aebd30f4075c1253a172 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:52:55 -0400 Subject: [PATCH 0409/1400] chore: clean up stacks tip representation, and use MARF'ed chainstate for loading tip and parent tip --- stackslib/src/net/p2p.rs | 258 +++++++++++++++++++++++++-------------- 1 file changed, 169 insertions(+), 89 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 740c21b0b4..e02256ece4 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -33,7 +33,7 @@ use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_H use stacks_common::types::chainstate::{PoxId, SortitionId}; use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; use {mio, url}; @@ -48,7 +48,7 @@ use crate::chainstate::coordinator::{ }; use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; -use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; use crate::chainstate::stacks::{StacksBlockHeader, MAX_BLOCK_LEN, MAX_TRANSACTION_LEN}; use crate::core::StacksEpoch; use crate::monitoring::{update_inbound_neighbors, update_outbound_neighbors}; @@ -234,6 +234,7 @@ impl ConnectingPeer { } } +/// Cached reward cycle, for validating pushed blocks #[derive(Clone, Debug, PartialEq)] pub struct CurrentRewardSet { pub reward_cycle: u64, @@ -252,6 +253,30 @@ impl CurrentRewardSet { } } +/// Cached stacks chain tip info, consumed by RPC endpoints +#[derive(Clone, Debug, PartialEq)] +pub struct StacksTipInfo { + pub consensus_hash: ConsensusHash, + pub block_hash: BlockHeaderHash, + pub height: u64, + pub is_nakamoto: bool, +} + +impl StacksTipInfo { + pub fn empty() -> Self { + Self { + consensus_hash: ConsensusHash([0u8; 20]), + block_hash: BlockHeaderHash([0u8; 32]), + height: 0, + is_nakamoto: false, + } + } + + pub fn block_id(&self) -> StacksBlockId { + StacksBlockId::new(&self.consensus_hash, &self.block_hash) + } +} + pub struct PeerNetwork { // constants pub peer_version: u32, @@ -267,13 +292,11 @@ pub struct PeerNetwork { pub ast_rules: ASTRules, /// Current Stacks tip -- the highest block's consensus hash, block hash, and height - pub stacks_tip: (ConsensusHash, BlockHeaderHash, u64), - /// Sortition that corresponds to the current Stacks tip, if known - pub stacks_tip_sn: Option, + pub stacks_tip: StacksTipInfo, /// Parent tenure Stacks tip -- the last block in the current tip's parent tenure. /// In epoch 2.x, this is the parent block. /// In nakamoto, this is the last block in the parent tenure - pub parent_stacks_tip: (ConsensusHash, BlockHeaderHash, u64), + pub parent_stacks_tip: StacksTipInfo, /// The block id of the first block in this tenure. /// In epoch 2.x, this is the same as the tip block ID /// In nakamoto, this is the block ID of the first block in the current tenure @@ -488,9 +511,8 @@ impl PeerNetwork { &first_burn_header_hash, first_burn_header_ts as u64, ), - stacks_tip: (ConsensusHash([0x00; 20]), BlockHeaderHash([0x00; 32]), 0), - stacks_tip_sn: None, - parent_stacks_tip: (ConsensusHash([0x00; 20]), BlockHeaderHash([0x00; 32]), 0), + stacks_tip: StacksTipInfo::empty(), + parent_stacks_tip: StacksTipInfo::empty(), tenure_start_block_id: StacksBlockId([0x00; 32]), current_reward_sets: BTreeMap::new(), @@ -4085,7 +4107,7 @@ impl PeerNetwork { &mut self, burnchain_height: u64, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ibd: bool, network_result: &mut NetworkResult, ) -> bool { @@ -4651,61 +4673,89 @@ impl PeerNetwork { /// current stacks tip. /// If this is the first tenure in epoch 3.x, then this is the pointer to the epoch 2.x block /// that it builds atop. + /// TODO: unit test pub(crate) fn get_parent_stacks_tip( - cur_epoch: StacksEpochId, + &self, chainstate: &StacksChainState, stacks_tip_block_id: &StacksBlockId, - ) -> Result<(ConsensusHash, BlockHeaderHash, u64), net_error> { + ) -> Result { let header = NakamotoChainState::get_block_header(chainstate.db(), stacks_tip_block_id)? - .ok_or(net_error::DBError(db_error::NotFoundError))?; - - let parent_header = if cur_epoch < StacksEpochId::Epoch30 { - // prior to epoch 3.0, the self.prev_stacks_tip field is just the parent block - let parent_block_id = - StacksChainState::get_parent_block_id(chainstate.db(), &header.index_block_hash())? - .ok_or(net_error::DBError(db_error::NotFoundError))?; + .ok_or_else(|| { + debug!( + "{:?}: get_parent_stacks_tip: No such stacks block: {:?}", + self.get_local_peer(), + stacks_tip_block_id + ); + net_error::DBError(db_error::NotFoundError) + })?; - NakamotoChainState::get_block_header(chainstate.db(), &parent_block_id)? - .ok_or(net_error::DBError(db_error::NotFoundError))? - } else { - // in epoch 3.0 and later, self.prev_stacks_tip is the first tenure block of the - // current tip's parent tenure. - match NakamotoChainState::get_nakamoto_parent_tenure_id_consensus_hash( - chainstate.db(), + let tenure_start_header = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + stacks_tip_block_id, + &header.consensus_hash, + )? + .ok_or_else(|| { + debug!( + "{:?}: get_parent_stacks_tip: No tenure-start block for {} off of {}", + self.get_local_peer(), &header.consensus_hash, - )? { - Some(ch) => NakamotoChainState::get_nakamoto_tenure_start_block_header( - chainstate.db(), - &ch, - )? - .ok_or(net_error::DBError(db_error::NotFoundError))?, - None => { - // parent in epoch 2 - let tenure_start_block_header = - NakamotoChainState::get_block_header_by_consensus_hash( - chainstate.db(), - &header.consensus_hash, - )? - .ok_or(net_error::DBError(db_error::NotFoundError))?; - - let nakamoto_header = tenure_start_block_header - .anchored_header - .as_stacks_nakamoto() - .ok_or(net_error::DBError(db_error::NotFoundError))?; - - NakamotoChainState::get_block_header( - chainstate.db(), - &nakamoto_header.parent_block_id, - )? - .ok_or(net_error::DBError(db_error::NotFoundError))? - } + stacks_tip_block_id + ); + net_error::DBError(db_error::NotFoundError) + })?; + + let parent_block_id = match tenure_start_header.anchored_header { + StacksBlockHeaderTypes::Nakamoto(ref nakamoto_header) => { + nakamoto_header.parent_block_id.clone() } + StacksBlockHeaderTypes::Epoch2(..) => StacksChainState::get_parent_block_id( + chainstate.db(), + &tenure_start_header.index_block_hash(), + )? + .ok_or_else(|| { + debug!( + "{:?}: get_parent_stacks_tip: No parent block ID found for epoch2x block {}", + self.get_local_peer(), + &tenure_start_header.index_block_hash() + ); + net_error::DBError(db_error::NotFoundError) + })?, }; - Ok(( - parent_header.consensus_hash, - parent_header.anchored_header.block_hash(), - parent_header.anchored_header.height(), - )) + + let parent_header = + NakamotoChainState::get_block_header(chainstate.db(), &parent_block_id)?.ok_or_else( + || { + debug!( + "{:?}: get_parent_stacks_tip: No such parent stacks block: {:?}", + self.get_local_peer(), + &parent_block_id + ); + net_error::DBError(db_error::NotFoundError) + }, + )?; + + let parent_tenure_start_header = NakamotoChainState::get_tenure_start_block_header(&mut chainstate.index_conn(), stacks_tip_block_id, &parent_header.consensus_hash)? + .ok_or_else(|| { + debug!("{:?}: get_parent_stacks_tip: No tenure-start block for parent tenure {} off of child {} (parnet {})", self.get_local_peer(), &parent_header.consensus_hash, stacks_tip_block_id, &parent_block_id); + net_error::DBError(db_error::NotFoundError) + })?; + + let parent_stacks_tip = StacksTipInfo { + consensus_hash: parent_tenure_start_header.consensus_hash, + block_hash: parent_tenure_start_header.anchored_header.block_hash(), + height: parent_tenure_start_header.anchored_header.height(), + is_nakamoto: parent_tenure_start_header + .anchored_header + .as_stacks_nakamoto() + .is_some(), + }; + test_debug!( + "{:?}: Parent Stacks tip off of {} is {:?}", + self.get_local_peer(), + &stacks_tip_block_id, + &parent_stacks_tip + ); + Ok(parent_stacks_tip) } /// Clear out old reward cycles @@ -4727,6 +4777,7 @@ impl PeerNetwork { sortdb: &SortitionDB, chainstate: &mut StacksChainState, tip_sn: &BlockSnapshot, + tip_block_id: &StacksBlockId, ) -> Result<(), net_error> { let cur_rc = self .burnchain @@ -4767,6 +4818,7 @@ impl PeerNetwork { &tip_sn.sortition_id, &self.burnchain, chainstate, + tip_block_id, sortdb, &OnChainRewardSetProvider::new(), ) @@ -4815,60 +4867,74 @@ impl PeerNetwork { ) -> Result>, net_error> { // update burnchain snapshot if we need to (careful -- it's expensive) let canonical_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; - let stacks_tip = + let (stacks_tip_ch, stacks_tip_bhh, stacks_tip_height) = SortitionDB::get_canonical_stacks_chain_tip_hash_and_height(sortdb.conn())?; let burnchain_tip_changed = canonical_sn.block_height != self.chain_view.burn_block_height || self.num_state_machine_passes == 0 || canonical_sn.sortition_id != self.burnchain_tip.sortition_id; - let stacks_tip_changed = self.stacks_tip != stacks_tip; - let new_stacks_tip_block_id = StacksBlockId::new(&stacks_tip.0, &stacks_tip.1); + let stacks_tip_changed = self.stacks_tip.consensus_hash != stacks_tip_ch + || self.stacks_tip.block_hash != stacks_tip_bhh + || self.stacks_tip.height != stacks_tip_height; + + let new_stacks_tip_block_id = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bhh); + let stacks_tip_is_nakamoto = if stacks_tip_changed { + // go check + chainstate + .nakamoto_blocks_db() + .has_nakamoto_block_with_index_hash(&new_stacks_tip_block_id) + .unwrap_or(false) + } else { + self.stacks_tip.is_nakamoto + }; + let need_stackerdb_refresh = canonical_sn.canonical_stacks_tip_consensus_hash != self.burnchain_tip.canonical_stacks_tip_consensus_hash || burnchain_tip_changed || stacks_tip_changed; if burnchain_tip_changed || stacks_tip_changed { - self.refresh_reward_cycles(sortdb, chainstate, &canonical_sn)?; + self.refresh_reward_cycles( + sortdb, + chainstate, + &canonical_sn, + &new_stacks_tip_block_id, + )?; } let mut ret: HashMap> = HashMap::new(); - let (parent_stacks_tip, tenure_start_block_id, stacks_tip_sn) = if stacks_tip_changed { - let stacks_tip_sn = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_tip.0)?; + let (parent_stacks_tip, tenure_start_block_id) = if stacks_tip_changed { let tenure_start_block_id = if let Some(header) = NakamotoChainState::get_nakamoto_tenure_start_block_header( - chainstate.db(), - &stacks_tip.0, + &mut chainstate.index_conn(), + &new_stacks_tip_block_id, + &stacks_tip_ch, )? { header.index_block_hash() } else { new_stacks_tip_block_id.clone() }; - let parent_tip_id = match Self::get_parent_stacks_tip( - self.get_current_epoch().epoch_id, - chainstate, - &new_stacks_tip_block_id, - ) { - Ok(tip_id) => tip_id, + let parent_tip = match self.get_parent_stacks_tip(chainstate, &new_stacks_tip_block_id) + { + Ok(tip) => tip, Err(net_error::DBError(db_error::NotFoundError)) => { // this is the first block - ( - FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), - FIRST_STACKS_BLOCK_HASH.clone(), - 0, - ) + StacksTipInfo { + consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + block_hash: FIRST_STACKS_BLOCK_HASH.clone(), + height: 0, + is_nakamoto: false, + } } Err(e) => return Err(e), }; - (parent_tip_id, tenure_start_block_id, stacks_tip_sn) + (parent_tip, tenure_start_block_id) } else { ( self.parent_stacks_tip.clone(), self.tenure_start_block_id.clone(), - self.stacks_tip_sn.clone(), ) }; @@ -5005,10 +5071,22 @@ impl PeerNetwork { // update cached stacks chain view for /v2/info and /v3/tenures/info self.burnchain_tip = canonical_sn; - self.stacks_tip = stacks_tip; - self.stacks_tip_sn = stacks_tip_sn; - self.parent_stacks_tip = parent_stacks_tip; self.tenure_start_block_id = tenure_start_block_id; + if stacks_tip_changed { + self.stacks_tip = StacksTipInfo { + consensus_hash: stacks_tip_ch, + block_hash: stacks_tip_bhh, + height: stacks_tip_height, + is_nakamoto: stacks_tip_is_nakamoto, + }; + self.parent_stacks_tip = parent_stacks_tip; + + test_debug!( + "{:?}: canonical Stacks tip is now {:?}", + self.get_local_peer(), + &self.stacks_tip + ); + } Ok(ret) } @@ -5204,16 +5282,17 @@ impl PeerNetwork { debug!("Already have tx {}", txid); return false; } - let stacks_epoch = match sortdb - .index_conn() - .get_stacks_epoch(burnchain_tip.block_height as u32) + let stacks_epoch = match SortitionDB::get_stacks_epoch( + sortdb.conn(), + burnchain_tip.block_height, + ) + .ok() + .flatten() { Some(epoch) => epoch, None => { - warn!( - "Failed to store transaction because could not load Stacks epoch for canonical burn height = {}", - burnchain_tip.block_height - ); + warn!("Failed to store transaction because could not load Stacks epoch for canonical burn height = {}", + burnchain_tip.block_height); return false; } }; @@ -5422,6 +5501,7 @@ impl PeerNetwork { }; let mut network_result = NetworkResult::new( + self.stacks_tip.block_id(), self.num_state_machine_passes, self.num_inv_sync_passes, self.num_downloader_passes, From 9ef91712285417c4686debdf56d1fc5e9748268d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:53:17 -0400 Subject: [PATCH 0410/1400] chore: use MARF'ed chainstate API --- stackslib/src/net/relay.rs | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 11fa5f6364..072c938a14 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -42,6 +42,7 @@ use crate::chainstate::coordinator::{ BlockEventDispatcher, Error as CoordinatorError, OnChainRewardSetProvider, }; use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; +use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use crate::chainstate::stacks::db::{StacksChainState, StacksEpochReceipt, StacksHeaderInfo}; @@ -556,13 +557,14 @@ impl Relayer { /// Given Nakamoto blocks pushed to us, verify that they correspond to expected block data. pub fn validate_nakamoto_blocks_push( burnchain: &Burnchain, - conn: &SortitionDBConn, sortdb: &SortitionDB, chainstate: &mut StacksChainState, + stacks_tip: &StacksBlockId, nakamoto_blocks_data: &NakamotoBlocksData, ) -> Result<(), net_error> { + let conn = sortdb.index_conn(); let mut loaded_reward_sets = HashMap::new(); - let tip_sn = SortitionDB::get_canonical_burn_chain_tip(conn)?; + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; for nakamoto_block in nakamoto_blocks_data.blocks.iter() { // is this the right Stacks block for this sortition? @@ -603,6 +605,7 @@ impl Relayer { &tip_sn.sortition_id, burnchain, chainstate, + stacks_tip, sortdb, &OnChainRewardSetProvider::new(), ) @@ -792,8 +795,10 @@ impl Relayer { sortdb: &SortitionDB, sort_handle: &mut SortitionHandleConn, chainstate: &mut StacksChainState, + stacks_tip: &StacksBlockId, block: &NakamotoBlock, coord_comms: Option<&CoordinatorChannels>, + obtained_method: NakamotoBlockObtainMethod, ) -> Result { debug!( "Handle incoming Nakamoto block {}/{}", @@ -804,7 +809,7 @@ impl Relayer { // do we have this block? don't lock the DB needlessly if so. if chainstate .nakamoto_blocks_db() - .has_nakamoto_block(&block.header.block_id()) + .has_nakamoto_block_with_index_hash(&block.header.block_id()) .map_err(|e| { warn!( "Failed to determine if we have Nakamoto block {}/{}: {:?}", @@ -881,6 +886,7 @@ impl Relayer { &tip, burnchain, chainstate, + stacks_tip, sortdb, &OnChainRewardSetProvider::new(), ) { @@ -920,6 +926,7 @@ impl Relayer { &staging_db_tx, headers_conn, reward_set, + obtained_method, )?; staging_db_tx.commit()?; @@ -945,6 +952,7 @@ impl Relayer { burnchain: &Burnchain, sortdb: &SortitionDB, chainstate: &mut StacksChainState, + stacks_tip: &StacksBlockId, blocks: impl Iterator, coord_comms: Option<&CoordinatorChannels>, ) -> Result, chainstate_error> { @@ -958,8 +966,10 @@ impl Relayer { sortdb, &mut sort_handle, chainstate, + stacks_tip, &block, coord_comms, + NakamotoBlockObtainMethod::Downloaded, ) { warn!("Failed to process Nakamoto block {}: {:?}", &block_id, &e); } else { @@ -1554,9 +1564,9 @@ impl Relayer { let mut accepted_blocks = vec![]; if let Err(e) = Relayer::validate_nakamoto_blocks_push( burnchain, - &sortdb.index_conn(), sortdb, chainstate, + &network_result.stacks_tip, nakamoto_blocks_data, ) { info!( @@ -1581,8 +1591,10 @@ impl Relayer { sortdb, &mut sort_handle, chainstate, + &network_result.stacks_tip, &nakamoto_block, coord_comms, + NakamotoBlockObtainMethod::Pushed, ) { Ok(accepted) => { if accepted { @@ -1935,6 +1947,7 @@ impl Relayer { burnchain, sortdb, chainstate, + &network_result.stacks_tip, nakamoto_blocks.into_values(), coord_comms, ) { @@ -2537,7 +2550,7 @@ impl Relayer { if !force_send && chainstate .nakamoto_blocks_db() - .has_nakamoto_block(&blk.block_id()) + .has_nakamoto_block_with_index_hash(&blk.block_id()) .unwrap_or(true) { return false; From 4663ea0166ad28bef2e54b82ef4e63ca859205fa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:53:34 -0400 Subject: [PATCH 0411/1400] chore: use MARF'ed chainstate API --- stackslib/src/net/tests/download/nakamoto.rs | 133 +++++++++++-------- 1 file changed, 77 insertions(+), 56 deletions(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index a7798bfe78..864faac84a 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -30,6 +30,7 @@ use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::VRFProof; use crate::burnchains::PoxConstants; +use crate::chainstate::burn::db::sortdb::SortitionHandle; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; @@ -363,10 +364,10 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { }; peer.refresh_burnchain_view(); - let tip_block_id = StacksBlockId::new(&peer.network.stacks_tip.0, &peer.network.stacks_tip.1); + let tip_block_id = peer.network.stacks_tip.block_id(); - let tip_ch = peer.network.stacks_tip.0.clone(); - let parent_tip_ch = peer.network.parent_stacks_tip.0.clone(); + let tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let parent_tip_ch = peer.network.parent_stacks_tip.consensus_hash.clone(); let current_reward_sets = peer.network.current_reward_sets.clone(); let unconfirmed_tenure = peer @@ -392,7 +393,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .unwrap() .unwrap(); let parent_parent_start_header = NakamotoChainState::get_nakamoto_tenure_start_block_header( - peer.chainstate().db(), + &mut peer.chainstate().index_conn(), + &tip_block_id, &parent_parent_header.consensus_hash, ) .unwrap() @@ -422,14 +424,14 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .expect("FATAL: burnchain tip before system start"); let highest_confirmed_wanted_tenure = WantedTenure { - tenure_id_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + tenure_id_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), winning_block_id: parent_parent_start_header.index_block_hash(), processed: false, burn_height: peer.network.burnchain_tip.block_height - 1, }; let unconfirmed_wanted_tenure = WantedTenure { - tenure_id_consensus_hash: peer.network.stacks_tip.0.clone(), + tenure_id_consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), winning_block_id: last_confirmed_tenure .first() .as_ref() @@ -522,18 +524,18 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ); let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), + consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, + &peer.network.parent_stacks_tip.consensus_hash, + &peer.network.parent_stacks_tip.block_hash, ), tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, + &peer.network.stacks_tip.consensus_hash, + &peer.network.stacks_tip.block_hash, ), - tip_height: peer.network.stacks_tip.2, + tip_height: peer.network.stacks_tip.height, reward_cycle: tip_rc, }; @@ -601,18 +603,18 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), + consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, + &peer.network.parent_stacks_tip.consensus_hash, + &peer.network.parent_stacks_tip.block_hash, ), tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, + &peer.network.stacks_tip.consensus_hash, + &peer.network.stacks_tip.block_hash, ), - tip_height: peer.network.stacks_tip.2, + tip_height: peer.network.stacks_tip.height, reward_cycle: tip_rc, }; @@ -636,7 +638,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( utd.state, NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone() + tenure_tip.tip_block_id.clone(), ) ); assert_eq!(utd.tenure_tip, Some(tenure_tip.clone())); @@ -704,18 +706,18 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), + consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, + &peer.network.parent_stacks_tip.consensus_hash, + &peer.network.parent_stacks_tip.block_hash, ), tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, + &peer.network.stacks_tip.consensus_hash, + &peer.network.stacks_tip.block_hash, ), - tip_height: peer.network.stacks_tip.2, + tip_height: peer.network.stacks_tip.height, reward_cycle: tip_rc, }; @@ -739,7 +741,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( utd.state, NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone() + tenure_tip.tip_block_id.clone(), ) ); assert_eq!(utd.tenure_tip, Some(tenure_tip.clone())); @@ -806,18 +808,18 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), + consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, + &peer.network.parent_stacks_tip.consensus_hash, + &peer.network.parent_stacks_tip.block_hash, ), tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, + &peer.network.stacks_tip.consensus_hash, + &peer.network.stacks_tip.block_hash, ), - tip_height: peer.network.stacks_tip.2, + tip_height: peer.network.stacks_tip.height, reward_cycle: tip_rc, }; @@ -887,18 +889,18 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), + consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, + &peer.network.parent_stacks_tip.consensus_hash, + &peer.network.parent_stacks_tip.block_hash, ), tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, + &peer.network.stacks_tip.consensus_hash, + &peer.network.stacks_tip.block_hash, ), - tip_height: peer.network.stacks_tip.2, + tip_height: peer.network.stacks_tip.height, reward_cycle: tip_rc, }; @@ -950,18 +952,18 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), + consensus_hash: peer.network.stacks_tip.consensus_hash.clone(), tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), + parent_consensus_hash: peer.network.parent_stacks_tip.consensus_hash.clone(), parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, + &peer.network.parent_stacks_tip.consensus_hash, + &peer.network.parent_stacks_tip.block_hash, ), tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, + &peer.network.stacks_tip.consensus_hash, + &peer.network.stacks_tip.block_hash, ), - tip_height: peer.network.stacks_tip.2, + tip_height: peer.network.stacks_tip.height, reward_cycle: tip_rc, }; @@ -1302,6 +1304,7 @@ fn test_make_tenure_downloaders() { let test_signers = TestSigners::new(vec![]); let current_reward_sets = peer.network.current_reward_sets.clone(); + let stacks_tip = peer.network.stacks_tip.block_id(); // test load_wanted_tenures() { @@ -1452,11 +1455,13 @@ fn test_make_tenure_downloaders() { ) .unwrap(); + let nakamoto_tip = peer.network.stacks_tip.block_id(); let chainstate = peer.chainstate(); NakamotoDownloadStateMachine::inner_update_processed_wanted_tenures( nakamoto_start, &mut wanted_tenures, chainstate, + &nakamoto_tip, ) .unwrap(); @@ -1483,11 +1488,13 @@ fn test_make_tenure_downloaders() { // but the resulting map is keyed by block ID (and we don't have the first block ID) let wanted_tenures_with_blocks = wanted_tenures[1..].to_vec(); + let nakamoto_tip = peer.network.stacks_tip.block_id(); let chainstate = peer.chainstate(); let mut tenure_start_blocks = HashMap::new(); NakamotoDownloadStateMachine::load_tenure_start_blocks( &wanted_tenures, chainstate, + &nakamoto_tip, &mut tenure_start_blocks, ) .unwrap(); @@ -1769,7 +1776,8 @@ fn test_make_tenure_downloaders() { let chainstate = peer.chainstate(); let start_end = available_tenures.get(&wt.tenure_id_consensus_hash).unwrap(); let hdr = NakamotoChainState::get_nakamoto_tenure_start_block_header( - chainstate.db(), + &mut chainstate.index_conn(), + &stacks_tip, &wt.tenure_id_consensus_hash, ) .unwrap() @@ -2101,7 +2109,12 @@ fn test_nakamoto_download_run_2_peers() { let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); - + let nakamoto_tip = peer + .sortdb() + .index_handle(&tip.sortition_id) + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); assert_eq!(tip.block_height, 81); // make a neighbor from this peer @@ -2130,8 +2143,9 @@ fn test_nakamoto_download_run_2_peers() { let mut all_block_headers: HashMap = HashMap::new(); for sn in all_sortitions.iter() { - if let Some(header) = NakamotoChainState::get_block_header_by_consensus_hash( - peer.chainstate().db(), + if let Some(header) = NakamotoChainState::get_tenure_start_block_header( + &mut peer.chainstate().index_conn(), + &nakamoto_tip, &sn.consensus_hash, ) .unwrap() @@ -2288,6 +2302,12 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); + let nakamoto_tip = peer + .sortdb() + .index_handle(&tip.sortition_id) + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); assert_eq!(tip.block_height, 51); @@ -2317,8 +2337,9 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { let mut all_block_headers: HashMap = HashMap::new(); for sn in all_sortitions.iter() { - if let Some(header) = NakamotoChainState::get_block_header_by_consensus_hash( - peer.chainstate().db(), + if let Some(header) = NakamotoChainState::get_tenure_start_block_header( + &mut peer.chainstate().index_conn(), + &nakamoto_tip, &sn.consensus_hash, ) .unwrap() From e662a4d891953884c81ae501bbb594432d098bde Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:53:47 -0400 Subject: [PATCH 0412/1400] chore: make malleablizable blocks by default when synthesizing peers from an inv --- stackslib/src/net/tests/inv/nakamoto.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index ca5c0818db..f8134360f2 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -34,6 +34,7 @@ use crate::chainstate::nakamoto::coordinator::tests::{ simple_nakamoto_coordinator_10_tenures_10_sortitions, simple_nakamoto_coordinator_2_tenures_3_sortitions, }; +use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ @@ -406,11 +407,18 @@ pub fn make_nakamoto_peers_from_invs<'a>( } } + // make malleablized blocks + let (test_signers, test_stackers) = TestStacker::multi_signing_set(&[ + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, + ]); + let plan = NakamotoBootPlan::new(test_name) .with_private_key(private_key) .with_pox_constants(rc_len, prepare_len) .with_initial_balances(vec![(addr.into(), 1_000_000)]) - .with_extra_peers(num_peers); + .with_extra_peers(num_peers) + .with_test_signers(test_signers) + .with_test_stackers(test_stackers); let (peer, other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(observer)); (peer, other_peers) From bfea1eadca7b8b69d8a01e27c6cb5927f2334f67 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:54:23 -0400 Subject: [PATCH 0413/1400] chore: synthesize and process malleablized blocks when producing a testpeer from a boot plan --- stackslib/src/net/tests/mod.rs | 206 ++++++++++++++++++++------------- 1 file changed, 128 insertions(+), 78 deletions(-) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 20213bc986..1ece477fe0 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -20,6 +20,8 @@ pub mod inv; pub mod neighbors; pub mod relay; +use std::collections::HashSet; + use clarity::vm::clarity::ClarityConnection; use clarity::vm::types::PrincipalData; use rand::prelude::SliceRandom; @@ -37,9 +39,8 @@ use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::operations::BlockstackOperationType; use crate::chainstate::coordinator::tests::p2pkh_from; -use crate::chainstate::nakamoto::coordinator::tests::{ - boot_nakamoto, make_all_signers_vote_for_aggregate_key, -}; +use crate::chainstate::nakamoto::coordinator::tests::boot_nakamoto; +use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; @@ -216,6 +217,7 @@ impl NakamotoBootPlan { fn apply_blocks_to_other_peers( burn_ops: &[BlockstackOperationType], blocks: &[NakamotoBlock], + malleablized_blocks: &[NakamotoBlock], other_peers: &mut [TestPeer], ) { info!("Applying block to other peers"; "block_height" => ?burn_ops.first().map(|op| op.block_height())); @@ -229,14 +231,23 @@ impl NakamotoBootPlan { let mut sort_handle = sortdb.index_handle(&sort_tip); for block in blocks { + debug!( + "Apply block {} (sighash {}) to peer {} ({})", + &block.block_id(), + &block.header.signer_signature_hash(), + i, + &peer.to_neighbor().addr + ); let block_id = block.block_id(); let accepted = Relayer::process_new_nakamoto_block( &peer.network.burnchain, &sortdb, &mut sort_handle, &mut node.chainstate, + &peer.network.stacks_tip.block_id(), &block, None, + NakamotoBlockObtainMethod::Pushed, ) .unwrap(); if accepted { @@ -248,6 +259,48 @@ impl NakamotoBootPlan { i ); } + + // process it + peer.coord.handle_new_stacks_block().unwrap(); + peer.coord.handle_new_nakamoto_stacks_block().unwrap(); + } + + for block in malleablized_blocks { + debug!( + "Apply malleablized block {} (sighash {}) to peer {} ({})", + &block.block_id(), + &block.header.signer_signature_hash(), + i, + &peer.to_neighbor().addr + ); + let block_id = block.block_id(); + let accepted = Relayer::process_new_nakamoto_block( + &peer.network.burnchain, + &sortdb, + &mut sort_handle, + &mut node.chainstate, + &peer.network.stacks_tip.block_id(), + &block, + None, + NakamotoBlockObtainMethod::Pushed, + ) + .unwrap(); + if accepted { + test_debug!( + "Accepted malleablized Nakamoto block {block_id} to other peer {}", + i + ); + peer.coord.handle_new_nakamoto_stacks_block().unwrap(); + } else { + panic!( + "Did NOT accept malleablized Nakamoto block {block_id} to other peer {}", + i + ); + } + + // process it + peer.coord.handle_new_stacks_block().unwrap(); + peer.coord.handle_new_nakamoto_stacks_block().unwrap(); } peer.sortdb = Some(sortdb); @@ -258,9 +311,8 @@ impl NakamotoBootPlan { /// Make a peer and transition it into the Nakamoto epoch. /// The node needs to be stacking; otherwise, Nakamoto won't activate. - fn boot_nakamoto<'a>( + fn boot_nakamoto_peers<'a>( mut self, - aggregate_public_key: Point, observer: Option<&'a TestEventObserver>, ) -> (TestPeer<'a>, Vec) { let mut peer_config = TestPeerConfig::new(&self.test_name, 0, 0); @@ -277,7 +329,6 @@ impl NakamotoBootPlan { // first 25 blocks are boot-up // reward cycle 6 instantiates pox-3 // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation - peer_config.aggregate_public_key = Some(aggregate_public_key.clone()); peer_config .stacker_dbs .push(boot_code_id(MINERS_NAME, false)); @@ -339,6 +390,8 @@ impl NakamotoBootPlan { let mut peer_nonce = 0; let mut other_peer_nonces = vec![0; other_peers.len()]; let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&self.private_key)); + let default_pox_addr = + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); let mut sortition_height = peer.get_burn_block_height(); debug!("\n\n======================"); @@ -413,16 +466,18 @@ impl NakamotoBootPlan { .unwrap_or(vec![]) .iter() .map(|test_stacker| { - let pox_addr = test_stacker.pox_address.clone().unwrap_or_else(|| { - PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()) - }); + let pox_addr = test_stacker + .pox_addr + .clone() + .unwrap_or(default_pox_addr.clone()); + let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); let signature = make_signer_key_signature( &pox_addr, &test_stacker.signer_private_key, reward_cycle.into(), &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, 12_u128, - u128::MAX, + max_amount, 1, ); make_pox_4_lockup( @@ -434,7 +489,7 @@ impl NakamotoBootPlan { &StacksPublicKey::from_private(&test_stacker.signer_private_key), sortition_height + 1, Some(signature), - u128::MAX, + max_amount, 1, ) }) @@ -465,34 +520,6 @@ impl NakamotoBootPlan { sortition_height = peer.get_burn_block_height(); } - debug!("\n\n======================"); - debug!("Vote for the Aggregate Key"); - debug!("========================\n\n"); - - let target_cycle = peer - .config - .burnchain - .block_height_to_reward_cycle(sortition_height.into()) - .expect("Failed to get reward cycle") - + 1; - let vote_txs = with_sortdb(peer, |chainstate, sortdb| { - make_all_signers_vote_for_aggregate_key( - chainstate, - sortdb, - &stacks_block, - &mut self.test_signers, - &self.test_stackers, - target_cycle.into(), - ) - }); - - peer.tenure_with_txs(&vote_txs, &mut peer_nonce); - for (other_peer, other_peer_nonce) in - other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) - { - other_peer.tenure_with_txs(&vote_txs, other_peer_nonce); - } - debug!("\n\n======================"); debug!("Advance to Epoch 3.0"); debug!("========================\n\n"); @@ -522,14 +549,14 @@ impl NakamotoBootPlan { let pox_constants = self.pox_constants.clone(); let test_stackers = self.test_stackers.clone(); - let (mut peer, mut other_peers) = - self.boot_nakamoto(test_signers.aggregate_public_key.clone(), observer); + let (mut peer, mut other_peers) = self.boot_nakamoto_peers(observer); if boot_plan.is_empty() { debug!("No boot plan steps supplied -- returning once nakamoto epoch has been reached"); return (peer, other_peers); } let mut all_blocks = vec![]; + let mut malleablized_block_ids = HashSet::new(); let mut consensus_hashes = vec![]; let mut last_tenure_change: Option = None; let mut blocks_since_last_tenure = 0; @@ -619,12 +646,24 @@ impl NakamotoBootPlan { .map(|(block, _, _)| block) .collect(); + let malleablized_blocks = + std::mem::replace(&mut peer.malleablized_blocks, vec![]); + for mblk in malleablized_blocks.iter() { + malleablized_block_ids.insert(mblk.block_id()); + } + Self::check_blocks_against_boot_plan( &blocks, &boot_steps, num_expected_transactions, ); - Self::apply_blocks_to_other_peers(&burn_ops, &blocks, &mut other_peers); + + Self::apply_blocks_to_other_peers( + &burn_ops, + &blocks, + &malleablized_blocks, + &mut other_peers, + ); all_blocks.push(blocks); } NakamotoBootTenure::Sortition(boot_steps) => { @@ -665,29 +704,6 @@ impl NakamotoBootPlan { i += 1; let mut txs = vec![]; - // check if the stacker/signers need to vote for an aggregate key. if so, append those transactions - // to the end of the block. - // NOTE: this will only work the block after .signers is updated, because `make_all_signers_vote...` - // checks the chainstate as of `tip` to obtain the signer vector. this means that some tests may - // need to produce an extra block in a tenure in order to get the signer votes in place. - // The alternative to doing this would be to either manually build the signer vector or to refactor - // the testpeer such that a callback is provided during the actual mining of the block with a - // `ClarityBlockConnection`. - let mut voting_txs = if pox_constants.is_in_prepare_phase(first_burn_ht, burn_ht) { - let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb).unwrap().unwrap(); - let cycle_id = 1 + pox_constants.block_height_to_reward_cycle(first_burn_ht, burn_ht).unwrap(); - make_all_signers_vote_for_aggregate_key( - chainstate, - sortdb, - &tip.index_block_hash(), - &mut test_signers.clone(), - &test_stackers, - u128::from(cycle_id), - ) - } else { - vec![] - }; - let last_block_opt = blocks_so_far .last() .as_ref() @@ -719,9 +735,6 @@ impl NakamotoBootPlan { } } - num_expected_transactions += voting_txs.len(); - txs.append(&mut voting_txs); - blocks_since_last_tenure += 1; txs }); @@ -733,25 +746,41 @@ impl NakamotoBootPlan { .map(|(block, _, _)| block) .collect(); + let malleablized_blocks = + std::mem::replace(&mut peer.malleablized_blocks, vec![]); + for mblk in malleablized_blocks.iter() { + malleablized_block_ids.insert(mblk.block_id()); + } + Self::check_blocks_against_boot_plan( &blocks, &boot_steps, num_expected_transactions, ); - Self::apply_blocks_to_other_peers(&burn_ops, &blocks, &mut other_peers); + Self::apply_blocks_to_other_peers( + &burn_ops, + &blocks, + &malleablized_blocks, + &mut other_peers, + ); all_blocks.push(blocks); } } } + // check that our tenure-extends have been getting applied let (highest_tenure, sort_tip) = { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = NakamotoChainState::get_highest_nakamoto_tenure( - chainstate.db(), - &sort_db.index_handle_at_tip(), + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(), ) .unwrap() .unwrap(); @@ -779,12 +808,24 @@ impl NakamotoBootPlan { // already checked that `all_blocks` matches the boot plan, so just check that each // transaction in `all_blocks` ran to completion if let Some(observer) = observer { - let observed_blocks = observer.get_blocks(); + let mut observed_blocks = observer.get_blocks(); let mut block_idx = (peer.config.burnchain.pox_constants.pox_4_activation_height + peer.config.burnchain.pox_constants.reward_cycle_length - 25) as usize; - for tenure in all_blocks { - for block in tenure { + + // filter out observed blocks that are malleablized + observed_blocks.retain(|blk| { + if let Some(nakamoto_block_header) = + blk.metadata.anchored_header.as_stacks_nakamoto() + { + !malleablized_block_ids.contains(&nakamoto_block_header.block_id()) + } else { + true + } + }); + + for tenure in all_blocks.iter() { + for block in tenure.iter() { let observed_block = &observed_blocks[block_idx]; block_idx += 1; @@ -825,9 +866,13 @@ impl NakamotoBootPlan { let chainstate = &mut other_peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = other_peer.sortdb.as_mut().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let tenure = NakamotoChainState::get_highest_nakamoto_tenure( - chainstate.db(), - &sort_db.index_handle_at_tip(), + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(), ) .unwrap() .unwrap(); @@ -838,9 +883,14 @@ impl NakamotoBootPlan { assert_eq!(other_sort_tip, sort_tip); } + // flatten + let all_blocks: Vec = all_blocks.into_iter().flatten().collect(); + peer.check_nakamoto_migration(); + peer.check_malleablized_blocks(all_blocks.clone(), 2); for other_peer in other_peers.iter_mut() { other_peer.check_nakamoto_migration(); + other_peer.check_malleablized_blocks(all_blocks.clone(), 2); } (peer, other_peers) } From 614ca8593a935c95a16ce3c45506680a0b5905a0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:54:44 -0400 Subject: [PATCH 0414/1400] chore: API sync --- stackslib/src/net/tests/relay/epoch2x.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index fe69b6895a..e6a69f5dc0 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -3088,8 +3088,16 @@ fn process_new_blocks_rejects_problematic_asts() { let mut unsolicited = HashMap::new(); unsolicited.insert(nk.clone(), bad_msgs.clone()); - let mut network_result = - NetworkResult::new(0, 0, 0, 0, 0, ConsensusHash([0x01; 20]), HashMap::new()); + let mut network_result = NetworkResult::new( + peer.network.stacks_tip.block_id(), + 0, + 0, + 0, + 0, + 0, + ConsensusHash([0x01; 20]), + HashMap::new(), + ); network_result.consume_unsolicited(unsolicited); assert!(network_result.has_blocks()); From 1e5f342baae3c10b8ed6684b777187aa721d9bd2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:54:53 -0400 Subject: [PATCH 0415/1400] chore: API sync --- stackslib/src/net/unsolicited.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index e444a4f633..80c036fca8 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -803,7 +803,7 @@ impl PeerNetwork { ) -> bool { if chainstate .nakamoto_blocks_db() - .has_nakamoto_block(&nakamoto_block.block_id()) + .has_nakamoto_block_with_index_hash(&nakamoto_block.block_id()) .unwrap_or(false) { debug!( From 27d6d4109cb490e327504845f25f74f1ad98bcfe Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:55:06 -0400 Subject: [PATCH 0416/1400] fix: remove sortition DB-specific functions --- stackslib/src/util_lib/db.rs | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index 22509a8ac4..3e6f985f79 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -633,16 +633,6 @@ impl<'a, C, T: MarfTrieId> IndexDBConn<'a, C, T> { pub fn conn(&self) -> &DBConn { self.index.sqlite_conn() } - - pub fn get_stacks_epoch_by_epoch_id(&self, epoch_id: &StacksEpochId) -> Option { - SortitionDB::get_stacks_epoch_by_epoch_id(self.conn(), epoch_id) - .expect("BUG: failed to get epoch for epoch id") - } - - pub fn get_stacks_epoch(&self, height: u32) -> Option { - SortitionDB::get_stacks_epoch(self.conn(), height as u64) - .expect("BUG: failed to get epoch for burn block height") - } } impl<'a, C, T: MarfTrieId> Deref for IndexDBConn<'a, C, T> { @@ -926,6 +916,12 @@ impl<'a, C: Clone, T: MarfTrieId> IndexDBTx<'a, C, T> { get_indexed(self.index_mut(), header_hash, key) } + /// Get a value from the fork index, but with a read-only reference + pub fn get_indexed_ref(&self, header_hash: &T, key: &str) -> Result, Error> { + let mut ro_index = self.index().reopen_readonly()?; + get_indexed(&mut ro_index, header_hash, key) + } + /// Put all keys and values in a single MARF transaction, and seal it. /// This is a one-time operation; subsequent calls will panic. You should follow this up with /// a commit if you want to save the MARF state. From 0ebce5801404e9181b3b2856bf14c0f3205eb80e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:55:31 -0400 Subject: [PATCH 0417/1400] chore: API sync, add more logging, remove dead code --- .../stacks-node/src/nakamoto_node/miner.rs | 161 +++++------------- 1 file changed, 39 insertions(+), 122 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 421702bcfb..7609ff0785 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -29,13 +29,14 @@ use stacks::chainstate::coordinator::OnChainRewardSetProvider; use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; +use stacks::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use stacks::chainstate::stacks::boot::RewardSet; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, - TenureChangeCause, TenureChangePayload, ThresholdSignature, TransactionAnchorMode, - TransactionPayload, TransactionVersion, + TenureChangeCause, TenureChangePayload, TransactionAnchorMode, TransactionPayload, + TransactionVersion, }; use stacks::net::stackerdb::StackerDBs; use stacks::util::secp256k1::MessageSignature; @@ -43,7 +44,6 @@ use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::vrf::VRFProof; -use wsts::curve::point::Point; use wsts::curve::scalar::Scalar; use super::relayer::RelayerThread; @@ -300,109 +300,6 @@ impl BlockMinerThread { } } - #[allow(dead_code)] - fn coordinate_signature( - &mut self, - new_block: &mut NakamotoBlock, - burn_block_height: u64, - stackerdbs: &mut StackerDBs, - attempts: &mut u64, - ) -> Result<(Point, ThresholdSignature), NakamotoNodeError> { - let Some(miner_privkey) = self.config.miner.mining_key else { - return Err(NakamotoNodeError::MinerConfigurationFailed( - "No mining key configured, cannot mine", - )); - }; - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open sortition DB. Cannot mine! {e:?}" - )) - })?; - - let tip = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - &new_block.header.consensus_hash, - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to retrieve chain tip: {:?}", - e - )) - }) - .and_then(|result| { - result.ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure("Failed to retrieve chain tip".into()) - }) - })?; - - let mut chain_state = - neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open chainstate DB. Cannot mine! {e:?}" - )) - })?; - - let reward_info = match load_nakamoto_reward_set( - self.burnchain - .pox_reward_cycle(tip.block_height.saturating_add(1)) - .expect("FATAL: no reward cycle for sortition"), - &tip.sortition_id, - &self.burnchain, - &mut chain_state, - &sort_db, - &OnChainRewardSetProvider::new(), - ) { - Ok(Some((reward_info, _))) => reward_info, - Ok(None) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "No reward set stored yet. Cannot mine!".into(), - )); - } - Err(e) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" - ))); - } - }; - - let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Current reward cycle did not select a reward set. Cannot mine!".into(), - )); - }; - - // NOTE: this is a placeholder until the API can be fixed - let aggregate_public_key = Point::new(); - let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); - let mut coordinator = - SignCoordinator::new(&reward_set, miner_privkey_as_scalar, &self.config).map_err( - |e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to initialize the signing coordinator. Cannot mine! {e:?}" - )) - }, - )?; - - *attempts += 1; - let signature = coordinator.begin_sign_v1( - new_block, - burn_block_height, - *attempts, - &tip, - &self.burnchain, - &sort_db, - &stackerdbs, - &self.globals.counters, - )?; - - Ok((aggregate_public_key, signature)) - } - /// Gather a list of signatures from the signers for the block fn gather_signatures( &mut self, @@ -457,6 +354,7 @@ impl BlockMinerThread { &tip.sortition_id, &self.burnchain, &mut chain_state, + &new_block.header.parent_block_id, &sort_db, &OnChainRewardSetProvider::new(), ) { @@ -664,6 +562,7 @@ impl BlockMinerThread { &staging_tx, headers_conn, reward_set, + NakamotoBlockObtainMethod::Mined, )?; staging_tx.commit()?; Ok(()) @@ -759,8 +658,10 @@ impl BlockMinerThread { // 2. The highest block in the current tenure's parent tenure // Where the current tenure's parent tenure is the tenure start block committed to in the current tenure's associated block commit. let stacks_block_id = if let Some(block) = self.mined_blocks.last() { + test_debug!("Stacks block parent ID is last mined block"); block.block_id() } else { + test_debug!("Stacks block parent ID is parent tenure ID"); self.parent_tenure_id }; let Some(mut stacks_tip_header) = @@ -786,19 +687,32 @@ impl BlockMinerThread { }); }; - if self.mined_blocks.is_empty() { - // We could call this even if self.mined_blocks was not empty, but would return the same value, so save the effort and only do it when necessary. - // If we are starting a new tenure, then make sure we are building off of the last block of our parent tenure - if let Some(last_tenure_finish_block_header) = - NakamotoChainState::get_nakamoto_tenure_finish_block_header( - chain_state.db(), - &stacks_tip_header.consensus_hash, - ) - .expect("FATAL: could not query parent tenure finish block") - { - stacks_tip_header = last_tenure_finish_block_header; - } + // if self.mined_blocks.is_empty() { + // We could call this even if self.mined_blocks was not empty, but would return the same value, so save the effort and only do it when necessary. + // If we are starting a new tenure, then make sure we are building off of the last block of our parent tenure + if let Some(last_tenure_finish_block_header) = + NakamotoChainState::get_highest_block_header_in_tenure( + &mut chain_state.index_conn(), + &stacks_block_id, + &stacks_tip_header.consensus_hash, + ) + .expect("FATAL: could not query parent tenure finish block") + { + test_debug!( + "Miner: stacks tip header is now {} {:?} from {} {:?}", + &last_tenure_finish_block_header.index_block_hash(), + &last_tenure_finish_block_header, + &stacks_tip_header.index_block_hash(), + &stacks_tip_header + ); + stacks_tip_header = last_tenure_finish_block_header; } + // } + test_debug!( + "Miner: stacks tip header is {} {:?}", + &stacks_tip_header.index_block_hash(), + &stacks_tip_header + ); let miner_address = self .keychain .origin_address(self.config.is_mainnet()) @@ -913,6 +827,7 @@ impl BlockMinerThread { &tip.sortition_id, &self.burnchain, &mut chain_state, + &parent_block_info.stacks_parent_header.index_block_hash(), &burn_db, &OnChainRewardSetProvider::new(), ) { @@ -984,6 +899,7 @@ impl BlockMinerThread { block.txs.len(); "signer_sighash" => %block.header.signer_signature_hash(), "consensus_hash" => %block.header.consensus_hash, + "parent_block_id" => %block.header.parent_block_id, ); self.event_dispatcher.process_mined_nakamoto_block_event( @@ -1042,7 +958,7 @@ impl BlockMinerThread { } => { let num_blocks_so_far = NakamotoChainState::get_nakamoto_tenure_length( chainstate.db(), - &self.burn_election_block.consensus_hash, + &parent_block_id, ) .map_err(NakamotoNodeError::MiningFailure)?; debug!("Miner: Extending tenure"; "burn_view_consensus_hash" => %burn_view_consensus_hash, "parent_block_id" => %parent_block_id, "num_blocks_so_far" => num_blocks_so_far); @@ -1136,9 +1052,10 @@ impl ParentStacksBlockInfo { .is_some() { let Ok(Some(last_parent_tenure_header)) = - NakamotoChainState::get_nakamoto_tenure_finish_block_header( - chain_state.db(), - &parent_tenure_header.consensus_hash, + NakamotoChainState::get_highest_block_header_in_tenure( + &mut chain_state.index_conn(), + &stacks_tip_header.index_block_hash(), + &stacks_tip_header.consensus_hash, ) else { warn!("Failed loading last block of parent tenure"; "parent_tenure_id" => %parent_tenure_id); From dbd4c158750dec39f41196d25f4225ee3fdd32a4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:55:49 -0400 Subject: [PATCH 0418/1400] chore: API sync --- .../stacks-node/src/nakamoto_node/relayer.rs | 64 ++++++++++++------- 1 file changed, 41 insertions(+), 23 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 21a993e899..3d1fe39e96 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -412,16 +412,21 @@ impl RelayerThread { let sort_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?; - let parent_vrf_proof = - NakamotoChainState::get_block_vrf_proof(self.chainstate.db(), &target_ch) - .map_err(|_e| NakamotoNodeError::ParentNotFound)? - .unwrap_or_else(|| VRFProof::empty()); + let block_id = StacksBlockId::new(target_ch, target_bh); + let parent_vrf_proof = NakamotoChainState::get_block_vrf_proof( + &mut self.chainstate.index_conn(), + &block_id, + &target_ch, + ) + .map_err(|_e| NakamotoNodeError::ParentNotFound)? + .unwrap_or_else(|| VRFProof::empty()); // let's figure out the recipient set! let recipients = get_nakamoto_next_recipients( &sort_tip, &mut self.sortdb, &mut self.chainstate, + &block_id, &self.burnchain, ) .map_err(|e| { @@ -429,16 +434,19 @@ impl RelayerThread { NakamotoNodeError::SnapshotNotFoundForChainTip })?; - let block_header = - NakamotoChainState::get_block_header_by_consensus_hash(self.chainstate.db(), target_ch) - .map_err(|e| { - error!("Relayer: Failed to get block header for parent tenure: {e:?}"); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!("Relayer: Failed to find block header for parent tenure"); - NakamotoNodeError::ParentNotFound - })?; + let block_header = NakamotoChainState::get_tenure_start_block_header( + &mut self.chainstate.index_conn(), + &block_id, + &target_ch, + ) + .map_err(|e| { + error!("Relayer: Failed to get block header for parent tenure: {e:?}"); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!("Relayer: Failed to find block header for parent tenure"); + NakamotoNodeError::ParentNotFound + })?; let parent_block_id = block_header.index_block_hash(); if parent_block_id != StacksBlockId::new(target_ch, target_bh) { @@ -797,11 +805,8 @@ impl RelayerThread { #[cfg(test)] { if TEST_SKIP_COMMIT_OP.lock().unwrap().unwrap_or(false) { - //if let Some((last_committed, ..)) = self.last_committed.as_ref() { - // if last_committed.consensus_hash == last_committed_at.consensus_hash { warn!("Relayer: not submitting block-commit to bitcoin network due to test directive."); return Ok(()); - //} } } let mut op_signer = self.keychain.generate_op_signer(); @@ -883,19 +888,32 @@ impl RelayerThread { )); }; + debug!( + "Relayer: canonical block header is {}/{} ({})", + &chain_tip_header.consensus_hash, + &chain_tip_header.anchored_header.block_hash(), + &chain_tip_header.index_block_hash() + ); + // get the starting block of the chain tip's tenure - let Ok(Some(chain_tip_tenure_start)) = - NakamotoChainState::get_block_header_by_consensus_hash( - self.chainstate.db(), - &chain_tip_header.consensus_hash, - ) - else { + let Ok(Some(chain_tip_tenure_start)) = NakamotoChainState::get_tenure_start_block_header( + &mut self.chainstate.index_conn(), + &chain_tip_header.index_block_hash(), + &chain_tip_header.consensus_hash, + ) else { warn!("Failure getting the first block of tenure in order to assemble block commit"; "tenure_consensus_hash" => %chain_tip_header.consensus_hash, "tip_block_hash" => %chain_tip_header.anchored_header.block_hash()); return None; }; + debug!( + "Relayer: tenure-start block header is {}/{} ({})", + &chain_tip_tenure_start.consensus_hash, + &chain_tip_tenure_start.anchored_header.block_hash(), + &chain_tip_tenure_start.index_block_hash() + ); + let chain_tip_tenure_id = chain_tip_tenure_start.index_block_hash(); let should_commit = burnchain_changed || if let Some((_, last_committed_tenure_id)) = self.last_committed.as_ref() { From c4b2efc318c65ceaa638308558824bdee83941af Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:55:57 -0400 Subject: [PATCH 0419/1400] chore: API sync --- testnet/stacks-node/src/node.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 2c78b4c187..92a7809bce 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -890,10 +890,10 @@ impl Node { let mut cost_estimator = self.config.make_cost_estimator(); let mut fee_estimator = self.config.make_fee_estimator(); - let stacks_epoch = db - .index_conn() - .get_stacks_epoch_by_epoch_id(&processed_block.evaluated_epoch) - .expect("Could not find a stacks epoch."); + let stacks_epoch = + SortitionDB::get_stacks_epoch_by_epoch_id(db.conn(), &processed_block.evaluated_epoch) + .expect("FATAL: could not query sortition DB for epochs") + .expect("Could not find a stacks epoch."); if let Some(estimator) = cost_estimator.as_mut() { estimator.notify_block( &processed_block.tx_receipts, From 3c885f82d1131fa502bfbf4c53f70e39a5d3e445 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:56:14 -0400 Subject: [PATCH 0420/1400] WIP: follower_bootup work --- .../src/tests/nakamoto_integrations.rs | 94 ++++++++++++++++--- 1 file changed, 79 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 8570b0aeff..8428937d6e 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -87,7 +87,7 @@ use stacks_common::types::chainstate::{ use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::sleep_ms; +use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stacks_signer::chainstate::SortitionsView; use stacks_signer::signerdb::{BlockInfo, SignerDb}; use wsts::net::Message; @@ -100,8 +100,8 @@ use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ - call_read_only, get_account, get_chain_info_result, get_pox_info, next_block_and_wait, - run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, + call_read_only, get_account, get_account_result, get_chain_info_result, get_pox_info, + next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, }; use crate::tests::{ get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, @@ -386,6 +386,15 @@ pub fn read_and_sign_block_proposal( let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?.0; + let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); + let signer_sig_hash = proposed_block.header.signer_signature_hash(); + + if signed_blocks.contains(&signer_sig_hash) { + // already signed off on this block, don't sign again. + return Ok(signer_sig_hash); + } + let reward_set = load_nakamoto_reward_set( burnchain .pox_reward_cycle(tip.block_height.saturating_add(1)) @@ -393,6 +402,7 @@ pub fn read_and_sign_block_proposal( &tip.sortition_id, &burnchain, &mut chainstate, + &proposed_block.header.parent_block_id, &sortdb, &OnChainRewardSetProvider::new(), ) @@ -402,15 +412,6 @@ pub fn read_and_sign_block_proposal( .known_selected_anchor_block_owned() .expect("Expected a reward set"); - let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?.0; - let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); - let signer_sig_hash = proposed_block.header.signer_signature_hash(); - - if signed_blocks.contains(&signer_sig_hash) { - // already signed off on this block, don't sign again. - return Ok(signer_sig_hash); - } - info!( "Fetched proposed block from .miners StackerDB"; "proposed_block_hash" => &proposed_block_hash, @@ -2636,6 +2637,7 @@ fn follower_bootup() { follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.pox_sync_sample_secs = 30; let node_info = get_chain_info(&naka_conf); follower_conf.node.add_bootstrap_node( @@ -2671,12 +2673,19 @@ fn follower_bootup() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { + debug!("follower_bootup: Miner runs tenure {}", tenure_ix); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); let mut last_tip = BlockHeaderHash([0x00; 32]); let mut last_tip_height = 0; + let mut last_nonce = None; + + debug!( + "follower_bootup: Miner mines interum blocks for tenure {}", + tenure_ix + ); // mine the interim blocks for interim_block_ix in 0..inter_blocks_per_tenure { @@ -2685,12 +2694,54 @@ fn follower_bootup() { .expect("Mutex poisoned") .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; + let Ok(account) = get_account_result(&http_origin, &sender_addr) else { + thread::sleep(Duration::from_millis(100)); + continue; + }; + + let sender_nonce = account + .nonce + .max(last_nonce.as_ref().map(|ln| *ln + 1).unwrap_or(0)); let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); submit_tx(&http_origin, &transfer_tx); - loop { + last_nonce = Some(sender_nonce); + + let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap(); + + debug!("follower_bootup: Miner account: {:?}", &account); + debug!("follower_bootup: Miner sent {}: {:?}", &tx.txid(), &tx); + + let now = get_epoch_time_secs(); + while get_epoch_time_secs() < now + 10 { + let Ok(info) = get_chain_info_result(&naka_conf) else { + debug!("follower_bootup: Could not get miner chain info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + + let Ok(follower_info) = get_chain_info_result(&follower_conf) else { + debug!("follower_bootup: Could not get follower chain info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + + if follower_info.burn_block_height < info.burn_block_height { + debug!("follower_bootup: Follower is behind miner's burnchain view"); + thread::sleep(Duration::from_millis(100)); + continue; + } + + if info.stacks_tip == last_tip { + debug!( + "follower_bootup: Miner stacks tip hasn't changed ({})", + &info.stacks_tip + ); + thread::sleep(Duration::from_millis(100)); + continue; + } + let blocks_processed = coord_channel .lock() .expect("Mutex poisoned") @@ -2698,17 +2749,29 @@ fn follower_bootup() { if blocks_processed > blocks_processed_before { break; } + + debug!("follower_bootup: No blocks processed yet"); thread::sleep(Duration::from_millis(100)); } - let info = get_chain_info_result(&naka_conf).unwrap(); + debug!("follower_bootup: Follower advanced to miner tip"); + + let Ok(info) = get_chain_info_result(&naka_conf) else { + debug!("follower_bootup: failed to load tip info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + + /* assert_ne!(info.stacks_tip, last_tip); assert_ne!(info.stacks_tip_height, last_tip_height); + */ last_tip = info.stacks_tip; last_tip_height = info.stacks_tip_height; } + debug!("follower_bootup: Wait for next block-commit"); let start_time = Instant::now(); while commits_submitted.load(Ordering::SeqCst) <= commits_before { if start_time.elapsed() >= Duration::from_secs(20) { @@ -2716,6 +2779,7 @@ fn follower_bootup() { } thread::sleep(Duration::from_millis(100)); } + debug!("follower_bootup: Block commit submitted"); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 From f97502fab6dcfa126de88ef2e5d75710d046d196 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 28 Jun 2024 22:56:29 -0400 Subject: [PATCH 0421/1400] feat: get_account_result() helper method --- .../src/tests/neon_integrations.rs | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index fb1535685a..b083c4787a 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -827,7 +827,7 @@ pub fn get_chain_info_result(conf: &Config) -> Result() + client.get(&path).send()?.json::() } pub fn get_chain_info_opt(conf: &Config) -> Option { @@ -1265,21 +1265,23 @@ pub struct Account { pub nonce: u64, } -pub fn get_account(http_origin: &str, account: &F) -> Account { +pub fn get_account_result( + http_origin: &str, + account: &F, +) -> Result { let client = reqwest::blocking::Client::new(); let path = format!("{}/v2/accounts/{}?proof=0", http_origin, account); - let res = client - .get(&path) - .send() - .unwrap() - .json::() - .unwrap(); + let res = client.get(&path).send()?.json::()?; info!("Account response: {:#?}", res); - Account { + Ok(Account { balance: u128::from_str_radix(&res.balance[2..], 16).unwrap(), locked: u128::from_str_radix(&res.locked[2..], 16).unwrap(), nonce: res.nonce, - } + }) +} + +pub fn get_account(http_origin: &str, account: &F) -> Account { + get_account_result(http_origin, account).unwrap() } pub fn get_pox_info(http_origin: &str) -> Option { From c5c223dac9d4714948c10c630b0a617bdc2cd13d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 1 Jul 2024 11:36:20 -0500 Subject: [PATCH 0422/1400] chore: cargo fmt-stacks --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6273db50cb..c73812acaf 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -38,9 +38,9 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; -use crate::tests::{self, make_stacks_transfer}; use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; use crate::tests::neon_integrations::{get_chain_info, next_block_and_wait, submit_tx}; +use crate::tests::{self, make_stacks_transfer}; use crate::{nakamoto_node, BurnchainController}; impl SignerTest { From c687791d92ca4f2fe0fa62e5818f5d8f472fa9e7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 1 Jul 2024 13:38:37 -0500 Subject: [PATCH 0423/1400] test: v0 signature assert fixes * in V0, the miner can assemble a block once it crosses the threshold, without all signers. the existing assertion would cause CI to flap. --- testnet/stacks-node/src/tests/signer/v0.rs | 37 +++++++++++++++------- 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c1d6169f7d..bdf1b4c33f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -128,31 +128,44 @@ impl SignerTest { // Verify that the signers signed the proposed block let signature = self.wait_for_confirmed_block_v0(&proposed_signer_signature_hash, timeout); + // NOTE: signature.len() does not need to equal signers.len(); the stacks miner can finish the block + // whenever it has crossed the threshold. info!("Got {} signatures", signature.len()); - assert_eq!(signature.len(), num_signers); - let reward_cycle = self.get_current_reward_cycle(); let signers = self.get_reward_set_signers(reward_cycle); // Verify that the signers signed the proposed block - let all_signed = signers.iter().zip(signature).all(|(signer, signature)| { + let mut signer_index = 0; + let mut signature_index = 0; + let validated = loop { + let Some(signature) = signature.get(signature_index) else { + break true; + }; + let Some(signer) = signers.get(signer_index) else { + error!("Failed to validate the mined nakamoto block: ran out of signers to try to validate signatures"); + break false; + }; let stacks_public_key = Secp256k1PublicKey::from_slice(signer.signing_key.as_slice()) .expect("Failed to convert signing key to StacksPublicKey"); - - // let valid = stacks_public_key.verify(message, signature); let valid = stacks_public_key - .verify(&message, &signature) + .verify(&message, signature) .expect("Failed to verify signature"); if !valid { - error!( - "Failed to verify signature for signer: {:?}", - stacks_public_key + info!( + "Failed to verify signature for signer, will attempt to validate without this signer"; + "signer_pk" => stacks_public_key.to_hex(), + "signer_index" => signer_index, + "signature_index" => signature_index, ); + signer_index += 1; + } else { + signer_index += 1; + signature_index += 1; } - valid - }); - assert!(all_signed); + }; + + assert!(validated); } // Only call after already past the epoch 3.0 boundary From 9f13a1da505ab99273d31131b81046c7fb15f764 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 1 Jul 2024 15:03:51 -0400 Subject: [PATCH 0424/1400] Fix number of signatures to check against in mine 2 nakamoto tenures Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index bdf1b4c33f..04c1b1a8a8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -128,9 +128,11 @@ impl SignerTest { // Verify that the signers signed the proposed block let signature = self.wait_for_confirmed_block_v0(&proposed_signer_signature_hash, timeout); + info!("Got {} signatures", signature.len()); + // NOTE: signature.len() does not need to equal signers.len(); the stacks miner can finish the block // whenever it has crossed the threshold. - info!("Got {} signatures", signature.len()); + assert!(signature.len() >= num_signers / 7 * 10); let reward_cycle = self.get_current_reward_cycle(); let signers = self.get_reward_set_signers(reward_cycle); From f2a2bbf6bedbe0299baee427891c3cd8d15250be Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:11:42 -0400 Subject: [PATCH 0425/1400] fix: #4938 --- stackslib/src/net/api/getsortition.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 5df67e3636..d7600a7c4f 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -214,7 +214,9 @@ impl RPCRequestHandler for GetSortitionHandler { .ok_or_else(|| ChainError::NoSuchBlockError)?; let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = if !sortition_sn.sortition { - (None, None, None, None) + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; + (None, None, None, Some(last_sortition.consensus_hash)) } else { let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? .ok_or_else(|| { From 09b07b1a1369abab8e73a7b62db93ac729de57d1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:11:42 -0400 Subject: [PATCH 0426/1400] fix: #4938 --- stackslib/src/net/api/getsortition.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 5df67e3636..d7600a7c4f 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -214,7 +214,9 @@ impl RPCRequestHandler for GetSortitionHandler { .ok_or_else(|| ChainError::NoSuchBlockError)?; let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = if !sortition_sn.sortition { - (None, None, None, None) + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; + (None, None, None, Some(last_sortition.consensus_hash)) } else { let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? .ok_or_else(|| { From 434b3188cc031453784b11c65d1a4f4f288eddd9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:45:12 -0400 Subject: [PATCH 0427/1400] chore: fmt --- stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 8ff14f4146..2630a377b3 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -127,6 +127,7 @@ fn advance_to_nakamoto( ); let signing_key = StacksPublicKey::from_private(&test_stacker.signer_private_key); + make_pox_4_lockup( &test_stacker.stacker_private_key, 0, From 8393e1df0af2bfe519da2ece0ebaade87623fe42 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:45:48 -0400 Subject: [PATCH 0428/1400] refactor: expose get_nakamoto_tenure_vrf_proof --- stackslib/src/chainstate/nakamoto/mod.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index f281e71795..424005db67 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2717,7 +2717,8 @@ impl NakamotoChainState { Self::get_block_header_nakamoto(chainstate_conn.sqlite(), &block_id) } - /// Get the highest block in the given tenure + /// Get the highest block in the given tenure. + /// Only works on Nakamoto blocks. /// TODO: unit test pub fn get_highest_block_header_in_tenure( chainstate_conn: &mut STH, @@ -2866,9 +2867,10 @@ impl NakamotoChainState { } /// Get the VRF proof for a Nakamoto block, if it exists. + /// This must be the tenure-start Nakamoto block ID /// Returns None if the Nakamoto block's VRF proof is not found (e.g. because there is no /// Nakamoto block, or becuase this isn't a tenure-start block) - pub(crate) fn get_nakamoto_tenure_vrf_proof( + pub fn get_nakamoto_tenure_vrf_proof( chainstate_conn: &Connection, tenure_start_block_id: &StacksBlockId, ) -> Result, ChainstateError> { @@ -3787,6 +3789,7 @@ impl NakamotoChainState { let new_tenure = block.is_wellformed_tenure_start_block().map_err(|_| { ChainstateError::InvalidStacksBlock("Invalid tenure changes in nakamoto block".into()) })?; + // this block is mined in the ongoing tenure. if !new_tenure && !Self::check_tenure_continuity(chainstate_tx.as_tx(), &parent_ch, &block.header)? From 7b0c8f346e6ab03cda47e7345028a6839fef1a57 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:46:04 -0400 Subject: [PATCH 0429/1400] docs: get_ongoing_tenure returns None for epoch2x blocks --- stackslib/src/chainstate/nakamoto/tenure.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index cdb3511737..454e54988c 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -539,6 +539,7 @@ impl NakamotoChainState { /// Get the tenure-change most recently processed in the history tipped by the given block. /// This can be a block-found or an extended tenure change. + /// Returns None if this tip is an epoch2x block ID pub fn get_ongoing_tenure( headers_conn: &mut STH, tip_block_id: &StacksBlockId, From 64ab7b35be2c18a7292888733220937500053eae Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:46:42 -0400 Subject: [PATCH 0430/1400] chore: add test coverage for get_highest_block_header_in_tenure() --- .../src/chainstate/nakamoto/tests/node.rs | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index d1c294bf41..964779e079 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -1611,6 +1611,7 @@ impl<'a> TestPeer<'a> { /// * get_coinbase_height /// * get_tenure_start_block_header /// * get_nakamoto_tenure_start_block_header + /// * get_highest_block_header_in_tenure /// * get_block_vrf_proof /// * get_nakamoto_tenure_vrf_proof /// * get_parent_vrf_proof @@ -1714,6 +1715,35 @@ impl<'a> TestPeer<'a> { ); } + // get highest block header in tenure + if tenure_start_header + .anchored_header + .as_stacks_nakamoto() + .is_some() + { + assert_eq!( + &block.header, + NakamotoChainState::get_highest_block_header_in_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash + ) + .unwrap() + .unwrap() + .anchored_header + .as_stacks_nakamoto() + .unwrap() + ) + } else { + assert!(NakamotoChainState::get_highest_block_header_in_tenure( + &mut chainstate.index_conn(), + &block.block_id(), + &block.header.consensus_hash + ) + .unwrap() + .is_none()) + } + // get_block_vrf_proof // Verify that a VRF proof is defined for each tenure let Ok(Some(vrf_proof)) = NakamotoChainState::get_block_vrf_proof( From b2fb824270e09f787fffdd095394277d28679f6c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:46:58 -0400 Subject: [PATCH 0431/1400] chore: remove signers voting tests (since this code won't be used) --- stackslib/src/chainstate/stacks/boot/mod.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 18706743e9..0f45d7a6d0 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -1389,8 +1389,6 @@ pub mod pox_3_tests; pub mod pox_4_tests; #[cfg(test)] pub mod signers_tests; -#[cfg(test)] -pub mod signers_voting_tests; #[cfg(test)] pub mod test { From ad12117809e3e402aa3a99dd10a03f2e1cf92de6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:47:15 -0400 Subject: [PATCH 0432/1400] chore: remove nonexistant imports --- stackslib/src/chainstate/stacks/boot/pox_4_tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 663c6747b7..ac59772f32 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -64,7 +64,6 @@ use crate::chainstate::stacks::boot::pox_2_tests::{ use crate::chainstate::stacks::boot::signers_tests::{ get_signer_index, prepare_signers_test, readonly_call, }; -use crate::chainstate::stacks::boot::signers_voting_tests::{make_dummy_tx, nakamoto_tenure}; use crate::chainstate::stacks::boot::{ PoxVersions, BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, MINERS_NAME, POX_2_NAME, POX_3_NAME, From fa71e6203bc4fb89c86222a9a49ee97747c77dc7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:47:26 -0400 Subject: [PATCH 0433/1400] chore: these tests no longer test functionality that will be used --- .../stacks/boot/signers_voting_tests.rs | 2218 ----------------- 1 file changed, 2218 deletions(-) delete mode 100644 stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs diff --git a/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs deleted file mode 100644 index 9a97e88b5e..0000000000 --- a/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs +++ /dev/null @@ -1,2218 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{HashMap, HashSet, VecDeque}; - -use clarity::boot_util::boot_code_addr; -use clarity::vm::clarity::ClarityConnection; -use clarity::vm::contexts::OwnedEnvironment; -use clarity::vm::contracts::Contract; -use clarity::vm::costs::{CostOverflowingMath, LimitedCostTracker}; -use clarity::vm::database::*; -use clarity::vm::errors::{ - CheckErrors, Error, IncomparableError, InterpreterError, InterpreterResult, RuntimeErrorType, -}; -use clarity::vm::eval; -use clarity::vm::events::StacksTransactionEvent; -use clarity::vm::representations::SymbolicExpression; -use clarity::vm::tests::{execute, is_committed, is_err_code, symbols_from_values}; -use clarity::vm::types::{ - BuffData, OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, SequenceData, - StacksAddressExtensions, StandardPrincipalData, TupleData, TupleTypeSignature, TypeSignature, - Value, NONE, -}; -use stacks_common::address::AddressHashMode; -use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, VRFSeed, -}; -use stacks_common::types::{Address, PrivateKey}; -use stacks_common::util::hash::{hex_bytes, to_hex, Sha256Sum, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::Secp256k1PrivateKey; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; - -use super::test::*; -use super::RawRewardSetEntry; -use crate::burnchains::{Burnchain, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{self, SortitionDB}; -use crate::chainstate::burn::operations::*; -use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; -use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; -use crate::chainstate::nakamoto::test_signers::TestSigners; -use crate::chainstate::nakamoto::tests::get_account; -use crate::chainstate::nakamoto::tests::node::TestStacker; -use crate::chainstate::nakamoto::NakamotoBlock; -use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; -use crate::chainstate::stacks::boot::pox_2_tests::{ - check_pox_print_event, generate_pox_clarity_value, get_reward_set_entries_at, - get_stacking_state_pox, get_stx_account_at, with_clarity_db_ro, PoxPrintFields, - StackingStateCheckData, -}; -use crate::chainstate::stacks::boot::pox_4_tests::{ - assert_latest_was_burn, get_last_block_sender_transactions, get_tip, make_test_epochs_pox, -}; -use crate::chainstate::stacks::boot::signers_tests::{ - get_signer_index, prepare_signers_test, readonly_call, -}; -use crate::chainstate::stacks::boot::{ - BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, SIGNERS_NAME, - SIGNERS_VOTING_NAME, -}; -use crate::chainstate::stacks::db::{ - MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, -}; -use crate::chainstate::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; -use crate::chainstate::stacks::index::marf::MarfConnection; -use crate::chainstate::stacks::index::MarfTrieId; -use crate::chainstate::stacks::tests::make_coinbase; -use crate::chainstate::stacks::*; -use crate::chainstate::{self}; -use crate::clarity_vm::clarity::{ClarityBlockConnection, Error as ClarityError}; -use crate::clarity_vm::database::marf::{MarfedKV, WritableMarfStore}; -use crate::clarity_vm::database::HeadersDBConn; -use crate::core::*; -use crate::net::test::{TestEventObserver, TestPeer}; -use crate::util_lib::boot::boot_code_id; -use crate::util_lib::db::{DBConn, FromRow}; - -pub fn prepare_pox4_test<'a>( - test_name: &str, - observer: Option<&'a TestEventObserver>, -) -> ( - Burnchain, - TestPeer<'a>, - Vec, - StacksBlockId, - u64, - usize, -) { - let (epochs, pox_constants) = make_test_epochs_pox(); - - let mut burnchain = Burnchain::default_unittest( - 0, - &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), - ); - burnchain.pox_constants = pox_constants.clone(); - - let (mut peer, keys) = - instantiate_pox_peer_with_epoch(&burnchain, test_name, Some(epochs.clone()), observer); - - assert_eq!(burnchain.pox_constants.reward_slots(), 6); - let mut coinbase_nonce = 0; - - // Advance into pox4 - let target_height = burnchain.pox_constants.pox_4_activation_height; - let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { - assert_latest_was_burn(&mut peer); - } - } - - let block_height = get_tip(peer.sortdb.as_ref()).block_height; - - info!("Block height: {}", block_height); - - ( - burnchain, - peer, - keys, - latest_block, - block_height, - coinbase_nonce, - ) -} - -/// In this test case, Alice & Bob both successfully vote for the same key. -/// Alice votes successfully, then Bob votes successfully, reaching the -/// threshold and setting the aggregate public key. -#[test] -fn vote_for_aggregate_public_key_success() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Alice and Bob will each have voted once while booting to Nakamoto - let alice_nonce = 1; - let bob_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); - let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - - let aggregate_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let aggregate_public_key_ill_formed = Value::buff_from_byte(0x00); - - let txs = vec![ - // Alice casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Bob casts a vote correctly - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - ]; - - // - // vote in the first burn block of prepare phase - // - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - // Alice's vote should succeed - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - // Bob's vote should succeed and reach the threshold, setting the aggregate public key - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 2); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(4)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - let approve_event = &bob_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } -} - -/// In this test case, Alice & Bob both successfully vote for the same key, -/// but also trigger all tenure-agnostic errors. -#[test] -fn vote_for_aggregate_public_key_with_errors() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Alice and Bob will each have voted once while booting to Nakamoto - let alice_nonce = 1; - let bob_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); - let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - - let aggregate_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let aggregate_public_key_ill_formed = Value::buff_from_byte(0x00); - - let txs = vec![ - // Alice casts a vote with a non-existant index - should return signer index mismatch error - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Alice casts a vote with Bobs index - should return invalid signer index error - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 1, - 2, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Alice casts a vote with an invalid public key - should return ill-formed public key error - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 2, - alice_index, - aggregate_public_key_ill_formed, - 0, - cycle_id + 1, - ), - // Alice casts a vote with an incorrect reward cycle - should return cycle not set error - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 3, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 2, - ), - // Alice casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 4, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Alice casts vote twice - should return duplicate vote error - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 5, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Bob casts a vote with the wrong round - should return an invalid round error - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce, - bob_index, - aggregate_public_key.clone(), - 2, - cycle_id + 1, - ), - // Bob casts a vote correctly - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 1, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - ]; - - // - // vote in the first burn block of prepare phase - // - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last eight txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 10); - // ignore tenure change tx - // ignore tenure coinbase tx - - // Alice's first vote should fail (signer mismatch) - let alice_first_vote_tx = &receipts[2]; - let alice_first_vote_tx_result = alice_first_vote_tx.result.clone(); - assert_eq!( - alice_first_vote_tx_result, - Value::err_uint(10) // ERR_SIGNER_INDEX_MISMATCH - ); - assert_eq!(alice_first_vote_tx.events.len(), 0); - - // Alice's second vote should fail (invalid signer) - let alice_second_vote_tx = &receipts[3]; - let alice_second_vote_tx_result = alice_second_vote_tx.result.clone(); - assert_eq!( - alice_second_vote_tx_result, - Value::err_uint(11) // ERR_INVALID_SIGNER_INDEX - ); - assert_eq!(alice_second_vote_tx.events.len(), 0); - - // Alice's third vote should fail (ill formed aggregate public key) - let alice_third_vote_tx = &receipts[4]; - let alice_third_vote_tx_result = alice_third_vote_tx.result.clone(); - assert_eq!( - alice_third_vote_tx_result, - Value::err_uint(13) // ERR_ILL_FORMED_AGGREGATE_PUBLIC_KEY - ); - assert_eq!(alice_third_vote_tx.events.len(), 0); - - // Alice's fourth vote should fail (cycle not set) - let alice_fourth_vote_tx = &receipts[5]; - let alice_fourth_vote_tx_result = alice_fourth_vote_tx.result.clone(); - assert_eq!( - alice_fourth_vote_tx_result, - Value::err_uint(2) // ERR_CYCLE_NOT_SET - ); - assert_eq!(alice_fourth_vote_tx.events.len(), 0); - - // Alice's fifth vote, correct vote should succeed - let alice_fifth_vote_tx = &receipts[6]; - assert_eq!(alice_fifth_vote_tx.result, Value::okay_true()); - assert_eq!(alice_fifth_vote_tx.events.len(), 1); - let alice_vote_event = &alice_fifth_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - // Alice's sixth vote should fail (duplicate vote) - let alice_sixth_vote_tx = &receipts[7]; - let alice_sixth_vote_tx_result = alice_sixth_vote_tx.result.clone(); - assert_eq!( - alice_sixth_vote_tx_result, - Value::err_uint(15) // ERR_DUPLICATE_VOTE - ); - assert_eq!(alice_sixth_vote_tx.events.len(), 0); - - // Bob's first vote should fail (invalid round) - let bob_first_vote_tx = &receipts[8]; - let bob_first_vote_tx_result = bob_first_vote_tx.result.clone(); - assert_eq!( - bob_first_vote_tx_result, - Value::err_uint(17) // ERR_INVALID_ROUND - ); - assert_eq!(bob_first_vote_tx.events.len(), 0); - - // Bob's second vote should succeed and reach the threshold, setting the aggregate public key - let bob_second_vote_tx = &receipts[9]; - assert_eq!(bob_second_vote_tx.result, Value::okay_true()); - assert_eq!(bob_second_vote_tx.events.len(), 2); - let bob_vote_event = &bob_second_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(4)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - let approve_event = &bob_second_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } -} - -/// 4 stackers vote for the same aggregate public key. The threshold is reached -/// after the 3rd vote, so the 4th gets an "out of voting window" error. -#[test] -fn vote_for_aggregate_public_key_out_of_window() { - // Test setup - let stacker1 = TestStacker::from_seed(&[3, 4]); - let stacker2 = TestStacker::from_seed(&[5, 6]); - let stacker3 = TestStacker::from_seed(&[7, 8]); - let stacker4 = TestStacker::from_seed(&[9, 10]); - let observer = TestEventObserver::new(); - - // Signer 1 - let stacker1_key = &stacker1.signer_private_key; - let stacker1_address = key_to_stacks_addr(stacker1_key); - let stacker1_principal = PrincipalData::from(stacker1_address); - - // Signer 2 - let stacker2_key = &stacker2.signer_private_key; - let stacker2_address = key_to_stacks_addr(stacker2_key); - let stacker2_principal = PrincipalData::from(stacker2_address); - - // Signer 3 - let stacker3_key = &stacker3.signer_private_key; - let stacker3_address = key_to_stacks_addr(stacker3_key); - let stacker3_principal = PrincipalData::from(stacker3_address); - - // Signer 4 - let stacker4_key = &stacker4.signer_private_key; - let stacker4_address = key_to_stacks_addr(stacker4_key); - let stacker4_principal = PrincipalData::from(stacker4_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (stacker1_principal.clone(), 1000), - (stacker2_principal.clone(), 1000), - (stacker3_principal.clone(), 1000), - (stacker4_principal.clone(), 1000), - ], - &[ - stacker1.clone(), - stacker2.clone(), - stacker3.clone(), - stacker4.clone(), - ], - Some(&observer), - ); - - // Stackers will each have voted once while booting to Nakamoto - let stacker1_nonce = 1; - let stacker2_nonce = 1; - let stacker3_nonce = 1; - let stacker4_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let stacker1_index = get_signer_index(&mut peer, latest_block_id, stacker1_address, cycle_id); - let stacker2_index = get_signer_index(&mut peer, latest_block_id, stacker2_address, cycle_id); - let stacker3_index = get_signer_index(&mut peer, latest_block_id, stacker3_address, cycle_id); - let stacker4_index = get_signer_index(&mut peer, latest_block_id, stacker4_address, cycle_id); - - let aggregate_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // stacker1 casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - stacker1_key, - stacker1_nonce, - stacker1_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // stacker2 casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - stacker2_key, - stacker2_nonce, - stacker2_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // stacker3 casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - stacker3_key, - stacker3_nonce, - stacker3_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // stacker4 casts vote correctly, but it will return an out of voting window error - make_signers_vote_for_aggregate_public_key_value( - stacker4_key, - stacker4_nonce, - stacker4_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - ]; - - // - // vote in the first burn block of prepare phase - // - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 6); - // ignore tenure change tx - // ignore tenure coinbase tx - - // stacker1's vote should succeed - let stacker1_vote_tx = &receipts[2]; - assert_eq!(stacker1_vote_tx.result, Value::okay_true()); - assert_eq!(stacker1_vote_tx.events.len(), 1); - let stacker1_vote_event = &stacker1_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = stacker1_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(1)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ( - "signer".into(), - Value::Principal(stacker1_principal.clone()) - ), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", stacker1_vote_event); - } - - // stacker2's vote should succeed - let stacker2_vote_tx = &receipts[3]; - assert_eq!(stacker2_vote_tx.result, Value::okay_true()); - assert_eq!(stacker2_vote_tx.events.len(), 1); - let stacker2_vote_event = &stacker2_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = stacker2_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ( - "signer".into(), - Value::Principal(stacker2_principal.clone()) - ), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", stacker2_vote_event); - } - - // stacker3's vote should succeed - let stacker3_vote_tx = &receipts[4]; - assert_eq!(stacker3_vote_tx.result, Value::okay_true()); - assert_eq!(stacker3_vote_tx.events.len(), 2); - let stacker3_vote_event = &stacker3_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = stacker3_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(3)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ( - "signer".into(), - Value::Principal(stacker3_principal.clone()) - ), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", stacker3_vote_event); - } - let approve_event = &stacker3_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } - - // stacker4's vote should get an out of voting window error - let stacker4_vote_tx = &receipts[5]; - assert_eq!( - stacker4_vote_tx.result, - Value::err_uint(12) // ERR_OUT_OF_VOTING_WINDOW - ); - assert_eq!(stacker4_vote_tx.events.len(), 0); -} - -/// In this test case, Alice votes in the first block of the first tenure of the prepare phase. -/// Alice can vote successfully. -/// A second vote on the same key and round fails with "duplicate vote" error -#[test] -fn vote_for_aggregate_public_key_in_first_block() { - let stacker_1 = TestStacker::from_seed(&[3, 4]); - let stacker_2 = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - let signer = key_to_stacks_addr(&stacker_1.signer_private_key).to_account_principal(); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![(signer, 1000)], - &[stacker_1.clone(), stacker_2.clone()], - Some(&observer), - ); - - // create vote txs - let signer_nonce = 1; // Start at 1 because the signer has already voted once - let signer_key = &stacker_1.signer_private_key; - let signer_address = key_to_stacks_addr(signer_key); - let signer_principal = PrincipalData::from(signer_address); - let cycle_id = current_reward_cycle; - - let signer_index = get_signer_index(&mut peer, latest_block_id, signer_address, cycle_id); - - let aggregate_public_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); - - let txs = vec![ - // cast a vote for the aggregate public key - make_signers_vote_for_aggregate_public_key( - signer_key, - signer_nonce, - signer_index, - &aggregate_public_key, - 0, - cycle_id + 1, - ), - // cast the vote twice - make_signers_vote_for_aggregate_public_key( - signer_key, - signer_nonce + 1, - signer_index, - &aggregate_public_key, - 0, - cycle_id + 1, - ), - ]; - - // - // vote in the first burn block of prepare phase - // - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - // first vote should succeed - let alice_first_vote_tx = &receipts[2]; - assert_eq!(alice_first_vote_tx.result, Value::okay_true()); - - // second vote should fail with duplicate vote error - let alice_second_vote_tx = &receipts[3]; - assert_eq!( - alice_second_vote_tx.result, - Value::err_uint(15) // ERR_DUPLICATE_VOTE - ); - assert_eq!(alice_second_vote_tx.events.len(), 0); -} - -/// In this test case, Alice votes in the first block of the last tenure of the prepare phase. -/// Bob votes in the second block of that tenure. -/// Both can vote successfully. -#[test] -fn vote_for_aggregate_public_key_in_last_block() { - let stacker_1 = TestStacker::from_seed(&[3, 4]); - let stacker_2 = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - let signer_1 = key_to_stacks_addr(&stacker_1.signer_private_key).to_account_principal(); - let signer_2 = key_to_stacks_addr(&stacker_2.signer_private_key).to_account_principal(); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![(signer_1, 1000), (signer_2, 1000)], - &[stacker_1.clone(), stacker_2.clone()], - Some(&observer), - ); - - let mut stacker_1_nonce: u64 = 1; - let dummy_tx_1 = make_dummy_tx( - &mut peer, - &stacker_1.stacker_private_key, - &mut stacker_1_nonce, - ); - let dummy_tx_2 = make_dummy_tx( - &mut peer, - &stacker_1.stacker_private_key, - &mut stacker_1_nonce, - ); - let dummy_tx_3 = make_dummy_tx( - &mut peer, - &stacker_1.stacker_private_key, - &mut stacker_1_nonce, - ); - - let cycle_id: u128 = current_reward_cycle; - let aggregate_public_key_1 = test_signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key_2 = test_signers.generate_aggregate_key(cycle_id as u64 + 2); - - // create vote txs for alice - let signer_1_nonce = 1; // Start at 1 because the signer has already voted once - let signer_1_key = &stacker_1.signer_private_key; - let signer_1_address = key_to_stacks_addr(signer_1_key); - let signer_1_principal = PrincipalData::from(signer_1_address); - let signer_1_index = get_signer_index(&mut peer, latest_block_id, signer_1_address, cycle_id); - - let txs_block_1 = vec![ - // cast a vote for the aggregate public key - make_signers_vote_for_aggregate_public_key( - signer_1_key, - signer_1_nonce, - signer_1_index, - &aggregate_public_key_1, - 1, - cycle_id + 1, - ), - // cast the vote twice - make_signers_vote_for_aggregate_public_key( - signer_1_key, - signer_1_nonce + 1, - signer_1_index, - &aggregate_public_key_1, - 1, - cycle_id + 1, - ), - // cast a vote for old round - make_signers_vote_for_aggregate_public_key( - signer_1_key, - signer_1_nonce + 2, - signer_1_index, - &aggregate_public_key_2, - 0, - cycle_id + 1, - ), - ]; - - // create vote txs for bob - let signer_2_nonce = 1; // Start at 1 because the signer has already voted once - let signer_2_key = &stacker_2.signer_private_key; - let signer_2_address = key_to_stacks_addr(signer_2_key); - let signer_2_principal = PrincipalData::from(signer_2_address); - let signer_2_index = get_signer_index(&mut peer, latest_block_id, signer_2_address, cycle_id); - - let txs_block_2 = vec![ - // cast a vote for the aggregate public key - make_signers_vote_for_aggregate_public_key( - signer_2_key, - signer_2_nonce, - signer_2_index, - &aggregate_public_key_1, - 0, - cycle_id + 1, - ), - ]; - - // - // vote in the last burn block of prepare phase - // - - nakamoto_tenure(&mut peer, &mut test_signers, vec![vec![dummy_tx_1]]); - - // alice votes in first block of tenure - // bob votes in second block of tenure - let blocks_and_sizes = - nakamoto_tenure(&mut peer, &mut test_signers, vec![txs_block_1, txs_block_2]); - - // check alice's and bob's txs - let blocks = observer.get_blocks(); - - // alice's block - let block = &blocks[blocks.len() - 2].clone(); - let receipts = &block.receipts; - assert_eq!(receipts.len(), 5); - - // first vote should succeed - let alice_first_vote_tx = &receipts[2]; - assert_eq!(alice_first_vote_tx.result, Value::okay_true()); - - // second vote should fail with duplicate vote error - let alice_second_vote_tx = &receipts[3]; - assert_eq!( - alice_second_vote_tx.result, - Value::err_uint(15) // ERR_DUPLICATE_VOTE - ); - assert_eq!(alice_second_vote_tx.events.len(), 0); - - // third vote should succeed even though it is on an old round - let alice_third_vote_tx = &receipts[4]; - assert_eq!(alice_third_vote_tx.result, Value::okay_true()); - - // bob's block - let block = blocks.last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 1); - - // bob's vote should succeed - let tx1_bob = &receipts[0]; - assert_eq!(tx1_bob.result, Value::okay_true()); -} - -/// In this test case, Alice & Bob both successfully vote in cycle N, then -/// Alice tries to vote for the same signature in cycle N+1, but fails with -/// "duplicate aggregate public key" error. -#[test] -fn vote_for_duplicate_aggregate_public_key() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Alice and Bob will each have voted once while booting to Nakamoto - let alice_nonce = 1; - let bob_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); - let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - - let aggregate_public_key_point = test_signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key = - Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // Alice casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Bob casts a vote correctly - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - ]; - - // vote in the first burn block of prepare phase - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - // Both votes should succeed - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - - // Proceed to the next prepare phase - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - - let aggregate_public_key_point = test_signers.generate_aggregate_key(cycle_id as u64 + 2); - let aggregate_public_key_2 = - Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // Alice casts vote for the same key as the last cycle - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 1, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 2, - ), - // Alice casts vote for a new key - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 2, - alice_index, - aggregate_public_key_2.clone(), - 0, - cycle_id + 2, - ), - // Bob casts vote for the same key - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 1, - bob_index, - aggregate_public_key_2.clone(), - 0, - cycle_id + 2, - ), - ]; - - // Submit the vote in a new block - nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // Check the last 3 tx in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 5); - - // Alice's vote should fail with duplicate aggregate public key error - let alice_vote_tx = &receipts[2]; - assert_eq!( - alice_vote_tx.result, - Value::err_uint(14) // ERR_DUPLICATE_AGGREGATE_PUBLIC_KEY - ); - assert_eq!(alice_vote_tx.events.len(), 0); - - // Both remaining votes should succeed - let alice_vote_tx = &receipts[3]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - let bob_vote_tx = &receipts[4]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); -} - -/// In this test case, Alice & Bob both successfully vote in cycle N, but for -/// different keys. Then in round 1, they both vote for the same key and -/// key selection is successful. In the first cycle, these two rounds happen -/// in the same tenure. In the second cycle, the first round happens in the -/// first tenure of the prepare phase, and the second round happens in the -/// second tenure of the prepare phase. -#[test] -fn vote_for_aggregate_public_key_two_rounds() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Alice and Bob will each have voted once while booting to Nakamoto - let alice_nonce = 1; - let bob_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); - let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - - let aggregate_public_key_0_point = test_signers.generate_aggregate_key(0); - let aggregate_public_key_0 = - Value::buff_from(aggregate_public_key_0_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - let aggregate_public_key_1_point = test_signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key_1 = - Value::buff_from(aggregate_public_key_1_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // Alice casts vote for key 0 in round 0 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce, - alice_index, - aggregate_public_key_0.clone(), - 0, - cycle_id + 1, - ), - // Bob casts a vote for key 1 in round 0 - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce, - bob_index, - aggregate_public_key_1.clone(), - 0, - cycle_id + 1, - ), - // Alice casts vote for key 1 in round 1 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 1, - alice_index, - aggregate_public_key_1.clone(), - 1, - cycle_id + 1, - ), - // Bob casts a vote for key 1 in round 1 - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 1, - bob_index, - aggregate_public_key_1.clone(), - 1, - cycle_id + 1, - ), - ]; - - // vote in the first burn block of prepare phase - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last four txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 6); - // ignore tenure change tx - // ignore tenure coinbase tx - - // All votes should succeed - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_0.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 1); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - let alice_vote_tx = &receipts[4]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(1)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - let bob_vote_tx = &receipts[5]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 2); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("new-total".into(), Value::UInt(4)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(1)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - // The aggregate key is approved in round 1 - let approve_event = &bob_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(1)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } - - // Proceed to the next prepare phase - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - - // In this cycle, the two rounds are in separate tenures. - - let aggregate_public_key_0_point = test_signers.generate_aggregate_key(1); - let aggregate_public_key_0 = - Value::buff_from(aggregate_public_key_0_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - let aggregate_public_key_1_point = test_signers.generate_aggregate_key(cycle_id as u64 + 2); - let aggregate_public_key_1 = - Value::buff_from(aggregate_public_key_1_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // Alice casts vote for key 0 in round 0 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 2, - alice_index, - aggregate_public_key_0.clone(), - 0, - cycle_id + 2, - ), - // Bob casts a vote for key 1 in round 0 - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 2, - bob_index, - aggregate_public_key_1.clone(), - 0, - cycle_id + 2, - ), - ]; - - // vote in the first burn block of prepare phase - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - // Both votes should succeed, but the aggregate key is not approved yet - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_0.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 1); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - let txs = vec![ - // Alice casts vote for key 1 in round 1 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 3, - alice_index, - aggregate_public_key_1.clone(), - 1, - cycle_id + 2, - ), - // Bob casts a vote for key 1 in round 1 - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 3, - bob_index, - aggregate_public_key_1.clone(), - 1, - cycle_id + 2, - ), - ]; - - // vote again in the next burn block - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(1)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 2); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("new-total".into(), Value::UInt(4)), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(1)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - // The aggregate key is approved in round 1 - let approve_event = &bob_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key_1.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(1)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } -} - -/// In this test case, Alice & Bob both successfully vote for the same key in -/// cycle N, then in cycle N + 1, Alice tries to vote before the prepare phase, -/// but fails with a "cycle not set" error. -#[test] -fn vote_for_aggregate_public_key_early() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Alice and Bob will each have voted once while booting to Nakamoto - let alice_nonce = 1; - let bob_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); - let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - - let aggregate_key = test_signers.generate_aggregate_key(cycle_id as u64 + 1); - let aggregate_public_key = Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let aggregate_public_key_ill_formed = Value::buff_from_byte(0x00); - - let txs = vec![ - // Alice casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Bob casts a vote correctly - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - ]; - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - - // Both votes should succeed - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 2); - - // Proceed to the reward phase - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - - // In this tenure, signers have not been set yet, so the vote should fail - let aggregate_public_key_point = test_signers.generate_aggregate_key(cycle_id as u64 + 2); - let aggregate_public_key = - Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // Alice casts vote for key 0 in round 0 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 1, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 2, - ), - ]; - - // vote before the prepare phase - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last two txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 3); - // ignore tenure change tx - // ignore tenure coinbase tx - - // Alice's vote should fail with a "cycle not set" error - let alice_vote_tx = &receipts[2]; - assert_eq!( - alice_vote_tx.result, - Value::err_uint(2) // ERR_CYCLE_NOT_SET - ); - assert_eq!(alice_vote_tx.events.len(), 0); - - // Proceed to the prepare phase - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - let _ = nakamoto_tenure(&mut peer, &mut test_signers, Vec::new()); - - let txs = vec![ - // Alice casts vote correctly - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 2, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 2, - ), - // Bob casts a vote correctly - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 1, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 2, - ), - ]; - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - // This time, the votes should succeed and the key should be approved - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 2); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(4)), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - // The aggregate key is approved in round 1 - let approve_event = &bob_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 2)), - ("round".into(), Value::UInt(0)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } -} - -/// In this test case, Alice votes in round 0 and Bob votes in round 1. -/// Although they both voted for the same key, the key is not approved. In the -/// next tenure, Bob votes in round 0, and the key is approved. -#[test] -fn vote_for_aggregate_public_key_mixed_rounds() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, mut test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Alice and Bob will each have voted once while booting to Nakamoto - let alice_nonce = 1; - let bob_nonce = 1; - - let cycle_id = current_reward_cycle; - - // create vote txs - let alice_index = get_signer_index(&mut peer, latest_block_id, alice_address, cycle_id); - let bob_index = get_signer_index(&mut peer, latest_block_id, bob_address, cycle_id); - - let aggregate_public_key_point = test_signers.generate_aggregate_key(0); - let aggregate_public_key = - Value::buff_from(aggregate_public_key_point.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); - - let txs = vec![ - // Alice casts vote for key 0 in round 0 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce, - alice_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Bob casts a vote for key 0 in round 1 - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce, - bob_index, - aggregate_public_key.clone(), - 1, - cycle_id + 1, - ), - ]; - - // vote in the first burn block of prepare phase - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last four txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - // All votes should succeed - let alice_vote_tx = &receipts[2]; - assert_eq!(alice_vote_tx.result, Value::okay_true()); - assert_eq!(alice_vote_tx.events.len(), 1); - let alice_vote_event = &alice_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = alice_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(alice_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", alice_vote_event); - } - - let bob_vote_tx = &receipts[3]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 1); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(2)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(1)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - let txs = vec![ - // Bob casts a vote for key 0 in round 0 - make_signers_vote_for_aggregate_public_key_value( - bob_key, - bob_nonce + 1, - bob_index, - aggregate_public_key.clone(), - 0, - cycle_id + 1, - ), - // Alice casts vote for key 0 in round 1 - make_signers_vote_for_aggregate_public_key_value( - alice_key, - alice_nonce + 1, - alice_index, - aggregate_public_key.clone(), - 1, - cycle_id + 1, - ), - ]; - - // vote again in the next block of prepare phase - let blocks_and_sizes = nakamoto_tenure(&mut peer, &mut test_signers, vec![txs]); - - // check the last four txs in the last block - let block = observer.get_blocks().last().unwrap().clone(); - let receipts = block.receipts.as_slice(); - assert_eq!(receipts.len(), 4); - // ignore tenure change tx - // ignore tenure coinbase tx - - let bob_vote_tx = &receipts[2]; - assert_eq!(bob_vote_tx.result, Value::okay_true()); - assert_eq!(bob_vote_tx.events.len(), 2); - let bob_vote_event = &bob_vote_tx.events[0]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = bob_vote_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes("voted".as_bytes().to_vec()) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("new-total".into(), Value::UInt(4)), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ("signer".into(), Value::Principal(bob_principal.clone())), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", bob_vote_event); - } - - // The aggregate key is approved in round 0 - let approve_event = &bob_vote_tx.events[1]; - if let StacksTransactionEvent::SmartContractEvent(contract_event) = approve_event { - assert_eq!( - contract_event.value, - TupleData::from_data(vec![ - ( - "event".into(), - Value::string_ascii_from_bytes( - "approved-aggregate-public-key".as_bytes().to_vec() - ) - .expect("Failed to create string") - ), - ("key".into(), aggregate_public_key.clone()), - ("reward-cycle".into(), Value::UInt(cycle_id + 1)), - ("round".into(), Value::UInt(0)), - ]) - .expect("Failed to create tuple") - .into() - ); - } else { - panic!("Expected SmartContractEvent, got {:?}", approve_event); - } - - // Alice's vote should fail with an "out of voting window" error, since the - // key is already set - let alice_vote_tx = &receipts[3]; - assert_eq!(alice_vote_tx.result, Value::err_uint(12)); // ERR_OUT_OF_VOTING_WINDOW - assert_eq!(alice_vote_tx.events.len(), 0); -} - -// In this test case, Alice & Bob advance through setup & check -// the round info from the very first reward cycle & round. -#[test] -fn test_get_round_info() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Get the current creward cycle - let cycle_id = current_reward_cycle; - - let round_info = get_round_info(&mut peer, latest_block_id, cycle_id, 0) - .unwrap() - .expect_tuple() - .unwrap(); - let votes_count = round_info.get("votes-count").unwrap(); - let votes_weight = round_info.get("votes-weight").unwrap(); - - assert_eq!(votes_count, &Value::UInt(2)); - assert_eq!(votes_weight, &Value::UInt(4)); -} - -pub fn get_round_info( - peer: &mut TestPeer<'_>, - latest_block_id: StacksBlockId, - reward_cycle: u128, - round: u128, -) -> Option { - let round_tuple = readonly_call( - peer, - &latest_block_id, - "signers-voting".into(), - "get-round-info".into(), - vec![Value::UInt(reward_cycle), Value::UInt(round)], - ) - .expect_optional() - .unwrap(); - round_tuple -} - -// In this test case, Alice & Bob advance through setup & check -// the weight threshold info from the very first reward cycle & round. -#[test] -fn test_get_threshold_weight() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Get the current creward cycle - let cycle_id = current_reward_cycle; - - // Call get-threshold-weight - let threshold_weight: u128 = get_threshold_weight(&mut peer, latest_block_id, cycle_id); - - // Since there are four votes, the threshold weight should be 3 (75% of 4) - assert_eq!(threshold_weight, 3); -} - -pub fn get_threshold_weight( - peer: &mut TestPeer<'_>, - latest_block_id: StacksBlockId, - reward_cycle: u128, -) -> u128 { - let threshold_weight = readonly_call( - peer, - &latest_block_id, - "signers-voting".into(), - "get-threshold-weight".into(), - vec![Value::UInt(reward_cycle)], - ) - .expect_u128() - .unwrap(); - threshold_weight -} - -pub fn nakamoto_tenure( - peer: &mut TestPeer, - test_signers: &mut TestSigners, - txs_of_blocks: Vec>, -) -> Vec<(NakamotoBlock, u64, ExecutionCost)> { - let current_height = peer.get_burnchain_view().unwrap().burn_block_height; - - info!("current height: {}", current_height); - - let (burn_ops, mut tenure_change, miner_key) = - peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); - - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); - - let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); - - tenure_change.tenure_consensus_hash = consensus_hash.clone(); - tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = peer - .miner - .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); - let recipient_addr = boot_code_addr(false); - let mut mutable_txs_of_blocks = txs_of_blocks.clone(); - mutable_txs_of_blocks.reverse(); - let blocks_and_sizes = peer.make_nakamoto_tenure( - tenure_change_tx, - coinbase_tx.clone(), - test_signers, - |miner, chainstate, sortdb, blocks| mutable_txs_of_blocks.pop().unwrap_or(vec![]), - ); - info!("tenure length {}", blocks_and_sizes.len()); - blocks_and_sizes -} - -pub fn make_dummy_tx( - peer: &mut TestPeer, - private_key: &StacksPrivateKey, - nonce: &mut u64, -) -> StacksTransaction { - peer.with_db_state(|sortdb, chainstate, _, _| { - let addr = key_to_stacks_addr(&private_key); - let account = get_account(chainstate, sortdb, &addr); - let recipient_addr = boot_code_addr(false); - let stx_transfer = make_token_transfer( - chainstate, - sortdb, - &private_key, - *nonce, - 1, - 1, - &recipient_addr, - ); - *nonce += 1; - Ok(stx_transfer) - }) - .unwrap() -} From 63de87a6d149fc61b8a769011d25237fcb6d4009 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:47:44 -0400 Subject: [PATCH 0434/1400] chore: remove import for nonexistant module --- stackslib/src/chainstate/stacks/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 92da8ac283..175ed61f61 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1133,7 +1133,6 @@ pub const MAX_MICROBLOCK_SIZE: u32 = 65536; #[cfg(test)] pub mod test { - use boot::signers_voting_tests::make_dummy_tx; use clarity::util::get_epoch_time_secs; use clarity::vm::representations::{ClarityName, ContractName}; use clarity::vm::ClarityVersion; From 05258d9c4984ed6b4f318d76ed5d96ddcae6e92e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:48:03 -0400 Subject: [PATCH 0435/1400] chore: accept fix for #4938 --- stackslib/src/net/api/getsortition.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 5df67e3636..cb5f81de89 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -214,7 +214,10 @@ impl RPCRequestHandler for GetSortitionHandler { .ok_or_else(|| ChainError::NoSuchBlockError)?; let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = if !sortition_sn.sortition { - (None, None, None, None) + // find the last snapshot with the sortition + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; + (None, None, None, Some(last_sortition.consensus_hash)) } else { let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? .ok_or_else(|| { From 74a6275170af2c1f966e63edce14f6af50599557 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:48:39 -0400 Subject: [PATCH 0436/1400] chore: API sync; log getnakamotoinv response --- stackslib/src/net/chat.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index b129fab50a..1e1fa79f5c 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -1748,6 +1748,7 @@ impl ConversationP2P { &tip, sortdb, chainstate, + &network.stacks_tip.block_id(), reward_cycle, )?; let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools).map_err(|e| { @@ -1758,6 +1759,13 @@ impl ConversationP2P { e })?; + test_debug!( + "Reply NakamotoInv for {} (rc {}): {:?}", + &get_nakamoto_inv.consensus_hash, + reward_cycle, + &nakamoto_inv + ); + Ok(StacksMessageType::NakamotoInv(nakamoto_inv)) } From 815e1d94ada4628c3e89ef68948c3b64d638e4b3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:49:15 -0400 Subject: [PATCH 0437/1400] chore: remove dead code --- .../nakamoto/download_state_machine.rs | 76 +------------------ 1 file changed, 2 insertions(+), 74 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 77e3b30ab2..c3af881516 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -1382,13 +1382,11 @@ impl NakamotoDownloadStateMachine { sortdb: &SortitionDB, sort_tip: &BlockSnapshot, chainstate: &StacksChainState, - highest_complete_tenure: &WantedTenure, - unconfirmed_tenure: &WantedTenure, ) -> ( HashMap>, HashMap, ) { - test_debug!("Run unconfirmed tenure downloaders from highest-complete tenure {:?} to unconfirmed tenure {:?}", highest_complete_tenure, unconfirmed_tenure); + test_debug!("Run unconfirmed tenure downloaders"); let addrs: Vec<_> = downloaders.keys().map(|addr| addr.clone()).collect(); let mut finished = vec![]; @@ -1485,10 +1483,7 @@ impl NakamotoDownloadStateMachine { .unwrap_or(false) { if let Some(highest_complete_tenure_downloader) = downloader - .make_highest_complete_tenure_downloader( - highest_complete_tenure, - unconfirmed_tenure, - ) + .make_highest_complete_tenure_downloader() .map_err(|e| { warn!( "Failed to make highest complete tenure downloader for {:?}: {:?}", @@ -1590,71 +1585,6 @@ impl NakamotoDownloadStateMachine { // run all unconfirmed downloaders, and start confirmed downloaders for the // highest complete tenure let burnchain_tip = network.burnchain_tip.clone(); - let Some(unconfirmed_tenure) = self - .wanted_tenures - .last() - .map(|wt| Some(wt.clone())) - .unwrap_or_else(|| { - // unconfirmed tenure is the last tenure in prev_wanted_tenures if - // wanted_tenures.len() is 0 - let prev_wanted_tenures = self.prev_wanted_tenures.as_ref()?; - let wt = prev_wanted_tenures.last()?; - Some(wt.clone()) - }) - else { - // not initialized yet (technically unrachable) - return HashMap::new(); - }; - - // Get the highest WantedTenure. This will be the WantedTenure whose winning block hash is - // the start block hash of the highest complete tenure, and whose consensus hash is the - // tenure ID of the ongoing tenure. It corresponds to the highest sortition for which - // there exists a tenure. - // - // There are three possibilities for obtaining this, based on what we know about tenures - // from the sortition DB and the peers' inventories: - // - // Case 1: There are no sortitions yet in the current reward cycle, so this is the - // second-to-last WantedTenure in the last reward cycle's WantedTenure list. - // - // Case 2: There is one sortition in the current reward cycle, so this is the last - // WantedTenure in the last reward cycle's WantedTenure list - // - // Case 3: There are two or more sortitions in the current reward cycle, so this is the - // second-to-last WantedTenure in the current reward cycle's WantedTenure list. - let highest_wanted_tenure = if self.wanted_tenures.is_empty() { - // highest complete wanted tenure is the second-to-last tenure in prev_wanted_tenures - let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() else { - // not initialized yet (technically unrachable) - return HashMap::new(); - }; - if prev_wanted_tenures.len() < 2 { - return HashMap::new(); - }; - let Some(wt) = prev_wanted_tenures.get(prev_wanted_tenures.len().saturating_sub(2)) - else { - return HashMap::new(); - }; - wt.clone() - } else if self.wanted_tenures.len() == 1 { - // highest complete tenure is the last tenure in prev_wanted_tenures - let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() else { - return HashMap::new(); - }; - let Some(wt) = prev_wanted_tenures.last() else { - return HashMap::new(); - }; - wt.clone() - } else { - // highest complete tenure is the second-to-last tenure in wanted_tenures - let Some(wt) = self - .wanted_tenures - .get(self.wanted_tenures.len().saturating_sub(2)) - else { - return HashMap::new(); - }; - wt.clone() - }; // Run the confirmed downloader state machine set, since we could already be processing the // highest complete tenure download. NOTE: due to the way that we call this method, we're @@ -1682,8 +1612,6 @@ impl NakamotoDownloadStateMachine { sortdb, &burnchain_tip, chainstate, - &highest_wanted_tenure, - &unconfirmed_tenure, ) }; From 52ccde49627a60c3520f4acddb03d749440c18e7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:49:30 -0400 Subject: [PATCH 0438/1400] chore: remove dead code --- .../nakamoto/tenure_downloader_unconfirmed.rs | 29 ------------------- 1 file changed, 29 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index c01c10c00e..9254e6d86c 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -715,8 +715,6 @@ impl NakamotoUnconfirmedTenureDownloader { /// Returns Err(..) if we call this function out of sequence. pub fn make_highest_complete_tenure_downloader( &self, - highest_tenure: &WantedTenure, - unconfirmed_tenure: &WantedTenure, ) -> Result { if self.state != NakamotoUnconfirmedDownloadState::Done { return Err(NetError::InvalidState); @@ -745,33 +743,6 @@ impl NakamotoUnconfirmedTenureDownloader { unconfirmed_signer_keys.clone(), ); - /* - let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() - else { - return Err(NetError::InvalidState); - }; - let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - - test_debug!( - "Create highest complete tenure downloader for {}", - &highest_tenure.tenure_id_consensus_hash - ); - let ntd = NakamotoTenureDownloader::new( - highest_tenure.tenure_id_consensus_hash.clone(), - unconfirmed_tenure.winning_block_id.clone(), - unconfirmed_tenure_start_block.header.block_id(), - self.naddr.clone(), - confirmed_signer_keys.clone(), - unconfirmed_signer_keys.clone(), - ) - .with_tenure_end_block(unconfirmed_tenure_start_block.clone()); - */ - Ok(ntd) } From d44af3d21250e65fc9d4874abafa98bb4aaf67e2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:49:43 -0400 Subject: [PATCH 0439/1400] chore: query invnetory state from network-given tip --- stackslib/src/net/inv/nakamoto.rs | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index ea0e89fb21..491d0bcaca 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -85,9 +85,16 @@ impl InvTenureInfo { tip_block_id, tenure_id_consensus_hash, )? - .map(|tenure| Self { - tenure_id_consensus_hash: tenure.tenure_id_consensus_hash, - parent_tenure_id_consensus_hash: tenure.prev_tenure_id_consensus_hash, + .map(|tenure| { + test_debug!("BlockFound tenure for {}", &tenure_id_consensus_hash); + Self { + tenure_id_consensus_hash: tenure.tenure_id_consensus_hash, + parent_tenure_id_consensus_hash: tenure.prev_tenure_id_consensus_hash, + } + }) + .or_else(|| { + test_debug!("No BlockFound tenure for {}", &tenure_id_consensus_hash); + None })) } } @@ -148,14 +155,12 @@ impl InvGenerator { tip: &BlockSnapshot, sortdb: &SortitionDB, chainstate: &StacksChainState, + nakamoto_tip: &StacksBlockId, reward_cycle: u64, ) -> Result, NetError> { let ih = sortdb.index_handle(&tip.sortition_id); - let Some(nakamoto_tip) = ih.get_nakamoto_tip_block_id()? else { - // no Nakamoto tip? no inventory - return Ok(vec![]); - }; + // N.B. reward_cycle_to_block_height starts at reward index 1 let reward_cycle_end_height = sortdb .pox_constants .reward_cycle_to_block_height(sortdb.first_block_height, reward_cycle + 1) From a482d54ba331f658354dc773350f3cd41cea0e7d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:49:56 -0400 Subject: [PATCH 0440/1400] chore: test get_parent_stacks_tip() by looking at the tip and parent tip before/after refreshing the burnchain view --- stackslib/src/net/mod.rs | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 626701f8b7..34e3479a17 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2773,6 +2773,9 @@ pub mod test { let mut mempool = self.mempool.take().unwrap(); let indexer = self.indexer.take().unwrap(); + let old_parent_tip = self.network.parent_stacks_tip.clone(); + let old_tip = self.network.stacks_tip.clone(); + let ret = self.network.run( &indexer, &mut sortdb, @@ -2785,6 +2788,10 @@ pub mod test { &RPCHandlerArgs::default(), ); + if self.network.stacks_tip != old_tip { + assert_eq!(self.network.parent_stacks_tip, old_tip); + } + self.sortdb = Some(sortdb); self.stacks_node = Some(stacks_node); self.mempool = Some(mempool); @@ -2851,6 +2858,9 @@ pub mod test { ); let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); + let old_parent_tip = self.network.parent_stacks_tip.clone(); + let old_tip = self.network.stacks_tip.clone(); + let ret = self.network.run( &indexer, &mut sortdb, @@ -2863,6 +2873,10 @@ pub mod test { &RPCHandlerArgs::default(), ); + if self.network.stacks_tip != old_tip { + assert_eq!(self.network.parent_stacks_tip, old_tip); + } + self.sortdb = Some(sortdb); self.stacks_node = Some(stacks_node); self.mempool = Some(mempool); @@ -2874,10 +2888,18 @@ pub mod test { let sortdb = self.sortdb.take().unwrap(); let mut stacks_node = self.stacks_node.take().unwrap(); let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); + + let old_parent_tip = self.network.parent_stacks_tip.clone(); + let old_tip = self.network.stacks_tip.clone(); + self.network .refresh_burnchain_view(&indexer, &sortdb, &mut stacks_node.chainstate, false) .unwrap(); + if self.network.stacks_tip != old_tip { + assert_eq!(self.network.parent_stacks_tip, old_tip); + } + self.sortdb = Some(sortdb); self.stacks_node = Some(stacks_node); } @@ -3563,6 +3585,7 @@ pub mod test { SortitionDB::get_canonical_burn_chain_tip(&self.sortdb.as_ref().unwrap().conn()) .unwrap(); let burnchain = self.config.burnchain.clone(); + let (burn_ops, stacks_block, microblocks) = self.make_tenure( |ref mut miner, ref mut sortdb, @@ -3613,6 +3636,14 @@ pub mod test { } self.refresh_burnchain_view(); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb().conn()).unwrap(); + assert_eq!( + self.network.stacks_tip.block_id(), + StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh) + ); + tip_id } From 2e1fbe85f8af728220ed1c846f70c2fc420fe9a7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:50:18 -0400 Subject: [PATCH 0441/1400] chore: NetworkHandle can be Clone'd --- stackslib/src/net/p2p.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index e02256ece4..ec29acb711 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -90,6 +90,7 @@ pub enum NetworkRequest { /// The "main loop" for sending/receiving data is a select/poll loop, and runs outside of other /// threads that need a synchronous RPC or a multi-RPC interface. This object gives those threads /// a way to issue commands and hear back replies from them. +#[derive(Clone)] pub struct NetworkHandle { chan_in: SyncSender, } @@ -4673,7 +4674,6 @@ impl PeerNetwork { /// current stacks tip. /// If this is the first tenure in epoch 3.x, then this is the pointer to the epoch 2.x block /// that it builds atop. - /// TODO: unit test pub(crate) fn get_parent_stacks_tip( &self, chainstate: &StacksChainState, From e377b2b0b03330fe4c0da2fef8e050b79ebf9cec Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:50:30 -0400 Subject: [PATCH 0442/1400] chore: serve NetworkHandle from relayer --- stackslib/src/net/relay.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 072c938a14..238d4caeed 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -517,6 +517,10 @@ impl Relayer { Relayer::new(handle, network.connection_opts.clone(), stacker_dbs) } + pub fn get_p2p_handle(&self) -> NetworkHandle { + self.p2p.clone() + } + /// Given Stacks 2.x blocks pushed to us, verify that they correspond to expected block data. pub fn validate_blocks_push( conn: &SortitionDBConn, From 219b4216392e4c9a1664fd4c0f8bd9cece20598b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:50:46 -0400 Subject: [PATCH 0443/1400] chore: API sync --- stackslib/src/net/tests/download/nakamoto.rs | 35 +++----------------- 1 file changed, 5 insertions(+), 30 deletions(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 864faac84a..5903f5978c 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -560,12 +560,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // we can request the highest-complete tenure assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); + let ntd = utd.make_highest_complete_tenure_downloader().unwrap(); assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( @@ -663,12 +658,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // we can request the highest-complete tenure assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); + let ntd = utd.make_highest_complete_tenure_downloader().unwrap(); assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( @@ -768,12 +758,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // we can request the highest-complete tenure assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); + let ntd = utd.make_highest_complete_tenure_downloader().unwrap(); assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( @@ -850,12 +835,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // we can request the highest-complete tenure assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); + let ntd = utd.make_highest_complete_tenure_downloader().unwrap(); assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( @@ -995,12 +975,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { // we can request the highest-complete tenure assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); + let ntd = utd.make_highest_complete_tenure_downloader().unwrap(); assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( From beb777cbbc9062ff31ff6963d704a4bb76b3d165 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:50:58 -0400 Subject: [PATCH 0444/1400] chore: API sync --- stackslib/src/net/tests/inv/nakamoto.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index f8134360f2..539233812f 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -171,6 +171,7 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); + let stacks_tip = peer.network.stacks_tip.block_id(); let mut inv_generator = InvGenerator::new(); @@ -181,7 +182,7 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { // check the reward cycles for (rc, inv) in reward_cycle_invs.into_iter().enumerate() { let bitvec = inv_generator - .make_tenure_bitvector(&tip, sort_db, chainstate, rc as u64) + .make_tenure_bitvector(&tip, sort_db, chainstate, &stacks_tip, rc as u64) .unwrap(); debug!( "At reward cycle {}: {:?}, mesasge = {:?}", @@ -232,6 +233,7 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); + let stacks_tip = peer.network.stacks_tip.block_id(); let mut inv_generator = InvGenerator::new(); @@ -241,7 +243,7 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { for (rc, inv) in reward_cycle_invs.into_iter().enumerate() { let bitvec = inv_generator - .make_tenure_bitvector(&tip, sort_db, chainstate, rc as u64) + .make_tenure_bitvector(&tip, sort_db, chainstate, &stacks_tip, rc as u64) .unwrap(); debug!( "At reward cycle {}: {:?}, mesasge = {:?}", @@ -284,6 +286,7 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); + let stacks_tip = peer.network.stacks_tip.block_id(); let mut inv_generator = InvGenerator::new(); @@ -293,7 +296,7 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { for (rc, inv) in reward_cycle_invs.into_iter().enumerate() { let bitvec = inv_generator - .make_tenure_bitvector(&tip, sort_db, chainstate, rc as u64) + .make_tenure_bitvector(&tip, sort_db, chainstate, &stacks_tip, rc as u64) .unwrap(); debug!("At reward cycle {}: {:?}", rc, &bitvec); From b69703ec87131c9a7e53084671b38c4bda395042 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:51:11 -0400 Subject: [PATCH 0445/1400] chore: test get_parent_stacks_tip() in nakamoto bootup --- stackslib/src/net/tests/mod.rs | 67 ++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 1ece477fe0..e13a9ee4fc 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -230,6 +230,8 @@ impl NakamotoBootPlan { let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); let mut sort_handle = sortdb.index_handle(&sort_tip); + let mut possible_chain_tips = HashSet::new(); + for block in blocks { debug!( "Apply block {} (sighash {}) to peer {} ({})", @@ -260,6 +262,8 @@ impl NakamotoBootPlan { ); } + possible_chain_tips.insert(block.block_id()); + // process it peer.coord.handle_new_stacks_block().unwrap(); peer.coord.handle_new_nakamoto_stacks_block().unwrap(); @@ -298,6 +302,8 @@ impl NakamotoBootPlan { ); } + possible_chain_tips.insert(block.block_id()); + // process it peer.coord.handle_new_stacks_block().unwrap(); peer.coord.handle_new_nakamoto_stacks_block().unwrap(); @@ -306,6 +312,8 @@ impl NakamotoBootPlan { peer.sortdb = Some(sortdb); peer.stacks_node = Some(node); peer.refresh_burnchain_view(); + + assert!(possible_chain_tips.contains(&peer.network.stacks_tip.block_id())); } } @@ -495,11 +503,31 @@ impl NakamotoBootPlan { }) .collect(); + let old_tip = peer.network.stacks_tip.clone(); let mut stacks_block = peer.tenure_with_txs(&stack_txs, &mut peer_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); + if old_tip.block_id() != stacks_tip { + assert_eq!(old_tip, peer.network.parent_stacks_tip); + } + for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { + let old_tip = other_peer.network.stacks_tip.clone(); other_peer.tenure_with_txs(&stack_txs, other_peer_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(other_peer.sortdb().conn()) + .unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); + if old_tip.block_id() != stacks_tip { + assert_eq!(old_tip, other_peer.network.parent_stacks_tip); + } } debug!("\n\n======================"); @@ -510,12 +538,31 @@ impl NakamotoBootPlan { .burnchain .is_in_prepare_phase(sortition_height.into()) { + let old_tip = peer.network.stacks_tip.clone(); stacks_block = peer.tenure_with_txs(&[], &mut peer_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); + if old_tip.block_id() != stacks_tip { + assert_eq!(old_tip, peer.network.parent_stacks_tip); + } other_peers .iter_mut() .zip(other_peer_nonces.iter_mut()) .for_each(|(peer, nonce)| { + let old_tip = peer.network.stacks_tip.clone(); peer.tenure_with_txs(&[], nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()) + .unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); + if old_tip.block_id() != stacks_tip { + assert_eq!(old_tip, peer.network.parent_stacks_tip); + } }); sortition_height = peer.get_burn_block_height(); } @@ -526,11 +573,31 @@ impl NakamotoBootPlan { // advance to the start of epoch 3.0 while sortition_height < epoch_30_height - 1 { + let old_tip = peer.network.stacks_tip.clone(); peer.tenure_with_txs(&vec![], &mut peer_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); + if old_tip.block_id() != stacks_tip { + assert_eq!(old_tip, peer.network.parent_stacks_tip); + } + for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { + let old_tip = peer.network.stacks_tip.clone(); other_peer.tenure_with_txs(&vec![], other_peer_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(other_peer.sortdb().conn()) + .unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); + if old_tip.block_id() != stacks_tip { + assert_eq!(old_tip, other_peer.network.parent_stacks_tip); + } } sortition_height = peer.get_burn_block_height(); } From 7144b199f7fe72ad4459f9b4ba06ce00d79af44c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:51:39 -0400 Subject: [PATCH 0446/1400] feat: broadcast Nakamoto blocks to p2p state machine, and rework the logic for finding the chain tip to build on --- .../stacks-node/src/nakamoto_node/miner.rs | 145 +++++++++++------- 1 file changed, 91 insertions(+), 54 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 7609ff0785..25ec73924f 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -38,7 +38,10 @@ use stacks::chainstate::stacks::{ TenureChangeCause, TenureChangePayload, TransactionAnchorMode, TransactionPayload, TransactionVersion, }; +use stacks::net::StacksMessageType; use stacks::net::stackerdb::StackerDBs; +use stacks::net::p2p::NetworkHandle; +use stacks::net::NakamotoBlocksData; use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -49,11 +52,10 @@ use wsts::curve::scalar::Scalar; use super::relayer::RelayerThread; use super::sign_coordinator::SignCoordinator; use super::{Config, Error as NakamotoNodeError, EventDispatcher, Keychain}; -use crate::burnchains::bitcoin_regtest_controller::burnchain_params_from_config; use crate::nakamoto_node::VRF_MOCK_MINER_KEY; use crate::run_loop::nakamoto::Globals; use crate::run_loop::RegisteredKey; -use crate::{neon_node, ChainTip}; +use crate::neon_node; #[cfg(test)] lazy_static::lazy_static! { @@ -144,6 +146,8 @@ pub struct BlockMinerThread { event_dispatcher: EventDispatcher, /// The reason the miner thread was spawned reason: MinerReason, + /// Handle to the p2p thread for block broadcast + p2p_handle: NetworkHandle, } impl BlockMinerThread { @@ -168,6 +172,7 @@ impl BlockMinerThread { event_dispatcher: rt.event_dispatcher.clone(), parent_tenure_id, reason, + p2p_handle: rt.get_p2p_handle() } } @@ -521,8 +526,10 @@ impl BlockMinerThread { Ok(filtered_transactions.into_values().collect()) } + /// Store a block to the chainstate, and if successful (it should be since we mined it), + /// broadcast it via the p2p network. fn broadcast( - &self, + &mut self, block: NakamotoBlock, reward_set: RewardSet, ) -> Result<(), ChainstateError> { @@ -555,7 +562,7 @@ impl BlockMinerThread { let mut sortition_handle = sort_db.index_handle_at_ch(&block.header.consensus_hash)?; let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; - NakamotoChainState::accept_block( + let accepted = NakamotoChainState::accept_block( &chainstate_config, &block, &mut sortition_handle, @@ -565,6 +572,19 @@ impl BlockMinerThread { NakamotoBlockObtainMethod::Mined, )?; staging_tx.commit()?; + + if !accepted { + warn!("Did NOT accept block {} we mined", &block.block_id()); + + // not much we can do here, but try and mine again and hope we produce a valid one. + return Ok(()); + } + + // forward to p2p thread + let block_id = block.block_id(); + if let Err(e) = self.p2p_handle.broadcast_message(vec![], StacksMessageType::NakamotoBlocks(NakamotoBlocksData { blocks: vec![block] })) { + warn!("Failed to broadcast blocok {}: {:?}", &block_id, &e); + } Ok(()) } @@ -657,59 +677,72 @@ impl BlockMinerThread { // 1. The highest block in the miner's current tenure // 2. The highest block in the current tenure's parent tenure // Where the current tenure's parent tenure is the tenure start block committed to in the current tenure's associated block commit. - let stacks_block_id = if let Some(block) = self.mined_blocks.last() { - test_debug!("Stacks block parent ID is last mined block"); - block.block_id() + let stacks_tip_header = if let Some(block) = self.mined_blocks.last() { + test_debug!("Stacks block parent ID is last mined block {}", &block.block_id()); + let header_info = NakamotoChainState::get_block_header(chain_state.db(), &block.block_id()) + .map_err(|e| { + error!("Could not query header info for last-mined block ID {}: {:?}", &block.block_id(), &e); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!("No header info for last-mined block ID {}", &block.block_id()); + NakamotoNodeError::ParentNotFound + })?; + + header_info } else { - test_debug!("Stacks block parent ID is parent tenure ID"); - self.parent_tenure_id - }; - let Some(mut stacks_tip_header) = - NakamotoChainState::get_block_header(chain_state.db(), &stacks_block_id) - .expect("FATAL: could not query prior stacks block id") - else { - debug!("No Stacks chain tip known, will return a genesis block"); - let burnchain_params = burnchain_params_from_config(&self.config.burnchain); - - let chain_tip = ChainTip::genesis( - &burnchain_params.first_block_hash, - burnchain_params.first_block_height.into(), - burnchain_params.first_block_timestamp.into(), - ); + test_debug!("Stacks block parent ID is last block in parent tenure ID {}", &self.parent_tenure_id); + let parent_tenure_header = NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) + .map_err(|e| { + error!("Could not query header for parent tenure ID {}: {:?}", &self.parent_tenure_id, &e); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!("No header for parent tenure ID {}", &self.parent_tenure_id); + NakamotoNodeError::ParentNotFound + })?; + + let (stacks_tip_ch, stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(burn_db.conn()) + .map_err(|e| { + error!("Failed to load canonical Stacks tip: {:?}", &e); + NakamotoNodeError::ParentNotFound + })?; + + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + let last_tenure_finish_block_header = if let Some(header) = + NakamotoChainState::get_highest_block_header_in_tenure( + &mut chain_state.index_conn(), + &stacks_tip, + &parent_tenure_header.consensus_hash, + ) + .map_err(|e| { + error!("Could not query parent tenure finish block: {:?}", &e); + NakamotoNodeError::ParentNotFound + })? + { + header + } + else { + // this is an epoch2 block + debug!("Stacks block parent ID may be an epoch2x block: {}", &self.parent_tenure_id); + let header = NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) + .map_err(|e| { + error!("Could not query header info for epoch2x tenure block ID {}: {:?}", &self.parent_tenure_id, &e); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!("No header info for epoch2x tenure block ID {}", &self.parent_tenure_id); + NakamotoNodeError::ParentNotFound + })?; + + header + }; - return Ok(ParentStacksBlockInfo { - parent_tenure: Some(ParentTenureInfo { - parent_tenure_consensus_hash: chain_tip.metadata.consensus_hash, - parent_tenure_blocks: 0, - }), - stacks_parent_header: chain_tip.metadata, - coinbase_nonce: 0, - }); + last_tenure_finish_block_header }; - - // if self.mined_blocks.is_empty() { - // We could call this even if self.mined_blocks was not empty, but would return the same value, so save the effort and only do it when necessary. - // If we are starting a new tenure, then make sure we are building off of the last block of our parent tenure - if let Some(last_tenure_finish_block_header) = - NakamotoChainState::get_highest_block_header_in_tenure( - &mut chain_state.index_conn(), - &stacks_block_id, - &stacks_tip_header.consensus_hash, - ) - .expect("FATAL: could not query parent tenure finish block") - { - test_debug!( - "Miner: stacks tip header is now {} {:?} from {} {:?}", - &last_tenure_finish_block_header.index_block_hash(), - &last_tenure_finish_block_header, - &stacks_tip_header.index_block_hash(), - &stacks_tip_header - ); - stacks_tip_header = last_tenure_finish_block_header; - } - // } + test_debug!( - "Miner: stacks tip header is {} {:?}", + "Miner: stacks tip parent header is {} {:?}", &stacks_tip_header.index_block_hash(), &stacks_tip_header ); @@ -1046,6 +1079,7 @@ impl ParentStacksBlockInfo { let parent_tenure_info = if stacks_tip_header.consensus_hash == parent_tenure_header.consensus_hash { + // in the same tenure let parent_tenure_blocks = if parent_tenure_header .anchored_header .as_stacks_nakamoto() @@ -1055,7 +1089,7 @@ impl ParentStacksBlockInfo { NakamotoChainState::get_highest_block_header_in_tenure( &mut chain_state.index_conn(), &stacks_tip_header.index_block_hash(), - &stacks_tip_header.consensus_hash, + &parent_tenure_header.consensus_hash, ) else { warn!("Failed loading last block of parent tenure"; "parent_tenure_id" => %parent_tenure_id); @@ -1065,6 +1099,9 @@ impl ParentStacksBlockInfo { if stacks_tip_header.index_block_hash() != last_parent_tenure_header.index_block_hash() { + warn!("Last known tenure block of parent tenure should be the stacks tip"; + "stacks_tip_header" => %stacks_tip_header.index_block_hash(), + "last_parent_tenure_header" => %last_parent_tenure_header.index_block_hash()); return Err(NakamotoNodeError::NewParentDiscovered); } 1 + last_parent_tenure_header.stacks_block_height From c9df911aadc4b2502602f23b6f5c879f10ac8af2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:52:05 -0400 Subject: [PATCH 0447/1400] chore: consolidate last-committed tip data and directly query te chainstate to determine when to issue block-commits --- .../stacks-node/src/nakamoto_node/relayer.rs | 504 ++++++++++++------ 1 file changed, 333 insertions(+), 171 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 3d1fe39e96..5f03e0b15c 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -36,11 +36,10 @@ use stacks::chainstate::stacks::miner::{ get_mining_spend_amount, signal_mining_blocked, signal_mining_ready, }; use stacks::core::mempool::MemPoolDB; -use stacks::core::{ - FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, STACKS_EPOCH_3_0_MARKER, -}; +use stacks::core::STACKS_EPOCH_3_0_MARKER; use stacks::monitoring::increment_stx_blocks_mined_counter; use stacks::net::db::LocalPeer; +use stacks::net::p2p::NetworkHandle; use stacks::net::relay::Relayer; use stacks::net::NetworkResult; use stacks_common::types::chainstate::{ @@ -49,7 +48,7 @@ use stacks_common::types::chainstate::{ use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::Hash160; -use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; +use stacks_common::util::vrf::{VRFPublicKey}; use super::miner::MinerReason; use super::{ @@ -99,6 +98,85 @@ impl fmt::Display for RelayerDirective { } } +/// Last commitment data +/// This represents the tenure that the last-sent block-commit committed to. +pub struct LastCommit { + /// block-commit sent + block_commit: LeaderBlockCommitOp, + /// the sortition tip at the time the block-commit was sent + burn_tip: BlockSnapshot, + /// the stacks tip at the time the block-commit was sent + stacks_tip: StacksBlockId, + /// the tenure consensus hash for the tip's tenure + tenure_consensus_hash: ConsensusHash, + /// the start-block hash of the tip's tenure + start_block_hash: BlockHeaderHash, + /// What is the epoch in which this was sent? + epoch_id: StacksEpochId, + /// commit txid (to be filled in on submission) + txid: Txid, +} + +impl LastCommit { + pub fn new(commit: LeaderBlockCommitOp, burn_tip: BlockSnapshot, stacks_tip: StacksBlockId, tenure_consensus_hash: ConsensusHash, start_block_hash: BlockHeaderHash, epoch_id: StacksEpochId) -> Self { + Self { + block_commit: commit, + burn_tip, + stacks_tip, + tenure_consensus_hash, + start_block_hash, + epoch_id, + txid: Txid([0x00; 32]), + } + } + + /// Get the commit + pub fn get_block_commit(&self) -> &LeaderBlockCommitOp { + &self.block_commit + } + + /// What's the parent tenure's tenure-start block hash? + pub fn parent_tenure_id(&self) -> StacksBlockId { + StacksBlockId(self.block_commit.block_header_hash.clone().0) + } + + /// What's the stacks tip at the time of commit? + pub fn get_stacks_tip(&self) -> &StacksBlockId { + &self.stacks_tip + } + + /// What's the burn tip at the time of commit? + pub fn get_burn_tip(&self) -> &BlockSnapshot { + &self.burn_tip + } + + /// What's the burn view consensus hash as of this last commit? + pub fn get_burn_view_consensus_hash(&self) -> &ConsensusHash { + &self.burn_tip.consensus_hash + } + + /// What's the epoch in which this was sent? + pub fn get_epoch_id(&self) -> &StacksEpochId { + &self.epoch_id + } + + /// What's the tenure-start block ID of the tenure this block-commit confirms? + pub fn get_tenure_start_block_id(&self) -> StacksBlockId { + StacksBlockId::new(&self.tenure_consensus_hash, &self.start_block_hash) + } + + /// Get the tenure ID of the tenure this commit builds on + pub fn get_tenure_id(&self) -> &ConsensusHash { + &self.tenure_consensus_hash + } + + /// Set our txid + pub fn set_txid(&mut self, txid: &Txid) { + self.txid = txid.clone(); + } +} + + /// Relayer thread /// * accepts network results and stores blocks and microblocks /// * forwards new blocks, microblocks, and transactions to the p2p thread @@ -160,9 +238,9 @@ pub struct RelayerThread { /// to check if it should issue a block commit or try to register a VRF key next_initiative: Instant, is_miner: bool, - /// This is the last snapshot in which the relayer committed, and the parent_tenure_id - /// which was committed to - last_committed: Option<(BlockSnapshot, StacksBlockId)>, + /// Information about the last-sent block commit, and the relayer's view of the chain at the + /// time it was sent. + last_committed: Option, } impl RelayerThread { @@ -223,6 +301,11 @@ impl RelayerThread { } } + /// Get a handle to the p2p thread + pub fn get_p2p_handle(&self) -> NetworkHandle { + self.relayer.get_p2p_handle() + } + /// have we waited for the right conditions under which to start mining a block off of our /// chain tip? fn has_waited_for_latest_blocks(&self) -> bool { @@ -306,10 +389,52 @@ impl RelayerThread { consensus_hash: ConsensusHash, burn_hash: BurnchainHeaderHash, committed_index_hash: StacksBlockId, - ) -> MinerDirective { + ) -> Result { let sn = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &consensus_hash) .expect("FATAL: failed to query sortition DB") .expect("FATAL: unknown consensus hash"); + + let (stacks_tip_ch, stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) + .map_err(|e| { + error!("Failed to load canonical stacks tip: {:?}", &e); + NakamotoNodeError::ParentNotFound + })?; + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + + let ongoing_tenure_consensus_hash = if let Some(ongoing_tenure) = NakamotoChainState::get_ongoing_tenure(&mut self.chainstate.index_conn(), &stacks_tip) + .map_err(|e| { + error!("Failed to get ongoing tenure off of {}: {:?}", &stacks_tip, &e); + NakamotoNodeError::ParentNotFound + })? + { + ongoing_tenure.tenure_id_consensus_hash + } + else if let Some(header) = StacksChainState::get_stacks_block_header_info_by_index_block_hash(self.chainstate.db(), &stacks_tip) + .map_err(|e| { + error!("Failed to get stacks 2.x block header for {}: {:?}", &stacks_tip, &e); + NakamotoNodeError::ParentNotFound + })? + { + header.consensus_hash + } + else { + error!("Could not deduce ongoing tenure"); + return Err(NakamotoNodeError::ParentNotFound); + }; + + let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( + &mut self.chainstate.index_conn(), + &stacks_tip, + &ongoing_tenure_consensus_hash + ) + .map_err(|e| { + error!("Relayer: Failed to get tenure-start block header for stacks tip {}: {:?}", &stacks_tip, &e); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!("Relayer: Failed to find tenure-start block header for stacks tip {}", &stacks_tip); + NakamotoNodeError::ParentNotFound + })?; self.globals.set_last_sortition(sn.clone()); @@ -322,6 +447,7 @@ impl RelayerThread { "burn_height" => sn.block_height, "winning_txid" => %sn.winning_block_txid, "committed_parent" => %committed_index_hash, + "last_tenure_start_id" => %highest_tenure_start_block_header.index_block_hash(), "won_sortition?" => won_sortition, ); @@ -329,10 +455,10 @@ impl RelayerThread { increment_stx_blocks_mined_counter(); } - if sn.sortition { + let directive = if sn.sortition { if won_sortition { MinerDirective::BeginTenure { - parent_tenure_start: committed_index_hash, + parent_tenure_start: highest_tenure_start_block_header.index_block_hash(), burnchain_tip: sn, } } else { @@ -342,7 +468,8 @@ impl RelayerThread { MinerDirective::ContinueTenure { new_burn_view: consensus_hash, } - } + }; + Ok(directive) } /// Constructs and returns a LeaderKeyRegisterOp out of the provided params @@ -399,147 +526,163 @@ impl RelayerThread { } } - /// Produce the block-commit for this anchored block, if we can. - /// `target_ch` is the consensus-hash of the Tenure we will build off - /// `target_bh` is the block hash of the Tenure we will build off - /// Returns the (the most recent burn snapshot, the expected epoch, the commit-op) on success + /// Produce the block-commit for this upcoming tenure, if we can. + /// + /// Takes the Nakamoto chain tip (consensus hash, block header hash). + /// + /// Returns the (the most recent burn snapshot, the most recent stakcs tip, the commit-op) on success /// Returns None if we fail somehow. - fn make_block_commit( + /// + /// TODO: unit test + pub(crate) fn make_block_commit( &mut self, - target_ch: &ConsensusHash, - target_bh: &BlockHeaderHash, - ) -> Result<(BlockSnapshot, StacksEpochId, LeaderBlockCommitOp), NakamotoNodeError> { + tip_block_ch: &ConsensusHash, + tip_block_bh: &BlockHeaderHash, + ) -> Result { let sort_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?; - let block_id = StacksBlockId::new(target_ch, target_bh); - let parent_vrf_proof = NakamotoChainState::get_block_vrf_proof( + let stacks_tip = StacksBlockId::new(tip_block_ch, tip_block_bh); + + // sanity check -- this block must exist and have been processed locally + let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( &mut self.chainstate.index_conn(), - &block_id, - &target_ch, + &stacks_tip, + &tip_block_ch, ) - .map_err(|_e| NakamotoNodeError::ParentNotFound)? - .unwrap_or_else(|| VRFProof::empty()); + .map_err(|e| { + error!("Relayer: Failed to get tenure-start block header for stacks tip {}: {:?}", &stacks_tip, &e); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!("Relayer: Failed to find tenure-start block header for stacks tip {}", &stacks_tip); + NakamotoNodeError::ParentNotFound + })?; + + // load the VRF proof generated in this tenure, so we can use it to seed the VRF in the + // upcoming tenure. This may be an epoch2x VRF proof. + let tip_vrf_proof = NakamotoChainState::get_block_vrf_proof(&mut self.chainstate.index_conn(), &stacks_tip, tip_block_ch) + .map_err(|e| { + error!("Failed to load VRF proof for {} off of {}: {:?}", tip_block_ch, &stacks_tip, &e); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!("No block VRF proof for {} off of {}", tip_block_ch, &stacks_tip); + NakamotoNodeError::ParentNotFound + })?; // let's figure out the recipient set! let recipients = get_nakamoto_next_recipients( &sort_tip, &mut self.sortdb, &mut self.chainstate, - &block_id, + &stacks_tip, &self.burnchain, ) .map_err(|e| { error!("Relayer: Failure fetching recipient set: {:?}", e); NakamotoNodeError::SnapshotNotFoundForChainTip })?; - - let block_header = NakamotoChainState::get_tenure_start_block_header( - &mut self.chainstate.index_conn(), - &block_id, - &target_ch, - ) - .map_err(|e| { - error!("Relayer: Failed to get block header for parent tenure: {e:?}"); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!("Relayer: Failed to find block header for parent tenure"); - NakamotoNodeError::ParentNotFound - })?; - - let parent_block_id = block_header.index_block_hash(); - if parent_block_id != StacksBlockId::new(target_ch, target_bh) { - error!("Relayer: Found block header for parent tenure, but mismatched block id"; - "expected_block_id" => %StacksBlockId::new(target_ch, target_bh), - "found_block_id" => %parent_block_id); - return Err(NakamotoNodeError::UnexpectedChainState); + + let commit_outs = if self.burnchain.is_in_prepare_phase(sort_tip.block_height + 1) { + vec![PoxAddress::standard_burn_address(self.config.is_mainnet())] } - - let Ok(Some(parent_sortition)) = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), target_ch) else { - error!("Relayer: Failed to lookup the block snapshot of parent tenure ID"; "tenure_consensus_hash" => %target_ch); - return Err(NakamotoNodeError::ParentNotFound); + RewardSetInfo::into_commit_outs(recipients, self.config.is_mainnet()) }; - let Ok(Some(target_epoch)) = - SortitionDB::get_stacks_epoch(self.sortdb.conn(), sort_tip.block_height + 1) + // find the sortition that kicked off this tenure (it may be different from the sortition + // tip, such as when there is no sortition or when the miner of the current sortition never + // produces a block). This is used to find the parent block-commit of the block-commit + // we'll submit. + let Ok(Some(tip_tenure_sortition)) = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), tip_block_ch) else { - error!("Relayer: Failed to lookup its epoch"; "target_height" => sort_tip.block_height + 1); - return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); + error!("Relayer: Failed to lookup the block snapshot of highest tenure ID"; "tenure_consensus_hash" => %tip_block_ch); + return Err(NakamotoNodeError::ParentNotFound); }; - - let parent_block_burn_height = parent_sortition.block_height; + + // find the parent block-commit of this commit + let commit_parent_block_burn_height = tip_tenure_sortition.block_height; let Ok(Some(parent_winning_tx)) = SortitionDB::get_block_commit( self.sortdb.conn(), - &parent_sortition.winning_block_txid, - &parent_sortition.sortition_id, + &tip_tenure_sortition.winning_block_txid, + &tip_tenure_sortition.sortition_id, ) else { - error!("Relayer: Failed to lookup the block commit of parent tenure ID"; "tenure_consensus_hash" => %target_ch); + error!("Relayer: Failed to lookup the block commit of parent tenure ID"; "tenure_consensus_hash" => %tip_block_ch); return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); }; - let parent_winning_vtxindex = parent_winning_tx.vtxindex; + let commit_parent_winning_vtxindex = parent_winning_tx.vtxindex; - let burn_fee_cap = get_mining_spend_amount(self.globals.get_miner_status()); - let sunset_burn = self.burnchain.expected_sunset_burn( - sort_tip.block_height + 1, - burn_fee_cap, - target_epoch.epoch_id, - ); - let rest_commit = burn_fee_cap - sunset_burn; - - let commit_outs = if !self - .burnchain - .pox_constants - .is_after_pox_sunset_end(sort_tip.block_height, target_epoch.epoch_id) - && !self - .burnchain - .is_in_prepare_phase(sort_tip.block_height + 1) - { - RewardSetInfo::into_commit_outs(recipients, self.config.is_mainnet()) - } else { - vec![PoxAddress::standard_burn_address(self.config.is_mainnet())] + // epoch in which this commit will be sent (affects how the burnchain client processes it) + let Ok(Some(target_epoch)) = + SortitionDB::get_stacks_epoch(self.sortdb.conn(), sort_tip.block_height + 1) + else { + error!("Relayer: Failed to lookup its epoch"; "target_height" => sort_tip.block_height + 1); + return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); }; - // let's commit, but target the current burnchain tip with our modulus + // amount of burnchain tokens (e.g. sats) we'll spend across the PoX outputs + let burn_fee_cap = get_mining_spend_amount(self.globals.get_miner_status()); + + // let's commit, but target the current burnchain tip with our modulus so the commit is + // only valid if it lands in the targeted burnchain block height let burn_parent_modulus = u8::try_from(sort_tip.block_height % BURN_BLOCK_MINED_AT_MODULUS) .map_err(|_| { error!("Relayer: Block mining modulus is not u8"); NakamotoNodeError::UnexpectedChainState })?; + + // burnchain signer for this commit let sender = self.keychain.get_burnchain_signer(); + + // VRF key this commit uses (i.e. the one we registered) let key = self .globals .get_leader_key_registration_state() .get_active() .ok_or_else(|| NakamotoNodeError::NoVRFKeyActive)?; - let op = LeaderBlockCommitOp { + + let commit = LeaderBlockCommitOp { + // NOTE: to be filled in treatment: vec![], - sunset_burn, - block_header_hash: BlockHeaderHash(parent_block_id.0), - burn_fee: rest_commit, - input: (Txid([0; 32]), 0), + // NOTE: PoX sunset has been disabled prior to taking effect + sunset_burn: 0, + // block-commits in Nakamoto commit to the ongoing tenure's tenure-start block (which, + // when processed, become the start-block of the tenure atop which this miner will + // produce blocks) + block_header_hash: BlockHeaderHash(highest_tenure_start_block_header.index_block_hash().0), + // the rest of this is the same as epoch2x commits, modulo the new epoch marker + burn_fee: burn_fee_cap, apparent_sender: sender, key_block_ptr: u32::try_from(key.block_height) .expect("FATAL: burn block height exceeded u32"), key_vtxindex: u16::try_from(key.op_vtxindex).expect("FATAL: vtxindex exceeded u16"), memo: vec![STACKS_EPOCH_3_0_MARKER], - new_seed: VRFSeed::from_proof(&parent_vrf_proof), - parent_block_ptr: u32::try_from(parent_block_burn_height) + new_seed: VRFSeed::from_proof(&tip_vrf_proof), + parent_block_ptr: u32::try_from(commit_parent_block_burn_height) .expect("FATAL: burn block height exceeded u32"), - parent_vtxindex: u16::try_from(parent_winning_vtxindex) + parent_vtxindex: u16::try_from(commit_parent_winning_vtxindex) .expect("FATAL: vtxindex exceeded u16"), + burn_parent_modulus, + commit_outs, + + // NOTE: to be filled in + input: (Txid([0; 32]), 0), vtxindex: 0, txid: Txid([0u8; 32]), block_height: 0, burn_header_hash: BurnchainHeaderHash::zero(), - burn_parent_modulus, - commit_outs, }; - Ok((sort_tip, target_epoch.epoch_id, op)) + Ok(LastCommit::new( + commit, + sort_tip, + stacks_tip, + highest_tenure_start_block_header.consensus_hash, + highest_tenure_start_block_header.anchored_header.block_hash(), + target_epoch.epoch_id)) } /// Create the block miner thread state. @@ -751,8 +894,12 @@ impl RelayerThread { burn_hash: BurnchainHeaderHash, committed_index_hash: StacksBlockId, ) -> bool { - let miner_instruction = - self.process_sortition(consensus_hash, burn_hash, committed_index_hash); + let miner_instruction = match self.process_sortition(consensus_hash, burn_hash, committed_index_hash) { + Ok(mi) => mi, + Err(_) => { + return false; + } + }; match miner_instruction { MinerDirective::BeginTenure { @@ -795,13 +942,13 @@ impl RelayerThread { true } + /// Generate and submit the next block-commit, and record it locally fn issue_block_commit( &mut self, - tenure_start_ch: ConsensusHash, - tenure_start_bh: BlockHeaderHash, + tip_block_ch: ConsensusHash, + tip_block_bh: BlockHeaderHash, ) -> Result<(), NakamotoNodeError> { - let (last_committed_at, target_epoch_id, commit) = - self.make_block_commit(&tenure_start_ch, &tenure_start_bh)?; + let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?; #[cfg(test)] { if TEST_SKIP_COMMIT_OP.lock().unwrap().unwrap_or(false) { @@ -809,12 +956,14 @@ impl RelayerThread { return Ok(()); } } + + // sign and broadcast let mut op_signer = self.keychain.generate_op_signer(); let txid = self .bitcoin_controller .submit_operation( - target_epoch_id, - BlockstackOperationType::LeaderBlockCommit(commit), + last_committed.get_epoch_id().clone(), + BlockstackOperationType::LeaderBlockCommit(last_committed.get_block_commit().clone()), &mut op_signer, 1, ) @@ -822,23 +971,26 @@ impl RelayerThread { warn!("Failed to submit block-commit bitcoin transaction"); NakamotoNodeError::BurnchainSubmissionFailed })?; + info!( "Relayer: Submitted block-commit"; - "parent_consensus_hash" => %tenure_start_ch, - "parent_block_hash" => %tenure_start_bh, + "tip_consensus_hash" => %tip_block_ch, + "tip_block_hash" => %tip_block_bh, "txid" => %txid, ); + // update local state + last_committed.set_txid(&txid); self.last_commits.insert(txid); - self.last_committed = Some(( - last_committed_at, - StacksBlockId::new(&tenure_start_ch, &tenure_start_bh), - )); + self.last_committed = Some(last_committed); self.globals.counters.bump_naka_submitted_commits(); Ok(()) } + /// Determine what the relayer should do to advance the chain. + /// * If this isn't a miner, then it's always nothing. + /// * Otherwise, if we haven't done so already, go register a VRF public key fn initiative(&mut self) -> Option { if !self.is_miner { return None; @@ -860,79 +1012,89 @@ impl RelayerThread { } LeaderKeyRegistrationState::Active(_) => {} }; - - // has there been a new sortition - let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) else { + + // load up canonical sortition and stacks tips + let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) + .map_err(|e| { + error!("Failed to load canonical sortition tip: {:?}", &e); + e + }) + else { return None; }; - // check if the burnchain changed, if so, we should issue a commit. - // if not, we may still want to update a commit if we've received a new tenure start block - let burnchain_changed = if let Some((last_committed_at, ..)) = self.last_committed.as_ref() - { - // if the new sortition tip has a different consesus hash than the last commit, - // issue a new commit - sort_tip.consensus_hash != last_committed_at.consensus_hash - } else { - // if there was no last commit, issue a new commit - true - }; - - let Ok(Some(chain_tip_header)) = - NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &self.sortdb) + // NOTE: this may be an epoch2x tip + let Ok((stacks_tip_ch, stacks_tip_bh)) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) + .map_err(|e| { + error!("Failed to load canonical stacks tip: {:?}", &e); + e + }) else { - info!("No known canonical tip, will issue a genesis block commit"); - return Some(RelayerDirective::IssueBlockCommit( - FIRST_BURNCHAIN_CONSENSUS_HASH, - FIRST_STACKS_BLOCK_HASH, - )); + return None; }; - - debug!( - "Relayer: canonical block header is {}/{} ({})", - &chain_tip_header.consensus_hash, - &chain_tip_header.anchored_header.block_hash(), - &chain_tip_header.index_block_hash() - ); - - // get the starting block of the chain tip's tenure - let Ok(Some(chain_tip_tenure_start)) = NakamotoChainState::get_tenure_start_block_header( - &mut self.chainstate.index_conn(), - &chain_tip_header.index_block_hash(), - &chain_tip_header.consensus_hash, - ) else { - warn!("Failure getting the first block of tenure in order to assemble block commit"; - "tenure_consensus_hash" => %chain_tip_header.consensus_hash, - "tip_block_hash" => %chain_tip_header.anchored_header.block_hash()); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + + // check stacks and sortition tips to see if any chainstate change has happened. + // did our view of the sortition history change? + // if so, then let's try and confirm the highest tenure so far. + let burnchain_changed = self.last_committed + .as_ref() + .map(|cmt| cmt.get_burn_view_consensus_hash() != &sort_tip.consensus_hash) + .unwrap_or(true); + + // did our view of the highest ongoing tenure change? + // e.g. did we process blocks in an ancestral tenure that previously was empty? + // if so, then we will need to send (more likely, RBF) a block-commit to confirm it. + let (ongoing_tenure_consensus_hash, tenure_epoch_id) = if let Some(ongoing_tenure) = NakamotoChainState::get_ongoing_tenure(&mut self.chainstate.index_conn(), &stacks_tip) + .map_err(|e| { + error!("Failed to get ongoing tenure off of {}: {:?}", &stacks_tip, &e); + e + }) + .ok() + .flatten() + { + (ongoing_tenure.tenure_id_consensus_hash, StacksEpochId::Epoch30) + } + else if let Some(header) = StacksChainState::get_stacks_block_header_info_by_index_block_hash(self.chainstate.db(), &stacks_tip) + .map_err(|e| { + error!("Failed to get stacks 2.x block header for {}: {:?}", &stacks_tip, &e); + e + }) + .ok() + .flatten() + { + (header.consensus_hash, StacksEpochId::Epoch25) + } + else { + error!("Could not deduce ongoing tenure"); return None; }; - debug!( - "Relayer: tenure-start block header is {}/{} ({})", - &chain_tip_tenure_start.consensus_hash, - &chain_tip_tenure_start.anchored_header.block_hash(), - &chain_tip_tenure_start.index_block_hash() - ); - - let chain_tip_tenure_id = chain_tip_tenure_start.index_block_hash(); - let should_commit = burnchain_changed - || if let Some((_, last_committed_tenure_id)) = self.last_committed.as_ref() { - // if the tenure ID of the chain tip has changed, issue a new commit - last_committed_tenure_id != &chain_tip_tenure_id - } else { - // should be unreachable, but either way, if - // `self.last_committed` is None, we should issue a commit - true - }; - - if should_commit { - Some(RelayerDirective::IssueBlockCommit( - chain_tip_tenure_start.consensus_hash, - chain_tip_tenure_start.anchored_header.block_hash(), - )) - } else { - None + let highest_tenure_changed = self.last_committed + .as_ref() + .map(|cmt| cmt.get_tenure_id() != &ongoing_tenure_consensus_hash) + .unwrap_or(true); + + debug!("Relayer: initiative to commit"; + "sortititon tip" => %sort_tip.consensus_hash, + "stacks tip" => %stacks_tip, + "last-commit burn view" => %self.last_committed.as_ref().map(|cmt| cmt.get_burn_view_consensus_hash().to_string()).unwrap_or("(not set)".to_string()), + "ongoing tenure" => %ongoing_tenure_consensus_hash, + "last-commit ongoing tenure" => %self.last_committed.as_ref().map(|cmt| cmt.get_tenure_id().to_string()).unwrap_or("(not set)".to_string()), + "tenure epoch" => %tenure_epoch_id, + "burnchain view changed?" => %burnchain_changed, + "highest tenure changed?" => %highest_tenure_changed); + + if !burnchain_changed && !highest_tenure_changed { + // nothing to do + return None; } + + // burnchain view or highest-tenure view changed, so we need to send (or RBF) a commit + Some(RelayerDirective::IssueBlockCommit( + stacks_tip_ch, + stacks_tip_bh + )) } /// Main loop of the relayer. From c70df54fa62fc8ab3a31e1acdf3390ca876e1865 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:52:48 -0400 Subject: [PATCH 0448/1400] chore: fix follower_bootup --- .../src/tests/nakamoto_integrations.rs | 52 ++++++++++++------- 1 file changed, 33 insertions(+), 19 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 8428937d6e..64de79b093 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2679,7 +2679,6 @@ fn follower_bootup() { .unwrap(); let mut last_tip = BlockHeaderHash([0x00; 32]); - let mut last_tip_height = 0; let mut last_nonce = None; debug!( @@ -2688,15 +2687,20 @@ fn follower_bootup() { ); // mine the interim blocks - for interim_block_ix in 0..inter_blocks_per_tenure { + for _ in 0..inter_blocks_per_tenure { let blocks_processed_before = coord_channel .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); - // submit a tx so that the miner will mine an extra block - let Ok(account) = get_account_result(&http_origin, &sender_addr) else { - thread::sleep(Duration::from_millis(100)); - continue; + + let account = loop { + // submit a tx so that the miner will mine an extra block + let Ok(account) = get_account_result(&http_origin, &sender_addr) else { + debug!("follower_bootup: Failed to load miner account"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + break account; }; let sender_nonce = account @@ -2746,6 +2750,7 @@ fn follower_bootup() { .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { break; } @@ -2754,21 +2759,29 @@ fn follower_bootup() { thread::sleep(Duration::from_millis(100)); } - debug!("follower_bootup: Follower advanced to miner tip"); - - let Ok(info) = get_chain_info_result(&naka_conf) else { - debug!("follower_bootup: failed to load tip info"); - thread::sleep(Duration::from_millis(100)); - continue; - }; + // compare chain tips + loop { + let Ok(info) = get_chain_info_result(&naka_conf) else { + debug!("follower_bootup: failed to load tip info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; - /* - assert_ne!(info.stacks_tip, last_tip); - assert_ne!(info.stacks_tip_height, last_tip_height); - */ + let Ok(follower_info) = get_chain_info_result(&follower_conf) else { + debug!("follower_bootup: Could not get follower chain info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + if info.stacks_tip == follower_info.stacks_tip { + debug!("follower_bootup: Follower has advanced to miner's tip {}", &info.stacks_tip); + } + else { + debug!("follower_bootup: Follower has NOT advanced to miner's tip: {} != {}", &info.stacks_tip, follower_info.stacks_tip); + } - last_tip = info.stacks_tip; - last_tip_height = info.stacks_tip_height; + last_tip = info.stacks_tip; + break; + } } debug!("follower_bootup: Wait for next block-commit"); @@ -3430,6 +3443,7 @@ fn forked_tenure_is_ignored() { let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in Tenure C to mine a second block"); while mined_blocks.load(Ordering::SeqCst) <= blocks_before { assert!( From 0cdfa5493fa520bc1e9d9837f5303845136cfd17 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 1 Jul 2024 15:03:51 -0400 Subject: [PATCH 0449/1400] Fix number of signatures to check against in mine 2 nakamoto tenures Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c1d6169f7d..0f5387bc39 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -130,7 +130,7 @@ impl SignerTest { info!("Got {} signatures", signature.len()); - assert_eq!(signature.len(), num_signers); + assert!(signature.len() >= num_signers / 7 * 10); let reward_cycle = self.get_current_reward_cycle(); let signers = self.get_reward_set_signers(reward_cycle); From b9f2f5f1c54949382175a3390510bf2730047ba9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 1 Jul 2024 17:55:28 -0400 Subject: [PATCH 0450/1400] chore: fmt --- .../stacks-node/src/nakamoto_node/miner.rs | 116 ++++++---- .../stacks-node/src/nakamoto_node/relayer.rs | 200 ++++++++++++------ .../src/tests/nakamoto_integrations.rs | 13 +- 3 files changed, 212 insertions(+), 117 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 25ec73924f..4f04084dc5 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -38,10 +38,9 @@ use stacks::chainstate::stacks::{ TenureChangeCause, TenureChangePayload, TransactionAnchorMode, TransactionPayload, TransactionVersion, }; -use stacks::net::StacksMessageType; -use stacks::net::stackerdb::StackerDBs; use stacks::net::p2p::NetworkHandle; -use stacks::net::NakamotoBlocksData; +use stacks::net::stackerdb::StackerDBs; +use stacks::net::{NakamotoBlocksData, StacksMessageType}; use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -53,9 +52,9 @@ use super::relayer::RelayerThread; use super::sign_coordinator::SignCoordinator; use super::{Config, Error as NakamotoNodeError, EventDispatcher, Keychain}; use crate::nakamoto_node::VRF_MOCK_MINER_KEY; +use crate::neon_node; use crate::run_loop::nakamoto::Globals; use crate::run_loop::RegisteredKey; -use crate::neon_node; #[cfg(test)] lazy_static::lazy_static! { @@ -172,7 +171,7 @@ impl BlockMinerThread { event_dispatcher: rt.event_dispatcher.clone(), parent_tenure_id, reason, - p2p_handle: rt.get_p2p_handle() + p2p_handle: rt.get_p2p_handle(), } } @@ -582,7 +581,12 @@ impl BlockMinerThread { // forward to p2p thread let block_id = block.block_id(); - if let Err(e) = self.p2p_handle.broadcast_message(vec![], StacksMessageType::NakamotoBlocks(NakamotoBlocksData { blocks: vec![block] })) { + if let Err(e) = self.p2p_handle.broadcast_message( + vec![], + StacksMessageType::NakamotoBlocks(NakamotoBlocksData { + blocks: vec![block], + }), + ) { warn!("Failed to broadcast blocok {}: {:?}", &block_id, &e); } Ok(()) @@ -678,32 +682,50 @@ impl BlockMinerThread { // 2. The highest block in the current tenure's parent tenure // Where the current tenure's parent tenure is the tenure start block committed to in the current tenure's associated block commit. let stacks_tip_header = if let Some(block) = self.mined_blocks.last() { - test_debug!("Stacks block parent ID is last mined block {}", &block.block_id()); - let header_info = NakamotoChainState::get_block_header(chain_state.db(), &block.block_id()) - .map_err(|e| { - error!("Could not query header info for last-mined block ID {}: {:?}", &block.block_id(), &e); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!("No header info for last-mined block ID {}", &block.block_id()); - NakamotoNodeError::ParentNotFound - })?; + test_debug!( + "Stacks block parent ID is last mined block {}", + &block.block_id() + ); + let header_info = + NakamotoChainState::get_block_header(chain_state.db(), &block.block_id()) + .map_err(|e| { + error!( + "Could not query header info for last-mined block ID {}: {:?}", + &block.block_id(), + &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No header info for last-mined block ID {}", + &block.block_id() + ); + NakamotoNodeError::ParentNotFound + })?; header_info } else { - test_debug!("Stacks block parent ID is last block in parent tenure ID {}", &self.parent_tenure_id); - let parent_tenure_header = NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) - .map_err(|e| { - error!("Could not query header for parent tenure ID {}: {:?}", &self.parent_tenure_id, &e); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!("No header for parent tenure ID {}", &self.parent_tenure_id); - NakamotoNodeError::ParentNotFound - })?; + test_debug!( + "Stacks block parent ID is last block in parent tenure ID {}", + &self.parent_tenure_id + ); + let parent_tenure_header = + NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) + .map_err(|e| { + error!( + "Could not query header for parent tenure ID {}: {:?}", + &self.parent_tenure_id, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!("No header for parent tenure ID {}", &self.parent_tenure_id); + NakamotoNodeError::ParentNotFound + })?; - let (stacks_tip_ch, stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(burn_db.conn()) - .map_err(|e| { + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(burn_db.conn()).map_err(|e| { error!("Failed to load canonical Stacks tip: {:?}", &e); NakamotoNodeError::ParentNotFound })?; @@ -718,29 +740,37 @@ impl BlockMinerThread { .map_err(|e| { error!("Could not query parent tenure finish block: {:?}", &e); NakamotoNodeError::ParentNotFound - })? - { + })? { header - } - else { + } else { // this is an epoch2 block - debug!("Stacks block parent ID may be an epoch2x block: {}", &self.parent_tenure_id); - let header = NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) - .map_err(|e| { - error!("Could not query header info for epoch2x tenure block ID {}: {:?}", &self.parent_tenure_id, &e); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!("No header info for epoch2x tenure block ID {}", &self.parent_tenure_id); - NakamotoNodeError::ParentNotFound - })?; + debug!( + "Stacks block parent ID may be an epoch2x block: {}", + &self.parent_tenure_id + ); + let header = + NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) + .map_err(|e| { + error!( + "Could not query header info for epoch2x tenure block ID {}: {:?}", + &self.parent_tenure_id, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No header info for epoch2x tenure block ID {}", + &self.parent_tenure_id + ); + NakamotoNodeError::ParentNotFound + })?; header }; last_tenure_finish_block_header }; - + test_debug!( "Miner: stacks tip parent header is {} {:?}", &stacks_tip_header.index_block_hash(), diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 5f03e0b15c..d8d3454c9a 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -48,7 +48,7 @@ use stacks_common::types::chainstate::{ use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::Hash160; -use stacks_common::util::vrf::{VRFPublicKey}; +use stacks_common::util::vrf::VRFPublicKey; use super::miner::MinerReason; use super::{ @@ -118,7 +118,14 @@ pub struct LastCommit { } impl LastCommit { - pub fn new(commit: LeaderBlockCommitOp, burn_tip: BlockSnapshot, stacks_tip: StacksBlockId, tenure_consensus_hash: ConsensusHash, start_block_hash: BlockHeaderHash, epoch_id: StacksEpochId) -> Self { + pub fn new( + commit: LeaderBlockCommitOp, + burn_tip: BlockSnapshot, + stacks_tip: StacksBlockId, + tenure_consensus_hash: ConsensusHash, + start_block_hash: BlockHeaderHash, + epoch_id: StacksEpochId, + ) -> Self { Self { block_commit: commit, burn_tip, @@ -159,7 +166,7 @@ impl LastCommit { pub fn get_epoch_id(&self) -> &StacksEpochId { &self.epoch_id } - + /// What's the tenure-start block ID of the tenure this block-commit confirms? pub fn get_tenure_start_block_id(&self) -> StacksBlockId { StacksBlockId::new(&self.tenure_consensus_hash, &self.start_block_hash) @@ -176,7 +183,6 @@ impl LastCommit { } } - /// Relayer thread /// * accepts network results and stores blocks and microblocks /// * forwards new blocks, microblocks, and transactions to the p2p thread @@ -389,35 +395,43 @@ impl RelayerThread { consensus_hash: ConsensusHash, burn_hash: BurnchainHeaderHash, committed_index_hash: StacksBlockId, - ) -> Result { + ) -> Result { let sn = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &consensus_hash) .expect("FATAL: failed to query sortition DB") .expect("FATAL: unknown consensus hash"); - - let (stacks_tip_ch, stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) - .map_err(|e| { + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).map_err(|e| { error!("Failed to load canonical stacks tip: {:?}", &e); NakamotoNodeError::ParentNotFound })?; let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - - let ongoing_tenure_consensus_hash = if let Some(ongoing_tenure) = NakamotoChainState::get_ongoing_tenure(&mut self.chainstate.index_conn(), &stacks_tip) - .map_err(|e| { - error!("Failed to get ongoing tenure off of {}: {:?}", &stacks_tip, &e); + + let ongoing_tenure_consensus_hash = if let Some(ongoing_tenure) = + NakamotoChainState::get_ongoing_tenure(&mut self.chainstate.index_conn(), &stacks_tip) + .map_err(|e| { + error!( + "Failed to get ongoing tenure off of {}: {:?}", + &stacks_tip, &e + ); NakamotoNodeError::ParentNotFound - })? - { + })? { ongoing_tenure.tenure_id_consensus_hash - } - else if let Some(header) = StacksChainState::get_stacks_block_header_info_by_index_block_hash(self.chainstate.db(), &stacks_tip) + } else if let Some(header) = + StacksChainState::get_stacks_block_header_info_by_index_block_hash( + self.chainstate.db(), + &stacks_tip, + ) .map_err(|e| { - error!("Failed to get stacks 2.x block header for {}: {:?}", &stacks_tip, &e); + error!( + "Failed to get stacks 2.x block header for {}: {:?}", + &stacks_tip, &e + ); NakamotoNodeError::ParentNotFound })? { header.consensus_hash - } - else { + } else { error!("Could not deduce ongoing tenure"); return Err(NakamotoNodeError::ParentNotFound); }; @@ -425,14 +439,20 @@ impl RelayerThread { let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( &mut self.chainstate.index_conn(), &stacks_tip, - &ongoing_tenure_consensus_hash + &ongoing_tenure_consensus_hash, ) .map_err(|e| { - error!("Relayer: Failed to get tenure-start block header for stacks tip {}: {:?}", &stacks_tip, &e); + error!( + "Relayer: Failed to get tenure-start block header for stacks tip {}: {:?}", + &stacks_tip, &e + ); NakamotoNodeError::ParentNotFound })? .ok_or_else(|| { - error!("Relayer: Failed to find tenure-start block header for stacks tip {}", &stacks_tip); + error!( + "Relayer: Failed to find tenure-start block header for stacks tip {}", + &stacks_tip + ); NakamotoNodeError::ParentNotFound })?; @@ -543,7 +563,7 @@ impl RelayerThread { .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?; let stacks_tip = StacksBlockId::new(tip_block_ch, tip_block_bh); - + // sanity check -- this block must exist and have been processed locally let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( &mut self.chainstate.index_conn(), @@ -551,25 +571,41 @@ impl RelayerThread { &tip_block_ch, ) .map_err(|e| { - error!("Relayer: Failed to get tenure-start block header for stacks tip {}: {:?}", &stacks_tip, &e); + error!( + "Relayer: Failed to get tenure-start block header for stacks tip {}: {:?}", + &stacks_tip, &e + ); NakamotoNodeError::ParentNotFound })? .ok_or_else(|| { - error!("Relayer: Failed to find tenure-start block header for stacks tip {}", &stacks_tip); + error!( + "Relayer: Failed to find tenure-start block header for stacks tip {}", + &stacks_tip + ); NakamotoNodeError::ParentNotFound })?; // load the VRF proof generated in this tenure, so we can use it to seed the VRF in the // upcoming tenure. This may be an epoch2x VRF proof. - let tip_vrf_proof = NakamotoChainState::get_block_vrf_proof(&mut self.chainstate.index_conn(), &stacks_tip, tip_block_ch) - .map_err(|e| { - error!("Failed to load VRF proof for {} off of {}: {:?}", tip_block_ch, &stacks_tip, &e); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!("No block VRF proof for {} off of {}", tip_block_ch, &stacks_tip); - NakamotoNodeError::ParentNotFound - })?; + let tip_vrf_proof = NakamotoChainState::get_block_vrf_proof( + &mut self.chainstate.index_conn(), + &stacks_tip, + tip_block_ch, + ) + .map_err(|e| { + error!( + "Failed to load VRF proof for {} off of {}: {:?}", + tip_block_ch, &stacks_tip, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No block VRF proof for {} off of {}", + tip_block_ch, &stacks_tip + ); + NakamotoNodeError::ParentNotFound + })?; // let's figure out the recipient set! let recipients = get_nakamoto_next_recipients( @@ -583,11 +619,13 @@ impl RelayerThread { error!("Relayer: Failure fetching recipient set: {:?}", e); NakamotoNodeError::SnapshotNotFoundForChainTip })?; - - let commit_outs = if self.burnchain.is_in_prepare_phase(sort_tip.block_height + 1) { + + let commit_outs = if self + .burnchain + .is_in_prepare_phase(sort_tip.block_height + 1) + { vec![PoxAddress::standard_burn_address(self.config.is_mainnet())] - } - else { + } else { RewardSetInfo::into_commit_outs(recipients, self.config.is_mainnet()) }; @@ -601,7 +639,7 @@ impl RelayerThread { error!("Relayer: Failed to lookup the block snapshot of highest tenure ID"; "tenure_consensus_hash" => %tip_block_ch); return Err(NakamotoNodeError::ParentNotFound); }; - + // find the parent block-commit of this commit let commit_parent_block_burn_height = tip_tenure_sortition.block_height; let Ok(Some(parent_winning_tx)) = SortitionDB::get_block_commit( @@ -652,7 +690,9 @@ impl RelayerThread { // block-commits in Nakamoto commit to the ongoing tenure's tenure-start block (which, // when processed, become the start-block of the tenure atop which this miner will // produce blocks) - block_header_hash: BlockHeaderHash(highest_tenure_start_block_header.index_block_hash().0), + block_header_hash: BlockHeaderHash( + highest_tenure_start_block_header.index_block_hash().0, + ), // the rest of this is the same as epoch2x commits, modulo the new epoch marker burn_fee: burn_fee_cap, apparent_sender: sender, @@ -681,8 +721,11 @@ impl RelayerThread { sort_tip, stacks_tip, highest_tenure_start_block_header.consensus_hash, - highest_tenure_start_block_header.anchored_header.block_hash(), - target_epoch.epoch_id)) + highest_tenure_start_block_header + .anchored_header + .block_hash(), + target_epoch.epoch_id, + )) } /// Create the block miner thread state. @@ -894,12 +937,13 @@ impl RelayerThread { burn_hash: BurnchainHeaderHash, committed_index_hash: StacksBlockId, ) -> bool { - let miner_instruction = match self.process_sortition(consensus_hash, burn_hash, committed_index_hash) { - Ok(mi) => mi, - Err(_) => { - return false; - } - }; + let miner_instruction = + match self.process_sortition(consensus_hash, burn_hash, committed_index_hash) { + Ok(mi) => mi, + Err(_) => { + return false; + } + }; match miner_instruction { MinerDirective::BeginTenure { @@ -963,7 +1007,9 @@ impl RelayerThread { .bitcoin_controller .submit_operation( last_committed.get_epoch_id().clone(), - BlockstackOperationType::LeaderBlockCommit(last_committed.get_block_commit().clone()), + BlockstackOperationType::LeaderBlockCommit( + last_committed.get_block_commit().clone(), + ), &mut op_signer, 1, ) @@ -1012,10 +1058,10 @@ impl RelayerThread { } LeaderKeyRegistrationState::Active(_) => {} }; - + // load up canonical sortition and stacks tips - let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) - .map_err(|e| { + let Ok(sort_tip) = + SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()).map_err(|e| { error!("Failed to load canonical sortition tip: {:?}", &e); e }) @@ -1024,8 +1070,8 @@ impl RelayerThread { }; // NOTE: this may be an epoch2x tip - let Ok((stacks_tip_ch, stacks_tip_bh)) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) - .map_err(|e| { + let Ok((stacks_tip_ch, stacks_tip_bh)) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).map_err(|e| { error!("Failed to load canonical stacks tip: {:?}", &e); e }) @@ -1037,7 +1083,8 @@ impl RelayerThread { // check stacks and sortition tips to see if any chainstate change has happened. // did our view of the sortition history change? // if so, then let's try and confirm the highest tenure so far. - let burnchain_changed = self.last_committed + let burnchain_changed = self + .last_committed .as_ref() .map(|cmt| cmt.get_burn_view_consensus_hash() != &sort_tip.consensus_hash) .unwrap_or(true); @@ -1045,32 +1092,45 @@ impl RelayerThread { // did our view of the highest ongoing tenure change? // e.g. did we process blocks in an ancestral tenure that previously was empty? // if so, then we will need to send (more likely, RBF) a block-commit to confirm it. - let (ongoing_tenure_consensus_hash, tenure_epoch_id) = if let Some(ongoing_tenure) = NakamotoChainState::get_ongoing_tenure(&mut self.chainstate.index_conn(), &stacks_tip) - .map_err(|e| { - error!("Failed to get ongoing tenure off of {}: {:?}", &stacks_tip, &e); - e - }) - .ok() - .flatten() + let (ongoing_tenure_consensus_hash, tenure_epoch_id) = if let Some(ongoing_tenure) = + NakamotoChainState::get_ongoing_tenure(&mut self.chainstate.index_conn(), &stacks_tip) + .map_err(|e| { + error!( + "Failed to get ongoing tenure off of {}: {:?}", + &stacks_tip, &e + ); + e + }) + .ok() + .flatten() { - (ongoing_tenure.tenure_id_consensus_hash, StacksEpochId::Epoch30) - } - else if let Some(header) = StacksChainState::get_stacks_block_header_info_by_index_block_hash(self.chainstate.db(), &stacks_tip) + ( + ongoing_tenure.tenure_id_consensus_hash, + StacksEpochId::Epoch30, + ) + } else if let Some(header) = + StacksChainState::get_stacks_block_header_info_by_index_block_hash( + self.chainstate.db(), + &stacks_tip, + ) .map_err(|e| { - error!("Failed to get stacks 2.x block header for {}: {:?}", &stacks_tip, &e); + error!( + "Failed to get stacks 2.x block header for {}: {:?}", + &stacks_tip, &e + ); e }) .ok() .flatten() { (header.consensus_hash, StacksEpochId::Epoch25) - } - else { + } else { error!("Could not deduce ongoing tenure"); return None; }; - let highest_tenure_changed = self.last_committed + let highest_tenure_changed = self + .last_committed .as_ref() .map(|cmt| cmt.get_tenure_id() != &ongoing_tenure_consensus_hash) .unwrap_or(true); @@ -1093,7 +1153,7 @@ impl RelayerThread { // burnchain view or highest-tenure view changed, so we need to send (or RBF) a commit Some(RelayerDirective::IssueBlockCommit( stacks_tip_ch, - stacks_tip_bh + stacks_tip_bh, )) } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 64de79b093..562e7c9fa8 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2773,10 +2773,15 @@ fn follower_bootup() { continue; }; if info.stacks_tip == follower_info.stacks_tip { - debug!("follower_bootup: Follower has advanced to miner's tip {}", &info.stacks_tip); - } - else { - debug!("follower_bootup: Follower has NOT advanced to miner's tip: {} != {}", &info.stacks_tip, follower_info.stacks_tip); + debug!( + "follower_bootup: Follower has advanced to miner's tip {}", + &info.stacks_tip + ); + } else { + debug!( + "follower_bootup: Follower has NOT advanced to miner's tip: {} != {}", + &info.stacks_tip, follower_info.stacks_tip + ); } last_tip = info.stacks_tip; From 642bef7bfd24125bd8a06e8145b44d8b092f8550 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 2 Jul 2024 00:43:30 -0400 Subject: [PATCH 0451/1400] chore: address (some) failing unit tests --- .../src/chainstate/nakamoto/tests/node.rs | 83 ++++++++++--------- stackslib/src/net/mod.rs | 15 ---- stackslib/src/net/p2p.rs | 11 ++- 3 files changed, 56 insertions(+), 53 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 964779e079..d3f190de1f 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -398,12 +398,14 @@ impl TestStacksNode { /// Record the nakamoto blocks as a new tenure pub fn add_nakamoto_tenure_blocks(&mut self, tenure_blocks: Vec) { if let Some(last_tenure) = self.nakamoto_blocks.last_mut() { - // this tenure is overwriting the last tenure - if last_tenure.first().unwrap().header.consensus_hash - == tenure_blocks.first().unwrap().header.consensus_hash - { - *last_tenure = tenure_blocks; - return; + if tenure_blocks.len() > 0 { + // this tenure is overwriting the last tenure + if last_tenure.first().unwrap().header.consensus_hash + == tenure_blocks.first().unwrap().header.consensus_hash + { + *last_tenure = tenure_blocks; + return; + } } } self.nakamoto_blocks.push(tenure_blocks); @@ -761,16 +763,12 @@ impl TestStacksNode { let block_id = nakamoto_block.block_id(); - if !try_to_process { - blocks.push((nakamoto_block, size, cost)); - block_count += 1; - break; + if try_to_process { + debug!( + "Process Nakamoto block {} ({:?}", + &block_id, &nakamoto_block.header + ); } - - debug!( - "Process Nakamoto block {} ({:?}", - &block_id, &nakamoto_block.header - ); debug!( "Nakamoto block {} txs: {:?}", &block_id, &nakamoto_block.txs @@ -803,24 +801,28 @@ impl TestStacksNode { malleablized_blocks.push(block_to_store.clone()); } - let accepted = match Relayer::process_new_nakamoto_block( - &miner.burnchain, - sortdb, - &mut sort_handle, - chainstate, - &stacks_tip, - &block_to_store, - None, - NakamotoBlockObtainMethod::Pushed, - ) { - Ok(accepted) => accepted, - Err(e) => { - error!( - "Failed to process nakamoto block: {:?}\n{:?}", - &e, &nakamoto_block - ); - panic!(); + let accepted = if try_to_process { + match Relayer::process_new_nakamoto_block( + &miner.burnchain, + sortdb, + &mut sort_handle, + chainstate, + &stacks_tip, + &block_to_store, + None, + NakamotoBlockObtainMethod::Pushed, + ) { + Ok(accepted) => accepted, + Err(e) => { + error!( + "Failed to process nakamoto block: {:?}\n{:?}", + &e, &nakamoto_block + ); + panic!(); + } } + } else { + false }; if accepted { test_debug!("Accepted Nakamoto block {}", &block_to_store.block_id()); @@ -842,11 +844,18 @@ impl TestStacksNode { assert_eq!(nakamoto_chain_tip, &nakamoto_block.header); } } else { - test_debug!( - "Did NOT accept Nakamoto block {}", - &block_to_store.block_id() - ); - break; + if try_to_process { + test_debug!( + "Did NOT accept Nakamoto block {}", + &block_to_store.block_id() + ); + break; + } else { + test_debug!( + "Test will NOT process Nakamoto block {}", + &block_to_store.block_id() + ); + } } let num_sigs = block_to_store.header.signer_signature.len(); diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 34e3479a17..4d3b048846 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2773,7 +2773,6 @@ pub mod test { let mut mempool = self.mempool.take().unwrap(); let indexer = self.indexer.take().unwrap(); - let old_parent_tip = self.network.parent_stacks_tip.clone(); let old_tip = self.network.stacks_tip.clone(); let ret = self.network.run( @@ -2788,10 +2787,6 @@ pub mod test { &RPCHandlerArgs::default(), ); - if self.network.stacks_tip != old_tip { - assert_eq!(self.network.parent_stacks_tip, old_tip); - } - self.sortdb = Some(sortdb); self.stacks_node = Some(stacks_node); self.mempool = Some(mempool); @@ -2858,7 +2853,6 @@ pub mod test { ); let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); - let old_parent_tip = self.network.parent_stacks_tip.clone(); let old_tip = self.network.stacks_tip.clone(); let ret = self.network.run( @@ -2873,10 +2867,6 @@ pub mod test { &RPCHandlerArgs::default(), ); - if self.network.stacks_tip != old_tip { - assert_eq!(self.network.parent_stacks_tip, old_tip); - } - self.sortdb = Some(sortdb); self.stacks_node = Some(stacks_node); self.mempool = Some(mempool); @@ -2889,17 +2879,12 @@ pub mod test { let mut stacks_node = self.stacks_node.take().unwrap(); let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); - let old_parent_tip = self.network.parent_stacks_tip.clone(); let old_tip = self.network.stacks_tip.clone(); self.network .refresh_burnchain_view(&indexer, &sortdb, &mut stacks_node.chainstate, false) .unwrap(); - if self.network.stacks_tip != old_tip { - assert_eq!(self.network.parent_stacks_tip, old_tip); - } - self.sortdb = Some(sortdb); self.stacks_node = Some(stacks_node); } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index ec29acb711..6cd160f9e5 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -4670,7 +4670,7 @@ impl PeerNetwork { /// Load up the parent stacks tip. /// For epoch 2.x, this is the pointer to the parent block of the current stacks tip - /// For epoch 3.x, this is the pointer to the tenure-start block of the parent tenure of the + /// For epoch 3.x, this is the pointer to the _tenure-start_ block of the parent tenure of the /// current stacks tip. /// If this is the first tenure in epoch 3.x, then this is the pointer to the epoch 2.x block /// that it builds atop. @@ -4921,6 +4921,10 @@ impl PeerNetwork { Ok(tip) => tip, Err(net_error::DBError(db_error::NotFoundError)) => { // this is the first block + debug!( + "First-ever block (no parent): {:?} ({}/{})", + &new_stacks_tip_block_id, &stacks_tip_ch, &stacks_tip_bhh + ); StacksTipInfo { consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), block_hash: FIRST_STACKS_BLOCK_HASH.clone(), @@ -5086,6 +5090,11 @@ impl PeerNetwork { self.get_local_peer(), &self.stacks_tip ); + test_debug!( + "{:?}: parent canonical Stacks tip is now {:?}", + self.get_local_peer(), + &self.parent_stacks_tip + ); } Ok(ret) From 7768347f72d811e44c634cb475b2199d4c852cd6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 2 Jul 2024 08:05:19 -0500 Subject: [PATCH 0452/1400] test: fix threshold assertion in signer::v0 Co-authored-by: Jeff Bencin --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 04c1b1a8a8..d66751cccf 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -132,7 +132,7 @@ impl SignerTest { // NOTE: signature.len() does not need to equal signers.len(); the stacks miner can finish the block // whenever it has crossed the threshold. - assert!(signature.len() >= num_signers / 7 * 10); + assert!(signature.len() >= num_signers * 7 / 10); let reward_cycle = self.get_current_reward_cycle(); let signers = self.get_reward_set_signers(reward_cycle); From 8ea1a8a4c1363bff654a4abe86731243423a9621 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 2 Jul 2024 08:28:56 -0500 Subject: [PATCH 0453/1400] chore: add test assertion comment --- testnet/stacks-node/src/tests/signer/v0.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d66751cccf..a837365dad 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -141,6 +141,8 @@ impl SignerTest { let mut signer_index = 0; let mut signature_index = 0; let validated = loop { + // Since we've already checked `signature.len()`, this means we've + // validated all the signatures in this loop let Some(signature) = signature.get(signature_index) else { break true; }; From d05c20b1f6d19ae6105f791388f2d43fd5970606 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 1 Jul 2024 13:12:01 -0400 Subject: [PATCH 0454/1400] Add verify-vote and generate-vote cli commands for voting on SIPs Signed-off-by: Jacinta Ferrant --- Cargo.lock | 1 + stacks-signer/Cargo.toml | 1 + stacks-signer/src/cli.rs | 107 ++++++++++++++++++++++++++++++++++++ stacks-signer/src/main.rs | 113 +++++++++++++++++++++++++++++++++++++- 4 files changed, 219 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 86c318f3b6..0eb36fe701 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3469,6 +3469,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_stacker", + "sha2 0.10.8", "slog", "slog-json", "slog-term", diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 087a0a4472..5fd7eefde1 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -30,6 +30,7 @@ libstackerdb = { path = "../libstackerdb" } prometheus = { version = "0.9", optional = true } rand_core = "0.6" reqwest = { version = "0.11.22", default-features = false, features = ["blocking", "json", "rustls-tls"] } +sha2 = "0.10" serde = "1" serde_derive = "1" serde_stacker = "0.1" diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 29e1e5f232..7f9f693a36 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -19,7 +19,12 @@ use std::path::PathBuf; use blockstack_lib::chainstate::stacks::address::PoxAddress; use blockstack_lib::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; use clap::{ArgAction, Parser, ValueEnum}; +use clarity::types::chainstate::StacksPublicKey; +use clarity::types::{PrivateKey, PublicKey}; +use clarity::util::hash::Sha512Trunc256Sum; +use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::QualifiedContractIdentifier; +use sha2::{Digest, Sha512_256}; use stacks_common::address::{ b58, AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_MULTISIG, @@ -55,6 +60,10 @@ pub enum Command { GenerateStackingSignature(GenerateStackingSignatureArgs), /// Check a configuration file and output config information CheckConfig(RunSignerArgs), + /// Vote for a specified SIP with a yes or no vote + GenerateVote(GenerateVoteArgs), + /// Verify the vote for a specified SIP against a public key and vote info + VerifyVote(VerifyVoteArgs), } /// Basic arguments for all cyrptographic and stacker-db functionality @@ -123,6 +132,89 @@ pub struct RunSignerArgs { pub config: PathBuf, } +#[derive(Parser, Debug, Clone, Copy)] +/// Arguments for the Vote command +pub struct GenerateVoteArgs { + /// The Stacks private key to use in hexademical format + #[arg(short, long, value_parser = parse_private_key)] + pub private_key: StacksPrivateKey, + /// The vote info being cast + #[clap(flatten)] + pub vote_info: VoteInfo, +} + +#[derive(Parser, Debug, Clone, Copy)] +/// Arguments for the VerifyVote command +pub struct VerifyVoteArgs { + /// The Stacks public key to verify against + #[arg(short, long, value_parser = parse_public_key)] + pub public_key: StacksPublicKey, + /// The message signature in hexadecimal format + #[arg(short, long, value_parser = parse_message_signature)] + pub signature: MessageSignature, + /// The vote info being verified + #[clap(flatten)] + pub vote_info: VoteInfo, +} + +#[derive(Parser, Debug, Clone, Copy)] +/// Information about a SIP vote +pub struct VoteInfo { + /// The SIP number to vote on + #[arg(long)] + pub sip: u32, + /// The vote to cast + #[arg(long, value_parser = parse_vote)] + pub vote: Vote, +} + +impl VoteInfo { + /// Get the digest to sign that authenticates this vote data + fn digest(&self) -> Sha512Trunc256Sum { + let mut hasher = Sha512_256::new(); + hasher.update(&self.sip.to_be_bytes()); + hasher.update((self.vote as u8).to_be_bytes()); + Sha512Trunc256Sum::from_hasher(hasher) + } + + /// Sign the vote data and return the signature + pub fn sign(&self, private_key: &StacksPrivateKey) -> Result { + let digest = self.digest(); + private_key.sign(digest.as_bytes()) + } + + /// Verify the vote data against the provided public key and signature + pub fn verify( + &self, + public_key: &StacksPublicKey, + signature: &MessageSignature, + ) -> Result { + let digest = self.digest(); + public_key.verify(digest.as_bytes(), signature) + } +} + +#[derive(Debug, Clone, Copy)] +#[repr(u8)] +/// A given vote for a SIP +pub enum Vote { + /// Vote yes + Yes, + /// Vote no + No, +} + +impl TryFrom<&str> for Vote { + type Error = String; + fn try_from(input: &str) -> Result { + match input.to_lowercase().as_str() { + "yes" => Ok(Vote::Yes), + "no" => Ok(Vote::No), + _ => Err(format!("Invalid vote: {}. Must be `yes` or `no`.", input)), + } + } +} + #[derive(Clone, Debug, PartialEq)] /// Wrapper around `Pox4SignatureTopic` to implement `ValueEnum` pub struct StackingSignatureMethod(Pox4SignatureTopic); @@ -233,6 +325,21 @@ fn parse_private_key(private_key: &str) -> Result { StacksPrivateKey::from_hex(private_key).map_err(|e| format!("Invalid private key: {}", e)) } +/// Parse the hexadecimal Stacks public key +fn parse_public_key(public_key: &str) -> Result { + StacksPublicKey::from_hex(public_key).map_err(|e| format!("Invalid public key: {}", e)) +} + +/// Parse the vote +fn parse_vote(vote: &str) -> Result { + Vote::try_from(vote) +} + +/// Parse the hexadecimal encoded message signature +fn parse_message_signature(signature: &str) -> Result { + MessageSignature::from_hex(signature).map_err(|e| format!("Invalid message signature: {}", e)) +} + /// Parse the input data fn parse_data(data: &str) -> Result, String> { let encoded_data = if data == "-" { diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index dcabfffc08..15998b098a 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -39,8 +39,8 @@ use stacks_common::debug; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::MessageSignature; use stacks_signer::cli::{ - Cli, Command, GenerateStackingSignatureArgs, GetChunkArgs, GetLatestChunkArgs, PutChunkArgs, - RunSignerArgs, StackerDBArgs, + Cli, Command, GenerateStackingSignatureArgs, GenerateVoteArgs, GetChunkArgs, + GetLatestChunkArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, VerifyVoteArgs, }; use stacks_signer::config::GlobalConfig; use stacks_signer::v0::SpawnedSigner; @@ -164,6 +164,29 @@ fn handle_check_config(args: RunSignerArgs) { println!("Config: {}", config); } +fn handle_generate_vote(args: GenerateVoteArgs, do_print: bool) -> MessageSignature { + let message_signature = args.vote_info.sign(&args.private_key).unwrap(); + if do_print { + println!("{}", to_hex(message_signature.as_bytes())); + } + message_signature +} + +fn handle_verify_vote(args: VerifyVoteArgs, do_print: bool) -> bool { + let valid_vote = args + .vote_info + .verify(&args.public_key, &args.signature) + .unwrap(); + if do_print { + if valid_vote { + println!("Valid vote"); + } else { + println!("Invalid vote"); + } + } + valid_vote +} + fn main() { let cli = Cli::parse(); @@ -194,6 +217,12 @@ fn main() { Command::CheckConfig(args) => { handle_check_config(args); } + Command::GenerateVote(args) => { + handle_generate_vote(args, true); + } + Command::VerifyVote(args) => { + handle_verify_vote(args, true); + } } } @@ -204,11 +233,13 @@ pub mod tests { use blockstack_lib::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_message_hash, Pox4SignatureTopic, }; + use clarity::util::secp256k1::Secp256k1PrivateKey; use clarity::vm::{execute_v2, Value}; + use rand::RngCore; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::PublicKey; use stacks_common::util::secp256k1::Secp256k1PublicKey; - use stacks_signer::cli::parse_pox_addr; + use stacks_signer::cli::{parse_pox_addr, VerifyVoteArgs, Vote, VoteInfo}; use super::{handle_generate_stacking_signature, *}; use crate::{GenerateStackingSignatureArgs, GlobalConfig}; @@ -338,4 +369,80 @@ pub mod tests { assert!(verify_result.is_ok()); assert!(verify_result.unwrap()); } + + #[test] + fn test_vote() { + let mut rand = rand::thread_rng(); + let private_key = Secp256k1PrivateKey::new(); + let public_key = StacksPublicKey::from_private(&private_key); + let vote_info = VoteInfo { + vote: Vote::No, + sip: rand.next_u32(), + }; + let args = GenerateVoteArgs { + private_key, + vote_info, + }; + let message_signature = handle_generate_vote(args, false); + assert!( + args.vote_info + .verify(&public_key, &message_signature) + .unwrap(), + "Vote should be valid" + ); + } + + #[test] + fn test_verify_vote() { + let mut rand = rand::thread_rng(); + let private_key = Secp256k1PrivateKey::new(); + let public_key = StacksPublicKey::from_private(&private_key); + + let invalid_private_key = Secp256k1PrivateKey::new(); + let invalid_public_key = StacksPublicKey::from_private(&invalid_private_key); + + let sip = rand.next_u32(); + let vote_info = VoteInfo { + vote: Vote::No, + sip, + }; + + let args = VerifyVoteArgs { + public_key, + signature: vote_info.sign(&private_key).unwrap(), + vote_info, + }; + let valid = handle_verify_vote(args, false); + assert!(valid, "Vote should be valid"); + + let args = VerifyVoteArgs { + public_key: invalid_public_key, + signature: vote_info.sign(&private_key).unwrap(), // Invalid corresponding public key + vote_info, + }; + let valid = handle_verify_vote(args, false); + assert!(!valid, "Vote should be invalid"); + + let args = VerifyVoteArgs { + public_key, + signature: vote_info.sign(&private_key).unwrap(), + vote_info: VoteInfo { + vote: Vote::Yes, // Invalid vote + sip, + }, + }; + let valid = handle_verify_vote(args, false); + assert!(!valid, "Vote should be invalid"); + + let args = VerifyVoteArgs { + public_key, + signature: vote_info.sign(&private_key).unwrap(), + vote_info: VoteInfo { + vote: Vote::No, + sip: sip.wrapping_add(1), // Invalid sip number + }, + }; + let valid = handle_verify_vote(args, false); + assert!(!valid, "Vote should be invalid"); + } } From e400ddc582a4f6f42d6500dbf6cdb710e29a74fa Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 2 Jul 2024 12:03:09 -0500 Subject: [PATCH 0455/1400] chore: improve code comments --- stacks-signer/src/client/stackerdb.rs | 5 +++-- stackslib/src/net/api/postblock_v3.rs | 2 +- .../stacks-node/src/nakamoto_node/sign_coordinator.rs | 10 ++++++---- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 499bdddd5f..7303d3f967 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -290,12 +290,13 @@ mod tests { }; let mock_server = mock_server_from_config(&config); debug!("Spawning msg sender"); - let h = spawn(move || stackerdb.send_message_with_retry(signer_message).unwrap()); + let sender_thread = + spawn(move || stackerdb.send_message_with_retry(signer_message).unwrap()); let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); let payload = serde_json::to_string(&ack).expect("Failed to serialize ack"); response_bytes.extend(payload.as_bytes()); std::thread::sleep(Duration::from_millis(500)); write_response(mock_server, response_bytes.as_slice()); - assert_eq!(ack, h.join().unwrap()); + assert_eq!(ack, sender_thread.join().unwrap()); } } diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index f7f595913a..2754d4dc56 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -167,7 +167,7 @@ impl HttpResponse for RPCPostBlockRequestHandler { body: &[u8], ) -> Result { let accepted: StacksBlockAcceptedData = parse_json(preamble, body)?; - Ok(HttpResponsePayload::try_from_json(accepted)?) + HttpResponsePayload::try_from_json(accepted) } } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 63f46a9557..8a6b5312b7 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -331,7 +331,8 @@ impl SignCoordinator { .expect("FATAL: tried to initialize WSTS coordinator before first burn block height") } - fn send_signers_message( + /// Send a message over the miners contract using a `Scalar` private key + fn send_miners_message_scalar( message_key: &Scalar, sortdb: &SortitionDB, tip: &BlockSnapshot, @@ -355,6 +356,7 @@ impl SignCoordinator { ) } + /// Send a message over the miners contract using a `StacksPrivateKey` pub fn send_miners_message( miner_sk: &StacksPrivateKey, sortdb: &SortitionDB, @@ -439,7 +441,7 @@ impl SignCoordinator { "Failed to start signing round in FIRE coordinator: {e:?}" )) })?; - Self::send_signers_message::( + Self::send_miners_message_scalar::( &self.message_key, sortdb, burn_tip, @@ -591,7 +593,7 @@ impl SignCoordinator { } } for msg in outbound_msgs { - match Self::send_signers_message::( + match Self::send_miners_message_scalar::( &self.message_key, sortdb, burn_tip, @@ -653,7 +655,7 @@ impl SignCoordinator { debug!("Sending block proposal message to signers"; "signer_signature_hash" => ?&block.header.signer_signature_hash().0, ); - Self::send_signers_message::( + Self::send_miners_message_scalar::( &self.message_key, sortdb, burn_tip, From 98ea0973f0c155edc3e8a79b87a27a7a9ef8f0f2 Mon Sep 17 00:00:00 2001 From: BowTiedRadone Date: Tue, 2 Jul 2024 21:12:56 +0300 Subject: [PATCH 0456/1400] Update `time@0.3.34` to `time@0.3.36` for nightly build This commit updates the `time@0.3.34` crate to the latest version. This update addresses build failures that occur when using the nightly toolchain `nightly-x86_64-unknown-linux-gnu`. --- Cargo.lock | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 86c318f3b6..8a5f63612b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3295,7 +3295,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.34", + "time 0.3.36", ] [[package]] @@ -3308,7 +3308,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.34", + "time 0.3.36", ] [[package]] @@ -3777,9 +3777,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -3789,7 +3789,7 @@ dependencies = [ "powerfmt", "serde", "time-core", - "time-macros 0.2.17", + "time-macros 0.2.18", ] [[package]] @@ -3810,9 +3810,9 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", From 192f758ce8945a72f4dece4fb02b668cde850ba9 Mon Sep 17 00:00:00 2001 From: maldiohead Date: Fri, 28 Jun 2024 17:44:55 +0800 Subject: [PATCH 0457/1400] [fix] replacement the abs() with unsigned_abs() Signed-off-by: maldiohead --- stacks-common/src/deps_common/bitcoin/blockdata/script.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/script.rs b/stacks-common/src/deps_common/bitcoin/blockdata/script.rs index 8b08ab998a..be5a6144c7 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/script.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/script.rs @@ -201,7 +201,7 @@ fn build_scriptint(n: i64) -> Vec { let neg = n < 0; - let mut abs = n.abs() as usize; + let mut abs = n.unsigned_abs() as usize; let mut v = Vec::with_capacity(size_of::() + 1); while abs > 0xFF { v.push((abs & 0xFF) as u8); From c63534fc4c6f8cdbc025fcafde2d8c865a85384a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 2 Jul 2024 15:01:00 -0400 Subject: [PATCH 0458/1400] fix: process burn ops when a tenure-change tx is found --- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 424005db67..fcc7faf2b2 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -3429,7 +3429,7 @@ impl NakamotoChainState { }; let (stacking_burn_ops, transfer_burn_ops, delegate_burn_ops, vote_for_agg_key_ops) = - if new_tenure { + if new_tenure || tenure_extend { StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops( chainstate_tx, &parent_index_hash, From e7edcc5b082d38bb9fa2c164867456a3f2637303 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 2 Jul 2024 15:01:21 -0400 Subject: [PATCH 0459/1400] Fix clippy warnings Signed-off-by: Jacinta Ferrant --- stacks-signer/src/cli.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 7f9f693a36..62ca06f13e 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -172,7 +172,7 @@ impl VoteInfo { /// Get the digest to sign that authenticates this vote data fn digest(&self) -> Sha512Trunc256Sum { let mut hasher = Sha512_256::new(); - hasher.update(&self.sip.to_be_bytes()); + hasher.update(self.sip.to_be_bytes()); hasher.update((self.vote as u8).to_be_bytes()); Sha512Trunc256Sum::from_hasher(hasher) } From e9f81a8dfced7ce4ca311c72e5373331ad23f8df Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 2 Jul 2024 15:01:28 -0400 Subject: [PATCH 0460/1400] fix: fix failing unit tests --- .../src/chainstate/nakamoto/tests/mod.rs | 26 +++++++++++++------ 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 88f28ec60b..d8bd63cb6a 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -69,7 +69,7 @@ use crate::chainstate::nakamoto::tenure::NakamotoTenure; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{ - query_rows, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, SortitionHandle, + query_row, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, SortitionHandle, FIRST_STACKS_BLOCK_ID, }; use crate::chainstate::stacks::boot::{ @@ -98,15 +98,25 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { pub fn get_all_blocks_in_tenure( &self, tenure_id_consensus_hash: &ConsensusHash, + tip: &StacksBlockId, ) -> Result, ChainstateError> { - let qry = "SELECT data FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 ORDER BY height ASC"; - let args: &[&dyn ToSql] = &[tenure_id_consensus_hash]; - let block_data: Vec> = query_rows(self, qry, args)?; - let mut blocks = Vec::with_capacity(block_data.len()); - for data in block_data.into_iter() { - let block = NakamotoBlock::consensus_deserialize(&mut data.as_slice())?; + let mut blocks = vec![]; + let mut cursor = tip.clone(); + let qry = "SELECT data FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; + loop { + let Some(block_data): Option> = + query_row(self, qry, rusqlite::params![&cursor])? + else { + break; + }; + let block = NakamotoBlock::consensus_deserialize(&mut block_data.as_slice())?; + if &block.header.consensus_hash != tenure_id_consensus_hash { + break; + } + cursor = block.header.parent_block_id.clone(); blocks.push(block); } + blocks.reverse(); Ok(blocks) } } @@ -873,7 +883,7 @@ pub fn test_load_store_update_nakamoto_blocks() { parent_block_id: nakamoto_header_2.block_id(), tx_merkle_root: nakamoto_tx_merkle_root_3, state_index_root: TrieHash([0x07; 32]), - timestamp: 9, + timestamp: 8, miner_signature: MessageSignature::empty(), signer_signature: vec![MessageSignature::from_bytes(&[0x01; 65]).unwrap()], pox_treatment: BitVec::zeros(1).unwrap(), From 95e37b7912a4003faa829a1c6c75eb43695717b2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 2 Jul 2024 15:02:05 -0400 Subject: [PATCH 0461/1400] fix: fix failing unit test --- stackslib/src/net/tests/download/nakamoto.rs | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 5903f5978c..9de9fb087b 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -370,15 +370,27 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { let parent_tip_ch = peer.network.parent_stacks_tip.consensus_hash.clone(); let current_reward_sets = peer.network.current_reward_sets.clone(); + let last_block_in_confirmed_tenure = NakamotoChainState::get_highest_block_header_in_tenure( + &mut peer.chainstate().index_conn(), + &tip_block_id, + &parent_tip_ch, + ) + .unwrap() + .unwrap(); + + // NOTE: we have to account for malleablized blocks! let unconfirmed_tenure = peer .chainstate() .nakamoto_blocks_db() - .get_all_blocks_in_tenure(&tip_ch) + .get_all_blocks_in_tenure(&tip_ch, &tip_block_id) .unwrap(); let last_confirmed_tenure = peer .chainstate() .nakamoto_blocks_db() - .get_all_blocks_in_tenure(&parent_tip_ch) + .get_all_blocks_in_tenure( + &parent_tip_ch, + &last_block_in_confirmed_tenure.index_block_hash(), + ) .unwrap(); let parent_parent_header = NakamotoChainState::get_block_header_nakamoto( @@ -437,8 +449,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { .as_ref() .unwrap() .header - .parent_block_id - .clone(), + .block_id(), processed: false, burn_height: peer.network.burnchain_tip.block_height, }; From 6060b8e6ff070a7bd21fd5308e0fc1f93f7260c8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 2 Jul 2024 15:02:53 -0400 Subject: [PATCH 0462/1400] fix: use malleablized blocks --- stackslib/src/net/tests/mod.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index e13a9ee4fc..63e19a50c7 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -1087,11 +1087,18 @@ fn test_boot_nakamoto_peer() { NakamotoBootTenure::Sortition(vec![NakamotoBootStep::Block(vec![next_stx_transfer()])]), ]; + // make malleablized blocks + let (test_signers, test_stackers) = TestStacker::multi_signing_set(&[ + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, + ]); + let plan = NakamotoBootPlan::new(&function_name!()) .with_private_key(private_key) .with_pox_constants(10, 3) .with_initial_balances(vec![(addr.into(), 1_000_000)]) - .with_extra_peers(2); + .with_extra_peers(2) + .with_test_signers(test_signers) + .with_test_stackers(test_stackers); let observer = TestEventObserver::new(); let (peer, other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(&observer)); From e7945308a625bbec62875b59ce207934bdd9ca85 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 2 Jul 2024 15:03:07 -0400 Subject: [PATCH 0463/1400] chore: remove unused code --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index d8d3454c9a..92304c0c25 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -157,21 +157,11 @@ impl LastCommit { &self.burn_tip } - /// What's the burn view consensus hash as of this last commit? - pub fn get_burn_view_consensus_hash(&self) -> &ConsensusHash { - &self.burn_tip.consensus_hash - } - /// What's the epoch in which this was sent? pub fn get_epoch_id(&self) -> &StacksEpochId { &self.epoch_id } - /// What's the tenure-start block ID of the tenure this block-commit confirms? - pub fn get_tenure_start_block_id(&self) -> StacksBlockId { - StacksBlockId::new(&self.tenure_consensus_hash, &self.start_block_hash) - } - /// Get the tenure ID of the tenure this commit builds on pub fn get_tenure_id(&self) -> &ConsensusHash { &self.tenure_consensus_hash @@ -1086,7 +1076,7 @@ impl RelayerThread { let burnchain_changed = self .last_committed .as_ref() - .map(|cmt| cmt.get_burn_view_consensus_hash() != &sort_tip.consensus_hash) + .map(|cmt| cmt.get_burn_tip().consensus_hash != &sort_tip.consensus_hash) .unwrap_or(true); // did our view of the highest ongoing tenure change? @@ -1138,7 +1128,7 @@ impl RelayerThread { debug!("Relayer: initiative to commit"; "sortititon tip" => %sort_tip.consensus_hash, "stacks tip" => %stacks_tip, - "last-commit burn view" => %self.last_committed.as_ref().map(|cmt| cmt.get_burn_view_consensus_hash().to_string()).unwrap_or("(not set)".to_string()), + "last-commit burn view" => %self.last_committed.as_ref().map(|cmt| cmt.get_burn_tip().consensus_hash.to_string()).unwrap_or("(not set)".to_string()), "ongoing tenure" => %ongoing_tenure_consensus_hash, "last-commit ongoing tenure" => %self.last_committed.as_ref().map(|cmt| cmt.get_tenure_id().to_string()).unwrap_or("(not set)".to_string()), "tenure epoch" => %tenure_epoch_id, From 8079e4f0ea145f114dce71cd2612acd5f4d0e8d7 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 24 Jun 2024 14:48:58 -0400 Subject: [PATCH 0464/1400] feat: Add sortition state tracking and use it to check block proposals --- stacks-signer/src/lib.rs | 2 + stacks-signer/src/runloop.rs | 5 ++ stacks-signer/src/v0/signer.rs | 96 ++++++++++++++++++++++++++++------ stacks-signer/src/v1/signer.rs | 4 +- 4 files changed, 90 insertions(+), 17 deletions(-) diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 8b0c4d26ed..47367c9bd5 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -46,6 +46,7 @@ mod tests; use std::fmt::{Debug, Display}; use std::sync::mpsc::{channel, Receiver, Sender}; +use chainstate::SortitionsView; use config::GlobalConfig; use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait}; use runloop::SignerResult; @@ -68,6 +69,7 @@ pub trait Signer: Debug + Display { fn process_event( &mut self, stacks_client: &StacksClient, + sortition_state: &mut Option, event: Option<&SignerEvent>, res: Sender>, current_reward_cycle: u64, diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 43bd7977ad..c257bb624c 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -29,6 +29,7 @@ use stacks_common::{debug, error, info, warn}; use wsts::common::MerkleRoot; use wsts::state_machine::OperationResult; +use crate::chainstate::SortitionsView; use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID, StacksClient}; use crate::config::{GlobalConfig, SignerConfig}; use crate::Signer as SignerTrait; @@ -147,6 +148,8 @@ where pub commands: VecDeque, /// The current reward cycle info. Only None if the runloop is uninitialized pub current_reward_cycle_info: Option, + /// Cache sortitin data from `stacks-node` + pub sortition_state: Option, /// Phantom data for the message codec _phantom_data: std::marker::PhantomData, } @@ -162,6 +165,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo state: State::Uninitialized, commands: VecDeque::new(), current_reward_cycle_info: None, + sortition_state: None, _phantom_data: std::marker::PhantomData, } } @@ -429,6 +433,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> for signer in self.stacks_signers.values_mut() { signer.process_event( &self.stacks_client, + &mut self.sortition_state, event.as_ref(), res.clone(), current_reward_cycle, diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 962c1f623b..d94557ce63 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -19,12 +19,14 @@ use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::PrivateKey; use clarity::util::hash::MerkleHashFunc; +use clarity::util::secp256k1::Secp256k1PublicKey; use libsigner::v0::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMessage}; use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::{debug, error, info, warn}; +use crate::chainstate::SortitionsView; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; use crate::runloop::{RunLoopCommand, SignerResult}; @@ -77,6 +79,7 @@ impl SignerTrait for Signer { fn process_event( &mut self, stacks_client: &StacksClient, + sortition_state: &mut Option, event: Option<&SignerEvent>, _res: Sender>, current_reward_cycle: u64, @@ -112,7 +115,7 @@ impl SignerTrait for Signer { messages.len() ); } - SignerEvent::MinerMessages(messages, _) => { + SignerEvent::MinerMessages(messages, miner_pubkey) => { debug!( "{self}: Received {} messages from the miner", messages.len(); @@ -120,7 +123,12 @@ impl SignerTrait for Signer { for message in messages { match message { SignerMessage::BlockProposal(block_proposal) => { - self.handle_block_proposal(stacks_client, block_proposal); + self.handle_block_proposal( + stacks_client, + sortition_state, + block_proposal, + miner_pubkey, + ); } SignerMessage::BlockPushed(b) => { let block_push_result = stacks_client.post_block(&b); @@ -139,7 +147,8 @@ impl SignerTrait for Signer { debug!("{self}: Received a status check event."); } SignerEvent::NewBurnBlock(height) => { - debug!("{self}: Receved a new burn block event for block height {height}") + debug!("{self}: Receved a new burn block event for block height {height}"); + *sortition_state = None; } } } @@ -210,7 +219,9 @@ impl Signer { fn handle_block_proposal( &mut self, stacks_client: &StacksClient, + sortition_state: &mut Option, block_proposal: &BlockProposal, + miner_pubkey: &Secp256k1PublicKey, ) { debug!("{self}: Received a block proposal: {block_proposal:?}"); if block_proposal.reward_cycle != self.reward_cycle { @@ -245,23 +256,76 @@ impl Signer { { warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); } - } else { - debug!( - "{self}: received a block proposal for a new block. Submit block for validation. "; + return; + } + + debug!( + "{self}: received a block proposal for a new block. Submit block for validation. "; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ); + crate::monitoring::increment_block_proposals_received(); + + // Get sortition view if we don't have it + if sortition_state.is_none() { + *sortition_state = SortitionsView::fetch_view(stacks_client) + .inspect_err(|e| { + warn!( + "{self}: Failed to update sortition view: {e:?}"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ) + }) + .ok(); + } + + let Some(sortition_state) = sortition_state else { + warn!( + "{self}: Cannot validate block, no sortition view"; "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), ); - let block_info = BlockInfo::from(block_proposal.clone()); - crate::monitoring::increment_block_proposals_received(); - stacks_client - .submit_block_for_validation(block_info.block.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to submit block for validation: {e:?}",); - }); - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + return; + }; + + match sortition_state.check_proposal( + stacks_client, + &self.signer_db, + &block_proposal.block, + miner_pubkey, + ) { + // Error validating block + Err(e) => { + warn!( + "{self}: Error checking block proposal: {e:?}"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ); + return; + } + // Block proposal is bad + Ok(false) => { + warn!( + "{self}: Block proposal invalid"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ); + return; + } + // Block proposal is good, continue + Ok(true) => {} } + + let block_info = BlockInfo::from(block_proposal.clone()); + stacks_client + .submit_block_for_validation(block_info.block.clone()) + .unwrap_or_else(|e| { + warn!("{self}: Failed to submit block for validation: {e:?}",); + }); + + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); } /// Handle the block validate response returned from our prior calls to submit a block for validation diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index e053786058..e660ce7eee 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -53,6 +53,7 @@ use wsts::traits::Signer as _; use wsts::v2; use super::stackerdb_manager::StackerDBManager; +use crate::chainstate::SortitionsView; use crate::client::{ClientError, SignerSlotID, StacksClient}; use crate::config::SignerConfig; use crate::runloop::{RunLoopCommand, SignerCommand, SignerResult}; @@ -161,6 +162,7 @@ impl SignerTrait for Signer { fn process_event( &mut self, stacks_client: &StacksClient, + _sortition_state: &mut Option, event: Option<&SignerEvent>, res: Sender>, current_reward_cycle: u64, @@ -238,7 +240,7 @@ impl SignerTrait for Signer { debug!("{self}: Received a status check event.") } SignerEvent::NewBurnBlock(height) => { - debug!("{self}: Receved a new burn block event for block height {height}") + debug!("{self}: Receved a new burn block event for block height {height}"); } } } From 4e75a7c80afce3cef01bec56fb3196297f618032 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 26 Jun 2024 14:56:03 -0400 Subject: [PATCH 0465/1400] refactor: Don't fail block proposal validation if we don't have sortition view --- stacks-signer/src/v0/signer.rs | 72 +++++++++++++++++----------------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index d94557ce63..f3d0969ee8 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -265,6 +265,7 @@ impl Signer { "block_id" => %block_proposal.block.block_id(), ); crate::monitoring::increment_block_proposals_received(); + let mut block_info = BlockInfo::from(block_proposal.clone()); // Get sortition view if we don't have it if sortition_state.is_none() { @@ -279,50 +280,49 @@ impl Signer { .ok(); } - let Some(sortition_state) = sortition_state else { + // If we have sortition state, run some additional checks + if let Some(sortition_state) = sortition_state { + match sortition_state.check_proposal( + stacks_client, + &self.signer_db, + &block_proposal.block, + miner_pubkey, + ) { + // Error validating block + Err(e) => warn!( + "{self}: Error checking block proposal: {e:?}"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ), + // Block proposal is bad + Ok(false) => { + warn!( + "{self}: Block proposal invalid"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ); + block_info.valid = Some(false); + } + // Block proposal passed check, still don't know if valid + Ok(true) => {} + } + } else { warn!( "{self}: Cannot validate block, no sortition view"; "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), - ); - return; + ) }; - match sortition_state.check_proposal( - stacks_client, - &self.signer_db, - &block_proposal.block, - miner_pubkey, - ) { - // Error validating block - Err(e) => { - warn!( - "{self}: Error checking block proposal: {e:?}"; - "signer_sighash" => %signer_signature_hash, - "block_id" => %block_proposal.block.block_id(), - ); - return; - } - // Block proposal is bad - Ok(false) => { - warn!( - "{self}: Block proposal invalid"; - "signer_sighash" => %signer_signature_hash, - "block_id" => %block_proposal.block.block_id(), - ); - return; - } - // Block proposal is good, continue - Ok(true) => {} + // This is an expensive call, skip if we already know if block is valid + if block_info.valid.is_none() { + stacks_client + .submit_block_for_validation(block_info.block.clone()) + .unwrap_or_else(|e| { + warn!("{self}: Failed to submit block for validation: {e:?}",); + }); } - let block_info = BlockInfo::from(block_proposal.clone()); - stacks_client - .submit_block_for_validation(block_info.block.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to submit block for validation: {e:?}",); - }); - self.signer_db .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); From 8869e4f5dd174d1b2a5bdf6840801696f18ee60f Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 28 Jun 2024 13:46:09 -0400 Subject: [PATCH 0466/1400] fix: Send reject message if proposal fails check against sortition view --- stacks-signer/src/v0/signer.rs | 57 +++++++++++++++++++++++++--------- 1 file changed, 43 insertions(+), 14 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index f3d0969ee8..f126dd63e2 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,7 +15,7 @@ use std::fmt::Debug; use std::sync::mpsc::Sender; -use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; +use blockstack_lib::net::api::postblock_proposal::{BlockValidateResponse, ValidateRejectCode}; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::PrivateKey; use clarity::util::hash::MerkleHashFunc; @@ -280,8 +280,8 @@ impl Signer { .ok(); } - // If we have sortition state, run some additional checks - if let Some(sortition_state) = sortition_state { + // Check if proposal can be rejected now if not valid agains sortition view + let block_response = if let Some(sortition_state) = sortition_state { match sortition_state.check_proposal( stacks_client, &self.signer_db, @@ -289,11 +289,17 @@ impl Signer { miner_pubkey, ) { // Error validating block - Err(e) => warn!( - "{self}: Error checking block proposal: {e:?}"; - "signer_sighash" => %signer_signature_hash, - "block_id" => %block_proposal.block.block_id(), - ), + Err(e) => { + warn!( + "{self}: Error checking block proposal: {e:?}"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::ConnectivityIssues, + )) + } // Block proposal is bad Ok(false) => { warn!( @@ -301,25 +307,48 @@ impl Signer { "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), ); - block_info.valid = Some(false); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), + )) } // Block proposal passed check, still don't know if valid - Ok(true) => {} + Ok(true) => None, } } else { warn!( "{self}: Cannot validate block, no sortition view"; "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), - ) + ); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), + )) }; - // This is an expensive call, skip if we already know if block is valid - if block_info.valid.is_none() { + if let Some(block_response) = block_response { + // We know proposal is invalid. Send rejection message, do not do further validation + block_info.valid = Some(false); + debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); + let res = self + .stackerdb + .send_message_with_retry::(block_response.into()); + + match res { + Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), + Ok(ack) if !ack.accepted => warn!( + "{self}: Block rejection not accepted by stacker-db: {:?}", + ack.reason + ), + Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), + } + } else { + // We don't know if proposal is valid, submit to stacks-node for further checks stacks_client .submit_block_for_validation(block_info.block.clone()) .unwrap_or_else(|e| { - warn!("{self}: Failed to submit block for validation: {e:?}",); + warn!("{self}: Failed to submit block for validation: {e:?}"); }); } From e5128f930f37a176094de25b14a51ee59cada275 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 2 Jul 2024 10:24:50 -0400 Subject: [PATCH 0467/1400] fix: Don't wait for `BlockValidateResponse` message in tests --- testnet/stacks-node/src/tests/signer/v0.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c2f842a613..2b8e5662cb 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -262,10 +262,8 @@ fn block_proposal_rejection() { ); info!("------------------------- Test Block Proposal Rejected -------------------------"); - // Verify that the node correctly rejected the node - let proposed_signer_signature_hash = - signer_test.wait_for_validate_reject_response(short_timeout); - assert_eq!(proposed_signer_signature_hash, block_signer_signature_hash); + // Give signer time to reject block + std::thread::sleep(Duration::from_secs(3)); let mut stackerdb = StackerDB::new( &signer_test.running_nodes.conf.node.rpc_bind, From 18702eb0802c079692a8b9262032fbeaf8bb39f4 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 2 Jul 2024 13:41:50 -0400 Subject: [PATCH 0468/1400] chore: Poll for response from signer for 30 seconds --- testnet/stacks-node/src/tests/signer/v0.rs | 48 ++++++++++++---------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2b8e5662cb..86c638c80e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -15,7 +15,7 @@ use std::env; use std::sync::atomic::Ordering; -use std::time::Duration; +use std::time::{Duration, Instant}; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{ @@ -262,8 +262,6 @@ fn block_proposal_rejection() { ); info!("------------------------- Test Block Proposal Rejected -------------------------"); - // Give signer time to reject block - std::thread::sleep(Duration::from_secs(3)); let mut stackerdb = StackerDB::new( &signer_test.running_nodes.conf.node.rpc_bind, @@ -280,24 +278,32 @@ fn block_proposal_rejection() { .collect(); assert_eq!(signer_slot_ids.len(), num_signers); - let messages: Vec = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::BlockResponse) - .expect("Failed to get BlockResponse stackerdb session"), - &signer_slot_ids, - ) - .expect("Failed to get message from stackerdb"); - for message in messages { - if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { - reason: _reason, - reason_code, - signer_signature_hash, - })) = message - { - assert_eq!(signer_signature_hash, block_signer_signature_hash); - assert!(matches!(reason_code, RejectCode::ValidationFailed(_))); - } else { - panic!("Unexpected message type"); + let start_polling = Instant::now(); + 'poll: loop { + std::thread::sleep(Duration::from_secs(1)); + let messages: Vec = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &signer_slot_ids, + ) + .expect("Failed to get message from stackerdb"); + for message in messages { + if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason: _reason, + reason_code, + signer_signature_hash, + })) = message + { + assert_eq!(signer_signature_hash, block_signer_signature_hash); + assert!(matches!(reason_code, RejectCode::ValidationFailed(_))); + break 'poll; + } else { + panic!("Unexpected message type"); + } + } + if start_polling.elapsed() > short_timeout { + panic!("Timed out after waiting for response from signer"); } } signer_test.shutdown(); From 7811988c046172ded83494999f36b9491c2b94f9 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 3 Jul 2024 08:11:22 -0400 Subject: [PATCH 0469/1400] CRC: use SIP-018 signing scheme for voting and use config private key Signed-off-by: Jacinta Ferrant --- Cargo.lock | 1 - stacks-signer/Cargo.toml | 1 - stacks-signer/src/cli.rs | 56 ++++++++++++++++++--------- stacks-signer/src/client/stackerdb.rs | 2 - stacks-signer/src/main.rs | 19 ++++----- stacks-signer/src/v0/signer.rs | 2 +- 6 files changed, 49 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0eb36fe701..86c318f3b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3469,7 +3469,6 @@ dependencies = [ "serde_derive", "serde_json", "serde_stacker", - "sha2 0.10.8", "slog", "slog-json", "slog-term", diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 5fd7eefde1..087a0a4472 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -30,7 +30,6 @@ libstackerdb = { path = "../libstackerdb" } prometheus = { version = "0.9", optional = true } rand_core = "0.6" reqwest = { version = "0.11.22", default-features = false, features = ["blocking", "json", "rustls-tls"] } -sha2 = "0.10" serde = "1" serde_derive = "1" serde_stacker = "0.1" diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 62ca06f13e..24394fde45 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -18,18 +18,24 @@ use std::path::PathBuf; use blockstack_lib::chainstate::stacks::address::PoxAddress; use blockstack_lib::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; +use blockstack_lib::util_lib::signed_structured_data::{ + make_structured_data_domain, structured_data_message_hash, +}; use clap::{ArgAction, Parser, ValueEnum}; +use clarity::consts::CHAIN_ID_MAINNET; use clarity::types::chainstate::StacksPublicKey; use clarity::types::{PrivateKey, PublicKey}; -use clarity::util::hash::Sha512Trunc256Sum; +use clarity::util::hash::Sha256Sum; use clarity::util::secp256k1::MessageSignature; -use clarity::vm::types::QualifiedContractIdentifier; -use sha2::{Digest, Sha512_256}; +use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; +use clarity::vm::Value; +use serde::{Deserialize, Serialize}; use stacks_common::address::{ b58, AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; +use stacks_common::define_u8_enum; use stacks_common::types::chainstate::StacksPrivateKey; extern crate alloc; @@ -132,12 +138,12 @@ pub struct RunSignerArgs { pub config: PathBuf, } -#[derive(Parser, Debug, Clone, Copy)] +#[derive(Parser, Debug, Clone)] /// Arguments for the Vote command pub struct GenerateVoteArgs { - /// The Stacks private key to use in hexademical format - #[arg(short, long, value_parser = parse_private_key)] - pub private_key: StacksPrivateKey, + /// Path to signer config file + #[arg(long, short, value_name = "FILE")] + pub config: PathBuf, /// The vote info being cast #[clap(flatten)] pub vote_info: VoteInfo, @@ -170,11 +176,15 @@ pub struct VoteInfo { impl VoteInfo { /// Get the digest to sign that authenticates this vote data - fn digest(&self) -> Sha512Trunc256Sum { - let mut hasher = Sha512_256::new(); - hasher.update(self.sip.to_be_bytes()); - hasher.update((self.vote as u8).to_be_bytes()); - Sha512Trunc256Sum::from_hasher(hasher) + fn digest(&self) -> Sha256Sum { + let vote_message = TupleData::from_data(vec![ + ("sip".into(), Value::UInt(self.sip.into())), + ("vote".into(), Value::UInt(self.vote.to_u8().into())), + ]) + .unwrap(); + let data_domain = + make_structured_data_domain("signer-sip-voting", "1.0.0", CHAIN_ID_MAINNET); + structured_data_message_hash(vote_message.into(), data_domain) } /// Sign the vote data and return the signature @@ -194,15 +204,14 @@ impl VoteInfo { } } -#[derive(Debug, Clone, Copy)] -#[repr(u8)] +define_u8_enum!( /// A given vote for a SIP -pub enum Vote { +Vote { /// Vote yes - Yes, + Yes = 0, /// Vote no - No, -} + No = 1 +}); impl TryFrom<&str> for Vote { type Error = String; @@ -215,6 +224,17 @@ impl TryFrom<&str> for Vote { } } +impl TryFrom for Vote { + type Error = String; + fn try_from(input: u8) -> Result { + match input { + 0 => Ok(Vote::Yes), + 1 => Ok(Vote::No), + _ => Err(format!("Invalid vote: {}. Must be 0 or 1.", input)), + } + } +} + #[derive(Clone, Debug, PartialEq)] /// Wrapper around `Pox4SignatureTopic` to implement `ValueEnum` pub struct StackingSignatureMethod(Pox4SignatureTopic); diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 7303d3f967..de77ccbd72 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -235,8 +235,6 @@ mod tests { use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use libsigner::v0::messages::{BlockRejection, BlockResponse, RejectCode, SignerMessage}; - use libsigner::BlockProposal; - use rand::{thread_rng, RngCore}; use super::*; use crate::client::tests::{generate_signer_config, mock_server_from_config, write_response}; diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 15998b098a..81909b9fa9 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -165,7 +165,8 @@ fn handle_check_config(args: RunSignerArgs) { } fn handle_generate_vote(args: GenerateVoteArgs, do_print: bool) -> MessageSignature { - let message_signature = args.vote_info.sign(&args.private_key).unwrap(); + let config = GlobalConfig::try_from(&args.config).unwrap(); + let message_signature = args.vote_info.sign(&config.stacks_private_key).unwrap(); if do_print { println!("{}", to_hex(message_signature.as_bytes())); } @@ -235,7 +236,7 @@ pub mod tests { }; use clarity::util::secp256k1::Secp256k1PrivateKey; use clarity::vm::{execute_v2, Value}; - use rand::RngCore; + use rand::{Rng, RngCore}; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::PublicKey; use stacks_common::util::secp256k1::Secp256k1PublicKey; @@ -373,21 +374,21 @@ pub mod tests { #[test] fn test_vote() { let mut rand = rand::thread_rng(); - let private_key = Secp256k1PrivateKey::new(); - let public_key = StacksPublicKey::from_private(&private_key); let vote_info = VoteInfo { - vote: Vote::No, + vote: Vote::try_from(rand.gen_range(0..2)).unwrap(), sip: rand.next_u32(), }; + let config_file = "./src/tests/conf/signer-0.toml"; + let config = GlobalConfig::load_from_file(config_file).unwrap(); + let private_key = config.stacks_private_key; + let public_key = StacksPublicKey::from_private(&private_key); let args = GenerateVoteArgs { - private_key, + config: config_file.into(), vote_info, }; let message_signature = handle_generate_vote(args, false); assert!( - args.vote_info - .verify(&public_key, &message_signature) - .unwrap(), + vote_info.verify(&public_key, &message_signature).unwrap(), "Vote should be valid" ); } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 962c1f623b..f9148dbf84 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -123,7 +123,7 @@ impl SignerTrait for Signer { self.handle_block_proposal(stacks_client, block_proposal); } SignerMessage::BlockPushed(b) => { - let block_push_result = stacks_client.post_block(&b); + let block_push_result = stacks_client.post_block(b); info!( "{self}: Got block pushed message"; "block_id" => %b.block_id(), From bb2120990230d38fc38360c5a9cfa2cf52ea3e20 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 3 Jul 2024 10:42:38 -0400 Subject: [PATCH 0470/1400] CRC: use from_u8 in define_enum_u8 macro Signed-off-by: Jacinta Ferrant --- stacks-signer/src/cli.rs | 8 ++------ stacks-signer/src/main.rs | 2 +- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 24394fde45..3b63ebdd59 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -227,11 +227,7 @@ impl TryFrom<&str> for Vote { impl TryFrom for Vote { type Error = String; fn try_from(input: u8) -> Result { - match input { - 0 => Ok(Vote::Yes), - 1 => Ok(Vote::No), - _ => Err(format!("Invalid vote: {}. Must be 0 or 1.", input)), - } + Vote::from_u8(input).ok_or_else(|| format!("Invalid vote: {}. Must be 0 or 1.", input)) } } @@ -352,7 +348,7 @@ fn parse_public_key(public_key: &str) -> Result { /// Parse the vote fn parse_vote(vote: &str) -> Result { - Vote::try_from(vote) + vote.try_into() } /// Parse the hexadecimal encoded message signature diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 81909b9fa9..184876373b 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -375,7 +375,7 @@ pub mod tests { fn test_vote() { let mut rand = rand::thread_rng(); let vote_info = VoteInfo { - vote: Vote::try_from(rand.gen_range(0..2)).unwrap(), + vote: rand.gen_range(0..2).try_into().unwrap(), sip: rand.next_u32(), }; let config_file = "./src/tests/conf/signer-0.toml"; From 454f10be392f2303a3d2adf6bff26111ca891aab Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 3 Jul 2024 14:15:56 -0400 Subject: [PATCH 0471/1400] Modify block proposal rejection test to pass signer validation checks and enforce submitting to the node for validation Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 15 +++- stacks-signer/src/client/stackerdb.rs | 2 - stacks-signer/src/v0/signer.rs | 4 +- testnet/stacks-node/src/tests/signer/v0.rs | 80 +++++++++++++--------- 4 files changed, 62 insertions(+), 39 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index f831aa9e99..f2a7623622 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -222,7 +222,9 @@ RejectCodeTypePrefix { /// The block was rejected due to connectivity issues with the signer ConnectivityIssues = 1, /// The block was rejected in a prior round - RejectedInPriorRound = 2 + RejectedInPriorRound = 2, + /// The block was rejected due to no sortition view + NoSortitionView = 3 }); impl TryFrom for RejectCodeTypePrefix { @@ -240,6 +242,7 @@ impl From<&RejectCode> for RejectCodeTypePrefix { RejectCode::ValidationFailed(_) => RejectCodeTypePrefix::ValidationFailed, RejectCode::ConnectivityIssues => RejectCodeTypePrefix::ConnectivityIssues, RejectCode::RejectedInPriorRound => RejectCodeTypePrefix::RejectedInPriorRound, + RejectCode::NoSortitionView => RejectCodeTypePrefix::NoSortitionView, } } } @@ -249,6 +252,8 @@ impl From<&RejectCode> for RejectCodeTypePrefix { pub enum RejectCode { /// RPC endpoint Validation failed ValidationFailed(ValidateRejectCode), + /// No Sortition View to verify against + NoSortitionView, /// The block was rejected due to connectivity issues with the signer ConnectivityIssues, /// The block was rejected in a prior round @@ -420,7 +425,9 @@ impl StacksMessageCodec for RejectCode { // Do not do a single match here as we may add other variants in the future and don't want to miss adding it match self { RejectCode::ValidationFailed(code) => write_next(fd, &(*code as u8))?, - RejectCode::ConnectivityIssues | RejectCode::RejectedInPriorRound => { + RejectCode::ConnectivityIssues + | RejectCode::RejectedInPriorRound + | RejectCode::NoSortitionView => { // No additional data to serialize / deserialize } }; @@ -441,6 +448,7 @@ impl StacksMessageCodec for RejectCode { ), RejectCodeTypePrefix::ConnectivityIssues => RejectCode::ConnectivityIssues, RejectCodeTypePrefix::RejectedInPriorRound => RejectCode::RejectedInPriorRound, + RejectCodeTypePrefix::NoSortitionView => RejectCode::NoSortitionView, }; Ok(code) } @@ -459,6 +467,9 @@ impl std::fmt::Display for RejectCode { f, "The block was proposed before and rejected by the signer." ), + RejectCode::NoSortitionView => { + write!(f, "The block was rejected due to no sortition view.") + } } } } diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 7303d3f967..de77ccbd72 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -235,8 +235,6 @@ mod tests { use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use libsigner::v0::messages::{BlockRejection, BlockResponse, RejectCode, SignerMessage}; - use libsigner::BlockProposal; - use rand::{thread_rng, RngCore}; use super::*; use crate::client::tests::{generate_signer_config, mock_server_from_config, write_response}; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index f126dd63e2..fdaf7b59af 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -280,7 +280,7 @@ impl Signer { .ok(); } - // Check if proposal can be rejected now if not valid agains sortition view + // Check if proposal can be rejected now if not valid against sortition view let block_response = if let Some(sortition_state) = sortition_state { match sortition_state.check_proposal( stacks_client, @@ -323,7 +323,7 @@ impl Signer { ); Some(BlockResponse::rejected( block_proposal.block.header.signer_signature_hash(), - RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), + RejectCode::NoSortitionView, )) }; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 86c638c80e..5266a60ae5 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -31,6 +31,8 @@ use stacks::types::chainstate::{StacksAddress, StacksPrivateKey}; use stacks::types::PublicKey; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; +use stacks_common::bitvec::BitVec; +use stacks_signer::chainstate::SortitionsView; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::runloop::State; use stacks_signer::v0::SpawnedSigner; @@ -189,6 +191,40 @@ impl SignerTest { self.mine_and_verify_confirmed_naka_block(timeout, num_signers); } } + + /// Propose an invalid block to the signers + fn propose_block(&mut self, slot_id: u32, version: u32, block: NakamotoBlock) { + let miners_contract_id = boot_code_id(MINERS_NAME, false); + let mut session = + StackerDBSession::new(&self.running_nodes.conf.node.rpc_bind, miners_contract_id); + let burn_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let reward_cycle = self.get_current_reward_cycle(); + let message = SignerMessage::BlockProposal(BlockProposal { + block, + burn_height, + reward_cycle, + }); + let miner_sk = self + .running_nodes + .conf + .miner + .mining_key + .expect("No mining key"); + + // Submit the block proposal to the miner's slot + let mut chunk = StackerDBChunkData::new(slot_id, version, message.serialize_to_vec()); + chunk.sign(&miner_sk).expect("Failed to sign message chunk"); + debug!("Produced a signature: {:?}", chunk.sig); + let result = session.put_chunk(&chunk).expect("Failed to put chunk"); + debug!("Test Put Chunk ACK: {result:?}"); + assert!( + result.accepted, + "Failed to submit block proposal to signers" + ); + } } #[test] @@ -223,45 +259,23 @@ fn block_proposal_rejection() { let short_timeout = Duration::from_secs(30); info!("------------------------- Send Block Proposal To Signers -------------------------"); - let miners_contract_id = boot_code_id(MINERS_NAME, false); - let mut session = StackerDBSession::new( - &signer_test.running_nodes.conf.node.rpc_bind, - miners_contract_id.clone(), - ); - let block = NakamotoBlock { + let reward_cycle = signer_test.get_current_reward_cycle(); + let view = SortitionsView::fetch_view(&signer_test.stacks_client).unwrap(); + let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), txs: vec![], }; + // We want to force the signer to submit the block to the node for validation + // Must set the pox treatment validly and consensus hash validlty to prevent early termination + block.header.pox_treatment = BitVec::ones(1).unwrap(); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + let block_signer_signature_hash = block.header.signer_signature_hash(); - let burn_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); - let reward_cycle = signer_test.get_current_reward_cycle(); - let message = SignerMessage::BlockProposal(BlockProposal { - block, - burn_height, - reward_cycle, - }); - let miner_sk = signer_test - .running_nodes - .conf - .miner - .mining_key - .expect("No mining key"); - - // Submit the block proposal to the miner's slot - let mut chunk = StackerDBChunkData::new(0, 1, message.serialize_to_vec()); - chunk.sign(&miner_sk).expect("Failed to sign message chunk"); - debug!("Produced a signature: {:?}", chunk.sig); - let result = session.put_chunk(&chunk).expect("Failed to put chunk"); - debug!("Test Put Chunk ACK: {result:?}"); - assert!( - result.accepted, - "Failed to submit block proposal to signers" - ); + signer_test.propose_block(0, 1, block); info!("------------------------- Test Block Proposal Rejected -------------------------"); + let rejected_block_hash = signer_test.wait_for_validate_reject_response(short_timeout); + assert_eq!(rejected_block_hash, block_signer_signature_hash); let mut stackerdb = StackerDB::new( &signer_test.running_nodes.conf.node.rpc_bind, From 704ce35d9d8c8aed2a59598dddaa08f8cd106777 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 3 Jul 2024 22:32:59 +0300 Subject: [PATCH 0472/1400] initial sqlite upgrade and params --- Cargo.lock | 57 ++++----- clarity/Cargo.toml | 2 +- clarity/src/vm/database/sqlite.rs | 3 +- stacks-common/Cargo.toml | 2 +- stacks-common/src/types/sqlite.rs | 2 + stacks-signer/Cargo.toml | 2 +- stacks-signer/src/signerdb.rs | 9 +- stackslib/Cargo.toml | 2 +- stackslib/src/burnchains/bitcoin/spv.rs | 3 +- stackslib/src/burnchains/db.rs | 3 +- stackslib/src/burnchains/tests/db.rs | 3 +- stackslib/src/chainstate/burn/db/sortdb.rs | 74 ++++++------ stackslib/src/chainstate/nakamoto/mod.rs | 7 +- .../src/chainstate/nakamoto/signer_set.rs | 3 +- .../src/chainstate/nakamoto/staging_blocks.rs | 7 +- stackslib/src/chainstate/nakamoto/tenure.rs | 5 +- .../src/chainstate/stacks/db/accounts.rs | 9 +- stackslib/src/chainstate/stacks/db/blocks.rs | 83 ++++++------- stackslib/src/chainstate/stacks/db/mod.rs | 11 +- .../src/chainstate/stacks/index/cache.rs | 3 +- stackslib/src/chainstate/stacks/index/file.rs | 3 +- .../src/chainstate/stacks/index/storage.rs | 3 +- .../src/chainstate/stacks/index/trie_sql.rs | 39 ++++--- .../stacks/tests/block_construction.rs | 5 +- stackslib/src/clarity_cli.rs | 3 +- stackslib/src/clarity_vm/clarity.rs | 2 +- stackslib/src/clarity_vm/database/mod.rs | 4 +- stackslib/src/core/mempool.rs | 26 ++--- stackslib/src/core/tests/mod.rs | 9 +- stackslib/src/cost_estimates/fee_medians.rs | 12 +- stackslib/src/cost_estimates/fee_scalar.rs | 9 +- stackslib/src/cost_estimates/pessimistic.rs | 7 +- stackslib/src/main.rs | 15 +-- stackslib/src/monitoring/mod.rs | 3 +- stackslib/src/net/atlas/db.rs | 109 +++++++++--------- stackslib/src/net/atlas/tests.rs | 21 ++-- stackslib/src/net/db.rs | 71 ++++++------ stackslib/src/net/mod.rs | 2 +- stackslib/src/net/rpc.rs | 3 +- stackslib/src/net/stackerdb/db.rs | 39 ++++--- stackslib/src/util_lib/bloom.rs | 5 +- stackslib/src/util_lib/db.rs | 27 ++--- testnet/stacks-node/Cargo.toml | 2 +- .../src/tests/neon_integrations.rs | 2 +- 44 files changed, 359 insertions(+), 352 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 86c318f3b6..e209e90bbb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,12 +112,6 @@ dependencies = [ "opaque-debug", ] -[[package]] -name = "ahash" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0453232ace82dee0dd0b4c87a59bd90f7b53b314f3e0f61fe2ee7c8a16482289" - [[package]] name = "ahash" version = "0.8.8" @@ -717,7 +711,7 @@ name = "clarity" version = "0.0.1" dependencies = [ "assert-json-diff", - "hashbrown 0.14.3", + "hashbrown", "integer-sqrt", "lazy_static", "mutants", @@ -1188,9 +1182,9 @@ dependencies = [ [[package]] name = "fallible-iterator" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] name = "fallible-streaming-iterator" @@ -1495,33 +1489,24 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" -[[package]] -name = "hashbrown" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" -dependencies = [ - "ahash 0.4.8", -] - [[package]] name = "hashbrown" version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.8", + "ahash", "allocator-api2", "serde", ] [[package]] name = "hashlink" -version = "0.6.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d99cf782f0dc4372d26846bec3de7804ceb5df083c2d4462c0b8d2330e894fa8" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ - "hashbrown 0.9.1", + "hashbrown", ] [[package]] @@ -1758,7 +1743,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown", ] [[package]] @@ -1920,7 +1905,7 @@ name = "libsigner" version = "0.0.1" dependencies = [ "clarity", - "hashbrown 0.14.3", + "hashbrown", "lazy_static", "libc", "libstackerdb", @@ -1946,9 +1931,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.20.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d31059f22935e6c31830db5249ba2b7ecd54fd73a9909286f0a67aa55c2fbd" +checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" dependencies = [ "cc", "pkg-config", @@ -2722,7 +2707,7 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" name = "relay-server" version = "0.0.1" dependencies = [ - "hashbrown 0.14.3", + "hashbrown", ] [[package]] @@ -2874,17 +2859,15 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.24.2" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38ee71cbab2c827ec0ac24e76f82eca723cee92c509a65f67dee393c25112" +checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" dependencies = [ - "bitflags 1.3.2", - "byteorder", + "bitflags 2.4.2", "fallible-iterator", "fallible-streaming-iterator", "hashlink", "libsqlite3-sys", - "memchr", "serde_json", "smallvec", ] @@ -3380,7 +3363,7 @@ dependencies = [ "chrono", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown 0.14.3", + "hashbrown", "lazy_static", "libc", "nix", @@ -3416,7 +3399,7 @@ dependencies = [ "base64 0.12.3", "chrono", "clarity", - "hashbrown 0.14.3", + "hashbrown", "http-types", "lazy_static", "libc", @@ -3453,7 +3436,7 @@ dependencies = [ "backoff", "clap 4.5.0", "clarity", - "hashbrown 0.14.3", + "hashbrown", "lazy_static", "libsigner", "libstackerdb", @@ -3493,7 +3476,7 @@ dependencies = [ "criterion", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown 0.14.3", + "hashbrown", "integer-sqrt", "lazy_static", "libc", @@ -4611,7 +4594,7 @@ checksum = "9c80d57a61294350ed91e91eb20a6c34da084ec8f15d039bab79ce3efabbd1a4" dependencies = [ "aes-gcm 0.10.3", "bs58 0.5.0", - "hashbrown 0.14.3", + "hashbrown", "hex", "num-traits", "p256k1", diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index e10a36806e..4e563ba99b 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -37,7 +37,7 @@ version = "1.0" features = ["arbitrary_precision", "unbounded_depth"] [dependencies.rusqlite] -version = "=0.24.2" +version = "0.31.0" optional = true features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index bc8fca3dc3..81dd2dfb22 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -17,9 +17,10 @@ use rusqlite::types::{FromSql, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use rusqlite::{ Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OptionalExtension, Row, - Savepoint, NO_PARAMS, + Savepoint, }; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::db_common::tx_busy_handler; use stacks_common::util::hash::Sha512Trunc256Sum; diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 1337aa808c..4b9c290a1d 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -52,7 +52,7 @@ version = "0.24.3" features = ["serde", "recovery"] [dependencies.rusqlite] -version = "=0.24.2" +version = "0.31.0" optional = true features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] diff --git a/stacks-common/src/types/sqlite.rs b/stacks-common/src/types/sqlite.rs index 7aba8a2b1e..646c4f70d4 100644 --- a/stacks-common/src/types/sqlite.rs +++ b/stacks-common/src/types/sqlite.rs @@ -25,6 +25,8 @@ use crate::util::hash::{Hash160, Sha512Trunc256Sum}; use crate::util::secp256k1::MessageSignature; use crate::util::vrf::VRFProof; +pub const NO_PARAMS: &[&dyn rusqlite::ToSql] = &[]; + impl FromSql for Sha256dHash { fn column_result(value: ValueRef) -> FromSqlResult { let hex_str = value.as_str()?; diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 087a0a4472..5d51c2b101 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -53,7 +53,7 @@ polynomial = "0.2.6" num-traits = "0.2.18" [dependencies.rusqlite] -version = "=0.24.2" +version = "0.31.0" features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] [dependencies.serde_json] diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 7450476397..5692b5c6f0 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -21,11 +21,12 @@ use blockstack_lib::util_lib::db::{ query_row, sqlite_open, table_exists, u64_to_sql, Error as DBError, }; use libsigner::BlockProposal; -use rusqlite::{params, Connection, Error as SqliteError, OpenFlags, NO_PARAMS}; +use rusqlite::{params, Connection, Error as SqliteError, OpenFlags}; use serde::{Deserialize, Serialize}; use slog::slog_debug; use stacks_common::debug; use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::Sha512Trunc256Sum; use wsts::net::NonceRequest; @@ -163,7 +164,7 @@ impl SignerDb { ) -> Result<(), DBError> { self.db.execute( "INSERT OR REPLACE INTO signer_states (reward_cycle, encrypted_state) VALUES (?1, ?2)", - params![&u64_to_sql(reward_cycle)?, &encrypted_signer_state], + params![u64_to_sql(reward_cycle)?, encrypted_signer_state], )?; Ok(()) } @@ -178,7 +179,7 @@ impl SignerDb { let result: Option = query_row( &self.db, "SELECT block_info FROM blocks WHERE reward_cycle = ? AND signer_signature_hash = ?", - params![&u64_to_sql(reward_cycle)?, hash.to_string()], + params![u64_to_sql(reward_cycle)?, hash.to_string()], )?; try_deserialize(result) @@ -220,7 +221,7 @@ impl SignerDb { .execute( "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)", params![ - u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), &block_json, + u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), block_json, signed_over, u64_to_sql(block_info.block.header.chain_length)?, block_info.block.header.consensus_hash.to_hex(), diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index be75337115..f91af55530 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -80,7 +80,7 @@ version = "0.24.3" features = ["serde", "recovery"] [dependencies.rusqlite] -version = "=0.24.2" +version = "0.31.0" features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] [dependencies.ed25519-dalek] diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index a5627db4df..f11b2a5884 100644 --- a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -20,7 +20,7 @@ use std::ops::Deref; use std::{cmp, fs}; use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction}; use stacks_common::deps_common::bitcoin::blockdata::block::{BlockHeader, LoneBlockHeader}; use stacks_common::deps_common::bitcoin::blockdata::constants::genesis_block; use stacks_common::deps_common::bitcoin::network::constants::Network; @@ -31,6 +31,7 @@ use stacks_common::deps_common::bitcoin::network::serialize::{ }; use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::{hex_bytes, to_hex}; use stacks_common::util::uint::Uint256; use stacks_common::util::{get_epoch_time_secs, log}; diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 79e34b3539..2e67d7f0ed 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -19,9 +19,10 @@ use std::path::Path; use std::{cmp, fmt, fs, io}; use rusqlite::types::ToSql; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction}; use serde_json; use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::sqlite::NO_PARAMS; use crate::burnchains::affirmation::*; use crate::burnchains::{ diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index df1c3ec1b0..aa023d938b 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -16,11 +16,12 @@ use std::cmp; -use rusqlite::{ToSql, NO_PARAMS}; +use rusqlite::ToSql; use stacks_common::address::AddressHashMode; use stacks_common::deps_common::bitcoin::blockdata::transaction::Transaction as BtcTx; use stacks_common::deps_common::bitcoin::network::serialize::deserialize; use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::*; use super::*; diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 14011a2800..d620ad35be 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -29,8 +29,7 @@ use rand; use rand::RngCore; use rusqlite::types::ToSql; use rusqlite::{ - Connection, Error as sqlite_error, OpenFlags, OptionalExtension, Row, Transaction, - TransactionBehavior, NO_PARAMS, + params, Connection, Error as sqlite_error, OpenFlags, OptionalExtension, Row, Transaction, TransactionBehavior }; use sha2::{Digest, Sha512_256}; use stacks_common::address::AddressHashMode; @@ -38,6 +37,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, StacksBlockId, TrieHash, VRFSeed, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; @@ -2982,7 +2982,7 @@ impl SortitionDB { height: u64, ) -> Result, db_error> { let qry = "SELECT * FROM snapshots WHERE block_height = ?1"; - query_rows(conn, qry, &[u64_to_sql(height)?]) + query_rows(conn, qry, params![u64_to_sql(height)?]) } /// Get all preprocessed reward sets and their associated anchor blocks @@ -3070,11 +3070,9 @@ impl SortitionDB { /// Get the database schema version, given a DB connection fn get_schema_version(conn: &Connection) -> Result, db_error> { let version = conn - .query_row( - "SELECT MAX(version) from db_config", - rusqlite::NO_PARAMS, - |row| row.get(0), - ) + .query_row("SELECT MAX(version) from db_config", NO_PARAMS, |row| { + row.get(0) + }) .optional()?; Ok(version) } @@ -3210,7 +3208,7 @@ impl SortitionDB { // skip if this step was done if table_exists(&tx, "stacks_chain_tips")? { let sql = "SELECT 1 FROM stacks_chain_tips WHERE sortition_id = ?1"; - let args = rusqlite::params![&canonical_tip.sortition_id]; + let args = params![canonical_tip.sortition_id]; if let Ok(Some(_)) = query_row::(&tx, sql, args) { info!("`stacks_chain_tips` appears to have been populated already; skipping this step"); return Ok(()); @@ -3507,7 +3505,7 @@ impl SortitionDB { } let sql = "REPLACE INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; let rc_json = serde_json::to_string(rc_info).map_err(db_error::SerializationError)?; - let args = rusqlite::params![sortition_id, &rc_json]; + let args = params![sortition_id, rc_json]; sort_tx.execute(sql, args)?; Ok(()) } @@ -4883,7 +4881,7 @@ impl SortitionDB { conn: &Connection, ) -> Result<(u64, BurnchainHeaderHash), db_error> { let sql = "SELECT block_height, burn_header_hash FROM snapshots WHERE consensus_hash = ?1"; - let args = rusqlite::params!(&ConsensusHash::empty()); + let args = params![ConsensusHash::empty()]; let mut stmt = conn.prepare(sql)?; let mut rows = stmt.query(args)?; while let Some(row) = rows.next()? { @@ -6527,9 +6525,9 @@ pub mod tests { use std::sync::mpsc::sync_channel; use std::thread; - use rusqlite::NO_PARAMS; use stacks_common::address::AddressHashMode; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, VRFSeed}; + use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{hex_bytes, Hash160}; use stacks_common::util::vrf::*; @@ -6731,31 +6729,31 @@ pub mod tests { let pox_payouts_json = serde_json::to_string(&pox_payout) .expect("FATAL: could not encode `total_pox_payouts` as JSON"); - let args = rusqlite::params![ - &u64_to_sql(first_snapshot.block_height)?, - &first_snapshot.burn_header_hash, - &u64_to_sql(first_snapshot.burn_header_timestamp)?, - &first_snapshot.parent_burn_header_hash, - &first_snapshot.consensus_hash, - &first_snapshot.ops_hash, - &first_snapshot.total_burn.to_string(), - &first_snapshot.sortition, - &first_snapshot.sortition_hash, - &first_snapshot.winning_block_txid, - &first_snapshot.winning_stacks_block_hash, - &first_snapshot.index_root, - &u64_to_sql(first_snapshot.num_sortitions)?, - &first_snapshot.stacks_block_accepted, - &u64_to_sql(first_snapshot.stacks_block_height)?, - &u64_to_sql(first_snapshot.arrival_index)?, - &u64_to_sql(first_snapshot.canonical_stacks_tip_height)?, - &first_snapshot.canonical_stacks_tip_hash, - &first_snapshot.canonical_stacks_tip_consensus_hash, - &first_snapshot.sortition_id, - &first_snapshot.parent_sortition_id, - &first_snapshot.pox_valid, - &first_snapshot.accumulated_coinbase_ustx.to_string(), - &pox_payouts_json, + let args = params![ + u64_to_sql(first_snapshot.block_height)?, + first_snapshot.burn_header_hash, + u64_to_sql(first_snapshot.burn_header_timestamp)?, + first_snapshot.parent_burn_header_hash, + first_snapshot.consensus_hash, + first_snapshot.ops_hash, + first_snapshot.total_burn.to_string(), + first_snapshot.sortition, + first_snapshot.sortition_hash, + first_snapshot.winning_block_txid, + first_snapshot.winning_stacks_block_hash, + first_snapshot.index_root, + u64_to_sql(first_snapshot.num_sortitions)?, + first_snapshot.stacks_block_accepted, + u64_to_sql(first_snapshot.stacks_block_height)?, + u64_to_sql(first_snapshot.arrival_index)?, + u64_to_sql(first_snapshot.canonical_stacks_tip_height)?, + first_snapshot.canonical_stacks_tip_hash, + first_snapshot.canonical_stacks_tip_consensus_hash, + first_snapshot.sortition_id, + first_snapshot.parent_sortition_id, + first_snapshot.pox_valid, + first_snapshot.accumulated_coinbase_ustx.to_string(), + pox_payouts_json, ]; db_tx.execute("INSERT INTO snapshots \ @@ -6866,7 +6864,7 @@ pub mod tests { let apparent_sender_str = serde_json::to_string(sender).map_err(|e| db_error::SerializationError(e))?; let sql = "SELECT * FROM block_commits WHERE apparent_sender = ?1 ORDER BY block_height DESC LIMIT 1"; - let args = rusqlite::params![&apparent_sender_str]; + let args = params![apparent_sender_str]; query_row(conn, sql, args) } } diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index bfa92f3198..47e9a4eaac 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -30,7 +30,7 @@ use clarity::vm::{ClarityVersion, SymbolicExpression, Value}; use lazy_static::{__Deref, lazy_static}; use rusqlite::blob::Blob; use rusqlite::types::{FromSql, FromSqlError}; -use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql, NO_PARAMS}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql}; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::bitvec::BitVec; use stacks_common::codec::{ @@ -44,6 +44,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, VRFSeed, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::retry::BoundReader; @@ -2056,7 +2057,7 @@ impl NakamotoChainState { let qry = "SELECT DISTINCT tenure_id_consensus_hash AS consensus_hash FROM nakamoto_tenures WHERE coinbase_height = ?1"; let candidate_chs: Vec = - query_rows(tx.tx(), qry, &[u64_to_sql(coinbase_height)?])?; + query_rows(tx.tx(), qry, params![u64_to_sql(coinbase_height)?])?; if candidate_chs.len() == 0 { // no nakamoto_tenures at that tenure height, check if there's a stack block header where @@ -2638,7 +2639,7 @@ impl NakamotoChainState { reward_set: &RewardSet, ) -> Result<(), ChainstateError> { let sql = "INSERT INTO nakamoto_reward_sets (index_block_hash, reward_set) VALUES (?, ?)"; - let args = rusqlite::params![block_id, &reward_set.metadata_serialize(),]; + let args = params![block_id, reward_set.metadata_serialize(),]; tx.execute(sql, args)?; Ok(()) } diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index b6c0aefaa1..26af3ff8d3 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -26,7 +26,7 @@ use clarity::vm::types::{ use clarity::vm::{ClarityVersion, ContractName, SymbolicExpression, Value}; use lazy_static::{__Deref, lazy_static}; use rusqlite::types::{FromSql, FromSqlError}; -use rusqlite::{params, Connection, OptionalExtension, ToSql, NO_PARAMS}; +use rusqlite::{params, Connection, OptionalExtension, ToSql}; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::bitvec::BitVec; use stacks_common::codec::{ @@ -40,6 +40,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, VRFSeed, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 0702a89070..26878f3b1e 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -21,8 +21,9 @@ use std::path::PathBuf; use lazy_static::lazy_static; use rusqlite::blob::Blob; use rusqlite::types::{FromSql, FromSqlError}; -use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql, NO_PARAMS}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql}; use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; @@ -369,7 +370,7 @@ impl<'a> NakamotoStagingBlocksTx<'a> { WHERE index_block_hash = ?1"; self.execute( &clear_staged_block, - params![&block, &u64_to_sql(get_epoch_time_secs())?], + params![block, u64_to_sql(get_epoch_time_secs())?], )?; Ok(()) @@ -389,7 +390,7 @@ impl<'a> NakamotoStagingBlocksTx<'a> { WHERE index_block_hash = ?1"; self.execute( &clear_staged_block, - params![&block, &u64_to_sql(get_epoch_time_secs())?], + params![block, u64_to_sql(get_epoch_time_secs())?], )?; Ok(()) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 198143d1a9..6589e8d0a0 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -68,7 +68,7 @@ use clarity::vm::events::StacksTransactionEvent; use clarity::vm::types::StacksAddressExtensions; use lazy_static::{__Deref, lazy_static}; use rusqlite::types::{FromSql, FromSqlError}; -use rusqlite::{params, Connection, OptionalExtension, ToSql, NO_PARAMS}; +use rusqlite::{params, Connection, OptionalExtension, ToSql}; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::codec::{ read_next, write_next, Error as CodecError, StacksMessageCodec, MAX_MESSAGE_LEN, @@ -80,6 +80,7 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, VRFSeed, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; @@ -587,7 +588,7 @@ impl NakamotoChainState { burn_view: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_tenures WHERE burn_view_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let args = rusqlite::params![burn_view]; + let args = params![burn_view]; let tenure_opt: Option = query_row(headers_conn, sql, args)?; Ok(tenure_opt) } diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index affc0bb9d8..f32336e657 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -20,6 +20,7 @@ use clarity::types::chainstate::TenureBlockId; use clarity::vm::database::clarity_store::*; use clarity::vm::database::*; use clarity::vm::types::*; +use rusqlite::params; use rusqlite::types::ToSql; use rusqlite::Row; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -734,12 +735,12 @@ impl StacksChainState { let qry = "SELECT * FROM payments WHERE consensus_hash = ?1 AND block_hash = ?2 AND miner = 1" .to_string(); - let args = [ - consensus_hash as &dyn ToSql, - stacks_block_hash as &dyn ToSql, + let args = params![ + consensus_hash, + stacks_block_hash, ]; let mut rows = - query_rows::(conn, &qry, &args).map_err(Error::DBError)?; + query_rows::(conn, &qry, args).map_err(Error::DBError)?; let len = rows.len(); match len { 0 => { diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 971351b1d5..ab696ac251 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -34,7 +34,7 @@ use clarity::vm::types::{ TypeSignature, Value, }; use rand::{thread_rng, Rng, RngCore}; -use rusqlite::{Connection, DatabaseName, Error as sqlite_error, OptionalExtension}; +use rusqlite::{params, Connection, DatabaseName, Error as sqlite_error, OptionalExtension, Params}; use serde::Serialize; use serde_json::json; use stacks_common::bitvec::BitVec; @@ -42,6 +42,7 @@ use stacks_common::codec::{read_next, write_next, MAX_MESSAGE_LEN}; use stacks_common::types::chainstate::{ BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::to_hex; use stacks_common::util::retry::BoundReader; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; @@ -894,8 +895,7 @@ impl StacksChainState { sql_args: P, ) -> Result>, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, { let mut stmt = conn .prepare(sql_query) @@ -1614,24 +1614,25 @@ impl StacksChainState { processed_time, \ download_time) \ VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17)"; - let args: &[&dyn ToSql] = &[ - &block_hash, - &block.header.parent_block, - &consensus_hash, - &parent_consensus_hash, - &block.header.parent_microblock, - &block.header.parent_microblock_sequence, - &block.header.microblock_pubkey_hash, - &u64_to_sql(block.header.total_work.work)?, - &attachable, - &0, - &0, - &u64_to_sql(commit_burn)?, - &u64_to_sql(sortition_burn)?, - &index_block_hash, - &u64_to_sql(get_epoch_time_secs())?, - &0, - &u64_to_sql(download_time)?, + + let args: &[&dyn ToSql] = params![ + block_hash, + block.header.parent_block, + consensus_hash, + parent_consensus_hash, + block.header.parent_microblock, + block.header.parent_microblock_sequence, + block.header.microblock_pubkey_hash, + u64_to_sql(block.header.total_work.work)?, + attachable, + 0, + 0, + u64_to_sql(commit_burn)?, + u64_to_sql(sortition_burn)?, + index_block_hash, + u64_to_sql(get_epoch_time_secs())?, + 0, + u64_to_sql(download_time)?, ]; tx.execute(&sql, args) @@ -1687,16 +1688,16 @@ impl StacksChainState { // store microblock metadata let sql = "INSERT OR REPLACE INTO staging_microblocks (anchored_block_hash, consensus_hash, index_block_hash, microblock_hash, parent_hash, index_microblock_hash, sequence, processed, orphaned) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)"; - let args: &[&dyn ToSql] = &[ - &parent_anchored_block_hash, - &parent_consensus_hash, - &index_block_hash, - µblock.block_hash(), - µblock.header.prev_block, - &index_microblock_hash, - µblock.header.sequence, - &0, - &0, + let args: &[&dyn ToSql] = params![ + parent_anchored_block_hash, + parent_consensus_hash, + index_block_hash, + microblock.block_hash(), + microblock.header.prev_block, + index_microblock_hash, + microblock.header.sequence, + 0, + 0, ]; tx.execute(&sql, args) @@ -2832,10 +2833,10 @@ impl StacksChainState { "SELECT {},{} FROM staging_blocks WHERE index_block_hash = ?1", consensus_hash_col, anchored_block_col ); - let args = [index_block_hash as &dyn ToSql]; + let args = params![index_block_hash]; blocks_db - .query_row(&sql, &args, |row| { + .query_row(&sql, args, |row| { let anchored_block_hash = BlockHeaderHash::from_column(row, anchored_block_col) .expect("Expected anchored_block_hash - database corrupted"); let consensus_hash = ConsensusHash::from_column(row, consensus_hash_col) @@ -2884,11 +2885,11 @@ impl StacksChainState { staging_microblocks JOIN staging_microblocks_data \ ON staging_microblocks.microblock_hash = staging_microblocks_data.block_hash \ WHERE staging_microblocks.index_block_hash = ?1 AND staging_microblocks.microblock_hash = ?2"; - let args = [ - parent_index_block_hash as &dyn ToSql, - microblock_hash as &dyn ToSql, + let args = params![ + parent_index_block_hash, + microblock_hash, ]; - query_row(blocks_conn, sql, &args).map_err(Error::DBError) + query_row(blocks_conn, sql, args).map_err(Error::DBError) } /// Load up the metadata on a microblock stream (but don't get the data itself) @@ -2900,9 +2901,9 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 ORDER BY sequence" .to_string(); - let args = [parent_index_block_hash as &dyn ToSql]; + let args = params![parent_index_block_hash]; let microblock_info = - query_rows::(blocks_conn, &sql, &args).map_err(Error::DBError)?; + query_rows::(blocks_conn, &sql, args).map_err(Error::DBError)?; Ok(microblock_info) } @@ -2942,7 +2943,7 @@ impl StacksChainState { ) -> Result { let sql = "SELECT 1 FROM staging_blocks WHERE orphaned = 0 AND processed = 0 AND height >= ?1 AND arrival_time >= ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(height)?, &u64_to_sql(deadline)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(height)?, u64_to_sql(deadline)?]; let res = conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -2958,7 +2959,7 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE orphaned = 0 AND processed = 0 AND arrival_time >= ?1 ORDER BY height DESC LIMIT 1"; - let res = query_row(conn, sql, &[u64_to_sql(deadline)?])?; + let res = query_row(conn, sql, params![u64_to_sql(deadline)?])?; Ok(res) } diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index d9b6d47775..983fdbc72d 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -36,11 +36,12 @@ use clarity::vm::types::TupleData; use clarity::vm::{SymbolicExpression, Value}; use lazy_static::lazy_static; use rusqlite::types::ToSql; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, Row, Transaction}; use serde::de::Error as de_Error; use serde::Deserialize; use stacks_common::codec::{read_next, write_next, StacksMessageCodec}; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, TrieHash}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util; use stacks_common::util::hash::{hex_bytes, to_hex}; @@ -1022,10 +1023,10 @@ impl StacksChainState { } tx.execute( "INSERT INTO db_config (version,mainnet,chain_id) VALUES (?1,?2,?3)", - &[ - &"1".to_string(), - &(if mainnet { 1 } else { 0 }) as &dyn ToSql, - &chain_id as &dyn ToSql, + params![ + "1".to_string(), + (if mainnet { 1 } else { 0 }), + chain_id, ], )?; diff --git a/stackslib/src/chainstate/stacks/index/cache.rs b/stackslib/src/chainstate/stacks/index/cache.rs index a711603447..7f92efdd8b 100644 --- a/stackslib/src/chainstate/stacks/index/cache.rs +++ b/stackslib/src/chainstate/stacks/index/cache.rs @@ -27,11 +27,12 @@ use std::{cmp, env, error, fmt, fs, io, os}; use rusqlite::types::{FromSql, ToSql}; use rusqlite::{ Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OpenFlags, OptionalExtension, - Transaction, NO_PARAMS, + Transaction, }; use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; +use stacks_common::types::sqlite::NO_PARAMS; use crate::chainstate::stacks::index::bits::{ get_node_byte_len, get_node_hash, read_block_identifier, read_hash_bytes, read_node_hash_bytes, diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 1477f9a7dd..4123b1310a 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -28,11 +28,12 @@ use std::{cmp, env, error, fmt, fs, io, os}; use rusqlite::types::{FromSql, ToSql}; use rusqlite::{ Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OpenFlags, OptionalExtension, - Transaction, NO_PARAMS, + Transaction, }; use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; +use stacks_common::types::sqlite::NO_PARAMS; use crate::chainstate::stacks::index::bits::{ get_node_byte_len, get_node_hash, read_block_identifier, read_hash_bytes, read_node_hash_bytes, diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 8f01117153..6994c7ad05 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -27,12 +27,13 @@ use std::{cmp, env, error, fmt, fs, io, os}; use rusqlite::types::{FromSql, ToSql}; use rusqlite::{ Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OpenFlags, OptionalExtension, - Transaction, NO_PARAMS, + Transaction, }; use sha2::Digest; use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::to_hex; use stacks_common::util::log; diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index be1ae91c21..a5e9c91aaa 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -28,10 +28,11 @@ use std::{error, fmt, fs, io, os}; use regex::Regex; use rusqlite::blob::Blob; use rusqlite::types::{FromSql, ToSql}; -use rusqlite::{Connection, Error as SqliteError, OptionalExtension, Transaction, NO_PARAMS}; +use rusqlite::{params, Connection, Error as SqliteError, OptionalExtension, Transaction}; use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::log; use crate::chainstate::stacks::index::bits::{ @@ -220,7 +221,7 @@ pub fn get_block_hash(conn: &Connection, local_id: u32) -> Result let result = conn .query_row( "SELECT block_hash FROM marf_data WHERE block_id = ?", - &[local_id], + params![local_id], |row| row.get("block_hash"), ) .optional()?; @@ -236,7 +237,13 @@ pub fn write_trie_blob( block_hash: &T, data: &[u8], ) -> Result { - let args: &[&dyn ToSql] = &[block_hash, &data, &0, &0, &0]; + let args: &[&dyn ToSql] = params![ + block_hash, + data, + 0, + 0, + 0, + ]; let mut s = conn.prepare("INSERT INTO marf_data (block_hash, data, unconfirmed, external_offset, external_length) VALUES (?, ?, ?, ?, ?)")?; let block_id = s @@ -263,13 +270,13 @@ fn inner_write_external_trie_blob( let block_id = if let Some(block_id) = block_id { // existing entry (i.e. a migration) let empty_blob: &[u8] = &[]; - let args: &[&dyn ToSql] = &[ + let args: &[&dyn ToSql] = params![ block_hash, - &empty_blob, - &0, - &u64_to_sql(offset)?, - &u64_to_sql(length)?, - &block_id, + empty_blob, + 0, + u64_to_sql(offset)?, + u64_to_sql(length)?, + block_id, ]; let mut s = conn.prepare("UPDATE marf_data SET block_hash = ?1, data = ?2, unconfirmed = ?3, external_offset = ?4, external_length = ?5 WHERE block_id = ?6")?; @@ -283,12 +290,12 @@ fn inner_write_external_trie_blob( } else { // new entry let empty_blob: &[u8] = &[]; - let args: &[&dyn ToSql] = &[ + let args: &[&dyn ToSql] = params![ block_hash, - &empty_blob, - &0, - &u64_to_sql(offset)?, - &u64_to_sql(length)?, + empty_blob, + 0, + u64_to_sql(offset)?, + u64_to_sql(length)?, ]; let mut s = conn.prepare("INSERT INTO marf_data (block_hash, data, unconfirmed, external_offset, external_length) VALUES (?, ?, ?, ?, ?)")?; @@ -378,7 +385,7 @@ pub fn write_trie_blob_to_unconfirmed( .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks"); } else { // doesn't exist yet; insert - let args: &[&dyn ToSql] = &[block_hash, &data, &1]; + let args: &[&dyn ToSql] = params![block_hash, data, 1]; let mut s = conn.prepare("INSERT INTO marf_data (block_hash, data, unconfirmed, external_offset, external_length) VALUES (?, ?, ?, 0, 0)")?; s.execute(args) @@ -429,7 +436,7 @@ pub fn read_all_block_hashes_and_roots( let rows = s.query_and_then(NO_PARAMS, |row| { let block_hash: T = row.get_unwrap("block_hash"); let data = row - .get_raw("data") + .get_ref("data")? .as_blob() .expect("DB Corruption: MARF data is non-blob"); let start = TrieStorageConnection::::root_ptr_disk() as usize; diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 0979d5d989..56ae3d8d52 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -32,6 +32,7 @@ use clarity::vm::test_util::TEST_BURN_STATE_DB; use clarity::vm::types::*; use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; +use rusqlite::params; use stacks_common::address::*; use stacks_common::types::chainstate::SortitionId; use stacks_common::util::hash::MerkleTree; @@ -4799,7 +4800,7 @@ fn paramaterized_mempool_walk_test( mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![Some(123.0), &txid], + params![Some(123.0), &txid], ) .unwrap(); } else { @@ -4807,7 +4808,7 @@ fn paramaterized_mempool_walk_test( mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![none, &txid], + params![none, &txid], ) .unwrap(); } diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 7d2b53eb0e..53b53c36b7 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -24,7 +24,7 @@ use clarity::vm::coverage::CoverageReporter; use lazy_static::lazy_static; use rand::Rng; use rusqlite::types::ToSql; -use rusqlite::{Connection, OpenFlags, Row, Transaction, NO_PARAMS}; +use rusqlite::{Connection, OpenFlags, Row, Transaction}; use serde::Serialize; use serde_json::json; use stacks_common::address::c32::c32_address; @@ -33,6 +33,7 @@ use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, VRFSeed, *, }; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::{bytes_to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::{get_epoch_time_ms, log}; diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index 812d39bb97..c89679f414 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -1887,9 +1887,9 @@ mod tests { use clarity::vm::database::{ClarityBackingStore, STXBalance}; use clarity::vm::test_util::{TEST_BURN_STATE_DB, TEST_HEADER_DB}; use clarity::vm::types::{StandardPrincipalData, Value}; - use rusqlite::NO_PARAMS; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::chainstate::ConsensusHash; + use stacks_common::types::sqlite::NO_PARAMS; use super::*; use crate::chainstate::stacks::index::ClarityMarfTrieId; diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 8fffc76d7d..dffc5b8fc5 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -12,7 +12,7 @@ use clarity::vm::database::{ }; use clarity::vm::errors::{InterpreterResult, RuntimeErrorType}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, TupleData}; -use rusqlite::{Connection, OptionalExtension, Row, ToSql}; +use rusqlite::{params, Connection, OptionalExtension, Row, ToSql}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, TenureBlockId, VRFSeed, @@ -649,7 +649,7 @@ fn get_matured_reward( let parent_id_bhh = conn .query_row( &format!("SELECT parent_block_id FROM {table_name} WHERE index_block_hash = ?"), - [child_id_bhh.0].iter(), + params![child_id_bhh.0], |x| { Ok(StacksBlockId::from_column(x, "parent_block_id") .expect("Bad parent_block_id in database")) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 9eb9b7cf80..35019d494c 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -29,14 +29,14 @@ use rand::distributions::Uniform; use rand::prelude::Distribution; use rusqlite::types::ToSql; use rusqlite::{ - Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Transaction, - NO_PARAMS, + params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Transaction }; use siphasher::sip::SipHasher; // this is SipHash-2-4 use stacks_common::codec::{ read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, }; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, StacksBlockId}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::MempoolCollectionBehavior; use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; use stacks_common::util::retry::{BoundReader, RetryReader}; @@ -1122,7 +1122,7 @@ fn db_set_nonce(conn: &DBConn, address: &StacksAddress, nonce: u64) -> Result<() let nonce_i64 = u64_to_sql(nonce)?; let sql = "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)"; - conn.execute(sql, rusqlite::params![&addr_str, nonce_i64])?; + conn.execute(sql, params![addr_str, nonce_i64])?; Ok(()) } @@ -1130,7 +1130,7 @@ fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result, d let addr_str = address.to_string(); let sql = "SELECT nonce FROM nonces WHERE address = ?"; - query_row(conn, sql, rusqlite::params![&addr_str]) + query_row(conn, sql, params![addr_str]) } #[cfg(test)] @@ -1272,7 +1272,7 @@ impl MemPoolDB { let version = conn .query_row( "SELECT MAX(version) FROM schema_version", - rusqlite::NO_PARAMS, + NO_PARAMS, |row| row.get(0), ) .optional()?; @@ -1489,7 +1489,7 @@ impl MemPoolDB { pub fn reset_nonce_cache(&mut self) -> Result<(), db_error> { debug!("reset nonce cache"); let sql = "DELETE FROM nonces"; - self.db.execute(sql, rusqlite::NO_PARAMS)?; + self.db.execute(sql, NO_PARAMS)?; Ok(()) } @@ -1529,7 +1529,7 @@ impl MemPoolDB { let txs: Vec = query_rows( &sql_tx, "SELECT * FROM mempool as m WHERE m.fee_rate IS NULL LIMIT ?", - &[max_updates], + params![max_updates], )?; let mut updated = 0; for tx_to_estimate in txs { @@ -1554,7 +1554,7 @@ impl MemPoolDB { sql_tx.execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![fee_rate_f64, &txid], + params![fee_rate_f64, txid], )?; updated += 1; } @@ -1945,7 +1945,7 @@ impl MemPoolDB { query_row( conn, "SELECT 1 FROM mempool WHERE txid = ?1", - &[txid as &dyn ToSql], + params![txid], ) .and_then(|row_opt: Option| Ok(row_opt.is_some())) } @@ -1954,7 +1954,7 @@ impl MemPoolDB { query_row( conn, "SELECT * FROM mempool WHERE txid = ?1", - &[txid as &dyn ToSql], + params![txid], ) } @@ -2398,7 +2398,7 @@ impl MemPoolDB { mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![fee_rate_estimate, &txid], + params![fee_rate_estimate, txid], ) .map_err(db_error::from)?; @@ -2588,12 +2588,12 @@ impl MemPoolDB { let txids: Vec = query_rows( tx, "SELECT txid FROM tx_blacklist ORDER BY RANDOM() LIMIT ?1", - &[&u64_to_sql(to_delete)? as &dyn ToSql], + params![u64_to_sql(to_delete)?], )?; for txid in txids.into_iter() { tx.execute( "DELETE FROM tx_blacklist WHERE txid = ?1", - &[&txid as &dyn ToSql], + params![txid], )?; } } diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index d77724756d..8e70f4dcbd 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -26,6 +26,7 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, StacksAddre use clarity::vm::{ClarityName, ContractName, Value}; use rand::prelude::*; use rand::thread_rng; +use rusqlite::params; use stacks_common::address::AddressHashMode; use stacks_common::codec::{read_next, Error as codec_error, StacksMessageCodec}; use stacks_common::types::chainstate::{ @@ -645,7 +646,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![Some(123.0), &txid], + params![Some(123.0), txid], ) .unwrap(); } else { @@ -653,7 +654,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![none, &txid], + params![none, txid], ) .unwrap(); } @@ -1198,7 +1199,7 @@ fn test_iterate_candidates_concurrent_write_lock() { mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![Some(123.0), &txid], + params![Some(123.0), txid], ) .unwrap(); } else { @@ -1206,7 +1207,7 @@ fn test_iterate_candidates_concurrent_write_lock() { mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - rusqlite::params![none, &txid], + params![none, txid], ) .unwrap(); } diff --git a/stackslib/src/cost_estimates/fee_medians.rs b/stackslib/src/cost_estimates/fee_medians.rs index 88ab0e9c2a..fd8db3bf17 100644 --- a/stackslib/src/cost_estimates/fee_medians.rs +++ b/stackslib/src/cost_estimates/fee_medians.rs @@ -2,11 +2,11 @@ use std::cmp; use std::cmp::Ordering; use std::path::Path; +use clarity::types::sqlite::NO_PARAMS; use clarity::vm::costs::ExecutionCost; use rusqlite::types::{FromSql, FromSqlError}; use rusqlite::{ - AndThenRows, Connection, Error as SqliteError, OptionalExtension, ToSql, - Transaction as SqlTransaction, + params, AndThenRows, Connection, Error as SqliteError, OptionalExtension, ToSql, Transaction as SqlTransaction }; use serde_json::Value as JsonValue; @@ -89,7 +89,7 @@ impl WeightedMedianFeeRateEstimator { fn instantiate_db(tx: &SqlTransaction) -> Result<(), SqliteError> { if !Self::db_already_instantiated(tx)? { - tx.execute(CREATE_TABLE, rusqlite::NO_PARAMS)?; + tx.execute(CREATE_TABLE, NO_PARAMS)?; } Ok(()) @@ -108,7 +108,7 @@ impl WeightedMedianFeeRateEstimator { let mut mids = Vec::with_capacity(window_size as usize); let mut lows = Vec::with_capacity(window_size as usize); let results = stmt - .query_and_then::<_, SqliteError, _, _>(&[window_size], |row| { + .query_and_then::<_, SqliteError, _, _>(params![window_size], |row| { let high: f64 = row.get("high")?; let middle: f64 = row.get("middle")?; let low: f64 = row.get("low")?; @@ -160,10 +160,10 @@ impl WeightedMedianFeeRateEstimator { FROM median_fee_estimator )"; tx.execute( insert_sql, - rusqlite::params![new_measure.high, new_measure.middle, new_measure.low,], + params![new_measure.high, new_measure.middle, new_measure.low,], ) .expect("SQLite failure"); - tx.execute(deletion_sql, rusqlite::params![self.window_size]) + tx.execute(deletion_sql, params![self.window_size]) .expect("SQLite failure"); let estimate = Self::get_rate_estimates_from_sql(&tx, self.window_size); diff --git a/stackslib/src/cost_estimates/fee_scalar.rs b/stackslib/src/cost_estimates/fee_scalar.rs index 1c0349e42b..fd38f9ca51 100644 --- a/stackslib/src/cost_estimates/fee_scalar.rs +++ b/stackslib/src/cost_estimates/fee_scalar.rs @@ -1,11 +1,12 @@ use std::cmp; use std::path::Path; +use clarity::types::sqlite::NO_PARAMS; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::{ClaritySerializable, STXBalance}; use rusqlite::types::{FromSql, FromSqlError}; use rusqlite::{ - Connection, Error as SqliteError, OptionalExtension, ToSql, Transaction as SqlTransaction, + params, Connection, Error as SqliteError, OptionalExtension, ToSql, Transaction as SqlTransaction }; use serde_json::Value as JsonValue; @@ -72,7 +73,7 @@ impl ScalarFeeRateEstimator { fn instantiate_db(tx: &SqlTransaction) -> Result<(), SqliteError> { if !Self::db_already_instantiated(tx)? { - tx.execute(CREATE_TABLE, rusqlite::NO_PARAMS)?; + tx.execute(CREATE_TABLE, NO_PARAMS)?; } Ok(()) @@ -130,7 +131,7 @@ impl ScalarFeeRateEstimator { tx.execute( sql, - rusqlite::params![ + params![ SINGLETON_ROW_ID, next_estimate.high, next_estimate.middle, @@ -238,7 +239,7 @@ impl FeeEstimator for ScalarFeeRateEstimator { fn get_rate_estimates(&self) -> Result { let sql = "SELECT high, middle, low FROM scalar_fee_estimator WHERE estimate_key = ?"; self.db - .query_row(sql, &[SINGLETON_ROW_ID], |row| { + .query_row(sql, params![SINGLETON_ROW_ID], |row| { let high: f64 = row.get(0)?; let middle: f64 = row.get(1)?; let low: f64 = row.get(2)?; diff --git a/stackslib/src/cost_estimates/pessimistic.rs b/stackslib/src/cost_estimates/pessimistic.rs index b986d54dc7..d9499ca6ad 100644 --- a/stackslib/src/cost_estimates/pessimistic.rs +++ b/stackslib/src/cost_estimates/pessimistic.rs @@ -1,10 +1,11 @@ use std::cmp; use std::path::Path; +use clarity::types::sqlite::NO_PARAMS; use clarity::vm::costs::ExecutionCost; use rusqlite::types::{FromSql, FromSqlError}; use rusqlite::{ - Connection, Error as SqliteError, OptionalExtension, ToSql, Transaction as SqliteTransaction, + params, Connection, Error as SqliteError, OptionalExtension, ToSql, Transaction as SqliteTransaction }; use serde_json::Value as JsonValue; @@ -146,7 +147,7 @@ impl Samples { let current_value = u64_to_sql(self.mean()).unwrap_or_else(|_| i64::MAX); tx.execute( sql, - rusqlite::params![identifier, current_value, self.to_json()], + params![identifier, current_value, self.to_json()], ) .expect("SQLite failure"); } @@ -205,7 +206,7 @@ impl PessimisticEstimator { fn instantiate_db(tx: &SqliteTransaction) -> Result<(), SqliteError> { if !Self::db_already_instantiated(tx)? { - tx.execute(CREATE_TABLE, rusqlite::NO_PARAMS)?; + tx.execute(CREATE_TABLE, NO_PARAMS)?; } Ok(()) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 63a788cdf6..1743f580bb 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -80,13 +80,14 @@ use blockstack_lib::util_lib::strings::UrlString; use blockstack_lib::{clarity_cli, util_lib}; use libstackerdb::StackerDBChunkData; use rusqlite::types::ToSql; -use rusqlite::{Connection, OpenFlags}; +use rusqlite::{params, Connection, Error as SqliteError, OpenFlags}; use serde_json::{json, Value}; use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, StacksAddress, StacksBlockId, }; use stacks_common::types::net::PeerAddress; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; use stacks_common::util::retry::LogReader; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; @@ -769,7 +770,7 @@ simulating a miner. let conn = sqlite_open(&db_path, OpenFlags::SQLITE_OPEN_READ_ONLY, false) .expect("Failed to open DB"); let args: &[&dyn ToSql] = &[&value.to_hex()]; - let res: Result = conn.query_row_and_then( + let res: Result = conn.query_row_and_then( "SELECT value FROM __fork_storage WHERE value_hash = ?1", args, |row| { @@ -829,18 +830,18 @@ simulating a miner. let tip = BlockHeaderHash::from_hex(&argv[3]).unwrap(); let burntip = BurnchainHeaderHash::from_hex(&argv[4]).unwrap(); - let conn = rusqlite::Connection::open(path).unwrap(); + let conn = Connection::open(path).unwrap(); let mut cur_burn = burntip.clone(); let mut cur_tip = tip.clone(); loop { println!("{}, {}", cur_burn, cur_tip); let (next_burn, next_tip) = match conn.query_row("SELECT parent_burn_header_hash, parent_anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ? and burn_header_hash = ?", - &[&cur_tip as &dyn rusqlite::types::ToSql, &cur_burn], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) { + params![cur_tip, cur_burn], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) { Ok(x) => x, Err(e) => { match e { - rusqlite::Error::QueryReturnedNoRows => {}, + SqliteError::QueryReturnedNoRows => {}, e => { eprintln!("SQL Error: {}", e); }, @@ -933,7 +934,7 @@ simulating a miner. }; let mut stmt = conn.prepare(&query).unwrap(); - let mut hashes_set = stmt.query(rusqlite::NO_PARAMS).unwrap(); + let mut hashes_set = stmt.query(NO_PARAMS).unwrap(); let mut index_block_hashes: Vec = vec![]; while let Ok(Some(row)) = hashes_set.next() { @@ -965,7 +966,7 @@ simulating a miner. byte_prefix ); let mut stmt = conn.prepare(&query).unwrap(); - let mut rows = stmt.query(rusqlite::NO_PARAMS).unwrap(); + let mut rows = stmt.query(NO_PARAMS).unwrap(); while let Ok(Some(row)) = rows.next() { let val_string: String = row.get(0).unwrap(); let clarity_value = match clarity::vm::Value::try_deserialize_hex_untyped(&val_string) { diff --git a/stackslib/src/monitoring/mod.rs b/stackslib/src/monitoring/mod.rs index 00411db70c..7f1aa9db26 100644 --- a/stackslib/src/monitoring/mod.rs +++ b/stackslib/src/monitoring/mod.rs @@ -23,6 +23,7 @@ use std::{fmt, fs}; use clarity::vm::costs::ExecutionCost; use lazy_static::lazy_static; use rusqlite::{OpenFlags, OptionalExtension}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::uint::{Uint256, Uint512}; @@ -210,7 +211,7 @@ fn txid_tracking_db(chainstate_root_path: &str) -> Result if create_flag { conn.execute( "CREATE TABLE processed_txids (txid TEXT NOT NULL PRIMARY KEY)", - rusqlite::NO_PARAMS, + NO_PARAMS, )?; } diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index 784bff9639..cb3ad8162f 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -38,9 +38,10 @@ use std::fs; use clarity::vm::types::QualifiedContractIdentifier; use rusqlite::types::{FromSql, FromSqlError, ToSql, ToSqlOutput, ValueRef}; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, Row, Transaction}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util; use stacks_common::util::hash::{bin_bytes, hex_bytes, to_bin, to_hex, Hash160}; use stacks_common::util::log; @@ -206,11 +207,9 @@ impl AtlasDB { /// Get the database schema version, given a DB connection fn get_schema_version(conn: &Connection) -> Result { - let version = conn.query_row( - "SELECT MAX(version) from db_config", - rusqlite::NO_PARAMS, - |row| row.get(0), - )?; + let version = conn.query_row("SELECT MAX(version) from db_config", NO_PARAMS, |row| { + row.get(0) + })?; Ok(version) } @@ -228,7 +227,7 @@ impl AtlasDB { tx.execute( "INSERT INTO db_config (version) VALUES (?1)", - &[&ATLASDB_VERSION], + params![ATLASDB_VERSION], )?; if let Some(attachments) = genesis_attachments { @@ -236,10 +235,10 @@ impl AtlasDB { for attachment in attachments { tx.execute( "INSERT INTO attachments (hash, content, was_instantiated, created_at) VALUES (?, ?, 1, ?)", - &[ - &attachment.hash() as &dyn ToSql, - &attachment.content as &dyn ToSql, - &now as &dyn ToSql, + params![ + attachment.hash(), + attachment.content, + now, ], ) .map_err(db_error::SqliteError)?; @@ -348,7 +347,7 @@ impl AtlasDB { db_conn.execute( "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["2"], + params!["2"], )?; Ok(()) @@ -406,17 +405,17 @@ impl AtlasDB { tx.execute_batch(row_text)?; } - tx.execute("INSERT INTO db_config (version) VALUES (?1)", &["1"])?; + tx.execute("INSERT INTO db_config (version) VALUES (?1)", params!["1"])?; if let Some(attachments) = genesis_attachments { let now = util::get_epoch_time_secs() as i64; for attachment in attachments { tx.execute( "INSERT INTO attachments (hash, content, was_instantiated, created_at) VALUES (?, ?, 1, ?)", - rusqlite::params![ - &attachment.hash(), - &attachment.content, - &now, + params![ + attachment.hash(), + attachment.content, + now, ], )?; } @@ -462,9 +461,9 @@ impl AtlasDB { let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let max = (page_index + 1) * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let qry = "SELECT MIN(block_height) as min, MAX(block_height) as max FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2"; - let args = [&min as &dyn ToSql, &max as &dyn ToSql]; + let args = params![min, max]; let mut stmt = self.conn.prepare(&qry)?; - let mut rows = stmt.query(&args)?; + let mut rows = stmt.query(args)?; match rows.next() { Ok(Some(row)) => { @@ -498,12 +497,12 @@ impl AtlasDB { let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let max = min + AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let qry = "SELECT attachment_index, is_available FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2 AND index_block_hash = ?3 ORDER BY attachment_index ASC"; - let args = [ - &min as &dyn ToSql, - &max as &dyn ToSql, - block_id as &dyn ToSql, + let args = params![ + min, + max, + block_id, ]; - let rows = query_rows::<(u32, u32), _>(&self.conn, &qry, &args)?; + let rows = query_rows::<(u32, u32), _>(&self.conn, &qry, args)?; let mut bool_vector = vec![true; AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE as usize]; for (attachment_index, is_available) in rows.into_iter() { @@ -529,10 +528,10 @@ impl AtlasDB { let now = util::get_epoch_time_secs() as i64; let res = tx.execute( "INSERT OR REPLACE INTO attachments (hash, content, was_instantiated, created_at) VALUES (?, ?, 0, ?)", - &[ - &attachment.hash() as &dyn ToSql, - &attachment.content as &dyn ToSql, - &now as &dyn ToSql, + params![ + attachment.hash(), + attachment.content, + now, ], ); res.map_err(db_error::SqliteError)?; @@ -544,7 +543,7 @@ impl AtlasDB { let tx = self.tx_begin()?; let res = tx.execute( "DELETE FROM attachments WHERE hash IN (SELECT hash FROM attachments WHERE was_instantiated = 0 ORDER BY created_at ASC LIMIT ?)", - &[&k as &dyn ToSql], + params![k], ); res.map_err(db_error::SqliteError)?; tx.commit().map_err(db_error::SqliteError)?; @@ -557,7 +556,7 @@ impl AtlasDB { let tx = self.tx_begin()?; let res = tx.execute( "DELETE FROM attachments WHERE was_instantiated = 0 AND created_at < ?", - &[&cut_off as &dyn ToSql], + params![cut_off], ); res.map_err(db_error::SqliteError)?; tx.commit().map_err(db_error::SqliteError)?; @@ -586,11 +585,11 @@ impl AtlasDB { let tx = self.tx_begin()?; tx.execute( "INSERT OR REPLACE INTO attachments (hash, content, was_instantiated, created_at) VALUES (?, ?, 1, ?)", - rusqlite::params![&attachment.hash(), &attachment.content, &now], + params![attachment.hash(), attachment.content, now], )?; tx.execute( "UPDATE attachment_instances SET is_available = 1 WHERE content_hash = ?1 AND status = ?2", - rusqlite::params![&attachment.hash(), &AttachmentInstanceStatus::Checked], + params![attachment.hash(), AttachmentInstanceStatus::Checked], )?; tx.commit()?; Ok(()) @@ -603,7 +602,7 @@ impl AtlasDB { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT content, hash FROM attachments WHERE hash = ?1 AND was_instantiated = 0" .to_string(); - let args = [&hex_content_hash as &dyn ToSql]; + let args = params![hex_content_hash]; let row = query_row::(&self.conn, &qry, &args)?; Ok(row) } @@ -617,7 +616,7 @@ impl AtlasDB { let tx = self.tx_begin()?; let res = tx.execute( "DELETE FROM attachment_instances WHERE is_available = 0 AND created_at < ?", - &[&cut_off as &dyn ToSql], + params![cut_off], ); res.map_err(db_error::SqliteError)?; tx.commit().map_err(db_error::SqliteError)?; @@ -628,7 +627,7 @@ impl AtlasDB { &mut self, ) -> Result, db_error> { let qry = "SELECT * FROM attachment_instances WHERE is_available = 0 AND status = ?"; - let rows = query_rows(&self.conn, qry, &[&AttachmentInstanceStatus::Checked])?; + let rows = query_rows(&self.conn, qry, params![AttachmentInstanceStatus::Checked])?; Ok(rows) } @@ -638,7 +637,7 @@ impl AtlasDB { ) -> Result, db_error> { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT * FROM attachment_instances WHERE content_hash = ?1 AND status = ?2"; - let args = rusqlite::params![&hex_content_hash, &AttachmentInstanceStatus::Checked]; + let args = params![hex_content_hash, AttachmentInstanceStatus::Checked]; let rows = query_rows(&self.conn, qry, args)?; Ok(rows) } @@ -647,7 +646,7 @@ impl AtlasDB { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT content, hash FROM attachments WHERE hash = ?1 AND was_instantiated = 1" .to_string(); - let args = [&hex_content_hash as &dyn ToSql]; + let args = params![hex_content_hash]; let row = query_row::(&self.conn, &qry, &args)?; Ok(row) } @@ -681,7 +680,7 @@ impl AtlasDB { query_rows( &self.conn, "SELECT * FROM attachment_instances WHERE status = ?1 LIMIT ?2", - rusqlite::params![&AttachmentInstanceStatus::Queued, MAX_PROCESS_PER_ROUND], + params![AttachmentInstanceStatus::Queued, MAX_PROCESS_PER_ROUND], ) } @@ -694,12 +693,12 @@ impl AtlasDB { self.conn.execute( "UPDATE attachment_instances SET status = ?1, is_available = ?2 WHERE index_block_hash = ?3 AND contract_id = ?4 AND attachment_index = ?5", - rusqlite::params![ - &AttachmentInstanceStatus::Checked, - &is_available, - &attachment.index_block_hash, - &attachment.contract_id.to_string(), - &attachment.attachment_index, + params![ + AttachmentInstanceStatus::Checked, + is_available, + attachment.index_block_hash, + attachment.contract_id.to_string(), + attachment.attachment_index, ], )?; Ok(()) @@ -720,17 +719,17 @@ impl AtlasDB { attachment_index, block_height, is_available, metadata, contract_id, tx_id, status) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)", - rusqlite::params![ - &attachment.content_hash, - &now, - &attachment.index_block_hash, - &attachment.attachment_index, - &u64_to_sql(attachment.stacks_block_height)?, - &is_available, - &attachment.metadata, - &attachment.contract_id.to_string(), - &attachment.tx_id, - &status + params![ + attachment.content_hash, + now, + attachment.index_block_hash, + attachment.attachment_index, + u64_to_sql(attachment.stacks_block_height)?, + is_available, + attachment.metadata, + attachment.contract_id.to_string(), + attachment.tx_id, + status ], )?; sql_tx.commit()?; diff --git a/stackslib/src/net/atlas/tests.rs b/stackslib/src/net/atlas/tests.rs index 2ebcb71316..8094c77799 100644 --- a/stackslib/src/net/atlas/tests.rs +++ b/stackslib/src/net/atlas/tests.rs @@ -18,6 +18,7 @@ use std::collections::{BinaryHeap, HashMap, HashSet}; use std::{thread, time}; use clarity::vm::types::QualifiedContractIdentifier; +use rusqlite::params; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::util::hash::Hash160; @@ -832,16 +833,16 @@ fn schema_2_migration() { attachment_index, block_height, is_available, metadata, contract_id, tx_id) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)", - rusqlite::params![ - &attachment.content_hash, - &0, - &attachment.index_block_hash, - &attachment.attachment_index, - &u64_to_sql(attachment.stacks_block_height).unwrap(), - &true, - &attachment.metadata, - &attachment.contract_id.to_string(), - &attachment.tx_id, + params![ + attachment.content_hash, + 0, + attachment.index_block_hash, + attachment.attachment_index, + u64_to_sql(attachment.stacks_block_height).unwrap(), + true, + attachment.metadata, + attachment.contract_id.to_string(), + attachment.tx_id, ], ) .unwrap(); diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 9ef77e169a..4ba6f243af 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -23,8 +23,9 @@ use clarity::vm::types::{ use rand::seq::SliceRandom; use rand::{thread_rng, Rng, RngCore}; use rusqlite::types::ToSql; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, Row, Transaction}; use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util; use stacks_common::util::hash::{ bin_bytes, hex_bytes, to_bin, to_hex, Hash160, Sha256Sum, Sha512Trunc256Sum, @@ -507,11 +508,9 @@ impl PeerDB { fn get_schema_version(conn: &Connection) -> Result { let version = conn - .query_row( - "SELECT MAX(version) from db_config", - rusqlite::NO_PARAMS, - |row| row.get(0), - ) + .query_row("SELECT MAX(version) from db_config", NO_PARAMS, |row| { + row.get(0) + }) .optional()? .unwrap_or("1".to_string()); Ok(version) @@ -819,7 +818,7 @@ impl PeerDB { ) -> Result<(), db_error> { tx.execute( "UPDATE local_peer SET addrbytes = ?1, port = ?2", - &[&to_bin(addrbytes.as_bytes().as_ref()), &port as &dyn ToSql], + params![to_bin(addrbytes.as_bytes()), port], // TODO: double check if delete as_ref here ) .map_err(db_error::SqliteError)?; @@ -830,7 +829,7 @@ impl PeerDB { pub fn set_local_services(tx: &Transaction, services: u16) -> Result<(), db_error> { tx.execute( "UPDATE local_peer SET services = ?1", - &[&services as &dyn ToSql], + params![services], ) .map_err(db_error::SqliteError)?; @@ -916,12 +915,12 @@ impl PeerDB { peer_port: u16, ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; - let args = [ - &network_id as &dyn ToSql, - &peer_addr.to_bin() as &dyn ToSql, - &peer_port as &dyn ToSql, + let args = params![ + network_id, + peer_addr.to_bin(), + peer_port, ]; - query_row::(conn, qry, &args) + query_row::(conn, qry, args) } pub fn has_peer( @@ -931,7 +930,7 @@ impl PeerDB { peer_port: u16, ) -> Result { let qry = "SELECT 1 FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; - let args: &[&dyn ToSql] = &[&network_id, &peer_addr.to_bin(), &peer_port]; + let args: &[&dyn ToSql] = params![network_id, peer_addr.to_bin(), peer_port]; Ok(query_row::(conn, &qry, args)? .map(|x| x == 1) .unwrap_or(false)) @@ -945,8 +944,8 @@ impl PeerDB { peer_port: u16, ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND port = ?2"; - let args = [&network_id as &dyn ToSql, &peer_port as &dyn ToSql]; - query_row::(conn, &qry, &args) + let args = params![network_id, peer_port]; + query_row::(conn, &qry, args) } /// Get a peer record at a particular slot @@ -956,15 +955,15 @@ impl PeerDB { slot: u32, ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND slot = ?2"; - let args = [&network_id as &dyn ToSql, &slot as &dyn ToSql]; - query_row::(conn, &qry, &args) + let args = params![network_id, slot]; + query_row::(conn, &qry, args) } /// Is there any peer at a particular slot? pub fn has_peer_at(conn: &DBConn, network_id: u32, slot: u32) -> Result { let qry = "SELECT 1 FROM frontier WHERE network_id = ?1 AND slot = ?2"; - let args = [&network_id as &dyn ToSql, &slot as &dyn ToSql]; - Ok(query_row::(conn, &qry, &args)? + let args = params![network_id, slot]; + Ok(query_row::(conn, &qry, args)? .map(|x| x == 1) .unwrap_or(false)) } @@ -1108,10 +1107,10 @@ impl PeerDB { let slot_opt = Self::find_peer_slot(tx, network_id, peer_addr, peer_port)?; tx.execute( "DELETE FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3", - &[ - &network_id as &dyn ToSql, - &peer_addr.to_bin() as &dyn ToSql, - &peer_port as &dyn ToSql, + params![ + network_id, + peer_addr.to_bin(), + peer_port, ], ) .map_err(db_error::SqliteError)?; @@ -1132,7 +1131,7 @@ impl PeerDB { let res: Option = query_row( conn, "SELECT initial FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3", - &[&network_id as &dyn ToSql, &peer_addr.to_bin(), &peer_port], + params![network_id, peer_addr.to_bin(), peer_port], )?; match res { @@ -1149,7 +1148,7 @@ impl PeerDB { peer_port: u16, ) -> Result<(), db_error> { tx.execute("UPDATE frontier SET initial = 1 WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3", - &[&network_id as &dyn ToSql, &peer_addr.to_bin(), &peer_port]) + params![network_id, peer_addr.to_bin(), peer_port]) .map_err(db_error::SqliteError)?; Ok(()) @@ -1173,7 +1172,7 @@ impl PeerDB { allow_deadline: i64, ) -> Result<(), db_error> { let num_updated = tx.execute("UPDATE frontier SET allowed = ?1 WHERE network_id = ?2 AND addrbytes = ?3 AND port = ?4", - &[&allow_deadline as &dyn ToSql, &network_id, &peer_addr.to_bin(), &peer_port]) + params![allow_deadline, network_id, peer_addr.to_bin(), peer_port]) .map_err(db_error::SqliteError)?; if num_updated == 0 { @@ -1723,11 +1722,11 @@ impl PeerDB { fn asn4_insert(tx: &Transaction, asn4: &ASEntry4) -> Result<(), db_error> { tx.execute( "INSERT OR REPLACE INTO asn4 (prefix, mask, asn, org) VALUES (?1, ?2, ?3, ?4)", - &[ - &asn4.prefix as &dyn ToSql, - &asn4.mask as &dyn ToSql, - &asn4.asn as &dyn ToSql, - &asn4.org as &dyn ToSql, + params![ + asn4.prefix, + asn4.mask, + asn4.asn, + asn4.org, ], ) .map_err(db_error::SqliteError)?; @@ -1747,8 +1746,8 @@ impl PeerDB { let addr_u32 = addrbits.ipv4_bits().unwrap(); let qry = "SELECT * FROM asn4 WHERE prefix = (?1 & ~((1 << (32 - mask)) - 1)) ORDER BY prefix DESC LIMIT 1"; - let args = [&addr_u32 as &dyn ToSql]; - let rows = query_rows::(conn, &qry, &args)?; + let args = params![addr_u32]; + let rows = query_rows::(conn, &qry, args)?; match rows.len() { 0 => Ok(None), _ => Ok(Some(rows[0].asn)), @@ -1770,8 +1769,8 @@ impl PeerDB { #[cfg_attr(test, mutants::skip)] pub fn asn_count(conn: &DBConn, asn: u32) -> Result { let qry = "SELECT COUNT(*) FROM frontier WHERE asn = ?1"; - let args = [&asn as &dyn ToSql]; - let count = query_count(conn, &qry, &args)?; + let args = params![asn]; + let count = query_count(conn, &qry, args)?; Ok(count as u64) } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 94c061dc5d..c4749e03e1 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1699,13 +1699,13 @@ pub mod test { use std::{fs, io, thread}; use clarity::boot_util::boot_code_id; + use clarity::types::sqlite::NO_PARAMS; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::STXBalance; use clarity::vm::types::*; use clarity::vm::ClarityVersion; use rand::{Rng, RngCore}; - use rusqlite::NO_PARAMS; use stacks_common::address::*; use stacks_common::codec::StacksMessageCodec; use stacks_common::deps_common::bitcoin::network::serialize::BitcoinHash; diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index 2d6651cdda..6f0e1db01b 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -37,12 +37,13 @@ use clarity::vm::{ClarityName, ClarityVersion, ContractName, SymbolicExpression, use libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; use rand::prelude::*; use rand::thread_rng; -use rusqlite::{DatabaseName, NO_PARAMS}; +use rusqlite::DatabaseName; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, }; use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::chunked_encoding::*; use stacks_common::util::get_epoch_time_secs; diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 6cdebb69d9..43fb0ddd11 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -22,8 +22,9 @@ use clarity::vm::types::QualifiedContractIdentifier; use clarity::vm::ContractName; use libstackerdb::{SlotMetadata, STACKERDB_MAX_CHUNK_SIZE}; use rusqlite::types::ToSql; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction, NO_PARAMS}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, Row, Transaction}; use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; @@ -258,15 +259,15 @@ impl<'a> StackerDBTx<'a> { for (principal, slot_count) in slots.iter() { test_debug!("Create StackerDB slots: ({}, {})", &principal, slot_count); for _ in 0..*slot_count { - let args: &[&dyn ToSql] = &[ - &stackerdb_id, - &principal.to_string(), - &slot_id, - &NO_VERSION, - &0, - &vec![], - &Sha512Trunc256Sum([0u8; 32]), - &MessageSignature::empty(), + let args: &[&dyn ToSql] = params![ + stackerdb_id, + principal.to_string(), + slot_id, + NO_VERSION, + 0, + vec![], + Sha512Trunc256Sum([0u8; 32]), + MessageSignature::empty(), ]; stmt.execute(args)?; @@ -326,15 +327,15 @@ impl<'a> StackerDBTx<'a> { // new slot, or existing slot with a different signer let qry = "INSERT OR REPLACE INTO chunks (stackerdb_id,signer,slot_id,version,write_time,data,data_hash,signature) VALUES (?1,?2,?3,?4,?5,?6,?7,?8)"; let mut stmt = self.sql_tx.prepare(&qry)?; - let args: &[&dyn ToSql] = &[ - &stackerdb_id, - &principal.to_string(), - &slot_id, - &NO_VERSION, - &0, - &vec![], - &Sha512Trunc256Sum([0u8; 32]), - &MessageSignature::empty(), + let args: &[&dyn ToSql] = params![ + stackerdb_id, + principal.to_string(), + slot_id, + NO_VERSION, + 0, + vec![], + Sha512Trunc256Sum([0u8; 32]), + MessageSignature::empty(), ]; stmt.execute(args)?; diff --git a/stackslib/src/util_lib/bloom.rs b/stackslib/src/util_lib/bloom.rs index d34fca233a..75652c8f50 100644 --- a/stackslib/src/util_lib/bloom.rs +++ b/stackslib/src/util_lib/bloom.rs @@ -22,9 +22,10 @@ use std::io::{Read, Seek, SeekFrom, Write}; use rand::prelude::*; use rand::thread_rng; use rusqlite::blob::Blob; -use rusqlite::{Error as sqlite_error, Row, ToSql, NO_PARAMS}; +use rusqlite::{Error as sqlite_error, Row, ToSql}; use siphasher::sip::SipHasher; // this is SipHash-2-4 use stacks_common::codec::{read_next, write_next, Error as codec_error, StacksMessageCodec}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; use crate::util_lib::db::{query_expect_row, DBConn, DBTx, Error as db_error}; @@ -381,7 +382,7 @@ impl BloomCounter { let sql = format!("SELECT rowid,* FROM {}", table_name); let result = conn.query_row_and_then(&sql, NO_PARAMS, |row| { let mut hasher_blob = row - .get_raw("hasher") + .get_ref("hasher")? .as_blob() .expect("Unable to read hasher as blob"); let hasher = diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index 22509a8ac4..cc6fb9a6df 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -25,11 +25,11 @@ use clarity::vm::types::QualifiedContractIdentifier; use rand::{thread_rng, Rng, RngCore}; use rusqlite::types::{FromSql, ToSql}; use rusqlite::{ - Connection, Error as sqlite_error, OpenFlags, OptionalExtension, Row, Transaction, - TransactionBehavior, NO_PARAMS, + params, Connection, Error as sqlite_error, OpenFlags, OptionalExtension, Params, Row, Transaction, TransactionBehavior }; use serde_json::Error as serde_error; use stacks_common::types::chainstate::{SortitionId, StacksAddress, StacksBlockId, TrieHash}; +use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::Address; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; @@ -397,8 +397,7 @@ fn log_sql_eqp(_conn: &Connection, _sql_query: &str) {} /// boilerplate code for querying rows pub fn query_rows(conn: &Connection, sql_query: &str, sql_args: P) -> Result, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, T: FromRow, { log_sql_eqp(conn, sql_query); @@ -412,8 +411,7 @@ where /// if more than 1 row is returned, excess rows are ignored. pub fn query_row(conn: &Connection, sql_query: &str, sql_args: P) -> Result, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, T: FromRow, { log_sql_eqp(conn, sql_query); @@ -433,8 +431,7 @@ pub fn query_expect_row( sql_args: P, ) -> Result, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, T: FromRow, { log_sql_eqp(conn, sql_query); @@ -459,8 +456,7 @@ pub fn query_row_panic( panic_message: F, ) -> Result, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, T: FromRow, F: FnOnce() -> String, { @@ -485,8 +481,7 @@ pub fn query_row_columns( column_name: &str, ) -> Result, Error> where - P: IntoIterator, - P::Item: ToSql, + P: Params, T: FromColumn, { log_sql_eqp(conn, sql_query); @@ -506,8 +501,7 @@ where /// Boilerplate for querying a single integer (first and only item of the query must be an int) pub fn query_int

(conn: &Connection, sql_query: &str, sql_args: P) -> Result where - P: IntoIterator, - P::Item: ToSql, + P: Params, { log_sql_eqp(conn, sql_query); let mut stmt = conn.prepare(sql_query)?; @@ -530,8 +524,7 @@ where pub fn query_count

(conn: &Connection, sql_query: &str, sql_args: P) -> Result where - P: IntoIterator, - P::Item: ToSql, + P: Params, { query_int(conn, sql_query, sql_args) } @@ -790,7 +783,7 @@ fn load_indexed(conn: &DBConn, marf_value: &MARFValue) -> Result, .prepare("SELECT value FROM __fork_storage WHERE value_hash = ?1 LIMIT 2") .map_err(Error::SqliteError)?; let mut rows = stmt - .query(&[&marf_value.to_hex() as &dyn ToSql]) + .query(params![marf_value.to_hex()]) .map_err(Error::SqliteError)?; let mut value = None; diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 42f4f858b7..a9e5db6c7f 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -50,7 +50,7 @@ wsts = {workspace = true} mutants = "0.0.3" [dependencies.rusqlite] -version = "=0.24.2" +version = "0.31.0" features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] [[bin]] diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index fb1535685a..d23ed3d593 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -8765,7 +8765,7 @@ fn atlas_stress_integration_test() { let mut hashes = query_row_columns::( &atlasdb.conn, "SELECT content_hash FROM attachment_instances WHERE index_block_hash = ?1 AND attachment_index = ?2", - &[ibh as &dyn ToSql, &u64_to_sql(*index).unwrap() as &dyn ToSql], + params![ibh, u64_to_sql(*index).unwrap()], "content_hash") .unwrap(); if hashes.len() > 0 { From 3670614996d92bc80487194bf58e9523518e20b7 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 4 Jul 2024 17:45:44 +0300 Subject: [PATCH 0473/1400] use params! macro everywhere & other rusqlite standardisations --- clarity/src/vm/database/sqlite.rs | 34 +- stacks-common/src/bitvec.rs | 4 +- stacks-common/src/types/sqlite.rs | 2 +- stackslib/src/burnchains/bitcoin/spv.rs | 24 +- stackslib/src/burnchains/db.rs | 101 ++--- stackslib/src/burnchains/tests/db.rs | 4 +- stackslib/src/chainstate/burn/db/sortdb.rs | 349 +++++++++--------- stackslib/src/chainstate/nakamoto/mod.rs | 64 ++-- .../src/chainstate/nakamoto/signer_set.rs | 4 +- .../src/chainstate/nakamoto/staging_blocks.rs | 16 +- stackslib/src/chainstate/nakamoto/tenure.rs | 36 +- .../src/chainstate/nakamoto/tests/mod.rs | 5 +- .../src/chainstate/stacks/db/accounts.rs | 70 ++-- stackslib/src/chainstate/stacks/db/blocks.rs | 118 +++--- stackslib/src/chainstate/stacks/db/headers.rs | 54 +-- stackslib/src/chainstate/stacks/db/mod.rs | 8 +- .../src/chainstate/stacks/index/trie_sql.rs | 50 +-- stackslib/src/chainstate/stacks/mod.rs | 4 +- stackslib/src/clarity_vm/database/mod.rs | 9 +- stackslib/src/core/mempool.rs | 106 +++--- stackslib/src/cost_estimates/fee_medians.rs | 7 +- stackslib/src/cost_estimates/fee_scalar.rs | 7 +- stackslib/src/cost_estimates/pessimistic.rs | 50 ++- stackslib/src/main.rs | 2 +- stackslib/src/net/atlas/db.rs | 18 +- stackslib/src/net/db.rs | 194 +++++----- stackslib/src/net/mod.rs | 3 +- stackslib/src/net/stackerdb/db.rs | 44 +-- stackslib/src/util_lib/bloom.rs | 5 +- stackslib/src/util_lib/db.rs | 3 +- .../src/tests/neon_integrations.rs | 2 +- 31 files changed, 674 insertions(+), 723 deletions(-) diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index 81dd2dfb22..361bf337a2 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -16,7 +16,7 @@ use rusqlite::types::{FromSql, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use rusqlite::{ - Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OptionalExtension, Row, + params, Connection, Error as SqliteError, ErrorCode as SqliteErrorCode, OptionalExtension, Row, Savepoint, }; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; @@ -44,11 +44,8 @@ pub struct SqliteConnection { } fn sqlite_put(conn: &Connection, key: &str, value: &str) -> Result<()> { - let params: [&dyn ToSql; 2] = [&key, &value]; - match conn.execute( - "REPLACE INTO data_table (key, value) VALUES (?, ?)", - ¶ms, - ) { + let params: &[&dyn ToSql] = params![key, value]; + match conn.execute("REPLACE INTO data_table (key, value) VALUES (?, ?)", params) { Ok(_) => Ok(()), Err(e) => { error!("Failed to insert/replace ({},{}): {:?}", key, value, &e); @@ -59,11 +56,11 @@ fn sqlite_put(conn: &Connection, key: &str, value: &str) -> Result<()> { fn sqlite_get(conn: &Connection, key: &str) -> Result> { trace!("sqlite_get {}", key); - let params: [&dyn ToSql; 1] = [&key]; + let params: &[&dyn ToSql] = params![key]; let res = match conn .query_row( "SELECT value FROM data_table WHERE key = ?", - ¶ms, + params, |row| row.get(0), ) .optional() @@ -156,11 +153,11 @@ impl SqliteConnection { value: &str, ) -> Result<()> { let key = format!("clr-meta::{}::{}", contract_hash, key); - let params: [&dyn ToSql; 3] = [&bhh, &key, &value]; + let params: &[&dyn ToSql] = params![bhh, key, value]; if let Err(e) = conn.execute( "INSERT INTO metadata_table (blockhash, key, value) VALUES (?, ?, ?)", - ¶ms, + params, ) { error!( "Failed to insert ({},{},{}): {:?}", @@ -179,10 +176,10 @@ impl SqliteConnection { from: &StacksBlockId, to: &StacksBlockId, ) -> Result<()> { - let params = [to, from]; + let params: &[&dyn ToSql] = params![to, from]; if let Err(e) = conn.execute( "UPDATE metadata_table SET blockhash = ? WHERE blockhash = ?", - ¶ms, + params, ) { error!("Failed to update {} to {}: {:?}", &from, &to, &e); return Err(InterpreterError::DBError(SQL_FAIL_MESSAGE.into()).into()); @@ -191,7 +188,10 @@ impl SqliteConnection { } pub fn drop_metadata(conn: &Connection, from: &StacksBlockId) -> Result<()> { - if let Err(e) = conn.execute("DELETE FROM metadata_table WHERE blockhash = ?", &[from]) { + if let Err(e) = conn.execute( + "DELETE FROM metadata_table WHERE blockhash = ?", + params![from], + ) { error!("Failed to drop metadata from {}: {:?}", &from, &e); return Err(InterpreterError::DBError(SQL_FAIL_MESSAGE.into()).into()); } @@ -205,12 +205,12 @@ impl SqliteConnection { key: &str, ) -> Result> { let key = format!("clr-meta::{}::{}", contract_hash, key); - let params: [&dyn ToSql; 2] = [&bhh, &key]; + let params: &[&dyn ToSql] = params![bhh, key]; match conn .query_row( "SELECT value FROM metadata_table WHERE blockhash = ? AND key = ?", - ¶ms, + params, |row| row.get(0), ) .optional() @@ -265,10 +265,10 @@ impl SqliteConnection { pub fn check_schema(conn: &Connection) -> Result<()> { let sql = "SELECT sql FROM sqlite_master WHERE name=?"; let _: String = conn - .query_row(sql, &["data_table"], |row| row.get(0)) + .query_row(sql, params!["data_table"], |row| row.get(0)) .map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?; let _: String = conn - .query_row(sql, &["metadata_table"], |row| row.get(0)) + .query_row(sql, params!["metadata_table"], |row| row.get(0)) .map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?; Ok(()) } diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index cd7209a801..792532e135 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -15,9 +15,7 @@ // along with this program. If not, see . #[cfg(feature = "canonical")] -use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSqlOutput, ValueRef}; -#[cfg(feature = "canonical")] -use rusqlite::ToSql; +use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use serde::{Deserialize, Serialize}; use crate::codec::{ diff --git a/stacks-common/src/types/sqlite.rs b/stacks-common/src/types/sqlite.rs index 646c4f70d4..183ec61fbc 100644 --- a/stacks-common/src/types/sqlite.rs +++ b/stacks-common/src/types/sqlite.rs @@ -25,7 +25,7 @@ use crate::util::hash::{Hash160, Sha512Trunc256Sum}; use crate::util::secp256k1::MessageSignature; use crate::util::vrf::VRFProof; -pub const NO_PARAMS: &[&dyn rusqlite::ToSql] = &[]; +pub const NO_PARAMS: &[&dyn ToSql] = &[]; impl FromSql for Sha256dHash { fn column_result(value: ValueRef) -> FromSqlResult { diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index f11b2a5884..71c167332f 100644 --- a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -20,7 +20,7 @@ use std::ops::Deref; use std::{cmp, fs}; use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, Row, Transaction}; use stacks_common::deps_common::bitcoin::blockdata::block::{BlockHeader, LoneBlockHeader}; use stacks_common::deps_common::bitcoin::blockdata::constants::genesis_block; use stacks_common::deps_common::bitcoin::network::constants::Network; @@ -424,7 +424,7 @@ impl SpvClient { } let tx = self.tx_begin()?; - let args: &[&dyn ToSql] = &[&u64_to_sql(interval)?, &work.to_hex_be()]; + let args: &[&dyn ToSql] = params![u64_to_sql(interval)?, work.to_hex_be()]; tx.execute( "INSERT OR REPLACE INTO chain_work (interval,work) VALUES (?1,?2)", args, @@ -707,7 +707,7 @@ impl SpvClient { let mut headers = vec![]; let sql_query = "SELECT * FROM headers WHERE height >= ?1 AND height < ?2 ORDER BY height"; - let sql_args: &[&dyn ToSql] = &[&u64_to_sql(start_block)?, &u64_to_sql(end_block)?]; + let sql_args: &[&dyn ToSql] = params![u64_to_sql(start_block)?, u64_to_sql(end_block)?]; let mut stmt = self .headers_db @@ -749,15 +749,15 @@ impl SpvClient { let sql = "INSERT OR REPLACE INTO headers (version, prev_blockhash, merkle_root, time, bits, nonce, height, hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)"; - let args: &[&dyn ToSql] = &[ - &header.version, - &header.prev_blockhash, - &header.merkle_root, - &header.time, - &header.bits, - &header.nonce, - &u64_to_sql(height)?, - &BurnchainHeaderHash::from_bitcoin_hash(&header.bitcoin_hash()), + let args: &[&dyn ToSql] = params![ + header.version, + header.prev_blockhash, + header.merkle_root, + header.time, + header.bits, + header.nonce, + u64_to_sql(height)?, + BurnchainHeaderHash::from_bitcoin_hash(&header.bitcoin_hash()), ]; tx.execute(sql, args) diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 2e67d7f0ed..10d928fcf8 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -19,7 +19,7 @@ use std::path::Path; use std::{cmp, fmt, fs, io}; use rusqlite::types::ToSql; -use rusqlite::{Connection, OpenFlags, OptionalExtension, Row, Transaction}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension, Row, Transaction}; use serde_json; use stacks_common::types::chainstate::BurnchainHeaderHash; use stacks_common::types::sqlite::NO_PARAMS; @@ -322,12 +322,12 @@ impl<'a> BurnchainDBTransaction<'a> { let sql = "INSERT OR IGNORE INTO burnchain_db_block_headers (block_height, block_hash, parent_block_hash, num_txs, timestamp) VALUES (?, ?, ?, ?, ?)"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(header.block_height)?, - &header.block_hash, - &header.parent_block_hash, - &u64_to_sql(header.num_txs)?, - &u64_to_sql(header.timestamp)?, + let args: &[&dyn ToSql] = params![ + u64_to_sql(header.block_height)?, + header.block_hash, + header.parent_block_hash, + u64_to_sql(header.num_txs)?, + u64_to_sql(header.timestamp)?, ]; let affected_rows = self.sql_tx.execute(sql, args)?; if affected_rows == 0 { @@ -347,7 +347,7 @@ impl<'a> BurnchainDBTransaction<'a> { ) -> Result { let weight = affirmation_map.weight(); let sql = "INSERT INTO affirmation_maps (affirmation_map,weight) VALUES (?1,?2)"; - let args: &[&dyn ToSql] = &[&affirmation_map.encode(), &u64_to_sql(weight)?]; + let args: &[&dyn ToSql] = params![affirmation_map.encode(), u64_to_sql(weight)?]; match self.sql_tx.execute(sql, args) { Ok(_) => { let am_id = BurnchainDB::get_affirmation_map_id(&self.sql_tx, &affirmation_map)? @@ -368,11 +368,11 @@ impl<'a> BurnchainDBTransaction<'a> { affirmation_id: u64, ) -> Result<(), DBError> { let sql = "UPDATE block_commit_metadata SET affirmation_id = ?1, anchor_block_descendant = ?2 WHERE burn_block_hash = ?3 AND txid = ?4"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(affirmation_id)?, - &opt_u64_to_sql(anchor_block_descendant)?, - &block_commit.burn_header_hash, - &block_commit.txid, + let args: &[&dyn ToSql] = params![ + u64_to_sql(affirmation_id)?, + opt_u64_to_sql(anchor_block_descendant)?, + block_commit.burn_header_hash, + block_commit.txid, ]; match self.sql_tx.execute(sql, args) { Ok(_) => { @@ -391,16 +391,16 @@ impl<'a> BurnchainDBTransaction<'a> { target_reward_cycle: u64, ) -> Result<(), DBError> { let sql = "INSERT OR REPLACE INTO anchor_blocks (reward_cycle) VALUES (?1)"; - let args: &[&dyn ToSql] = &[&u64_to_sql(target_reward_cycle)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(target_reward_cycle)?]; self.sql_tx .execute(sql, args) .map_err(|e| DBError::SqliteError(e))?; let sql = "UPDATE block_commit_metadata SET anchor_block = ?1 WHERE burn_block_hash = ?2 AND txid = ?3"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(target_reward_cycle)?, - &block_commit.burn_header_hash, - &block_commit.txid, + let args: &[&dyn ToSql] = params![ + u64_to_sql(target_reward_cycle)?, + block_commit.burn_header_hash, + block_commit.txid, ]; match self.sql_tx.execute(sql, args) { Ok(_) => { @@ -421,7 +421,7 @@ impl<'a> BurnchainDBTransaction<'a> { /// Unmark all block-commit(s) that were anchor block(s) for this reward cycle. pub fn clear_anchor_block(&self, reward_cycle: u64) -> Result<(), DBError> { let sql = "UPDATE block_commit_metadata SET anchor_block = NULL WHERE anchor_block = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?]; self.sql_tx .execute(sql, args) .map(|_| ()) @@ -878,14 +878,14 @@ impl<'a> BurnchainDBTransaction<'a> { (burn_block_hash, txid, block_height, vtxindex, anchor_block, anchor_block_descendant, affirmation_id) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"; let mut stmt = self.sql_tx.prepare(commit_metadata_sql)?; - let args: &[&dyn ToSql] = &[ - &bcm.burn_block_hash, - &bcm.txid, - &u64_to_sql(bcm.block_height)?, - &bcm.vtxindex, - &opt_u64_to_sql(bcm.anchor_block)?, - &opt_u64_to_sql(bcm.anchor_block_descendant)?, - &u64_to_sql(bcm.affirmation_id)?, + let args: &[&dyn ToSql] = params![ + bcm.burn_block_hash, + bcm.txid, + u64_to_sql(bcm.block_height)?, + bcm.vtxindex, + opt_u64_to_sql(bcm.anchor_block)?, + opt_u64_to_sql(bcm.anchor_block_descendant)?, + u64_to_sql(bcm.affirmation_id)?, ]; stmt.execute(args)?; Ok(()) @@ -904,7 +904,8 @@ impl<'a> BurnchainDBTransaction<'a> { for op in block_ops.iter() { let serialized_op = serde_json::to_string(op).expect("Failed to serialize parsed BlockstackOp"); - let args: &[&dyn ToSql] = &[&block_header.block_hash, op.txid_ref(), &serialized_op]; + let args: &[&dyn ToSql] = + params![block_header.block_hash, op.txid_ref(), serialized_op]; stmt.execute(args)?; } @@ -959,7 +960,7 @@ impl<'a> BurnchainDBTransaction<'a> { assert_eq!((affirmation_map.len() as u64) + 1, reward_cycle); let qry = "INSERT OR REPLACE INTO overrides (reward_cycle, affirmation_map) VALUES (?1, ?2)"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?, &affirmation_map.encode()]; + let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?, affirmation_map.encode()]; let mut stmt = self.sql_tx.prepare(qry)?; stmt.execute(args)?; @@ -968,7 +969,7 @@ impl<'a> BurnchainDBTransaction<'a> { pub fn clear_override_affirmation_map(&self, reward_cycle: u64) -> Result<(), DBError> { let qry = "DELETE FROM overrides WHERE reward_cycle = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?]; let mut stmt = self.sql_tx.prepare(qry)?; stmt.execute(args)?; @@ -981,7 +982,7 @@ impl BurnchainDB { let exists: i64 = query_row( self.conn(), "SELECT 1 FROM sqlite_master WHERE type = 'index' AND name = ?1", - &[LAST_BURNCHAIN_DB_INDEX], + params![LAST_BURNCHAIN_DB_INDEX], )? .unwrap_or(0); if exists == 0 { @@ -1039,7 +1040,7 @@ impl BurnchainDB { db_tx.sql_tx.execute_batch(BURNCHAIN_DB_SCHEMA)?; db_tx.sql_tx.execute( "INSERT INTO db_config (version) VALUES (?1)", - &[&BURNCHAIN_DB_VERSION], + params![&BURNCHAIN_DB_VERSION], )?; let first_block_header = BurnchainBlockHeader { @@ -1121,7 +1122,7 @@ impl BurnchainDB { height: u64, ) -> Result { let qry = "SELECT 1 FROM burnchain_db_block_headers WHERE block_height = ?1"; - let args = &[&u64_to_sql(height)?]; + let args = params![u64_to_sql(height)?]; let res: Option = query_row(conn, qry, args)?; Ok(res.is_some()) } @@ -1135,7 +1136,7 @@ impl BurnchainDB { return Ok(None); }; let qry = "SELECT * FROM burnchain_db_block_headers WHERE block_hash = ?1"; - let args = &[&hdr.block_hash]; + let args = params![hdr.block_hash]; let res: Option = query_row(conn, qry, args)?; Ok(res) } @@ -1148,9 +1149,9 @@ impl BurnchainDB { "SELECT * FROM burnchain_db_block_headers WHERE block_hash = ? LIMIT 1"; let block_ops_qry = "SELECT DISTINCT * FROM burnchain_db_block_ops WHERE block_hash = ?"; - let block_header = query_row(conn, block_header_qry, &[block])? + let block_header = query_row(conn, block_header_qry, params![block])? .ok_or_else(|| BurnchainError::UnknownBlock(block.clone()))?; - let block_ops = query_rows(conn, block_ops_qry, &[block])?; + let block_ops = query_rows(conn, block_ops_qry, params![block])?; Ok(BurnchainBlockData { header: block_header, @@ -1165,7 +1166,7 @@ impl BurnchainDB { ) -> Option { let qry = "SELECT DISTINCT op FROM burnchain_db_block_ops WHERE txid = ?1 AND block_hash = ?2"; - let args: &[&dyn ToSql] = &[txid, burn_header_hash]; + let args: &[&dyn ToSql] = params![txid, burn_header_hash]; match query_row(conn, qry, args) { Ok(res) => res, @@ -1184,7 +1185,7 @@ impl BurnchainDB { txid: &Txid, ) -> Option { let qry = "SELECT DISTINCT op FROM burnchain_db_block_ops WHERE txid = ?1"; - let args: &[&dyn ToSql] = &[txid]; + let args: &[&dyn ToSql] = params![txid]; let ops: Vec = query_rows(&self.conn, qry, args).expect("FATAL: burnchain DB query error"); @@ -1255,7 +1256,7 @@ impl BurnchainDB { affirmation_id: u64, ) -> Result, DBError> { let sql = "SELECT affirmation_map FROM affirmation_maps WHERE affirmation_id = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(affirmation_id)?]; + let args: &[&dyn ToSql] = params![&u64_to_sql(affirmation_id)?]; query_row(conn, sql, args) } @@ -1264,7 +1265,7 @@ impl BurnchainDB { affirmation_id: u64, ) -> Result, DBError> { let sql = "SELECT weight FROM affirmation_maps WHERE affirmation_id = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(affirmation_id)?]; + let args: &[&dyn ToSql] = params![&u64_to_sql(affirmation_id)?]; query_row(conn, sql, args) } @@ -1273,7 +1274,7 @@ impl BurnchainDB { affirmation_map: &AffirmationMap, ) -> Result, DBError> { let sql = "SELECT affirmation_id FROM affirmation_maps WHERE affirmation_map = ?1"; - let args: &[&dyn ToSql] = &[&affirmation_map.encode()]; + let args: &[&dyn ToSql] = params![&affirmation_map.encode()]; query_row(conn, sql, args) } @@ -1283,7 +1284,7 @@ impl BurnchainDB { txid: &Txid, ) -> Result, DBError> { let sql = "SELECT affirmation_id FROM block_commit_metadata WHERE burn_block_hash = ?1 AND txid = ?2"; - let args: &[&dyn ToSql] = &[burn_header_hash, txid]; + let args: &[&dyn ToSql] = params![burn_header_hash, txid]; query_row(conn, sql, args) } @@ -1304,13 +1305,13 @@ impl BurnchainDB { txid: &Txid, ) -> Result { let sql = "SELECT 1 FROM block_commit_metadata WHERE anchor_block IS NOT NULL AND burn_block_hash = ?1 AND txid = ?2"; - let args: &[&dyn ToSql] = &[burn_header_hash, txid]; + let args: &[&dyn ToSql] = params![burn_header_hash, txid]; query_row(conn, sql, args)?.ok_or(DBError::NotFoundError) } pub fn has_anchor_block(conn: &DBConn, reward_cycle: u64) -> Result { let sql = "SELECT 1 FROM block_commit_metadata WHERE anchor_block = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?]; Ok(query_row::(conn, sql, args)?.is_some()) } @@ -1319,7 +1320,7 @@ impl BurnchainDB { reward_cycle: u64, ) -> Result, DBError> { let sql = "SELECT * FROM block_commit_metadata WHERE anchor_block = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?]; let metadatas: Vec = query_rows(conn, sql, args)?; Ok(metadatas) @@ -1331,7 +1332,7 @@ impl BurnchainDB { reward_cycle: u64, ) -> Result, DBError> { let sql = "SELECT * FROM block_commit_metadata WHERE anchor_block = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?]; let metadatas: Vec = query_rows(conn, sql, args)?; for metadata in metadatas { @@ -1372,7 +1373,7 @@ impl BurnchainDB { ) -> Result, DBError> { let sql = "SELECT * FROM block_commit_metadata WHERE anchor_block = ?1 AND burn_block_hash = ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?, anchor_block_burn_header_hash]; + let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?, anchor_block_burn_header_hash]; if let Some(commit_metadata) = query_row::(conn, sql, args)? { let commit = BurnchainDB::get_block_commit( conn, @@ -1450,7 +1451,7 @@ impl BurnchainDB { vtxindex: u16, ) -> Result, DBError> { let qry = "SELECT txid FROM block_commit_metadata WHERE block_height = ?1 AND vtxindex = ?2 AND burn_block_hash = ?3"; - let args: &[&dyn ToSql] = &[&block_ptr, &vtxindex, &header_hash]; + let args: &[&dyn ToSql] = params![block_ptr, vtxindex, header_hash]; let txid = match query_row(&conn, qry, args) { Ok(Some(txid)) => txid, Ok(None) => { @@ -1496,7 +1497,7 @@ impl BurnchainDB { burn_block_hash: &BurnchainHeaderHash, txid: &Txid, ) -> Result, DBError> { - let args: &[&dyn ToSql] = &[burn_block_hash, txid]; + let args: &[&dyn ToSql] = params![burn_block_hash, txid]; query_row_panic( conn, "SELECT * FROM block_commit_metadata WHERE burn_block_hash = ?1 AND txid = ?2", @@ -1611,7 +1612,7 @@ impl BurnchainDB { let am_opt: Option = query_row_panic( conn, "SELECT affirmation_map FROM overrides WHERE reward_cycle = ?1", - &[&u64_to_sql(reward_cycle)?], + params![u64_to_sql(reward_cycle)?], || format!("BUG: more than one override affirmation map for the same reward cycle"), )?; if let Some(am) = &am_opt { diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index aa023d938b..1ab35bb2c6 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -54,8 +54,10 @@ impl BurnchainDB { &self, block_hash: &BurnchainHeaderHash, ) -> Result, BurnchainError> { + use rusqlite::params; + let sql = "SELECT op FROM burnchain_db_block_ops WHERE block_hash = ?1"; - let args: &[&dyn ToSql] = &[block_hash]; + let args: &[&dyn ToSql] = params![block_hash]; let mut ops: Vec = query_rows(&self.conn, sql, args)?; ops.sort_by(|a, b| a.vtxindex().cmp(&b.vtxindex())); Ok(ops) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index d620ad35be..0652af304c 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -29,7 +29,8 @@ use rand; use rand::RngCore; use rusqlite::types::ToSql; use rusqlite::{ - params, Connection, Error as sqlite_error, OpenFlags, OptionalExtension, Row, Transaction, TransactionBehavior + params, Connection, Error as sqlite_error, OpenFlags, OptionalExtension, Row, Transaction, + TransactionBehavior, }; use sha2::{Digest, Sha512_256}; use stacks_common::address::AddressHashMode; @@ -860,7 +861,7 @@ pub fn get_block_commit_by_txid( txid: &Txid, ) -> Result, db_error> { let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND txid = ?2 LIMIT 1"; - let args: &[&dyn ToSql] = &[sort_id, txid]; + let args: &[&dyn ToSql] = params![sort_id, txid]; query_row(conn, qry, args) } @@ -1205,10 +1206,10 @@ impl<'a> SortitionHandleTx<'a> { }; let qry = "SELECT * FROM leader_keys WHERE sortition_id = ?1 AND block_height = ?2 AND vtxindex = ?3 LIMIT 2"; - let args: &[&dyn ToSql] = &[ - &ancestor_snapshot.sortition_id, - &u64_to_sql(key_block_height)?, - &key_vtxindex, + let args: &[&dyn ToSql] = params![ + ancestor_snapshot.sortition_id, + u64_to_sql(key_block_height)?, + key_vtxindex, ]; query_row_panic(self.tx(), qry, args, || { format!( @@ -1674,7 +1675,7 @@ impl<'a> SortitionHandleTx<'a> { sortition_id: &SortitionId, ) -> Result<(Vec, u128), db_error> { let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; - let args: &[&dyn ToSql] = &[sortition_id]; + let args: &[&dyn ToSql] = params![sortition_id]; let pox_addrs_json: String = query_row(self, sql, args)?.ok_or(db_error::NotFoundError)?; let pox_addrs: (Vec, u128) = @@ -1769,11 +1770,11 @@ impl<'a> SortitionHandleTx<'a> { stacks_block_height: u64, ) -> Result<(), db_error> { let sql = "INSERT OR REPLACE INTO stacks_chain_tips (sortition_id,consensus_hash,block_hash,block_height) VALUES (?1,?2,?3,?4)"; - let args: &[&dyn ToSql] = &[ + let args: &[&dyn ToSql] = params![ sort_id, consensus_hash, stacks_block_hash, - &u64_to_sql(stacks_block_height)?, + u64_to_sql(stacks_block_height)?, ]; self.execute(sql, args)?; Ok(()) @@ -1819,9 +1820,9 @@ impl<'a> SortitionHandleTx<'a> { // in epoch 2.x, where we track canonical stacks tip via the sortition DB let arrival_index = SortitionDB::get_max_arrival_index(self)?; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(stacks_block_height)?, - &u64_to_sql(arrival_index + 1)?, + let args: &[&dyn ToSql] = params![ + u64_to_sql(stacks_block_height)?, + u64_to_sql(arrival_index + 1)?, consensus_hash, stacks_block_hash, ]; @@ -2539,7 +2540,7 @@ impl<'a> SortitionHandleConn<'a> { sortition_id: &SortitionId, ) -> Result<(Vec, u128), db_error> { let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; - let args: &[&dyn ToSql] = &[sortition_id]; + let args: &[&dyn ToSql] = params![sortition_id]; let pox_addrs_json: String = query_row(self, sql, args)?.ok_or(db_error::NotFoundError)?; let pox_addrs: (Vec, u128) = @@ -2849,12 +2850,12 @@ impl SortitionDB { ) -> Result<(), db_error> { let epochs = StacksEpoch::validate_epochs(epochs); for epoch in epochs.into_iter() { - let args: &[&dyn ToSql] = &[ - &(epoch.epoch_id as u32), - &u64_to_sql(epoch.start_height)?, - &u64_to_sql(epoch.end_height)?, - &epoch.block_limit, - &epoch.network_epoch, + let args: &[&dyn ToSql] = params![ + (epoch.epoch_id as u32), + u64_to_sql(epoch.start_height)?, + u64_to_sql(epoch.end_height)?, + epoch.block_limit, + epoch.network_epoch, ]; db_tx.execute( "INSERT INTO epochs (epoch_id,start_block_height,end_block_height,block_limit,network_epoch) VALUES (?1,?2,?3,?4,?5)", @@ -2923,12 +2924,12 @@ impl SortitionDB { info!("Replace existing epochs with new epochs"); db_tx.execute("DELETE FROM epochs;", NO_PARAMS)?; for epoch in epochs.into_iter() { - let args: &[&dyn ToSql] = &[ - &(epoch.epoch_id as u32), - &u64_to_sql(epoch.start_height)?, - &u64_to_sql(epoch.end_height)?, - &epoch.block_limit, - &epoch.network_epoch, + let args: &[&dyn ToSql] = params![ + (epoch.epoch_id as u32), + u64_to_sql(epoch.start_height)?, + u64_to_sql(epoch.end_height)?, + epoch.block_limit, + epoch.network_epoch, ]; db_tx.execute( "INSERT INTO epochs (epoch_id,start_block_height,end_block_height,block_limit,network_epoch) VALUES (?1,?2,?3,?4,?5)", @@ -2945,8 +2946,8 @@ impl SortitionDB { sortition_id: &SortitionId, ) -> Result, db_error> { let qry = "SELECT * FROM block_commits WHERE txid = ?1 AND sortition_id = ?2"; - let args: [&dyn ToSql; 2] = [&txid, &sortition_id]; - query_row(conn, qry, &args) + let args: &[&dyn ToSql] = params![txid, sortition_id]; + query_row(conn, qry, args) } /// Get the Sortition ID for the burnchain block containing `txid`'s parent. @@ -2957,7 +2958,7 @@ impl SortitionDB { sortition_id: &SortitionId, ) -> Result, db_error> { let qry = "SELECT parent_sortition_id AS sortition_id FROM block_commit_parents WHERE block_commit_parents.block_commit_txid = ?1 AND block_commit_parents.block_commit_sortition_id = ?2"; - let args: &[&dyn ToSql] = &[txid, sortition_id]; + let args: &[&dyn ToSql] = params![txid, sortition_id]; query_row(conn, qry, args) } @@ -3108,11 +3109,11 @@ impl SortitionDB { tx.execute_batch(sql_exec)?; } - let typical_rules: &[&dyn ToSql] = &[&(ASTRules::Typical as u8), &0i64]; + let typical_rules: &[&dyn ToSql] = params![(ASTRules::Typical as u8), 0i64]; - let precheck_size_rules: &[&dyn ToSql] = &[ - &(ASTRules::PrecheckSize as u8), - &u64_to_sql(AST_RULES_PRECHECK_SIZE)?, + let precheck_size_rules: &[&dyn ToSql] = params![ + (ASTRules::PrecheckSize as u8), + u64_to_sql(AST_RULES_PRECHECK_SIZE)?, ]; tx.execute( @@ -3208,7 +3209,7 @@ impl SortitionDB { // skip if this step was done if table_exists(&tx, "stacks_chain_tips")? { let sql = "SELECT 1 FROM stacks_chain_tips WHERE sortition_id = ?1"; - let args = params![canonical_tip.sortition_id]; + let args: &[&dyn ToSql] = params![canonical_tip.sortition_id]; if let Ok(Some(_)) = query_row::(&tx, sql, args) { info!("`stacks_chain_tips` appears to have been populated already; skipping this step"); return Ok(()); @@ -3224,11 +3225,11 @@ impl SortitionDB { ); for snapshot in snapshots.into_iter() { let sql = "INSERT OR REPLACE INTO stacks_chain_tips (sortition_id,consensus_hash,block_hash,block_height) VALUES (?1,?2,?3,?4)"; - let args: &[&dyn ToSql] = &[ - &snapshot.sortition_id, - &snapshot.canonical_stacks_tip_consensus_hash, - &snapshot.canonical_stacks_tip_hash, - &u64_to_sql(snapshot.canonical_stacks_tip_height)?, + let args: &[&dyn ToSql] = params![ + snapshot.sortition_id, + snapshot.canonical_stacks_tip_consensus_hash, + snapshot.canonical_stacks_tip_hash, + u64_to_sql(snapshot.canonical_stacks_tip_height)?, ]; tx.execute(sql, args)?; } @@ -3452,7 +3453,7 @@ impl SortitionDB { ast_rules: ASTRules, height: u64, ) -> Result<(), db_error> { - let rules: &[&dyn ToSql] = &[&u64_to_sql(height)?, &(ast_rules as u8)]; + let rules: &[&dyn ToSql] = params![u64_to_sql(height)?, (ast_rules as u8)]; tx.execute( "UPDATE ast_rule_heights SET block_height = ?1 WHERE ast_rule_id = ?2", @@ -3505,7 +3506,7 @@ impl SortitionDB { } let sql = "REPLACE INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; let rc_json = serde_json::to_string(rc_info).map_err(db_error::SerializationError)?; - let args = params![sortition_id, rc_json]; + let args: &[&dyn ToSql] = params![sortition_id, rc_json]; sort_tx.execute(sql, args)?; Ok(()) } @@ -3588,7 +3589,7 @@ impl SortitionDB { sortition_id: &SortitionId, ) -> Result, db_error> { let sql = "SELECT reward_set FROM preprocessed_reward_sets WHERE sortition_id = ?1"; - let args: &[&dyn ToSql] = &[sortition_id]; + let args: &[&dyn ToSql] = params![sortition_id]; let reward_set_opt: Option = sortdb.query_row(sql, args, |row| row.get(0)).optional()?; @@ -3820,7 +3821,7 @@ impl<'a> SortitionDBConn<'a> { sortition_id: &SortitionId, ) -> Result<(Vec, u128), db_error> { let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; - let args: &[&dyn ToSql] = &[sortition_id]; + let args: &[&dyn ToSql] = params![sortition_id]; let pox_addrs_json: String = query_row(self.conn(), sql, args)?.ok_or(db_error::NotFoundError)?; @@ -4027,21 +4028,21 @@ impl SortitionDB { stacks_block_accepted: Option, ) -> Result<(), BurnchainError> { if let Some(stacks_block_accepted) = stacks_block_accepted { - let args: &[&dyn ToSql] = &[ + let args: &[&dyn ToSql] = params![ sortition_id, - &u64_to_sql(canonical_stacks_height)?, + u64_to_sql(canonical_stacks_height)?, canonical_stacks_bhh, canonical_stacks_ch, - &stacks_block_accepted, + stacks_block_accepted, ]; tx.execute( "UPDATE snapshots SET pox_valid = 1, canonical_stacks_tip_height = ?2, canonical_stacks_tip_hash = ?3, canonical_stacks_tip_consensus_hash = ?4, stacks_block_accepted = ?5 WHERE sortition_id = ?1", args )?; } else { - let args: &[&dyn ToSql] = &[ + let args: &[&dyn ToSql] = params![ sortition_id, - &u64_to_sql(canonical_stacks_height)?, + u64_to_sql(canonical_stacks_height)?, canonical_stacks_bhh, canonical_stacks_ch, ]; @@ -4606,7 +4607,7 @@ impl SortitionDB { burnchain_header_hash: &BurnchainHeaderHash, ) -> Result, db_error> { let sql = "SELECT parent_burn_header_hash AS burn_header_hash FROM snapshots WHERE burn_header_hash = ?1"; - let args: &[&dyn ToSql] = &[burnchain_header_hash]; + let args: &[&dyn ToSql] = params![burnchain_header_hash]; let mut rows = query_rows::(conn, sql, args)?; // there can be more than one if there was a PoX reorg. If so, make sure they're _all the @@ -4881,7 +4882,7 @@ impl SortitionDB { conn: &Connection, ) -> Result<(u64, BurnchainHeaderHash), db_error> { let sql = "SELECT block_height, burn_header_hash FROM snapshots WHERE consensus_hash = ?1"; - let args = params![ConsensusHash::empty()]; + let args: &[&dyn ToSql] = params![ConsensusHash::empty()]; let mut stmt = conn.prepare(sql)?; let mut rows = stmt.query(args)?; while let Some(row) = rows.next()? { @@ -4969,7 +4970,7 @@ impl SortitionDB { sortition: &SortitionId, ) -> Result, db_error> { let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 ORDER BY vtxindex ASC"; - let args: &[&dyn ToSql] = &[sortition]; + let args: &[&dyn ToSql] = params![sortition]; query_rows(conn, qry, args) } @@ -4981,7 +4982,7 @@ impl SortitionDB { sortition: &SortitionId, ) -> Result, db_error> { let qry = "SELECT * FROM missed_commits WHERE intended_sortition_id = ?1"; - let args: &[&dyn ToSql] = &[sortition]; + let args: &[&dyn ToSql] = params![sortition]; query_rows(conn, qry, args) } @@ -4993,7 +4994,7 @@ impl SortitionDB { sortition: &SortitionId, ) -> Result, db_error> { let qry = "SELECT * FROM leader_keys WHERE sortition_id = ?1 ORDER BY vtxindex ASC"; - let args: &[&dyn ToSql] = &[sortition]; + let args: &[&dyn ToSql] = params![sortition]; query_rows(conn, qry, args) } @@ -5007,7 +5008,7 @@ impl SortitionDB { let qry = "SELECT vtxindex FROM block_commits WHERE sortition_id = ?1 AND txid = ( SELECT winning_block_txid FROM snapshots WHERE sortition_id = ?2 LIMIT 1) LIMIT 1"; - let args: &[&dyn ToSql] = &[sortition, sortition]; + let args: &[&dyn ToSql] = params![sortition, sortition]; conn.query_row(qry, args, |row| row.get(0)) .optional() .map_err(db_error::from) @@ -5089,7 +5090,7 @@ impl SortitionDB { assert!(block_height < BLOCK_HEIGHT_MAX); let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND block_height = ?2 AND vtxindex = ?3 LIMIT 2"; - let args: &[&dyn ToSql] = &[sortition, &u64_to_sql(block_height)?, &vtxindex]; + let args: &[&dyn ToSql] = params![sortition, u64_to_sql(block_height)?, vtxindex]; query_row_panic(conn, qry, args, || { format!( "Multiple parent blocks at {},{} in {}", @@ -5118,10 +5119,10 @@ impl SortitionDB { }; let qry = "SELECT * FROM leader_keys WHERE sortition_id = ?1 AND block_height = ?2 AND vtxindex = ?3 LIMIT 2"; - let args: &[&dyn ToSql] = &[ - &ancestor_snapshot.sortition_id, - &u64_to_sql(key_block_height)?, - &key_vtxindex, + let args: &[&dyn ToSql] = params![ + ancestor_snapshot.sortition_id, + u64_to_sql(key_block_height)?, + key_vtxindex, ]; query_row_panic(ic, qry, args, || { format!( @@ -5156,8 +5157,8 @@ impl SortitionDB { }; let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND block_header_hash = ?2 AND txid = ?3"; - let args: [&dyn ToSql; 3] = [&sortition_id, &block_hash, &winning_txid]; - query_row_panic(conn, qry, &args, || { + let args: &[&dyn ToSql] = params![sortition_id, block_hash, winning_txid]; + query_row_panic(conn, qry, args, || { format!("FATAL: multiple block commits for {}", &block_hash) }) } @@ -5212,9 +5213,9 @@ impl SortitionDB { ) -> Result, db_error> { let sql = "SELECT * FROM epochs WHERE start_block_height <= ?1 AND ?2 < end_block_height LIMIT 1"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(burn_block_height)?, - &u64_to_sql(burn_block_height)?, + let args: &[&dyn ToSql] = params![ + u64_to_sql(burn_block_height)?, + u64_to_sql(burn_block_height)?, ]; query_row(conn, sql, args) } @@ -5243,7 +5244,7 @@ impl SortitionDB { epoch_id: &StacksEpochId, ) -> Result, db_error> { let sql = "SELECT * FROM epochs WHERE epoch_id = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[&(*epoch_id as u32)]; + let args: &[&dyn ToSql] = params![*epoch_id as u32]; query_row(conn, sql, args) } @@ -5480,9 +5481,9 @@ impl<'a> SortitionHandleTx<'a> { let create = "CREATE TABLE IF NOT EXISTS snapshot_burn_distributions (sortition_id TEXT PRIMARY KEY, data TEXT NOT NULL);"; self.execute(create, NO_PARAMS).unwrap(); let sql = "INSERT INTO snapshot_burn_distributions (sortition_id, data) VALUES (?, ?)"; - let args: &[&dyn ToSql] = &[ + let args: &[&dyn ToSql] = params![ new_sortition, - &serde_json::to_string(&transition.burn_dist).unwrap(), + serde_json::to_string(&transition.burn_dist).unwrap(), ]; self.execute(sql, args).unwrap(); } @@ -5501,10 +5502,10 @@ impl<'a> SortitionHandleTx<'a> { transition: &BurnchainStateTransition, ) -> Result<(), db_error> { let sql = "INSERT INTO snapshot_transition_ops (sortition_id, accepted_ops, consumed_keys) VALUES (?, ?, ?)"; - let args: &[&dyn ToSql] = &[ + let args: &[&dyn ToSql] = params![ new_sortition, - &serde_json::to_string(&transition.accepted_ops).unwrap(), - &serde_json::to_string(&transition.consumed_leader_keys).unwrap(), + serde_json::to_string(&transition.accepted_ops).unwrap(), + serde_json::to_string(&transition.consumed_leader_keys).unwrap(), ]; self.execute(sql, args)?; self.store_burn_distribution(new_sortition, transition); @@ -5592,14 +5593,14 @@ impl<'a> SortitionHandleTx<'a> { ) -> Result<(), db_error> { assert!(leader_key.block_height < BLOCK_HEIGHT_MAX); - let args: &[&dyn ToSql] = &[ - &leader_key.txid, - &leader_key.vtxindex, - &u64_to_sql(leader_key.block_height)?, - &leader_key.burn_header_hash, - &leader_key.consensus_hash, - &leader_key.public_key.to_hex(), - &to_hex(&leader_key.memo), + let args: &[&dyn ToSql] = params![ + leader_key.txid, + leader_key.vtxindex, + u64_to_sql(leader_key.block_height)?, + leader_key.burn_header_hash, + leader_key.consensus_hash, + leader_key.public_key.to_hex(), + to_hex(&leader_key.memo), sort_id, ]; @@ -5610,18 +5611,18 @@ impl<'a> SortitionHandleTx<'a> { /// Insert a stack-stx op fn insert_stack_stx(&mut self, op: &StackStxOp) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &op.txid, - &op.vtxindex, - &u64_to_sql(op.block_height)?, - &op.burn_header_hash, - &op.sender.to_string(), - &op.reward_addr.to_db_string(), - &op.stacked_ustx.to_string(), - &op.num_cycles, - &serde_json::to_string(&op.signer_key).unwrap(), - &serde_json::to_string(&op.max_amount).unwrap(), - &op.auth_id, + let args: &[&dyn ToSql] = params![ + op.txid, + op.vtxindex, + u64_to_sql(op.block_height)?, + op.burn_header_hash, + op.sender.to_string(), + op.reward_addr.to_db_string(), + op.stacked_ustx.to_string(), + op.num_cycles, + serde_json::to_string(&op.signer_key).unwrap(), + serde_json::to_string(&op.max_amount).unwrap(), + op.auth_id, ]; self.execute("REPLACE INTO stack_stx (txid, vtxindex, block_height, burn_header_hash, sender_addr, reward_addr, stacked_ustx, num_cycles, signer_key, max_amount, auth_id) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)", args)?; @@ -5631,16 +5632,16 @@ impl<'a> SortitionHandleTx<'a> { /// Insert a delegate-stx op fn insert_delegate_stx(&mut self, op: &DelegateStxOp) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &op.txid, - &op.vtxindex, - &u64_to_sql(op.block_height)?, - &op.burn_header_hash, - &op.sender.to_string(), - &op.delegate_to.to_string(), - &serde_json::to_string(&op.reward_addr).unwrap(), - &op.delegated_ustx.to_string(), - &opt_u64_to_sql(op.until_burn_height)?, + let args: &[&dyn ToSql] = params![ + op.txid, + op.vtxindex, + u64_to_sql(op.block_height)?, + op.burn_header_hash, + op.sender.to_string(), + op.delegate_to.to_string(), + serde_json::to_string(&op.reward_addr).unwrap(), + op.delegated_ustx.to_string(), + opt_u64_to_sql(op.until_burn_height)?, ]; self.execute("REPLACE INTO delegate_stx (txid, vtxindex, block_height, burn_header_hash, sender_addr, delegate_to, reward_addr, delegated_ustx, until_burn_height) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)", args)?; @@ -5653,17 +5654,17 @@ impl<'a> SortitionHandleTx<'a> { &mut self, op: &VoteForAggregateKeyOp, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &op.txid, - &op.vtxindex, - &u64_to_sql(op.block_height)?, - &op.burn_header_hash, - &op.sender.to_string(), - &serde_json::to_string(&op.aggregate_key).unwrap(), - &op.round, - &u64_to_sql(op.reward_cycle)?, - &op.signer_index, - &serde_json::to_string(&op.signer_key).unwrap(), + let args: &[&dyn ToSql] = params![ + op.txid, + op.vtxindex, + u64_to_sql(op.block_height)?, + op.burn_header_hash, + op.sender.to_string(), + serde_json::to_string(&op.aggregate_key).unwrap(), + op.round, + u64_to_sql(op.reward_cycle)?, + op.signer_index, + serde_json::to_string(&op.signer_key).unwrap(), ]; self.execute("REPLACE INTO vote_for_aggregate_key (txid, vtxindex, block_height, burn_header_hash, sender_addr, aggregate_key, round, reward_cycle, signer_index, signer_key) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)", args)?; @@ -5673,15 +5674,15 @@ impl<'a> SortitionHandleTx<'a> { /// Insert a transfer-stx op fn insert_transfer_stx(&mut self, op: &TransferStxOp) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &op.txid, - &op.vtxindex, - &u64_to_sql(op.block_height)?, - &op.burn_header_hash, - &op.sender.to_string(), - &op.recipient.to_string(), - &op.transfered_ustx.to_string(), - &to_hex(&op.memo), + let args: &[&dyn ToSql] = params![ + op.txid, + op.vtxindex, + u64_to_sql(op.block_height)?, + op.burn_header_hash, + op.sender.to_string(), + op.recipient.to_string(), + op.transfered_ustx.to_string(), + to_hex(&op.memo), ]; self.execute("REPLACE INTO transfer_stx (txid, vtxindex, block_height, burn_header_hash, sender_addr, recipient_addr, transfered_ustx, memo) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", args)?; @@ -5722,32 +5723,32 @@ impl<'a> SortitionHandleTx<'a> { } } - let args: &[&dyn ToSql] = &[ - &block_commit.txid, - &block_commit.vtxindex, - &u64_to_sql(block_commit.block_height)?, - &block_commit.burn_header_hash, - &block_commit.block_header_hash, - &block_commit.new_seed, - &block_commit.parent_block_ptr, - &block_commit.parent_vtxindex, - &block_commit.key_block_ptr, - &block_commit.key_vtxindex, - &to_hex(&block_commit.memo[..]), - &block_commit.burn_fee.to_string(), - &tx_input_str, + let args: &[&dyn ToSql] = params![ + block_commit.txid, + block_commit.vtxindex, + u64_to_sql(block_commit.block_height)?, + block_commit.burn_header_hash, + block_commit.block_header_hash, + block_commit.new_seed, + block_commit.parent_block_ptr, + block_commit.parent_vtxindex, + block_commit.key_block_ptr, + block_commit.key_vtxindex, + to_hex(&block_commit.memo[..]), + block_commit.burn_fee.to_string(), + tx_input_str, sort_id, - &serde_json::to_value(&block_commit.commit_outs).unwrap(), - &block_commit.sunset_burn.to_string(), - &apparent_sender_str, - &block_commit.burn_parent_modulus, - &serde_json::to_string(&block_commit.treatment).unwrap(), + serde_json::to_value(&block_commit.commit_outs).unwrap(), + block_commit.sunset_burn.to_string(), + apparent_sender_str, + block_commit.burn_parent_modulus, + serde_json::to_string(&block_commit.treatment).unwrap(), ]; self.execute("INSERT INTO block_commits (txid, vtxindex, block_height, burn_header_hash, block_header_hash, new_seed, parent_block_ptr, parent_vtxindex, key_block_ptr, key_vtxindex, memo, burn_fee, input, sortition_id, commit_outs, sunset_burn, apparent_sender, burn_parent_modulus, punished) \ VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19)", args)?; - let parent_args: &[&dyn ToSql] = &[sort_id, &block_commit.txid, &parent_sortition_id]; + let parent_args: &[&dyn ToSql] = params![sort_id, block_commit.txid, parent_sortition_id]; debug!( "Parent sortition of {},{},{} is {} (parent at {},{})", @@ -5775,7 +5776,7 @@ impl<'a> SortitionHandleTx<'a> { let tx_input_str = serde_json::to_string(&op.input).map_err(|e| db_error::SerializationError(e))?; - let args: &[&dyn ToSql] = &[&op.txid, &op.intended_sortition, &tx_input_str]; + let args: &[&dyn ToSql] = params![op.txid, op.intended_sortition, tx_input_str]; self.execute( "INSERT OR REPLACE INTO missed_commits (txid, intended_sortition_id, input) \ @@ -5827,32 +5828,32 @@ impl<'a> SortitionHandleTx<'a> { } } - let args: &[&dyn ToSql] = &[ - &u64_to_sql(snapshot.block_height)?, - &snapshot.burn_header_hash, - &u64_to_sql(snapshot.burn_header_timestamp)?, - &snapshot.parent_burn_header_hash, - &snapshot.consensus_hash, - &snapshot.ops_hash, - &snapshot.total_burn.to_string(), - &snapshot.sortition, - &snapshot.sortition_hash, - &snapshot.winning_block_txid, - &snapshot.winning_stacks_block_hash, - &snapshot.index_root, - &u64_to_sql(snapshot.num_sortitions)?, - &snapshot.stacks_block_accepted, - &u64_to_sql(snapshot.stacks_block_height)?, - &u64_to_sql(snapshot.arrival_index)?, - &u64_to_sql(snapshot.canonical_stacks_tip_height)?, - &snapshot.canonical_stacks_tip_hash, - &snapshot.canonical_stacks_tip_consensus_hash, - &snapshot.sortition_id, - &snapshot.parent_sortition_id, - &snapshot.pox_valid, - &snapshot.accumulated_coinbase_ustx.to_string(), - &pox_payouts_json, - &snapshot.miner_pk_hash, + let args: &[&dyn ToSql] = params![ + u64_to_sql(snapshot.block_height)?, + snapshot.burn_header_hash, + u64_to_sql(snapshot.burn_header_timestamp)?, + snapshot.parent_burn_header_hash, + snapshot.consensus_hash, + snapshot.ops_hash, + snapshot.total_burn.to_string(), + snapshot.sortition, + snapshot.sortition_hash, + snapshot.winning_block_txid, + snapshot.winning_stacks_block_hash, + snapshot.index_root, + u64_to_sql(snapshot.num_sortitions)?, + snapshot.stacks_block_accepted, + u64_to_sql(snapshot.stacks_block_height)?, + u64_to_sql(snapshot.arrival_index)?, + u64_to_sql(snapshot.canonical_stacks_tip_height)?, + snapshot.canonical_stacks_tip_hash, + snapshot.canonical_stacks_tip_consensus_hash, + snapshot.sortition_id, + snapshot.parent_sortition_id, + snapshot.pox_valid, + snapshot.accumulated_coinbase_ustx.to_string(), + pox_payouts_json, + snapshot.miner_pk_hash, ]; self.execute("INSERT INTO snapshots \ @@ -6457,11 +6458,11 @@ impl<'a> SortitionHandleTx<'a> { best_bhh: BlockHeaderHash, best_height: u64, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &best_chh, - &best_bhh, - &u64_to_sql(best_height)?, - &u64_to_sql(tip.block_height)?, + let args: &[&dyn ToSql] = params![ + best_chh, + best_bhh, + u64_to_sql(best_height)?, + u64_to_sql(tip.block_height)?, ]; debug!( @@ -6729,7 +6730,7 @@ pub mod tests { let pox_payouts_json = serde_json::to_string(&pox_payout) .expect("FATAL: could not encode `total_pox_payouts` as JSON"); - let args = params![ + let args: &[&dyn ToSql] = params![ u64_to_sql(first_snapshot.block_height)?, first_snapshot.burn_header_hash, u64_to_sql(first_snapshot.burn_header_timestamp)?, @@ -6787,7 +6788,7 @@ pub mod tests { height: u64, ) -> Result<(), db_error> { let tip = SortitionDB::get_canonical_burn_chain_tip(conn)?; - let args: &[&dyn ToSql] = &[ch, bhh, &u64_to_sql(height)?, &tip.sortition_id]; + let args: &[&dyn ToSql] = params![ch, bhh, u64_to_sql(height)?, tip.sortition_id]; conn.execute("UPDATE snapshots SET canonical_stacks_tip_consensus_hash = ?1, canonical_stacks_tip_hash = ?2, canonical_stacks_tip_height = ?3 WHERE sortition_id = ?4", args) .map_err(db_error::SqliteError)?; @@ -6864,7 +6865,7 @@ pub mod tests { let apparent_sender_str = serde_json::to_string(sender).map_err(|e| db_error::SerializationError(e))?; let sql = "SELECT * FROM block_commits WHERE apparent_sender = ?1 ORDER BY block_height DESC LIMIT 1"; - let args = params![apparent_sender_str]; + let args: &[&dyn ToSql] = params![apparent_sender_str]; query_row(conn, sql, args) } } diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 47e9a4eaac..0252e2ec19 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -29,8 +29,8 @@ use clarity::vm::types::{PrincipalData, StacksAddressExtensions, TupleData}; use clarity::vm::{ClarityVersion, SymbolicExpression, Value}; use lazy_static::{__Deref, lazy_static}; use rusqlite::blob::Blob; -use rusqlite::types::{FromSql, FromSqlError}; -use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql}; +use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension}; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::bitvec::BitVec; use stacks_common::codec::{ @@ -127,13 +127,13 @@ define_named_enum!(HeaderTypeNames { }); impl ToSql for HeaderTypeNames { - fn to_sql(&self) -> rusqlite::Result> { + fn to_sql(&self) -> rusqlite::Result> { self.get_name_str().to_sql() } } impl FromSql for HeaderTypeNames { - fn column_result(value: rusqlite::types::ValueRef<'_>) -> rusqlite::types::FromSqlResult { + fn column_result(value: rusqlite::types::ValueRef<'_>) -> FromSqlResult { Self::lookup_by_name(value.as_str()?).ok_or_else(|| FromSqlError::InvalidType) } } @@ -2289,7 +2289,7 @@ impl NakamotoChainState { block_hash: &BlockHeaderHash, ) -> Result, ChainstateError> { let sql = "SELECT processed, orphaned FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args: &[&dyn ToSql] = &[consensus_hash, block_hash]; + let args: &[&dyn ToSql] = params![consensus_hash, block_hash]; let Some((processed, orphaned)) = query_row_panic(&staging_blocks_conn, sql, args, || { "FATAL: multiple rows for the same consensus hash and block hash".to_string() }) @@ -2327,7 +2327,7 @@ impl NakamotoChainState { consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT vrf_proof FROM nakamoto_block_headers WHERE consensus_hash = ?1 AND tenure_changed = 1"; - let args: &[&dyn ToSql] = &[consensus_hash]; + let args: &[&dyn ToSql] = params![consensus_hash]; let proof_bytes: Option = query_row(chainstate_conn, sql, args)?; if let Some(bytes) = proof_bytes { let proof = VRFProof::from_hex(&bytes) @@ -2415,32 +2415,32 @@ impl NakamotoChainState { )) })?; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(*stacks_block_height)?, - &index_root, - &consensus_hash, - &burn_header_hash, - &burn_header_height, - &u64_to_sql(*burn_header_timestamp)?, - &block_size_str, - &HeaderTypeNames::Nakamoto, - &header.version, - &u64_to_sql(header.chain_length)?, - &u64_to_sql(header.burn_spent)?, - &header.miner_signature, - &signer_signature, - &header.tx_merkle_root, - &header.state_index_root, - &u64_to_sql(header.timestamp)?, - &block_hash, - &index_block_hash, + let args: &[&dyn ToSql] = params![ + u64_to_sql(*stacks_block_height)?, + index_root, + consensus_hash, + burn_header_hash, + burn_header_height, + u64_to_sql(*burn_header_timestamp)?, + block_size_str, + HeaderTypeNames::Nakamoto, + header.version, + u64_to_sql(header.chain_length)?, + u64_to_sql(header.burn_spent)?, + header.miner_signature, + signer_signature, + header.tx_merkle_root, + header.state_index_root, + u64_to_sql(header.timestamp)?, + block_hash, + index_block_hash, block_cost, total_tenure_cost, - &tenure_tx_fees.to_string(), - &header.parent_block_id, - if tenure_changed { &1i64 } else { &0i64 }, - &vrf_proof_bytes.as_ref(), - &header.pox_treatment, + tenure_tx_fees.to_string(), + header.parent_block_id, + if tenure_changed { 1i64 } else { 0i64 }, + vrf_proof_bytes.as_ref(), + header.pox_treatment, tip_info.burn_view.as_ref().ok_or_else(|| { error!( "Attempted to store nakamoto block header information without burnchain view"; @@ -2622,7 +2622,7 @@ impl NakamotoChainState { if applied_epoch_transition { debug!("Block {} applied an epoch transition", &index_block_hash); let sql = "INSERT INTO epoch_transitions (block_id) VALUES (?)"; - let args: &[&dyn ToSql] = &[&index_block_hash]; + let args: &[&dyn ToSql] = params![index_block_hash]; headers_tx.deref_mut().execute(sql, args)?; } @@ -2639,7 +2639,7 @@ impl NakamotoChainState { reward_set: &RewardSet, ) -> Result<(), ChainstateError> { let sql = "INSERT INTO nakamoto_reward_sets (index_block_hash, reward_set) VALUES (?, ?)"; - let args = params![block_id, reward_set.metadata_serialize(),]; + let args: &[&dyn ToSql] = params![block_id, reward_set.metadata_serialize(),]; tx.execute(sql, args)?; Ok(()) } diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index 26af3ff8d3..103248d240 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -25,8 +25,8 @@ use clarity::vm::types::{ }; use clarity::vm::{ClarityVersion, ContractName, SymbolicExpression, Value}; use lazy_static::{__Deref, lazy_static}; -use rusqlite::types::{FromSql, FromSqlError}; -use rusqlite::{params, Connection, OptionalExtension, ToSql}; +use rusqlite::types::{FromSql, FromSqlError, ToSql}; +use rusqlite::{params, Connection, OptionalExtension}; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::bitvec::BitVec; use stacks_common::codec::{ diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 26878f3b1e..59a82f3726 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -20,8 +20,8 @@ use std::path::PathBuf; use lazy_static::lazy_static; use rusqlite::blob::Blob; -use rusqlite::types::{FromSql, FromSqlError}; -use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql}; +use rusqlite::types::{FromSql, FromSqlError, ToSql}; +use rusqlite::{params, Connection, OpenFlags, OptionalExtension}; use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; @@ -188,7 +188,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { // this block must be a processed Nakamoto block let ibh = StacksBlockId::new(&ch, &bhh); let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE processed = 1 AND index_block_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[&ibh]; + let args: &[&dyn ToSql] = params![ibh]; let res: Option = query_row(self, qry, args)?; Ok(res.is_some()) } @@ -202,7 +202,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result { let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = &[index_block_hash]; + let args: &[&dyn ToSql] = params![index_block_hash]; let res: Option = query_row(self, qry, args)?; Ok(res.is_some()) } @@ -213,7 +213,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let qry = "SELECT data FROM nakamoto_staging_blocks WHERE is_tenure_start = 1 AND consensus_hash = ?1"; - let args: &[&dyn ToSql] = &[consensus_hash]; + let args: &[&dyn ToSql] = params![consensus_hash]; let data: Option> = query_row(self, qry, args)?; let Some(block_bytes) = data else { return Ok(None); @@ -235,7 +235,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT rowid FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = &[index_block_hash]; + let args: &[&dyn ToSql] = params![index_block_hash]; let res: Option = query_row(self, sql, args)?; Ok(res) } @@ -250,7 +250,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let qry = "SELECT data FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = &[index_block_hash]; + let args: &[&dyn ToSql] = params![index_block_hash]; let res: Option> = query_row(self, qry, args)?; let Some(block_bytes) = res else { return Ok(None); @@ -279,7 +279,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let qry = "SELECT length(data) FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = &[index_block_hash]; + let args: &[&dyn ToSql] = params![index_block_hash]; let res = query_row(self, qry, args)? .map(|size: i64| u64::try_from(size).expect("FATAL: block size exceeds i64::MAX")); Ok(res) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 6589e8d0a0..51b824077b 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -67,8 +67,8 @@ use clarity::vm::database::BurnStateDB; use clarity::vm::events::StacksTransactionEvent; use clarity::vm::types::StacksAddressExtensions; use lazy_static::{__Deref, lazy_static}; -use rusqlite::types::{FromSql, FromSqlError}; -use rusqlite::{params, Connection, OptionalExtension, ToSql}; +use rusqlite::types::{FromSql, FromSqlError, ToSql}; +use rusqlite::{params, Connection, OptionalExtension}; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::codec::{ read_next, write_next, Error as CodecError, StacksMessageCodec, MAX_MESSAGE_LEN, @@ -438,7 +438,7 @@ impl NakamotoChainState { ) -> Result { // a tenure will have been processed if any of its children have been processed let sql = "SELECT 1 FROM nakamoto_tenures WHERE prev_tenure_id_consensus_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[tenure_id_consensus_hash]; + let args: &[&dyn ToSql] = params![tenure_id_consensus_hash]; let found: Option = query_row(conn, sql, args)?; Ok(found.is_some()) } @@ -454,16 +454,16 @@ impl NakamotoChainState { ) -> Result<(), ChainstateError> { // NOTE: this is checked with check_nakamoto_tenure() assert_eq!(block_header.consensus_hash, tenure.tenure_consensus_hash); - let args: &[&dyn ToSql] = &[ - &tenure.tenure_consensus_hash, - &tenure.prev_tenure_consensus_hash, - &tenure.burn_view_consensus_hash, - &tenure.cause.as_u8(), - &block_header.block_hash(), - &block_header.block_id(), - &u64_to_sql(coinbase_height)?, - &u64_to_sql(tenure_index)?, - &tenure.previous_tenure_blocks, + let args: &[&dyn ToSql] = params![ + tenure.tenure_consensus_hash, + tenure.prev_tenure_consensus_hash, + tenure.burn_view_consensus_hash, + tenure.cause.as_u8(), + block_header.block_hash(), + block_header.block_id(), + u64_to_sql(coinbase_height)?, + u64_to_sql(tenure_index)?, + tenure.previous_tenure_blocks, ]; tx.execute( "INSERT INTO nakamoto_tenures @@ -511,7 +511,7 @@ impl NakamotoChainState { consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT prev_tenure_id_consensus_hash AS consensus_hash FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = &[consensus_hash]; + let args: &[&dyn ToSql] = params![consensus_hash]; query_row(chainstate_conn, sql, args).map_err(ChainstateError::DBError) } @@ -577,7 +577,7 @@ impl NakamotoChainState { tenure_consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = &[&tenure_consensus_hash]; + let args: &[&dyn ToSql] = params![tenure_consensus_hash]; let tenure_opt: Option = query_row(headers_conn, sql, args)?; Ok(tenure_opt) } @@ -588,7 +588,7 @@ impl NakamotoChainState { burn_view: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_tenures WHERE burn_view_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let args = params![burn_view]; + let args: &[&dyn ToSql] = params![burn_view]; let tenure_opt: Option = query_row(headers_conn, sql, args)?; Ok(tenure_opt) } @@ -601,9 +601,9 @@ impl NakamotoChainState { tenure_id_consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 AND cause = ?2 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = &[ + let args: &[&dyn ToSql] = params![ tenure_id_consensus_hash, - &TenureChangeCause::BlockFound.as_u8(), + TenureChangeCause::BlockFound.as_u8(), ]; let tenure_opt: Option = query_row(headers_conn, sql, args)?; Ok(tenure_opt) diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index bf1a09fc17..b2a2127b40 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -26,7 +26,8 @@ use clarity::vm::types::StacksAddressExtensions; use clarity::vm::Value; use libstackerdb::StackerDBChunkData; use rand::{thread_rng, RngCore}; -use rusqlite::{Connection, ToSql}; +use rusqlite::types::ToSql; +use rusqlite::{params, Connection}; use stacks_common::address::AddressHashMode; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; @@ -180,7 +181,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { tenure_id_consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let qry = "SELECT data FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 ORDER BY height ASC"; - let args: &[&dyn ToSql] = &[tenure_id_consensus_hash]; + let args: &[&dyn ToSql] = params![tenure_id_consensus_hash]; let block_data: Vec> = query_rows(self, qry, args)?; let mut blocks = Vec::with_capacity(block_data.len()); for data in block_data.into_iter() { diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index f32336e657..4d15c001a2 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -20,9 +20,8 @@ use clarity::types::chainstate::TenureBlockId; use clarity::vm::database::clarity_store::*; use clarity::vm::database::*; use clarity::vm::types::*; -use rusqlite::params; use rusqlite::types::ToSql; -use rusqlite::Row; +use rusqlite::{params, Row}; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use crate::burnchains::Address; @@ -415,24 +414,24 @@ impl StacksChainState { } }; - let args: &[&dyn ToSql] = &[ - &block_reward.address.to_string(), - &block_reward.recipient.to_string(), - &block_reward.block_hash, - &block_reward.consensus_hash, - &block_reward.parent_block_hash, - &block_reward.parent_consensus_hash, - &block_reward.coinbase.to_string(), - &db_tx_fees_anchored.to_string(), - &db_tx_fees_streamed.to_string(), - &u64_to_sql(block_reward.burnchain_commit_burn)?, - &u64_to_sql(block_reward.burnchain_sortition_burn)?, - &u64_to_sql(block_reward.stacks_block_height)?, - &true, - &0i64, - &index_block_hash, - &payment_type, - &"0".to_string(), + let args: &[&dyn ToSql] = params![ + block_reward.address.to_string(), + block_reward.recipient.to_string(), + block_reward.block_hash, + block_reward.consensus_hash, + block_reward.parent_block_hash, + block_reward.parent_consensus_hash, + block_reward.coinbase.to_string(), + db_tx_fees_anchored.to_string(), + db_tx_fees_streamed.to_string(), + u64_to_sql(block_reward.burnchain_commit_burn)?, + u64_to_sql(block_reward.burnchain_sortition_burn)?, + u64_to_sql(block_reward.stacks_block_height)?, + true, + 0i64, + index_block_hash, + payment_type, + "0".to_string(), ]; tx.execute( @@ -505,14 +504,14 @@ impl StacksChainState { child_index_block_hash ) VALUES (?1,?2,?3,?4,?5,?6,?7,?8,?9)"; - let args: &[&dyn ToSql] = &[ - &reward.address.to_string(), - &reward.recipient.to_string(), - &reward.vtxindex, - &reward.coinbase.to_string(), - &reward.tx_fees_anchored.to_string(), - &reward.tx_fees_streamed_confirmed.to_string(), - &reward.tx_fees_streamed_produced.to_string(), + let args: &[&dyn ToSql] = params![ + reward.address.to_string(), + reward.recipient.to_string(), + reward.vtxindex, + reward.coinbase.to_string(), + reward.tx_fees_anchored.to_string(), + reward.tx_fees_streamed_confirmed.to_string(), + reward.tx_fees_streamed_produced.to_string(), parent_block_id, child_block_id, ]; @@ -614,7 +613,7 @@ impl StacksChainState { child_block_id: &TenureBlockId, ) -> Result, Error> { let sql = "SELECT * FROM matured_rewards WHERE parent_index_block_hash = ?1 AND child_index_block_hash = ?2 AND vtxindex = 0"; - let args: &[&dyn ToSql] = &[&parent_block_id.0, &child_block_id.0]; + let args: &[&dyn ToSql] = params![parent_block_id.0, child_block_id.0]; let ret: Vec = query_rows(conn, sql, args).map_err(|e| Error::DBError(e))?; Ok(ret) } @@ -677,7 +676,7 @@ impl StacksChainState { ) -> Result, Error> { let qry = "SELECT * FROM payments WHERE index_block_hash = ?1 ORDER BY vtxindex ASC".to_string(); - let args: &[&dyn ToSql] = &[index_block_hash]; + let args: &[&dyn ToSql] = params![index_block_hash]; let rows = query_rows::(conn, &qry, args).map_err(Error::DBError)?; test_debug!("{} rewards in {}", rows.len(), index_block_hash); @@ -699,9 +698,9 @@ impl StacksChainState { }; let qry = "SELECT * FROM payments WHERE block_hash = ?1 AND consensus_hash = ?2 ORDER BY vtxindex ASC".to_string(); - let args: &[&dyn ToSql] = &[ - &ancestor_info.anchored_header.block_hash(), - &ancestor_info.consensus_hash, + let args: &[&dyn ToSql] = params![ + ancestor_info.anchored_header.block_hash(), + ancestor_info.consensus_hash, ]; let rows = query_rows::(tx, &qry, args).map_err(Error::DBError)?; test_debug!( @@ -735,10 +734,7 @@ impl StacksChainState { let qry = "SELECT * FROM payments WHERE consensus_hash = ?1 AND block_hash = ?2 AND miner = 1" .to_string(); - let args = params![ - consensus_hash, - stacks_block_hash, - ]; + let args: &[&dyn ToSql] = params![consensus_hash, stacks_block_hash,]; let mut rows = query_rows::(conn, &qry, args).map_err(Error::DBError)?; let len = rows.len(); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index ab696ac251..d03941464f 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -34,7 +34,10 @@ use clarity::vm::types::{ TypeSignature, Value, }; use rand::{thread_rng, Rng, RngCore}; -use rusqlite::{params, Connection, DatabaseName, Error as sqlite_error, OptionalExtension, Params}; +use rusqlite::types::ToSql; +use rusqlite::{ + params, Connection, DatabaseName, Error as sqlite_error, OptionalExtension, Params, +}; use serde::Serialize; use serde_json::json; use stacks_common::bitvec::BitVec; @@ -767,11 +770,11 @@ impl StacksChainState { for (consensus_hash, block_hash) in blocks.drain(..) { let list_microblock_sql = "SELECT * FROM staging_microblocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 ORDER BY sequence".to_string(); - let list_microblock_args: [&dyn ToSql; 2] = [&block_hash, &consensus_hash]; + let list_microblock_args: &[&dyn ToSql] = params![block_hash, consensus_hash]; let mut microblocks = query_rows::( blocks_conn, &list_microblock_sql, - &list_microblock_args, + list_microblock_args, ) .map_err(Error::DBError)?; @@ -961,7 +964,7 @@ impl StacksChainState { minimum_block_height: i64, ) -> bool { let sql = "SELECT 1 FROM staging_blocks WHERE microblock_pubkey_hash = ?1 AND height >= ?2"; - let args: &[&dyn ToSql] = &[pubkey_hash, &minimum_block_height]; + let args: &[&dyn ToSql] = params![pubkey_hash, minimum_block_height]; block_conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -977,7 +980,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND orphaned = 0 AND processed = 0".to_string(); - let args: &[&dyn ToSql] = &[&block_hash, &consensus_hash]; + let args: &[&dyn ToSql] = params![block_hash, consensus_hash]; let mut rows = query_rows::(block_conn, &sql, args).map_err(Error::DBError)?; let len = rows.len(); @@ -1006,7 +1009,7 @@ impl StacksChainState { index_block_hash: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE index_block_hash = ?1 AND orphaned = 0"; - let args: &[&dyn ToSql] = &[&index_block_hash]; + let args: &[&dyn ToSql] = params![index_block_hash]; query_row::(block_conn, sql, args).map_err(Error::DBError) } @@ -1056,7 +1059,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT microblock_pubkey_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND processed = 0 AND orphaned = 0"; - let args: &[&dyn ToSql] = &[&block_hash, &consensus_hash]; + let args: &[&dyn ToSql] = params![block_hash, consensus_hash]; let rows = query_row_columns::(block_conn, sql, args, "microblock_pubkey_hash") .map_err(Error::DBError)?; match rows.len() { @@ -1111,7 +1114,7 @@ impl StacksChainState { microblock_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 AND orphaned = 0 LIMIT 1"; - let args: &[&dyn ToSql] = &[&parent_index_block_hash, µblock_hash]; + let args: &[&dyn ToSql] = params![parent_index_block_hash, microblock_hash]; query_row::(blocks_conn, sql, args).map_err(Error::DBError) } @@ -1124,7 +1127,7 @@ impl StacksChainState { index_microblock_hash: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE index_microblock_hash = ?1 AND orphaned = 0 LIMIT 1"; - let args: &[&dyn ToSql] = &[&index_microblock_hash]; + let args: &[&dyn ToSql] = params![index_microblock_hash]; query_row::(blocks_conn, sql, args).map_err(Error::DBError) } @@ -1329,7 +1332,7 @@ impl StacksChainState { "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence >= ?2 AND sequence < ?3 AND orphaned = 0 ORDER BY sequence ASC".to_string() }; - let args: &[&dyn ToSql] = &[parent_index_block_hash, &start_seq, &last_seq]; + let args: &[&dyn ToSql] = params![parent_index_block_hash, start_seq, last_seq]; let staging_microblocks = query_rows::(blocks_conn, &sql, args).map_err(Error::DBError)?; @@ -1563,7 +1566,7 @@ impl StacksChainState { let has_unprocessed_parent_sql = "SELECT anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND processed = 0 AND orphaned = 0 LIMIT 1"; let has_parent_sql = "SELECT anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 LIMIT 1"; let has_parent_args: &[&dyn ToSql] = - &[&block.header.parent_block, &parent_consensus_hash]; + params![block.header.parent_block, parent_consensus_hash]; let has_unprocessed_parent_rows = query_row_columns::( &tx, has_unprocessed_parent_sql, @@ -1707,7 +1710,7 @@ impl StacksChainState { let block_sql = "INSERT OR REPLACE INTO staging_microblocks_data \ (block_hash, block_data) VALUES (?1, ?2)"; - let block_args: &[&dyn ToSql] = &[µblock.block_hash(), µblock_bytes]; + let block_args: &[&dyn ToSql] = params![microblock.block_hash(), microblock_bytes]; tx.execute(&block_sql, block_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -1852,7 +1855,7 @@ impl StacksChainState { }; let sql = "SELECT 1 FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 AND processed = 1 AND orphaned = 0"; - let args: &[&dyn ToSql] = &[&parent_index_block_hash, &parent_microblock_hash]; + let args: &[&dyn ToSql] = params![parent_index_block_hash, parent_microblock_hash]; let res = self .db() .query_row(sql, args, |_r| Ok(())) @@ -2025,10 +2028,8 @@ impl StacksChainState { ); let sql = "SELECT COALESCE(MIN(block_height), 0), COALESCE(MAX(block_height), 0) FROM block_headers WHERE burn_header_height >= ?1 AND burn_header_height < ?2"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(burn_height_start)?, - &u64_to_sql(burn_height_end)?, - ]; + let args: &[&dyn ToSql] = + params![u64_to_sql(burn_height_start)?, u64_to_sql(burn_height_end)?,]; self.db() .query_row(sql, args, |row| { @@ -2076,7 +2077,7 @@ impl StacksChainState { FROM staging_blocks LEFT JOIN staging_microblocks \ ON staging_blocks.parent_microblock_hash = staging_microblocks.microblock_hash \ WHERE staging_blocks.height >= ?1 AND staging_blocks.height <= ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(start_height)?, &u64_to_sql(end_height)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; let mut stmt = self.db().prepare(sql)?; @@ -2153,7 +2154,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let qry = "SELECT consensus_hash FROM staging_blocks WHERE anchored_block_hash = ?1"; - let args: &[&dyn ToSql] = &[block_hash]; + let args: &[&dyn ToSql] = params![block_hash]; query_rows(conn, qry, args).map_err(|e| e.into()) } @@ -2299,16 +2300,17 @@ impl StacksChainState { ) -> Result<(), Error> { // This block is orphaned let update_block_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 1, attachable = 0 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let update_block_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let update_block_args: &[&dyn ToSql] = params![consensus_hash, anchored_block_hash]; // All descendants of this processed block are never attachable. // Indicate this by marking all children as orphaned (but not procesed), across all burnchain forks. let update_children_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 0, attachable = 0 WHERE parent_consensus_hash = ?1 AND parent_anchored_block_hash = ?2"; - let update_children_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let update_children_args: &[&dyn ToSql] = params![consensus_hash, anchored_block_hash]; // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let find_orphaned_microblocks_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let find_orphaned_microblocks_args: &[&dyn ToSql] = + params![consensus_hash, anchored_block_hash]; let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, @@ -2318,7 +2320,8 @@ impl StacksChainState { // drop microblocks (this processes them) let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let update_microblock_children_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let update_microblock_children_args: &[&dyn ToSql] = + params![consensus_hash, anchored_block_hash]; tx.execute(update_block_sql, update_block_args)?; @@ -2365,7 +2368,7 @@ impl StacksChainState { ); let sql = "DELETE FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 1 AND processed = 1"; - let args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let args: &[&dyn ToSql] = params![consensus_hash, anchored_block_hash]; tx.execute(sql, args)?; @@ -2389,7 +2392,7 @@ impl StacksChainState { accept: bool, ) -> Result<(), Error> { let sql = "SELECT * FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 0".to_string(); - let args: &[&dyn ToSql] = &[&consensus_hash, &anchored_block_hash]; + let args: &[&dyn ToSql] = params![consensus_hash, anchored_block_hash]; let has_stored_block = StacksChainState::has_stored_block( tx, @@ -2403,7 +2406,7 @@ impl StacksChainState { 0 => { // not an error if this block was already orphaned let orphan_sql = "SELECT * FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 1".to_string(); - let orphan_args: &[&dyn ToSql] = &[&consensus_hash, &anchored_block_hash]; + let orphan_args: &[&dyn ToSql] = params![consensus_hash, anchored_block_hash]; let orphan_rows = query_rows::(tx, &orphan_sql, orphan_args) .map_err(Error::DBError)?; if orphan_rows.len() == 1 { @@ -2457,10 +2460,10 @@ impl StacksChainState { } let update_sql = "UPDATE staging_blocks SET processed = 1, processed_time = ?1 WHERE consensus_hash = ?2 AND anchored_block_hash = ?3".to_string(); - let update_args: &[&dyn ToSql] = &[ - &u64_to_sql(get_epoch_time_secs())?, - &consensus_hash, - &anchored_block_hash, + let update_args: &[&dyn ToSql] = params![ + u64_to_sql(get_epoch_time_secs())?, + consensus_hash, + anchored_block_hash, ]; tx.execute(&update_sql, update_args) @@ -2525,11 +2528,12 @@ impl StacksChainState { &index_block_hash ); let update_block_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 1, attachable = 0 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string(); - let update_block_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let update_block_args: &[&dyn ToSql] = params![consensus_hash, anchored_block_hash]; // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let find_orphaned_microblocks_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let find_orphaned_microblocks_args: &[&dyn ToSql] = + params![consensus_hash, anchored_block_hash]; let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, @@ -2546,7 +2550,8 @@ impl StacksChainState { &index_block_hash ); let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string(); - let update_microblock_children_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash]; + let update_microblock_children_args: &[&dyn ToSql] = + params![consensus_hash, anchored_block_hash]; tx.execute(&update_block_sql, update_block_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -2582,7 +2587,8 @@ impl StacksChainState { ) -> Result<(), Error> { // find offending sequence let seq_sql = "SELECT sequence FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND microblock_hash = ?3 AND processed = 0 AND orphaned = 0".to_string(); - let seq_args: &[&dyn ToSql] = &[&consensus_hash, &anchored_block_hash, &invalid_block_hash]; + let seq_args: &[&dyn ToSql] = + params![consensus_hash, anchored_block_hash, invalid_block_hash]; let seq = match query_int::<_>(tx, &seq_sql, seq_args) { Ok(seq) => seq, Err(e) => match e { @@ -2603,7 +2609,7 @@ impl StacksChainState { // drop staging children at and beyond the invalid block let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE anchored_block_hash = ?1 AND sequence >= ?2".to_string(); - let update_microblock_children_args: &[&dyn ToSql] = &[&anchored_block_hash, &seq]; + let update_microblock_children_args: &[&dyn ToSql] = params![anchored_block_hash, seq]; tx.execute( &update_microblock_children_sql, @@ -2613,7 +2619,7 @@ impl StacksChainState { // find all orphaned microblocks hashes, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE anchored_block_hash = ?1 AND sequence >= ?2"; - let find_orphaned_microblocks_args: &[&dyn ToSql] = &[&anchored_block_hash, &seq]; + let find_orphaned_microblocks_args: &[&dyn ToSql] = params![anchored_block_hash, seq]; let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, @@ -2668,7 +2674,8 @@ impl StacksChainState { test_debug!("Set {}-{} processed", &parent_index_hash, &mblock_hash); // confirm this microblock - let args: &[&dyn ToSql] = &[&parent_consensus_hash, &parent_block_hash, &mblock_hash]; + let args: &[&dyn ToSql] = + params![parent_consensus_hash, parent_block_hash, mblock_hash]; tx.execute(sql, args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -2739,7 +2746,7 @@ impl StacksChainState { index_microblock_hash: &StacksBlockId, ) -> Result { let sql = "SELECT 1 FROM staging_microblocks WHERE index_microblock_hash = ?1 AND processed = 1 AND orphaned = 0"; - let args: &[&dyn ToSql] = &[index_microblock_hash]; + let args: &[&dyn ToSql] = params![index_microblock_hash]; let res = conn .query_row(&sql, args, |_r| Ok(())) .optional() @@ -2833,7 +2840,7 @@ impl StacksChainState { "SELECT {},{} FROM staging_blocks WHERE index_block_hash = ?1", consensus_hash_col, anchored_block_col ); - let args = params![index_block_hash]; + let args: &[&dyn ToSql] = params![index_block_hash]; blocks_db .query_row(&sql, args, |row| { @@ -2885,10 +2892,7 @@ impl StacksChainState { staging_microblocks JOIN staging_microblocks_data \ ON staging_microblocks.microblock_hash = staging_microblocks_data.block_hash \ WHERE staging_microblocks.index_block_hash = ?1 AND staging_microblocks.microblock_hash = ?2"; - let args = params![ - parent_index_block_hash, - microblock_hash, - ]; + let args: &[&dyn ToSql] = params![parent_index_block_hash, microblock_hash,]; query_row(blocks_conn, sql, args).map_err(Error::DBError) } @@ -2901,7 +2905,7 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 ORDER BY sequence" .to_string(); - let args = params![parent_index_block_hash]; + let args: &[&dyn ToSql] = params![parent_index_block_hash]; let microblock_info = query_rows::(blocks_conn, &sql, args).map_err(Error::DBError)?; Ok(microblock_info) @@ -3173,7 +3177,7 @@ impl StacksChainState { parent_block_hash: &BlockHeaderHash, ) -> Result { let sql = "SELECT 1 FROM epoch_transitions WHERE block_id = ?1"; - let args: &[&dyn ToSql] = &[&StacksBlockHeader::make_index_block_hash( + let args: &[&dyn ToSql] = params![StacksBlockHeader::make_index_block_hash( parent_consensus_hash, parent_block_hash, )]; @@ -3837,7 +3841,7 @@ impl StacksChainState { end_height: u64, ) -> Result, Error> { let sql = "SELECT processed_time - arrival_time FROM staging_blocks WHERE processed = 1 AND height >= ?1 AND height < ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(start_height)?, &u64_to_sql(end_height)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; let list = query_rows::(blocks_conn, &sql, args)?; Ok(list) } @@ -3850,7 +3854,7 @@ impl StacksChainState { end_height: u64, ) -> Result, Error> { let sql = "SELECT download_time FROM staging_blocks WHERE height >= ?1 AND height < ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(start_height)?, &u64_to_sql(end_height)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; let list = query_rows::(blocks_conn, &sql, args)?; Ok(list) } @@ -3932,9 +3936,9 @@ impl StacksChainState { // not the first-ever block. Does this connect to a previously-accepted // block in the headers database? let hdr_sql = "SELECT * FROM block_headers WHERE block_hash = ?1 AND consensus_hash = ?2".to_string(); - let hdr_args: &[&dyn ToSql] = &[ - &candidate.parent_anchored_block_hash, - &candidate.parent_consensus_hash, + let hdr_args: &[&dyn ToSql] = params![ + candidate.parent_anchored_block_hash, + candidate.parent_consensus_hash, ]; let hdr_row = query_row_panic::( blocks_tx, @@ -6533,7 +6537,7 @@ impl StacksChainState { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = &[&consensus_hash, &block_bhh]; + let args: &[&dyn ToSql] = params![consensus_hash, block_bhh]; query_row(&self.db(), sql, args).map_err(Error::DBError) } @@ -6542,7 +6546,7 @@ impl StacksChainState { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = &[&consensus_hash, &block_bhh]; + let args: &[&dyn ToSql] = params![consensus_hash, block_bhh]; let Some(staging_block): Option = query_row(&self.db(), sql, args).map_err(Error::DBError)? else { @@ -6555,7 +6559,7 @@ impl StacksChainState { pub fn get_stacks_chain_tips_at_height(&self, height: u64) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND height = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(height)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(height)?]; query_rows(&self.db(), sql, args).map_err(Error::DBError) } @@ -6565,9 +6569,9 @@ impl StacksChainState { staging_block: &StagingBlock, ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = &[ - &staging_block.parent_consensus_hash, - &staging_block.parent_anchored_block_hash, + let args: &[&dyn ToSql] = params![ + staging_block.parent_consensus_hash, + staging_block.parent_anchored_block_hash, ]; query_row(&self.db(), sql, args).map_err(Error::DBError) } @@ -6579,7 +6583,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT height FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = &[consensus_hash, block_hash]; + let args: &[&dyn ToSql] = params![consensus_hash, block_hash]; query_row(&self.db(), sql, args).map_err(Error::DBError) } diff --git a/stackslib/src/chainstate/stacks/db/headers.rs b/stackslib/src/chainstate/stacks/db/headers.rs index 0079f3d7d5..b42b9545d0 100644 --- a/stackslib/src/chainstate/stacks/db/headers.rs +++ b/stackslib/src/chainstate/stacks/db/headers.rs @@ -139,29 +139,29 @@ impl StacksChainState { assert!(block_height < (i64::MAX as u64)); - let args: &[&dyn ToSql] = &[ - &header.version, - &total_burn_str, - &total_work_str, - &header.proof, - &header.parent_block, - &header.parent_microblock, - &header.parent_microblock_sequence, - &header.tx_merkle_root, - &header.state_index_root, - &header.microblock_pubkey_hash, - &block_hash, - &index_block_hash, - &consensus_hash, - &burn_header_hash, - &(burn_header_height as i64), - &(burn_header_timestamp as i64), - &(block_height as i64), - &index_root, + let args: &[&dyn ToSql] = params![ + header.version, + total_burn_str, + total_work_str, + header.proof, + header.parent_block, + header.parent_microblock, + header.parent_microblock_sequence, + header.tx_merkle_root, + header.state_index_root, + header.microblock_pubkey_hash, + block_hash, + index_block_hash, + consensus_hash, + burn_header_hash, + (burn_header_height as i64), + (burn_header_timestamp as i64), + (block_height as i64), + index_root, anchored_block_cost, - &block_size_str, + block_size_str, parent_id, - &u64_to_sql(affirmation_weight)?, + u64_to_sql(affirmation_weight)?, ]; tx.execute("INSERT INTO block_headers \ @@ -209,7 +209,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result { let sql = "SELECT 1 FROM block_headers WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args: &[&dyn ToSql] = &[&consensus_hash, &block_hash]; + let args: &[&dyn ToSql] = params![consensus_hash, block_hash]; match conn.query_row(sql, args, |_| Ok(true)) { Ok(_) => Ok(true), Err(rusqlite::Error::QueryReturnedNoRows) => Ok(false), @@ -225,7 +225,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT * FROM block_headers WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args: &[&dyn ToSql] = &[&consensus_hash, &block_hash]; + let args: &[&dyn ToSql] = params![consensus_hash, block_hash]; query_row_panic(conn, sql, args, || { "FATAL: multiple rows for the same block hash".to_string() }) @@ -319,7 +319,7 @@ impl StacksChainState { pub fn get_genesis_header_info(conn: &Connection) -> Result { // by construction, only one block can have height 0 in this DB let sql = "SELECT * FROM block_headers WHERE consensus_hash = ?1 AND block_height = 0"; - let args: &[&dyn ToSql] = &[&FIRST_BURNCHAIN_CONSENSUS_HASH]; + let args: &[&dyn ToSql] = params![FIRST_BURNCHAIN_CONSENSUS_HASH]; let row_opt = query_row(conn, sql, args)?; Ok(row_opt.expect("BUG: no genesis header info")) } @@ -330,7 +330,7 @@ impl StacksChainState { block_id: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT parent_block_id FROM block_headers WHERE index_block_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[block_id]; + let args: &[&dyn ToSql] = params![block_id]; let mut rows = query_row_columns::(conn, sql, args, "parent_block_id")?; Ok(rows.pop()) } @@ -338,7 +338,7 @@ impl StacksChainState { /// Is this block present and processed? pub fn has_stacks_block(conn: &Connection, block_id: &StacksBlockId) -> Result { let sql = "SELECT 1 FROM block_headers WHERE index_block_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[block_id]; + let args: &[&dyn ToSql] = params![block_id]; Ok(conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -383,7 +383,7 @@ impl StacksChainState { ) -> Result, Error> { let qry = "SELECT * FROM block_headers WHERE block_height = ?1 AND affirmation_weight = ?2 ORDER BY burn_header_height DESC"; - let args: &[&dyn ToSql] = &[&u64_to_sql(height)?, &u64_to_sql(affirmation_weight)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(height)?, u64_to_sql(affirmation_weight)?]; query_rows(conn, qry, args).map_err(|e| e.into()) } diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 983fdbc72d..cd76b6ee9b 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -669,7 +669,7 @@ impl<'a> ChainstateTx<'a> { let txid = tx_event.transaction.txid(); let tx_hex = tx_event.transaction.serialize_to_dbstring(); let result = tx_event.result.to_string(); - let params: &[&dyn ToSql] = &[&txid, block_id, &tx_hex, &result]; + let params: &[&dyn ToSql] = params![txid, block_id, tx_hex, result]; if let Err(e) = self.tx.tx().execute(insert, params) { warn!("Failed to log TX: {}", e); } @@ -1023,11 +1023,7 @@ impl StacksChainState { } tx.execute( "INSERT INTO db_config (version,mainnet,chain_id) VALUES (?1,?2,?3)", - params![ - "1".to_string(), - (if mainnet { 1 } else { 0 }), - chain_id, - ], + params!["1".to_string(), (if mainnet { 1 } else { 0 }), chain_id,], )?; if migrate { diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index a5e9c91aaa..75abe038fa 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -28,7 +28,9 @@ use std::{error, fmt, fs, io, os}; use regex::Regex; use rusqlite::blob::Blob; use rusqlite::types::{FromSql, ToSql}; -use rusqlite::{params, Connection, Error as SqliteError, OptionalExtension, Transaction}; +use rusqlite::{ + params, Connection, DatabaseName, Error as SqliteError, OptionalExtension, Transaction, +}; use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; @@ -237,13 +239,7 @@ pub fn write_trie_blob( block_hash: &T, data: &[u8], ) -> Result { - let args: &[&dyn ToSql] = params![ - block_hash, - data, - 0, - 0, - 0, - ]; + let args: &[&dyn ToSql] = params![block_hash, data, 0, 0, 0,]; let mut s = conn.prepare("INSERT INTO marf_data (block_hash, data, unconfirmed, external_offset, external_length) VALUES (?, ?, ?, ?, ?)")?; let block_id = s @@ -346,13 +342,13 @@ pub fn write_trie_blob_to_mined( ) -> Result { if let Ok(block_id) = get_mined_block_identifier(conn, block_hash) { // already exists; update - let args: &[&dyn ToSql] = &[&data, &block_id]; + let args: &[&dyn ToSql] = params![data, block_id]; let mut s = conn.prepare("UPDATE mined_blocks SET data = ? WHERE block_id = ?")?; s.execute(args) .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks"); } else { // doesn't exist yet; insert - let args: &[&dyn ToSql] = &[block_hash, &data]; + let args: &[&dyn ToSql] = params![block_hash, data]; let mut s = conn.prepare("INSERT INTO mined_blocks (block_hash, data) VALUES (?, ?)")?; s.execute(args) .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks"); @@ -379,7 +375,7 @@ pub fn write_trie_blob_to_unconfirmed( if let Ok(Some(block_id)) = get_unconfirmed_block_identifier(conn, block_hash) { // already exists; update - let args: &[&dyn ToSql] = &[&data, &block_id]; + let args: &[&dyn ToSql] = params![data, block_id]; let mut s = conn.prepare("UPDATE marf_data SET data = ? WHERE block_id = ?")?; s.execute(args) .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks"); @@ -405,7 +401,7 @@ pub fn write_trie_blob_to_unconfirmed( /// Open a trie blob. Returns a Blob<'a> readable/writeable handle to it. pub fn open_trie_blob<'a>(conn: &'a Connection, block_id: u32) -> Result, Error> { let blob = conn.blob_open( - rusqlite::DatabaseName::Main, + DatabaseName::Main, "marf_data", "data", block_id.into(), @@ -417,7 +413,7 @@ pub fn open_trie_blob<'a>(conn: &'a Connection, block_id: u32) -> Result readable handle to it. pub fn open_trie_blob_readonly<'a>(conn: &'a Connection, block_id: u32) -> Result, Error> { let blob = conn.blob_open( - rusqlite::DatabaseName::Main, + DatabaseName::Main, "marf_data", "data", block_id.into(), @@ -454,7 +450,7 @@ pub fn read_node_hash_bytes( ptr: &TriePtr, ) -> Result<(), Error> { let mut blob = conn.blob_open( - rusqlite::DatabaseName::Main, + DatabaseName::Main, "marf_data", "data", block_id.into(), @@ -476,13 +472,7 @@ pub fn read_node_hash_bytes_by_bhh( &[bhh], |r| r.get("block_id"), )?; - let mut blob = conn.blob_open( - rusqlite::DatabaseName::Main, - "marf_data", - "data", - row_id, - true, - )?; + let mut blob = conn.blob_open(DatabaseName::Main, "marf_data", "data", row_id, true)?; let hash_buff = bits_read_node_hash_bytes(&mut blob, ptr)?; w.write_all(&hash_buff).map_err(|e| e.into()) } @@ -494,7 +484,7 @@ pub fn read_node_type( ptr: &TriePtr, ) -> Result<(TrieNodeType, TrieHash), Error> { let mut blob = conn.blob_open( - rusqlite::DatabaseName::Main, + DatabaseName::Main, "marf_data", "data", block_id.into(), @@ -510,7 +500,7 @@ pub fn read_node_type_nohash( ptr: &TriePtr, ) -> Result { let mut blob = conn.blob_open( - rusqlite::DatabaseName::Main, + DatabaseName::Main, "marf_data", "data", block_id.into(), @@ -525,7 +515,7 @@ pub fn get_external_trie_offset_length( block_id: u32, ) -> Result<(u64, u64), Error> { let qry = "SELECT external_offset, external_length FROM marf_data WHERE block_id = ?1"; - let args: &[&dyn ToSql] = &[&block_id]; + let args: &[&dyn ToSql] = params![block_id]; let (offset, length) = query_row(conn, qry, args)?.ok_or(Error::NotFoundError)?; Ok((offset, length)) } @@ -536,7 +526,7 @@ pub fn get_external_trie_offset_length_by_bhh( bhh: &T, ) -> Result<(u64, u64), Error> { let qry = "SELECT external_offset, external_length FROM marf_data WHERE block_hash = ?1"; - let args: &[&dyn ToSql] = &[bhh]; + let args: &[&dyn ToSql] = params![bhh]; let (offset, length) = query_row(conn, qry, args)?.ok_or(Error::NotFoundError)?; Ok((offset, length)) } @@ -588,7 +578,7 @@ pub fn get_node_hash_bytes( ptr: &TriePtr, ) -> Result { let mut blob = conn.blob_open( - rusqlite::DatabaseName::Main, + DatabaseName::Main, "marf_data", "data", block_id.into(), @@ -608,13 +598,7 @@ pub fn get_node_hash_bytes_by_bhh( &[bhh], |r| r.get("block_id"), )?; - let mut blob = conn.blob_open( - rusqlite::DatabaseName::Main, - "marf_data", - "data", - row_id, - true, - )?; + let mut blob = conn.blob_open(DatabaseName::Main, "marf_data", "data", row_id, true)?; let hash_buff = bits_read_node_hash_bytes(&mut blob, ptr)?; Ok(TrieHash(hash_buff)) } diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 92da8ac283..69e88ef501 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -27,8 +27,8 @@ use clarity::vm::types::{ PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, Value, }; use clarity::vm::ClarityVersion; -use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSqlOutput, ValueRef}; -use rusqlite::{Error as RusqliteError, ToSql}; +use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; +use rusqlite::Error as RusqliteError; use serde::{Deserialize, Serialize}; use serde_json::json; use sha2::{Digest, Sha512_256}; diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index dffc5b8fc5..eb05d31193 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -12,7 +12,8 @@ use clarity::vm::database::{ }; use clarity::vm::errors::{InterpreterResult, RuntimeErrorType}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, TupleData}; -use rusqlite::{params, Connection, OptionalExtension, Row, ToSql}; +use rusqlite::types::ToSql; +use rusqlite::{params, Connection, OptionalExtension, Row}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksAddress, StacksBlockId, TenureBlockId, VRFSeed, @@ -512,7 +513,7 @@ pub fn get_stacks_header_column_from_table( where F: Fn(&Row) -> R, { - let args: &[&dyn ToSql] = &[id_bhh]; + let args: &[&dyn ToSql] = params![id_bhh]; let table_name = if nakamoto { "nakamoto_block_headers" } else { @@ -589,7 +590,7 @@ fn get_first_block_in_tenure( }; let ch = consensus_hash .expect("Unexpected SQL failure querying block header table for 'consensus_hash'"); - let args: &[&dyn ToSql] = &[&ch]; + let args: &[&dyn ToSql] = params![ch]; conn.query_row( " SELECT index_block_hash @@ -618,7 +619,7 @@ fn get_miner_column( where F: FnOnce(&Row) -> R, { - let args: &[&dyn ToSql] = &[&id_bhh.0]; + let args: &[&dyn ToSql] = params![id_bhh.0]; conn.query_row( &format!( "SELECT {} FROM payments WHERE index_block_hash = ? AND miner = 1", diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 35019d494c..2131523c94 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -29,7 +29,7 @@ use rand::distributions::Uniform; use rand::prelude::Distribution; use rusqlite::types::ToSql; use rusqlite::{ - params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Transaction + params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Transaction, }; use siphasher::sip::SipHasher; // this is SipHash-2-4 use stacks_common::codec::{ @@ -859,7 +859,7 @@ impl<'a> MemPoolTx<'a> { /// Used to clear out txids that are now outside the bloom counter's depth. fn prune_bloom_counter(&mut self, target_height: u64) -> Result<(), MemPoolRejection> { let sql = "SELECT a.txid FROM mempool AS a LEFT OUTER JOIN removed_txids AS b ON a.txid = b.txid WHERE b.txid IS NULL AND a.height = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(target_height)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(target_height)?]; let txids: Vec = query_rows(&self.tx, sql, args)?; let _num_txs = txids.len(); @@ -871,7 +871,7 @@ impl<'a> MemPoolTx<'a> { bloom_counter.remove_raw(dbtx, &txid.0)?; let sql = "INSERT OR REPLACE INTO removed_txids (txid) VALUES (?1)"; - let args: &[&dyn ToSql] = &[&txid]; + let args: &[&dyn ToSql] = params![txid]; dbtx.execute(sql, args).map_err(db_error::SqliteError)?; } // help the type inference out @@ -902,7 +902,7 @@ impl<'a> MemPoolTx<'a> { ) -> Result, MemPoolRejection> { // is this the first-ever txid at this height? let sql = "SELECT 1 FROM mempool WHERE height = ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(height)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(height)?]; let present: Option = query_row(&self.tx, sql, args)?; if present.is_none() && height > (BLOOM_COUNTER_DEPTH as u64) { // this is the first-ever tx at this height. @@ -925,7 +925,7 @@ impl<'a> MemPoolTx<'a> { // remove lowest-fee tx (they're paying the least, so replication is // deprioritized) let sql = "SELECT a.txid FROM mempool AS a LEFT OUTER JOIN removed_txids AS b ON a.txid = b.txid WHERE b.txid IS NULL AND a.height > ?1 ORDER BY a.tx_fee ASC LIMIT 1"; - let args: &[&dyn ToSql] = &[&u64_to_sql( + let args: &[&dyn ToSql] = params![u64_to_sql( height.saturating_sub(BLOOM_COUNTER_DEPTH as u64), )?]; let evict_txid: Option = query_row(&dbtx, sql, args)?; @@ -933,7 +933,7 @@ impl<'a> MemPoolTx<'a> { bloom_counter.remove_raw(dbtx, &evict_txid.0)?; let sql = "INSERT OR REPLACE INTO removed_txids (txid) VALUES (?1)"; - let args: &[&dyn ToSql] = &[&evict_txid]; + let args: &[&dyn ToSql] = params![evict_txid]; dbtx.execute(sql, args).map_err(db_error::SqliteError)?; Some(evict_txid) @@ -963,7 +963,7 @@ impl<'a> MemPoolTx<'a> { let hashed_txid = Txid(Sha512Trunc256Sum::from_data(&randomized_buff).0); let sql = "INSERT OR REPLACE INTO randomized_txids (txid,hashed_txid) VALUES (?1,?2)"; - let args: &[&dyn ToSql] = &[txid, &hashed_txid]; + let args: &[&dyn ToSql] = params![txid, hashed_txid]; self.execute(sql, args).map_err(db_error::SqliteError)?; @@ -1504,12 +1504,12 @@ impl MemPoolDB { ) -> Result, db_error> { let sql = "SELECT DISTINCT origin_address FROM mempool WHERE height > ?1 AND height <= ?2 AND tx_fee >= ?3 ORDER BY tx_fee DESC LIMIT ?4 OFFSET ?5"; - let args: &[&dyn ToSql] = &[ - &start_height, - &end_height, - &u64_to_sql(min_fees)?, - &count, - &offset, + let args: &[&dyn ToSql] = params![ + start_height, + end_height, + u64_to_sql(min_fees)?, + count, + offset, ]; query_row_columns(self.conn(), sql, args, "origin_address") } @@ -1942,20 +1942,12 @@ impl MemPoolDB { } pub fn db_has_tx(conn: &DBConn, txid: &Txid) -> Result { - query_row( - conn, - "SELECT 1 FROM mempool WHERE txid = ?1", - params![txid], - ) - .and_then(|row_opt: Option| Ok(row_opt.is_some())) + query_row(conn, "SELECT 1 FROM mempool WHERE txid = ?1", params![txid]) + .and_then(|row_opt: Option| Ok(row_opt.is_some())) } pub fn get_tx(conn: &DBConn, txid: &Txid) -> Result, db_error> { - query_row( - conn, - "SELECT * FROM mempool WHERE txid = ?1", - params![txid], - ) + query_row(conn, "SELECT * FROM mempool WHERE txid = ?1", params![txid]) } /// Get all transactions across all tips @@ -1974,7 +1966,7 @@ impl MemPoolDB { block_header_hash: &BlockHeaderHash, ) -> Result { let sql = "SELECT * FROM mempool WHERE consensus_hash = ?1 AND block_header_hash = ?2"; - let args: &[&dyn ToSql] = &[consensus_hash, block_header_hash]; + let args: &[&dyn ToSql] = params![consensus_hash, block_header_hash]; let rows = query_rows::(conn, &sql, args)?; Ok(rows.len()) } @@ -1988,7 +1980,8 @@ impl MemPoolDB { timestamp: u64, ) -> Result, db_error> { let sql = "SELECT * FROM mempool WHERE accept_time = ?1 AND consensus_hash = ?2 AND block_header_hash = ?3 ORDER BY origin_nonce ASC"; - let args: &[&dyn ToSql] = &[&u64_to_sql(timestamp)?, consensus_hash, block_header_hash]; + let args: &[&dyn ToSql] = + params![u64_to_sql(timestamp)?, consensus_hash, block_header_hash]; let rows = query_rows::(conn, &sql, args)?; Ok(rows) } @@ -1996,7 +1989,7 @@ impl MemPoolDB { /// Given a chain tip, find the highest block-height from _before_ this tip pub fn get_previous_block_height(conn: &DBConn, height: u64) -> Result, db_error> { let sql = "SELECT height FROM mempool WHERE height < ?1 ORDER BY height DESC LIMIT 1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(height)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(height)?]; query_row(conn, sql, args) } @@ -2009,11 +2002,11 @@ impl MemPoolDB { count: u64, ) -> Result, db_error> { let sql = "SELECT * FROM mempool WHERE accept_time >= ?1 AND consensus_hash = ?2 AND block_header_hash = ?3 ORDER BY tx_fee DESC LIMIT ?4"; - let args: &[&dyn ToSql] = &[ - &u64_to_sql(timestamp)?, + let args: &[&dyn ToSql] = params![ + u64_to_sql(timestamp)?, consensus_hash, block_header_hash, - &u64_to_sql(count)?, + u64_to_sql(count)?, ]; let rows = query_rows::(conn, &sql, args)?; Ok(rows) @@ -2046,7 +2039,7 @@ impl MemPoolDB { FROM mempool WHERE {0}_address = ?1 AND {0}_nonce = ?2", if is_origin { "origin" } else { "sponsor" } ); - let args: &[&dyn ToSql] = &[&addr.to_string(), &u64_to_sql(nonce)?]; + let args: &[&dyn ToSql] = params![addr.to_string(), u64_to_sql(nonce)?]; query_row(conn, &sql, args) } @@ -2181,19 +2174,19 @@ impl MemPoolDB { tx) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)"; - let args: &[&dyn ToSql] = &[ - &txid, - &origin_address.to_string(), - &u64_to_sql(origin_nonce)?, - &sponsor_address.to_string(), - &u64_to_sql(sponsor_nonce)?, - &u64_to_sql(tx_fee)?, - &u64_to_sql(length)?, + let args: &[&dyn ToSql] = params![ + txid, + origin_address.to_string(), + u64_to_sql(origin_nonce)?, + sponsor_address.to_string(), + u64_to_sql(sponsor_nonce)?, + u64_to_sql(tx_fee)?, + u64_to_sql(length)?, consensus_hash, block_header_hash, - &u64_to_sql(height)?, - &u64_to_sql(get_epoch_time_secs())?, - &tx_bytes, + u64_to_sql(height)?, + u64_to_sql(get_epoch_time_secs())?, + tx_bytes, ]; tx.execute(sql, args) @@ -2243,7 +2236,7 @@ impl MemPoolDB { event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result<(), db_error> { let threshold_time = get_epoch_time_secs().saturating_sub(age.as_secs()); - let args: &[&dyn ToSql] = &[&u64_to_sql(threshold_time)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(threshold_time)?]; if let Some(event_observer) = event_observer { let sql = "SELECT txid FROM mempool WHERE accept_time < ?1"; let txids = query_rows(tx, sql, args)?; @@ -2264,7 +2257,7 @@ impl MemPoolDB { min_height: u64, event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[&u64_to_sql(min_height)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(min_height)?]; if let Some(event_observer) = event_observer { let sql = "SELECT txid FROM mempool WHERE height < ?1"; @@ -2562,7 +2555,7 @@ impl MemPoolDB { ) -> Result<(), db_error> { for txid in txids { let sql = "INSERT OR REPLACE INTO tx_blacklist (txid, arrival_time) VALUES (?1, ?2)"; - let args: &[&dyn ToSql] = &[&txid, &u64_to_sql(now)?]; + let args: &[&dyn ToSql] = params![txid, &u64_to_sql(now)?]; tx.execute(sql, args)?; } Ok(()) @@ -2577,7 +2570,7 @@ impl MemPoolDB { max_size: u64, ) -> Result<(), db_error> { let sql = "DELETE FROM tx_blacklist WHERE arrival_time + ?1 < ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(timeout)?, &u64_to_sql(now)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(timeout)?, u64_to_sql(now)?]; tx.execute(sql, args)?; // if we get too big, then drop some txs at random @@ -2591,10 +2584,7 @@ impl MemPoolDB { params![u64_to_sql(to_delete)?], )?; for txid in txids.into_iter() { - tx.execute( - "DELETE FROM tx_blacklist WHERE txid = ?1", - params![txid], - )?; + tx.execute("DELETE FROM tx_blacklist WHERE txid = ?1", params![txid])?; } } Ok(()) @@ -2606,7 +2596,7 @@ impl MemPoolDB { txid: &Txid, ) -> Result, db_error> { let sql = "SELECT arrival_time FROM tx_blacklist WHERE txid = ?1"; - let args: &[&dyn ToSql] = &[&txid]; + let args: &[&dyn ToSql] = params![txid]; query_row(conn, sql, args) } @@ -2729,7 +2719,7 @@ impl MemPoolDB { }; let min_height = max_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64); let sql = "SELECT mempool.txid FROM mempool WHERE height > ?1 AND height <= ?2 AND NOT EXISTS (SELECT 1 FROM removed_txids WHERE txid = mempool.txid)"; - let args: &[&dyn ToSql] = &[&u64_to_sql(min_height)?, &u64_to_sql(max_height)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(min_height)?, u64_to_sql(max_height)?]; query_rows(&self.conn(), sql, args) } @@ -2757,7 +2747,7 @@ impl MemPoolDB { }; let min_height = max_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64); let sql = "SELECT COUNT(txid) FROM mempool WHERE height > ?1 AND height <= ?2"; - let args: &[&dyn ToSql] = &[&u64_to_sql(min_height)?, &u64_to_sql(max_height)?]; + let args: &[&dyn ToSql] = params![u64_to_sql(min_height)?, u64_to_sql(max_height)?]; query_int(conn, sql, args).map(|cnt| cnt as u64) } @@ -2778,7 +2768,7 @@ impl MemPoolDB { /// Get the hashed txid for a txid pub fn get_randomized_txid(&self, txid: &Txid) -> Result, db_error> { let sql = "SELECT hashed_txid FROM randomized_txids WHERE txid = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = &[txid]; + let args: &[&dyn ToSql] = params![txid]; query_row(&self.conn(), sql, args) } @@ -2825,10 +2815,10 @@ impl MemPoolDB { (SELECT 1 FROM removed_txids WHERE txid = mempool.txid) \ ORDER BY randomized_txids.hashed_txid ASC LIMIT ?3"; - let args: &[&dyn ToSql] = &[ - &last_randomized_txid, - &u64_to_sql(height.saturating_sub(BLOOM_COUNTER_DEPTH as u64))?, - &u64_to_sql(max_run)?, + let args: &[&dyn ToSql] = params![ + last_randomized_txid, + u64_to_sql(height.saturating_sub(BLOOM_COUNTER_DEPTH as u64))?, + u64_to_sql(max_run)?, ]; let mut tags_table = HashSet::new(); diff --git a/stackslib/src/cost_estimates/fee_medians.rs b/stackslib/src/cost_estimates/fee_medians.rs index fd8db3bf17..12bd2fb9b8 100644 --- a/stackslib/src/cost_estimates/fee_medians.rs +++ b/stackslib/src/cost_estimates/fee_medians.rs @@ -4,9 +4,10 @@ use std::path::Path; use clarity::types::sqlite::NO_PARAMS; use clarity::vm::costs::ExecutionCost; -use rusqlite::types::{FromSql, FromSqlError}; +use rusqlite::types::{FromSql, FromSqlError, ToSql}; use rusqlite::{ - params, AndThenRows, Connection, Error as SqliteError, OptionalExtension, ToSql, Transaction as SqlTransaction + params, AndThenRows, Connection, Error as SqliteError, OpenFlags, OptionalExtension, + Transaction as SqlTransaction, }; use serde_json::Value as JsonValue; @@ -62,7 +63,7 @@ impl WeightedMedianFeeRateEstimator { pub fn open(p: &Path, metric: M, window_size: u32) -> Result { let mut db = sqlite_open( p, - rusqlite::OpenFlags::SQLITE_OPEN_CREATE | rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE, + OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE, false, )?; diff --git a/stackslib/src/cost_estimates/fee_scalar.rs b/stackslib/src/cost_estimates/fee_scalar.rs index fd38f9ca51..ff7911058f 100644 --- a/stackslib/src/cost_estimates/fee_scalar.rs +++ b/stackslib/src/cost_estimates/fee_scalar.rs @@ -4,9 +4,10 @@ use std::path::Path; use clarity::types::sqlite::NO_PARAMS; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::{ClaritySerializable, STXBalance}; -use rusqlite::types::{FromSql, FromSqlError}; +use rusqlite::types::{FromSql, FromSqlError, ToSql}; use rusqlite::{ - params, Connection, Error as SqliteError, OptionalExtension, ToSql, Transaction as SqlTransaction + params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, + Transaction as SqlTransaction, }; use serde_json::Value as JsonValue; @@ -47,7 +48,7 @@ impl ScalarFeeRateEstimator { pub fn open(p: &Path, metric: M) -> Result { let mut db = sqlite_open( p, - rusqlite::OpenFlags::SQLITE_OPEN_CREATE | rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE, + OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE, false, )?; diff --git a/stackslib/src/cost_estimates/pessimistic.rs b/stackslib/src/cost_estimates/pessimistic.rs index d9499ca6ad..bb1cf48f38 100644 --- a/stackslib/src/cost_estimates/pessimistic.rs +++ b/stackslib/src/cost_estimates/pessimistic.rs @@ -3,9 +3,10 @@ use std::path::Path; use clarity::types::sqlite::NO_PARAMS; use clarity::vm::costs::ExecutionCost; -use rusqlite::types::{FromSql, FromSqlError}; +use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql}; use rusqlite::{ - params, Connection, Error as SqliteError, OptionalExtension, ToSql, Transaction as SqliteTransaction + params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, + Transaction as SqliteTransaction, }; use serde_json::Value as JsonValue; @@ -77,9 +78,7 @@ impl std::fmt::Display for CostField { } impl FromSql for Samples { - fn column_result( - sql_value: rusqlite::types::ValueRef<'_>, - ) -> rusqlite::types::FromSqlResult { + fn column_result(sql_value: rusqlite::types::ValueRef<'_>) -> FromSqlResult { let json_value = JsonValue::column_result(sql_value)?; let items = serde_json::from_value(json_value).map_err(|_e| { error!("Failed to parse PessimisticEstimator sample from SQL"); @@ -145,11 +144,8 @@ impl Samples { let sql = "INSERT OR REPLACE INTO pessimistic_estimator (estimate_key, current_value, samples) VALUES (?, ?, ?)"; let current_value = u64_to_sql(self.mean()).unwrap_or_else(|_| i64::MAX); - tx.execute( - sql, - params![identifier, current_value, self.to_json()], - ) - .expect("SQLite failure"); + tx.execute(sql, params![identifier, current_value, self.to_json()]) + .expect("SQLite failure"); } fn get_sqlite(conn: &Connection, identifier: &str) -> Samples { @@ -173,27 +169,25 @@ impl Samples { impl PessimisticEstimator { pub fn open(p: &Path, log_error: bool) -> Result { - let db = - sqlite_open(p, rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE, false).or_else(|e| { - if let SqliteError::SqliteFailure(ref internal, _) = e { - if let rusqlite::ErrorCode::CannotOpen = internal.code { - let mut db = sqlite_open( - p, - rusqlite::OpenFlags::SQLITE_OPEN_CREATE - | rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE, - false, - )?; - let tx = tx_begin_immediate_sqlite(&mut db)?; - PessimisticEstimator::instantiate_db(&tx)?; - tx.commit()?; - Ok(db) - } else { - Err(e) - } + let db = sqlite_open(p, OpenFlags::SQLITE_OPEN_READ_WRITE, false).or_else(|e| { + if let SqliteError::SqliteFailure(ref internal, _) = e { + if let rusqlite::ErrorCode::CannotOpen = internal.code { + let mut db = sqlite_open( + p, + OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE, + false, + )?; + let tx = tx_begin_immediate_sqlite(&mut db)?; + PessimisticEstimator::instantiate_db(&tx)?; + tx.commit()?; + Ok(db) } else { Err(e) } - })?; + } else { + Err(e) + } + })?; Ok(PessimisticEstimator { db, log_error }) } diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 1743f580bb..bd7cebc581 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -769,7 +769,7 @@ simulating a miner. if let Some(value) = value_opt { let conn = sqlite_open(&db_path, OpenFlags::SQLITE_OPEN_READ_ONLY, false) .expect("Failed to open DB"); - let args: &[&dyn ToSql] = &[&value.to_hex()]; + let args: &[&dyn ToSql] = params![&value.to_hex()]; let res: Result = conn.query_row_and_then( "SELECT value FROM __fork_storage WHERE value_hash = ?1", args, diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index cb3ad8162f..dc6d90a9c5 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -461,7 +461,7 @@ impl AtlasDB { let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let max = (page_index + 1) * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let qry = "SELECT MIN(block_height) as min, MAX(block_height) as max FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2"; - let args = params![min, max]; + let args: &[&dyn ToSql] = params![min, max]; let mut stmt = self.conn.prepare(&qry)?; let mut rows = stmt.query(args)?; @@ -497,11 +497,7 @@ impl AtlasDB { let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let max = min + AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let qry = "SELECT attachment_index, is_available FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2 AND index_block_hash = ?3 ORDER BY attachment_index ASC"; - let args = params![ - min, - max, - block_id, - ]; + let args: &[&dyn ToSql] = params![min, max, block_id,]; let rows = query_rows::<(u32, u32), _>(&self.conn, &qry, args)?; let mut bool_vector = vec![true; AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE as usize]; @@ -602,8 +598,8 @@ impl AtlasDB { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT content, hash FROM attachments WHERE hash = ?1 AND was_instantiated = 0" .to_string(); - let args = params![hex_content_hash]; - let row = query_row::(&self.conn, &qry, &args)?; + let args: &[&dyn ToSql] = params![hex_content_hash]; + let row = query_row::(&self.conn, &qry, args)?; Ok(row) } @@ -637,7 +633,7 @@ impl AtlasDB { ) -> Result, db_error> { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT * FROM attachment_instances WHERE content_hash = ?1 AND status = ?2"; - let args = params![hex_content_hash, AttachmentInstanceStatus::Checked]; + let args: &[&dyn ToSql] = params![hex_content_hash, AttachmentInstanceStatus::Checked]; let rows = query_rows(&self.conn, qry, args)?; Ok(rows) } @@ -646,8 +642,8 @@ impl AtlasDB { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT content, hash FROM attachments WHERE hash = ?1 AND was_instantiated = 1" .to_string(); - let args = params![hex_content_hash]; - let row = query_row::(&self.conn, &qry, &args)?; + let args: &[&dyn ToSql] = params![hex_content_hash]; + let row = query_row::(&self.conn, &qry, args)?; Ok(row) } diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 4ba6f243af..086e3b65a2 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -446,17 +446,17 @@ impl PeerDB { PeerDB::apply_schema_migrations(&tx)?; - let local_peer_args: &[&dyn ToSql] = &[ - &network_id, - &parent_network_id, - &to_hex(&localpeer.nonce), - &to_hex(&localpeer.private_key.to_bytes()), - &u64_to_sql(key_expires)?, - &to_bin(localpeer.addrbytes.as_bytes()), - &localpeer.port, - &localpeer.services, - &localpeer.data_url.as_str(), - &serde_json::to_string(stacker_dbs) + let local_peer_args: &[&dyn ToSql] = params![ + network_id, + parent_network_id, + to_hex(&localpeer.nonce), + to_hex(&localpeer.private_key.to_bytes()), + u64_to_sql(key_expires)?, + to_bin(localpeer.addrbytes.as_bytes()), + localpeer.port, + localpeer.services, + localpeer.data_url.as_str(), + serde_json::to_string(stacker_dbs) .expect("FATAL: failed to serialize stacker db contract addresses"), ]; @@ -556,13 +556,13 @@ impl PeerDB { p2p_port: u16, stacker_dbs: &[QualifiedContractIdentifier], ) -> Result<(), db_error> { - let local_peer_args: &[&dyn ToSql] = &[ - &p2p_port, - &data_url.as_str(), - &serde_json::to_string(stacker_dbs) + let local_peer_args: &[&dyn ToSql] = params![ + p2p_port, + data_url.as_str(), + serde_json::to_string(stacker_dbs) .expect("FATAL: unable to serialize Vec"), - &network_id, - &parent_network_id, + network_id, + parent_network_id, ]; match self.conn.execute("UPDATE local_peer SET port = ?1, data_url = ?2, stacker_dbs = ?3 WHERE network_id = ?4 AND parent_network_id = ?5", @@ -827,11 +827,8 @@ impl PeerDB { /// Set local service availability pub fn set_local_services(tx: &Transaction, services: u16) -> Result<(), db_error> { - tx.execute( - "UPDATE local_peer SET services = ?1", - params![services], - ) - .map_err(db_error::SqliteError)?; + tx.execute("UPDATE local_peer SET services = ?1", params![services]) + .map_err(db_error::SqliteError)?; Ok(()) } @@ -842,7 +839,7 @@ impl PeerDB { privkey: &Secp256k1PrivateKey, expire_block: u64, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[&to_hex(&privkey.to_bytes()), &u64_to_sql(expire_block)?]; + let args: &[&dyn ToSql] = params![to_hex(&privkey.to_bytes()), u64_to_sql(expire_block)?]; tx.execute( "UPDATE local_peer SET private_key = ?1, private_key_expire = ?2", args, @@ -915,11 +912,7 @@ impl PeerDB { peer_port: u16, ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; - let args = params![ - network_id, - peer_addr.to_bin(), - peer_port, - ]; + let args: &[&dyn ToSql] = params![network_id, peer_addr.to_bin(), peer_port,]; query_row::(conn, qry, args) } @@ -944,7 +937,7 @@ impl PeerDB { peer_port: u16, ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND port = ?2"; - let args = params![network_id, peer_port]; + let args: &[&dyn ToSql] = params![network_id, peer_port]; query_row::(conn, &qry, args) } @@ -955,14 +948,14 @@ impl PeerDB { slot: u32, ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND slot = ?2"; - let args = params![network_id, slot]; + let args: &[&dyn ToSql] = params![network_id, slot]; query_row::(conn, &qry, args) } /// Is there any peer at a particular slot? pub fn has_peer_at(conn: &DBConn, network_id: u32, slot: u32) -> Result { let qry = "SELECT 1 FROM frontier WHERE network_id = ?1 AND slot = ?2"; - let args = params![network_id, slot]; + let args: &[&dyn ToSql] = params![network_id, slot]; Ok(query_row::(conn, &qry, args)? .map(|x| x == 1) .unwrap_or(false)) @@ -1039,7 +1032,7 @@ impl PeerDB { ) -> Result<(), db_error> { for cid in smart_contracts { test_debug!("Add Stacker DB contract to slot {}: {}", slot, cid); - let args: &[&dyn ToSql] = &[&cid.to_string(), &slot]; + let args: &[&dyn ToSql] = params![cid.to_string(), slot]; tx.execute("INSERT OR REPLACE INTO stackerdb_peers (smart_contract_id,peer_slot) VALUES (?1,?2)", args) .map_err(db_error::SqliteError)?; } @@ -1061,22 +1054,22 @@ impl PeerDB { ) -> Result<(), db_error> { let old_peer_opt = PeerDB::get_peer_at(tx, neighbor.addr.network_id, slot)?; - let neighbor_args: &[&dyn ToSql] = &[ - &neighbor.addr.peer_version, - &neighbor.addr.network_id, - &to_bin(neighbor.addr.addrbytes.as_bytes()), - &neighbor.addr.port, - &to_hex(&neighbor.public_key.to_bytes_compressed()), - &u64_to_sql(neighbor.expire_block)?, - &u64_to_sql(neighbor.last_contact_time)?, - &neighbor.asn, - &neighbor.org, - &neighbor.allowed, - &neighbor.denied, - &neighbor.in_degree, - &neighbor.out_degree, - &0i64, - &slot, + let neighbor_args: &[&dyn ToSql] = params![ + neighbor.addr.peer_version, + neighbor.addr.network_id, + to_bin(neighbor.addr.addrbytes.as_bytes()), + neighbor.addr.port, + to_hex(&neighbor.public_key.to_bytes_compressed()), + u64_to_sql(neighbor.expire_block)?, + u64_to_sql(neighbor.last_contact_time)?, + neighbor.asn, + neighbor.org, + neighbor.allowed, + neighbor.denied, + neighbor.in_degree, + neighbor.out_degree, + 0i64, + slot, ]; tx.execute("INSERT OR REPLACE INTO frontier (peer_version, network_id, addrbytes, port, public_key, expire_block_height, last_contact_time, asn, org, allowed, denied, in_degree, out_degree, initial, slot) \ @@ -1107,11 +1100,7 @@ impl PeerDB { let slot_opt = Self::find_peer_slot(tx, network_id, peer_addr, peer_port)?; tx.execute( "DELETE FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3", - params![ - network_id, - peer_addr.to_bin(), - peer_port, - ], + params![network_id, peer_addr.to_bin(), peer_port,], ) .map_err(db_error::SqliteError)?; @@ -1212,11 +1201,11 @@ impl PeerDB { peer_port: u16, deny_deadline: u64, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &u64_to_sql(deny_deadline)?, - &network_id, - &peer_addr.to_bin(), - &peer_port, + let args: &[&dyn ToSql] = params![ + u64_to_sql(deny_deadline)?, + network_id, + peer_addr.to_bin(), + peer_port, ]; let num_updated = tx.execute("UPDATE frontier SET denied = ?1 WHERE network_id = ?2 AND addrbytes = ?3 AND port = ?4", args) .map_err(db_error::SqliteError)?; @@ -1258,20 +1247,20 @@ impl PeerDB { neighbor.addr.port, )?; - let args: &[&dyn ToSql] = &[ - &neighbor.addr.peer_version, - &to_hex(&neighbor.public_key.to_bytes_compressed()), - &u64_to_sql(neighbor.expire_block)?, - &u64_to_sql(neighbor.last_contact_time)?, - &neighbor.asn, - &neighbor.org, - &neighbor.allowed, - &neighbor.denied, - &neighbor.in_degree, - &neighbor.out_degree, - &neighbor.addr.network_id, - &to_bin(neighbor.addr.addrbytes.as_bytes()), - &neighbor.addr.port, + let args: &[&dyn ToSql] = params![ + neighbor.addr.peer_version, + to_hex(&neighbor.public_key.to_bytes_compressed()), + u64_to_sql(neighbor.expire_block)?, + u64_to_sql(neighbor.last_contact_time)?, + neighbor.asn, + neighbor.org, + neighbor.allowed, + neighbor.denied, + neighbor.in_degree, + neighbor.out_degree, + neighbor.addr.network_id, + to_bin(neighbor.addr.addrbytes.as_bytes()), + neighbor.addr.port, ]; tx.execute("UPDATE frontier SET peer_version = ?1, public_key = ?2, expire_block_height = ?3, last_contact_time = ?4, asn = ?5, org = ?6, allowed = ?7, denied = ?8, in_degree = ?9, out_degree = ?10 \ @@ -1310,7 +1299,7 @@ impl PeerDB { ) -> Result, db_error> { let qry = "SELECT slot FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; - let args: &[&dyn ToSql] = &[&network_id, &addrbytes.to_bin(), &port]; + let args: &[&dyn ToSql] = params![network_id, addrbytes.to_bin(), port]; Ok(query_row::(conn, qry, args)?) } @@ -1336,7 +1325,7 @@ impl PeerDB { smart_contract: &QualifiedContractIdentifier, ) -> Result, db_error> { let qry = "SELECT peer_slot FROM stackerdb_peers WHERE smart_contract_id = ?1"; - let args: &[&dyn ToSql] = &[&smart_contract.to_string()]; + let args: &[&dyn ToSql] = params![smart_contract.to_string()]; query_rows(conn, qry, args) } @@ -1396,7 +1385,7 @@ impl PeerDB { let sql = "DELETE FROM stackerdb_peers WHERE smart_contract_id = ?1 AND peer_slot = ?2"; for cid in to_delete.into_iter() { test_debug!("Delete Stacker DB for {:?}: {}", &neighbor.addr, &cid); - let args: &[&dyn ToSql] = &[&cid.to_string(), &slot]; + let args: &[&dyn ToSql] = params![cid.to_string(), slot]; tx.execute(sql, args).map_err(db_error::SqliteError)?; } @@ -1404,7 +1393,7 @@ impl PeerDB { "INSERT OR REPLACE INTO stackerdb_peers (smart_contract_id,peer_slot) VALUES (?1,?2)"; for cid in to_insert.iter() { test_debug!("Add Stacker DB for {:?}: {}", &neighbor.addr, &cid); - let args: &[&dyn ToSql] = &[&cid.to_string(), &slot]; + let args: &[&dyn ToSql] = params![cid.to_string(), slot]; tx.execute(sql, args).map_err(db_error::SqliteError)?; } @@ -1461,7 +1450,7 @@ impl PeerDB { prefix: &PeerAddress, mask: u32, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[&prefix.to_bin(), &mask]; + let args: &[&dyn ToSql] = params![prefix.to_bin(), mask]; tx.execute( &format!( "INSERT OR REPLACE INTO {} (prefix, mask) VALUES (?1, ?2)", @@ -1480,7 +1469,7 @@ impl PeerDB { prefix: &PeerAddress, mask: u32, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[&prefix.to_bin(), &mask]; + let args: &[&dyn ToSql] = params![prefix.to_bin(), mask]; tx.execute( &format!("DELETE FROM {} WHERE prefix = ?1 AND mask = ?2", table), args, @@ -1557,7 +1546,7 @@ impl PeerDB { ) -> Result<(), db_error> { assert!(mask > 0 && mask <= 128); let prefix_txt = PeerDB::cidr_prefix_to_string(prefix, mask); - let args: &[&dyn ToSql] = &[&value, &mask, &prefix_txt]; + let args: &[&dyn ToSql] = params![value, mask, prefix_txt]; tx.execute( &format!( "UPDATE frontier SET {} = ?1 WHERE SUBSTR(addrbytes,1,?2) = SUBSTR(?3,1,?2)", @@ -1635,11 +1624,11 @@ impl PeerDB { if always_include_allowed { // always include allowed neighbors, freshness be damned let allow_qry = "SELECT * FROM frontier WHERE network_id = ?1 AND denied < ?2 AND (allowed < 0 OR ?3 < allowed) AND (peer_version & 0x000000ff) >= ?4"; - let allow_args: &[&dyn ToSql] = &[ - &network_id, - &u64_to_sql(now_secs)?, - &u64_to_sql(now_secs)?, - &network_epoch, + let allow_args: &[&dyn ToSql] = params![ + network_id, + u64_to_sql(now_secs)?, + u64_to_sql(now_secs)?, + network_epoch, ]; let mut allow_rows = query_rows::(conn, &allow_qry, allow_args)?; @@ -1665,14 +1654,14 @@ impl PeerDB { (allowed < 0 OR (allowed >= 0 AND allowed <= ?5)) AND (peer_version & 0x000000ff) >= ?6 ORDER BY RANDOM() LIMIT ?7" }; - let random_peers_args: &[&dyn ToSql] = &[ - &network_id, - &u64_to_sql(min_age)?, - &u64_to_sql(block_height)?, - &u64_to_sql(now_secs)?, - &u64_to_sql(now_secs)?, - &network_epoch, - &(count - (ret.len() as u32)), + let random_peers_args: &[&dyn ToSql] = params![ + network_id, + u64_to_sql(min_age)?, + u64_to_sql(block_height)?, + u64_to_sql(now_secs)?, + u64_to_sql(now_secs)?, + network_epoch, + (count - (ret.len() as u32)), ]; let mut random_peers = query_rows::(conn, &random_peers_qry, random_peers_args)?; @@ -1722,12 +1711,7 @@ impl PeerDB { fn asn4_insert(tx: &Transaction, asn4: &ASEntry4) -> Result<(), db_error> { tx.execute( "INSERT OR REPLACE INTO asn4 (prefix, mask, asn, org) VALUES (?1, ?2, ?3, ?4)", - params![ - asn4.prefix, - asn4.mask, - asn4.asn, - asn4.org, - ], + params![asn4.prefix, asn4.mask, asn4.asn, asn4.org,], ) .map_err(db_error::SqliteError)?; @@ -1746,7 +1730,7 @@ impl PeerDB { let addr_u32 = addrbits.ipv4_bits().unwrap(); let qry = "SELECT * FROM asn4 WHERE prefix = (?1 & ~((1 << (32 - mask)) - 1)) ORDER BY prefix DESC LIMIT 1"; - let args = params![addr_u32]; + let args: &[&dyn ToSql] = params![addr_u32]; let rows = query_rows::(conn, &qry, args)?; match rows.len() { 0 => Ok(None), @@ -1769,7 +1753,7 @@ impl PeerDB { #[cfg_attr(test, mutants::skip)] pub fn asn_count(conn: &DBConn, asn: u32) -> Result { let qry = "SELECT COUNT(*) FROM frontier WHERE asn = ?1"; - let args = params![asn]; + let args: &[&dyn ToSql] = params![asn]; let count = query_count(conn, &qry, args)?; Ok(count as u64) } @@ -1802,11 +1786,11 @@ impl PeerDB { } let qry = "SELECT DISTINCT frontier.* FROM frontier JOIN stackerdb_peers ON stackerdb_peers.peer_slot = frontier.slot WHERE stackerdb_peers.smart_contract_id = ?1 AND frontier.network_id = ?2 AND frontier.last_contact_time >= ?3 ORDER BY RANDOM() LIMIT ?4"; let max_count_u32 = u32::try_from(max_count).unwrap_or(u32::MAX); - let args: &[&dyn ToSql] = &[ - &smart_contract.to_string(), - &network_id, - &u64_to_sql(min_age)?, - &max_count_u32, + let args: &[&dyn ToSql] = params![ + smart_contract.to_string(), + network_id, + u64_to_sql(min_age)?, + max_count_u32, ]; query_rows(conn, qry, args) } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index dd845770c8..5ed6b1d6fd 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -36,8 +36,7 @@ use libstackerdb::{ }; use rand::{thread_rng, RngCore}; use regex::Regex; -use rusqlite::types::ToSqlOutput; -use rusqlite::ToSql; +use rusqlite::types::{ToSql, ToSqlOutput}; use serde::de::Error as de_Error; use serde::ser::Error as ser_Error; use serde::{Deserialize, Serialize}; diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 43fb0ddd11..6a24afd990 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -158,7 +158,7 @@ fn inner_get_stackerdb_id( smart_contract: &QualifiedContractIdentifier, ) -> Result { let sql = "SELECT rowid FROM databases WHERE smart_contract_id = ?1"; - let args: &[&dyn ToSql] = &[&smart_contract.to_string()]; + let args: &[&dyn ToSql] = params![smart_contract.to_string()]; Ok(query_row(conn, sql, args)?.ok_or(net_error::NoSuchStackerDB(smart_contract.clone()))?) } @@ -172,7 +172,7 @@ fn inner_get_slot_metadata( ) -> Result, net_error> { let stackerdb_id = inner_get_stackerdb_id(conn, smart_contract)?; let sql = "SELECT slot_id,version,data_hash,signature FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = &[&stackerdb_id, &slot_id]; + let args: &[&dyn ToSql] = params![stackerdb_id, slot_id]; query_row(conn, &sql, args).map_err(|e| e.into()) } @@ -187,7 +187,7 @@ fn inner_get_slot_validation( let stackerdb_id = inner_get_stackerdb_id(conn, smart_contract)?; let sql = "SELECT signer,write_time,version FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = &[&stackerdb_id, &slot_id]; + let args: &[&dyn ToSql] = params![stackerdb_id, slot_id]; query_row(conn, &sql, args).map_err(|e| e.into()) } @@ -207,7 +207,7 @@ impl<'a> StackerDBTx<'a> { smart_contract_id: &QualifiedContractIdentifier, ) -> Result<(), net_error> { let qry = "DELETE FROM databases WHERE smart_contract_id = ?1"; - let args: &[&dyn ToSql] = &[&smart_contract_id.to_string()]; + let args: &[&dyn ToSql] = params![smart_contract_id.to_string()]; let mut stmt = self.sql_tx.prepare(qry)?; stmt.execute(args)?; Ok(()) @@ -247,7 +247,7 @@ impl<'a> StackerDBTx<'a> { let qry = "INSERT OR REPLACE INTO databases (smart_contract_id) VALUES (?1)"; let mut stmt = self.sql_tx.prepare(&qry)?; - let args: &[&dyn ToSql] = &[&smart_contract.to_string()]; + let args: &[&dyn ToSql] = params![smart_contract.to_string()]; stmt.execute(args)?; let stackerdb_id = self.get_stackerdb_id(smart_contract)?; @@ -287,7 +287,7 @@ impl<'a> StackerDBTx<'a> { ) -> Result<(), net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "DELETE FROM chunks WHERE stackerdb_id = ?1"; - let args: &[&dyn ToSql] = &[&stackerdb_id]; + let args: &[&dyn ToSql] = params![stackerdb_id]; let mut stmt = self.sql_tx.prepare(&qry)?; stmt.execute(args)?; Ok(()) @@ -375,14 +375,14 @@ impl<'a> StackerDBTx<'a> { let sql = "UPDATE chunks SET version = ?1, data_hash = ?2, signature = ?3, data = ?4, write_time = ?5 WHERE stackerdb_id = ?6 AND slot_id = ?7"; let mut stmt = self.sql_tx.prepare(&sql)?; - let args: &[&dyn ToSql] = &[ - &slot_desc.slot_version, - &Sha512Trunc256Sum::from_data(chunk), - &slot_desc.signature, - &chunk, - &u64_to_sql(get_epoch_time_secs())?, - &stackerdb_id, - &slot_desc.slot_id, + let args: &[&dyn ToSql] = params![ + slot_desc.slot_version, + Sha512Trunc256Sum::from_data(chunk), + slot_desc.signature, + chunk, + u64_to_sql(get_epoch_time_secs())?, + stackerdb_id, + slot_desc.slot_id, ]; stmt.execute(args)?; @@ -549,7 +549,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT signer FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = &[&stackerdb_id, &slot_id]; + let args: &[&dyn ToSql] = params![stackerdb_id, slot_id]; query_row(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -562,7 +562,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT signer FROM chunks WHERE stackerdb_id = ?1 GROUP BY signer"; - let args: &[&dyn ToSql] = &[&stackerdb_id]; + let args: &[&dyn ToSql] = params![stackerdb_id]; query_rows(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -583,7 +583,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = inner_get_stackerdb_id(&self.conn, smart_contract)?; let sql = "SELECT slot_id,version,data_hash,signature FROM chunks WHERE stackerdb_id = ?1 ORDER BY slot_id ASC"; - let args: &[&dyn ToSql] = &[&stackerdb_id]; + let args: &[&dyn ToSql] = params![stackerdb_id]; query_rows(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -607,7 +607,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "SELECT version FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = &[&stackerdb_id, &slot_id]; + let args: &[&dyn ToSql] = params![stackerdb_id, slot_id]; self.conn .query_row(qry, args, |row| row.get(0)) @@ -622,7 +622,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT version FROM chunks WHERE stackerdb_id = ?1 ORDER BY slot_id"; - let args: &[&dyn ToSql] = &[&stackerdb_id]; + let args: &[&dyn ToSql] = params![stackerdb_id]; query_rows(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -633,7 +633,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT write_time FROM chunks WHERE stackerdb_id = ?1 ORDER BY slot_id"; - let args: &[&dyn ToSql] = &[&stackerdb_id]; + let args: &[&dyn ToSql] = params![stackerdb_id]; query_rows(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -648,7 +648,7 @@ impl StackerDBs { ) -> Result>, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "SELECT data FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = &[&stackerdb_id, &slot_id]; + let args: &[&dyn ToSql] = params![stackerdb_id, slot_id]; self.conn .query_row(qry, args, |row| row.get(0)) @@ -681,7 +681,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "SELECT slot_id,version,signature,data FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2 AND version = ?3"; - let args: &[&dyn ToSql] = &[&stackerdb_id, &slot_id, &slot_version]; + let args: &[&dyn ToSql] = params![stackerdb_id, slot_id, slot_version]; query_row(&self.conn, &qry, args).map_err(|e| e.into()) } } diff --git a/stackslib/src/util_lib/bloom.rs b/stackslib/src/util_lib/bloom.rs index 75652c8f50..b418addfde 100644 --- a/stackslib/src/util_lib/bloom.rs +++ b/stackslib/src/util_lib/bloom.rs @@ -22,7 +22,8 @@ use std::io::{Read, Seek, SeekFrom, Write}; use rand::prelude::*; use rand::thread_rng; use rusqlite::blob::Blob; -use rusqlite::{Error as sqlite_error, Row, ToSql}; +use rusqlite::types::ToSql; +use rusqlite::{params, Error as sqlite_error, Row}; use siphasher::sip::SipHasher; // this is SipHash-2-4 use stacks_common::codec::{read_next, write_next, Error as codec_error, StacksMessageCodec}; use stacks_common::types::sqlite::NO_PARAMS; @@ -361,7 +362,7 @@ impl BloomCounter { "INSERT INTO {} (counts, num_bins, num_hashes, hasher) VALUES (?1, ?2, ?3, ?4)", table_name ); - let args: &[&dyn ToSql] = &[&counts_vec, &num_bins, &num_hashes, &hasher_vec]; + let args: &[&dyn ToSql] = params![counts_vec, num_bins, num_hashes, hasher_vec]; tx.execute(&sql, args).map_err(db_error::SqliteError)?; diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index cc6fb9a6df..e6f80e107b 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -25,7 +25,8 @@ use clarity::vm::types::QualifiedContractIdentifier; use rand::{thread_rng, Rng, RngCore}; use rusqlite::types::{FromSql, ToSql}; use rusqlite::{ - params, Connection, Error as sqlite_error, OpenFlags, OptionalExtension, Params, Row, Transaction, TransactionBehavior + params, Connection, Error as sqlite_error, OpenFlags, OptionalExtension, Params, Row, + Transaction, TransactionBehavior, }; use serde_json::Error as serde_error; use stacks_common::types::chainstate::{SortitionId, StacksAddress, StacksBlockId, TrieHash}; diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index d23ed3d593..5ed96934e2 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12,7 +12,7 @@ use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value, MAX_CALL_STACK_DEPTH}; use rand::{Rng, RngCore}; -use rusqlite::types::ToSql; +use rusqlite::params; use serde::Deserialize; use serde_json::json; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; From 2dc3dff5ebe287fd7fb2bfe7e0f52eb8bb7fb577 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 4 Jul 2024 17:56:40 +0300 Subject: [PATCH 0474/1400] fix specific debug type error by casting u64 to u128 --- stackslib/src/core/mempool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 2131523c94..f024a95d3b 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1922,7 +1922,7 @@ impl MemPoolDB { debug!( "Mempool iteration finished"; - "considered_txs" => total_considered, + "considered_txs" => total_considered as u128, "elapsed_ms" => start_time.elapsed().as_millis() ); Ok(total_considered) From 151c1b6d1688828894574edb005b6716917ceecb Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 4 Jul 2024 19:15:10 +0300 Subject: [PATCH 0475/1400] add sqlite version function and test --- stacks-signer/src/signerdb.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 5692b5c6f0..0725314e2a 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -230,6 +230,10 @@ impl SignerDb { Ok(()) } + + fn get_sqlite_version(&self) -> Result, DBError> { + query_row(&self.db, "SELECT sqlite_version()", NO_PARAMS) + } } fn try_deserialize(s: Option) -> Result, DBError> @@ -419,4 +423,11 @@ mod tests { .expect("Failed to get signer state") .is_none()); } + + #[test] + fn test_sqlite_version() { + let db_path = tmp_db_path(); + let db = SignerDb::new(db_path).expect("Failed to create signer db"); + assert_eq!(db.get_sqlite_version().unwrap(), Some("3.45.0".to_string())); + } } From 92703a90e6689feb8110990e25dc790e273f736e Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 4 Jul 2024 19:18:54 +0300 Subject: [PATCH 0476/1400] fix test stacks signer config --- stacks-signer/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index e21e70718a..03ae047849 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -640,7 +640,7 @@ mod tests { format!( r#" Stacks node host: 127.0.0.1:20443 -Signer endpoint: [::1]:30000 +Signer endpoint: 127.0.0.1:30000 Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet From 18fd4a5f0d33fea62f4e384ae49d6ab92f3898c7 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 5 Jul 2024 16:19:51 +0300 Subject: [PATCH 0477/1400] add sqlitre version tests their role is to be aware of the current underline sqlite version in order to know the sqlite features available --- stacks-signer/src/signerdb.rs | 9 ++++----- stackslib/src/chainstate/stacks/db/mod.rs | 9 +++++++++ 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 0725314e2a..5c6b1a8870 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -230,10 +230,6 @@ impl SignerDb { Ok(()) } - - fn get_sqlite_version(&self) -> Result, DBError> { - query_row(&self.db, "SELECT sqlite_version()", NO_PARAMS) - } } fn try_deserialize(s: Option) -> Result, DBError> @@ -428,6 +424,9 @@ mod tests { fn test_sqlite_version() { let db_path = tmp_db_path(); let db = SignerDb::new(db_path).expect("Failed to create signer db"); - assert_eq!(db.get_sqlite_version().unwrap(), Some("3.45.0".to_string())); + assert_eq!( + query_row(&db.db, "SELECT sqlite_version()", NO_PARAMS).unwrap(), + Some("3.45.0".to_string()) + ); } } diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index cd76b6ee9b..9d5979466f 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2969,4 +2969,13 @@ pub mod test { }; assert!(db.supports_epoch(StacksEpochId::latest())); } + + #[test] + fn test_sqlite_version() { + let chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + assert_eq!( + query_row(chainstate.db(), "SELECT sqlite_version()", NO_PARAMS).unwrap(), + Some("3.45.0".to_string()) + ); + } } From 1456bae6b9fe030e521b1dba367f545875572c80 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 5 Jul 2024 09:21:03 -0400 Subject: [PATCH 0478/1400] CRC: test both endpoint validation and sortition check validation Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 17 ++++++++-- stacks-signer/src/v0/signer.rs | 4 +-- testnet/stacks-node/src/tests/signer/v0.rs | 39 +++++++++++++++------- 3 files changed, 44 insertions(+), 16 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index f2a7623622..d15f566e16 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -224,7 +224,9 @@ RejectCodeTypePrefix { /// The block was rejected in a prior round RejectedInPriorRound = 2, /// The block was rejected due to no sortition view - NoSortitionView = 3 + NoSortitionView = 3, + /// The block was rejected due to a mismatch with expected sortition view + SortitionViewMismatch = 4 }); impl TryFrom for RejectCodeTypePrefix { @@ -243,6 +245,7 @@ impl From<&RejectCode> for RejectCodeTypePrefix { RejectCode::ConnectivityIssues => RejectCodeTypePrefix::ConnectivityIssues, RejectCode::RejectedInPriorRound => RejectCodeTypePrefix::RejectedInPriorRound, RejectCode::NoSortitionView => RejectCodeTypePrefix::NoSortitionView, + RejectCode::SortitionViewMismatch => RejectCodeTypePrefix::SortitionViewMismatch, } } } @@ -258,6 +261,8 @@ pub enum RejectCode { ConnectivityIssues, /// The block was rejected in a prior round RejectedInPriorRound, + /// The block was rejected due to a mismatch with expected sortition view + SortitionViewMismatch, } define_u8_enum!( @@ -427,7 +432,8 @@ impl StacksMessageCodec for RejectCode { RejectCode::ValidationFailed(code) => write_next(fd, &(*code as u8))?, RejectCode::ConnectivityIssues | RejectCode::RejectedInPriorRound - | RejectCode::NoSortitionView => { + | RejectCode::NoSortitionView + | RejectCode::SortitionViewMismatch => { // No additional data to serialize / deserialize } }; @@ -449,6 +455,7 @@ impl StacksMessageCodec for RejectCode { RejectCodeTypePrefix::ConnectivityIssues => RejectCode::ConnectivityIssues, RejectCodeTypePrefix::RejectedInPriorRound => RejectCode::RejectedInPriorRound, RejectCodeTypePrefix::NoSortitionView => RejectCode::NoSortitionView, + RejectCodeTypePrefix::SortitionViewMismatch => RejectCode::SortitionViewMismatch, }; Ok(code) } @@ -470,6 +477,12 @@ impl std::fmt::Display for RejectCode { RejectCode::NoSortitionView => { write!(f, "The block was rejected due to no sortition view.") } + RejectCode::SortitionViewMismatch => { + write!( + f, + "The block was rejected due to a mismatch with expected sortition view." + ) + } } } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index fdaf7b59af..54af51b444 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,7 +15,7 @@ use std::fmt::Debug; use std::sync::mpsc::Sender; -use blockstack_lib::net::api::postblock_proposal::{BlockValidateResponse, ValidateRejectCode}; +use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::PrivateKey; use clarity::util::hash::MerkleHashFunc; @@ -309,7 +309,7 @@ impl Signer { ); Some(BlockResponse::rejected( block_proposal.block.header.signer_signature_hash(), - RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), + RejectCode::SortitionViewMismatch, )) } // Block proposal passed check, still don't know if valid diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5266a60ae5..ba753a7f0a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -265,17 +265,23 @@ fn block_proposal_rejection() { header: NakamotoBlockHeader::empty(), txs: vec![], }; - // We want to force the signer to submit the block to the node for validation - // Must set the pox treatment validly and consensus hash validlty to prevent early termination + + // First propose a block to the signers that does not have the correct consensus hash or BitVec. This should be rejected BEFORE + // the block is submitted to the node for validation. + let block_signer_signature_hash_1 = block.header.signer_signature_hash(); + signer_test.propose_block(0, 1, block.clone()); + + // Propose a block to the signers that passes initial checks but will be rejected by the stacks node block.header.pox_treatment = BitVec::ones(1).unwrap(); block.header.consensus_hash = view.cur_sortition.consensus_hash; - let block_signer_signature_hash = block.header.signer_signature_hash(); - signer_test.propose_block(0, 1, block); + let block_signer_signature_hash_2 = block.header.signer_signature_hash(); + signer_test.propose_block(0, 2, block); info!("------------------------- Test Block Proposal Rejected -------------------------"); + // Verify the signers rejected the second block via the endpoint let rejected_block_hash = signer_test.wait_for_validate_reject_response(short_timeout); - assert_eq!(rejected_block_hash, block_signer_signature_hash); + assert_eq!(rejected_block_hash, block_signer_signature_hash_2); let mut stackerdb = StackerDB::new( &signer_test.running_nodes.conf.node.rpc_bind, @@ -293,7 +299,9 @@ fn block_proposal_rejection() { assert_eq!(signer_slot_ids.len(), num_signers); let start_polling = Instant::now(); - 'poll: loop { + let mut found_signer_signature_hash_1 = false; + let mut found_signer_signature_hash_2 = false; + while !found_signer_signature_hash_1 && !found_signer_signature_hash_2 { std::thread::sleep(Duration::from_secs(1)); let messages: Vec = StackerDB::get_messages( stackerdb @@ -309,16 +317,23 @@ fn block_proposal_rejection() { signer_signature_hash, })) = message { - assert_eq!(signer_signature_hash, block_signer_signature_hash); - assert!(matches!(reason_code, RejectCode::ValidationFailed(_))); - break 'poll; + if signer_signature_hash == block_signer_signature_hash_1 { + found_signer_signature_hash_1 = true; + assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); + } else if signer_signature_hash == block_signer_signature_hash_2 { + found_signer_signature_hash_2 = true; + assert!(matches!(reason_code, RejectCode::ValidationFailed(_))); + } else { + panic!("Unexpected signer signature hash"); + } } else { panic!("Unexpected message type"); } } - if start_polling.elapsed() > short_timeout { - panic!("Timed out after waiting for response from signer"); - } + assert!( + start_polling.elapsed() <= short_timeout, + "Timed out after waiting for response from signer" + ); } signer_test.shutdown(); } From f3688bc36ba2a82339117cbcc8cbc41344a22161 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Sat, 6 Jul 2024 15:13:22 +0300 Subject: [PATCH 0479/1400] stacks-signer test add support for both ipv4 and ipv6 --- stacks-signer/src/config.rs | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 03ae047849..a7a92a5ad5 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -635,10 +635,8 @@ mod tests { fn test_config_to_string() { let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); let config_str = config.config_to_log_string(); - assert_eq!( - config_str, - format!( - r#" + + let expected_str_v4 = r#" Stacks node host: 127.0.0.1:20443 Signer endpoint: 127.0.0.1:30000 Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ @@ -647,8 +645,23 @@ Network: testnet Database path: :memory: DKG transaction fee: 0.01 uSTX Metrics endpoint: 0.0.0.0:9090 -"# - ) +"#; + + let expected_str_v6 = r#" +Stacks node host: 127.0.0.1:20443 +Signer endpoint: [::1]:30000 +Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ +Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 +Network: testnet +Database path: :memory: +DKG transaction fee: 0.01 uSTX +Metrics endpoint: 0.0.0.0:9090 +"#; + + assert!( + config_str == expected_str_v4 || config_str == expected_str_v6, + "Config string does not match expected output. Actual:\n{}", + config_str ); } From 0e3a275c553c0a539e25e63437ba13e60aaf8aa7 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 8 Jul 2024 17:53:50 +0300 Subject: [PATCH 0480/1400] move rusqlite dependency to the workspace --- Cargo.toml | 1 + clarity/Cargo.toml | 6 +----- stacks-common/Cargo.toml | 6 +----- stacks-signer/Cargo.toml | 5 +---- stackslib/Cargo.toml | 5 +---- testnet/stacks-node/Cargo.toml | 5 +---- 6 files changed, 6 insertions(+), 22 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index feab983833..8ac168f1f7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" wsts = { version = "9.0.0", default-features = false } +rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } # Use a bit more than default optimization for # dev builds to speed up test execution diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 4e563ba99b..2e35c06473 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -31,16 +31,12 @@ stacks_common = { package = "stacks-common", path = "../stacks-common", optional rstest = "0.17.0" rstest_reuse = "0.5.0" hashbrown = { workspace = true } +rusqlite = { workspace = true, optional = true} [dependencies.serde_json] version = "1.0" features = ["arbitrary_precision", "unbounded_depth"] -[dependencies.rusqlite] -version = "0.31.0" -optional = true -features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] - [dependencies.time] version = "0.2.23" features = ["std"] diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 4b9c290a1d..57c1407fa8 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -33,6 +33,7 @@ chrono = "0.4.19" libc = "0.2.82" wsts = { workspace = true } hashbrown = { workspace = true } +rusqlite = { workspace = true, optional = true } [target.'cfg(unix)'.dependencies] nix = "0.23" @@ -51,11 +52,6 @@ features = ["arbitrary_precision", "unbounded_depth"] version = "0.24.3" features = ["serde", "recovery"] -[dependencies.rusqlite] -version = "0.31.0" -optional = true -features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] - [dependencies.ed25519-dalek] workspace = true diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 5d51c2b101..1d1af6da78 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -46,16 +46,13 @@ tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = { workspace = true } rand = { workspace = true } url = "2.1.0" +rusqlite = { workspace = true } [dev-dependencies] clarity = { path = "../clarity", features = ["testing"] } polynomial = "0.2.6" num-traits = "0.2.18" -[dependencies.rusqlite] -version = "0.31.0" -features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] - [dependencies.serde_json] version = "1.0" features = ["arbitrary_precision", "unbounded_depth"] diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index f91af55530..b7967fe249 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -58,6 +58,7 @@ libstackerdb = { path = "../libstackerdb" } siphasher = "0.3.7" wsts = { workspace = true } hashbrown = { workspace = true } +rusqlite = { workspace = true } [target.'cfg(not(any(target_os = "macos",target_os="windows", target_arch = "arm" )))'.dependencies] tikv-jemallocator = {workspace = true} @@ -79,10 +80,6 @@ features = ["arbitrary_precision", "unbounded_depth"] version = "0.24.3" features = ["serde", "recovery"] -[dependencies.rusqlite] -version = "0.31.0" -features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] - [dependencies.ed25519-dalek] workspace = true diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index a9e5db6c7f..ba674dbaac 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -31,6 +31,7 @@ wsts = { workspace = true } rand = { workspace = true } rand_core = { workspace = true } hashbrown = { workspace = true } +rusqlite = { workspace = true } [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} @@ -49,10 +50,6 @@ tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = {workspace = true} mutants = "0.0.3" -[dependencies.rusqlite] -version = "0.31.0" -features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] - [[bin]] name = "stacks-node" path = "src/main.rs" From 97bace46a5f20c46fce42f139fe4f79cb973b32c Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 8 Jul 2024 18:01:21 +0300 Subject: [PATCH 0481/1400] remove explicit type for variable after call of params! macro - it is known the type is always `&[&dyn ToSql]` --- clarity/src/vm/database/sqlite.rs | 10 +- stackslib/src/burnchains/bitcoin/spv.rs | 6 +- stackslib/src/burnchains/db.rs | 46 +++++----- stackslib/src/burnchains/tests/db.rs | 2 +- stackslib/src/chainstate/burn/db/sortdb.rs | 92 +++++++++---------- stackslib/src/chainstate/nakamoto/mod.rs | 10 +- .../src/chainstate/nakamoto/staging_blocks.rs | 12 +-- stackslib/src/chainstate/nakamoto/tenure.rs | 12 +-- .../src/chainstate/nakamoto/tests/mod.rs | 2 +- .../src/chainstate/stacks/db/accounts.rs | 12 +-- stackslib/src/chainstate/stacks/db/blocks.rs | 90 +++++++++--------- stackslib/src/chainstate/stacks/db/headers.rs | 14 +-- stackslib/src/chainstate/stacks/db/mod.rs | 8 +- .../src/chainstate/stacks/index/trie_sql.rs | 18 ++-- stackslib/src/clarity_vm/database/mod.rs | 6 +- stackslib/src/core/mempool.rs | 44 ++++----- stackslib/src/main.rs | 2 +- stackslib/src/net/atlas/db.rs | 10 +- stackslib/src/net/db.rs | 48 +++++----- stackslib/src/net/stackerdb/db.rs | 34 +++---- stackslib/src/util_lib/bloom.rs | 2 +- 21 files changed, 240 insertions(+), 240 deletions(-) diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index 361bf337a2..dc3ad4f5bd 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -44,7 +44,7 @@ pub struct SqliteConnection { } fn sqlite_put(conn: &Connection, key: &str, value: &str) -> Result<()> { - let params: &[&dyn ToSql] = params![key, value]; + let params = params![key, value]; match conn.execute("REPLACE INTO data_table (key, value) VALUES (?, ?)", params) { Ok(_) => Ok(()), Err(e) => { @@ -56,7 +56,7 @@ fn sqlite_put(conn: &Connection, key: &str, value: &str) -> Result<()> { fn sqlite_get(conn: &Connection, key: &str) -> Result> { trace!("sqlite_get {}", key); - let params: &[&dyn ToSql] = params![key]; + let params = params![key]; let res = match conn .query_row( "SELECT value FROM data_table WHERE key = ?", @@ -153,7 +153,7 @@ impl SqliteConnection { value: &str, ) -> Result<()> { let key = format!("clr-meta::{}::{}", contract_hash, key); - let params: &[&dyn ToSql] = params![bhh, key, value]; + let params = params![bhh, key, value]; if let Err(e) = conn.execute( "INSERT INTO metadata_table (blockhash, key, value) VALUES (?, ?, ?)", @@ -176,7 +176,7 @@ impl SqliteConnection { from: &StacksBlockId, to: &StacksBlockId, ) -> Result<()> { - let params: &[&dyn ToSql] = params![to, from]; + let params = params![to, from]; if let Err(e) = conn.execute( "UPDATE metadata_table SET blockhash = ? WHERE blockhash = ?", params, @@ -205,7 +205,7 @@ impl SqliteConnection { key: &str, ) -> Result> { let key = format!("clr-meta::{}::{}", contract_hash, key); - let params: &[&dyn ToSql] = params![bhh, key]; + let params = params![bhh, key]; match conn .query_row( diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index 71c167332f..82cbb7b7f6 100644 --- a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -424,7 +424,7 @@ impl SpvClient { } let tx = self.tx_begin()?; - let args: &[&dyn ToSql] = params![u64_to_sql(interval)?, work.to_hex_be()]; + let args = params![u64_to_sql(interval)?, work.to_hex_be()]; tx.execute( "INSERT OR REPLACE INTO chain_work (interval,work) VALUES (?1,?2)", args, @@ -707,7 +707,7 @@ impl SpvClient { let mut headers = vec![]; let sql_query = "SELECT * FROM headers WHERE height >= ?1 AND height < ?2 ORDER BY height"; - let sql_args: &[&dyn ToSql] = params![u64_to_sql(start_block)?, u64_to_sql(end_block)?]; + let sql_args = params![u64_to_sql(start_block)?, u64_to_sql(end_block)?]; let mut stmt = self .headers_db @@ -749,7 +749,7 @@ impl SpvClient { let sql = "INSERT OR REPLACE INTO headers (version, prev_blockhash, merkle_root, time, bits, nonce, height, hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)"; - let args: &[&dyn ToSql] = params![ + let args = params![ header.version, header.prev_blockhash, header.merkle_root, diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 10d928fcf8..aa3725677d 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -322,7 +322,7 @@ impl<'a> BurnchainDBTransaction<'a> { let sql = "INSERT OR IGNORE INTO burnchain_db_block_headers (block_height, block_hash, parent_block_hash, num_txs, timestamp) VALUES (?, ?, ?, ?, ?)"; - let args: &[&dyn ToSql] = params![ + let args = params![ u64_to_sql(header.block_height)?, header.block_hash, header.parent_block_hash, @@ -347,7 +347,7 @@ impl<'a> BurnchainDBTransaction<'a> { ) -> Result { let weight = affirmation_map.weight(); let sql = "INSERT INTO affirmation_maps (affirmation_map,weight) VALUES (?1,?2)"; - let args: &[&dyn ToSql] = params![affirmation_map.encode(), u64_to_sql(weight)?]; + let args = params![affirmation_map.encode(), u64_to_sql(weight)?]; match self.sql_tx.execute(sql, args) { Ok(_) => { let am_id = BurnchainDB::get_affirmation_map_id(&self.sql_tx, &affirmation_map)? @@ -368,7 +368,7 @@ impl<'a> BurnchainDBTransaction<'a> { affirmation_id: u64, ) -> Result<(), DBError> { let sql = "UPDATE block_commit_metadata SET affirmation_id = ?1, anchor_block_descendant = ?2 WHERE burn_block_hash = ?3 AND txid = ?4"; - let args: &[&dyn ToSql] = params![ + let args = params![ u64_to_sql(affirmation_id)?, opt_u64_to_sql(anchor_block_descendant)?, block_commit.burn_header_hash, @@ -391,13 +391,13 @@ impl<'a> BurnchainDBTransaction<'a> { target_reward_cycle: u64, ) -> Result<(), DBError> { let sql = "INSERT OR REPLACE INTO anchor_blocks (reward_cycle) VALUES (?1)"; - let args: &[&dyn ToSql] = params![u64_to_sql(target_reward_cycle)?]; + let args = params![u64_to_sql(target_reward_cycle)?]; self.sql_tx .execute(sql, args) .map_err(|e| DBError::SqliteError(e))?; let sql = "UPDATE block_commit_metadata SET anchor_block = ?1 WHERE burn_block_hash = ?2 AND txid = ?3"; - let args: &[&dyn ToSql] = params![ + let args = params![ u64_to_sql(target_reward_cycle)?, block_commit.burn_header_hash, block_commit.txid, @@ -421,7 +421,7 @@ impl<'a> BurnchainDBTransaction<'a> { /// Unmark all block-commit(s) that were anchor block(s) for this reward cycle. pub fn clear_anchor_block(&self, reward_cycle: u64) -> Result<(), DBError> { let sql = "UPDATE block_commit_metadata SET anchor_block = NULL WHERE anchor_block = ?1"; - let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?]; + let args = params![u64_to_sql(reward_cycle)?]; self.sql_tx .execute(sql, args) .map(|_| ()) @@ -878,7 +878,7 @@ impl<'a> BurnchainDBTransaction<'a> { (burn_block_hash, txid, block_height, vtxindex, anchor_block, anchor_block_descendant, affirmation_id) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"; let mut stmt = self.sql_tx.prepare(commit_metadata_sql)?; - let args: &[&dyn ToSql] = params![ + let args = params![ bcm.burn_block_hash, bcm.txid, u64_to_sql(bcm.block_height)?, @@ -904,7 +904,7 @@ impl<'a> BurnchainDBTransaction<'a> { for op in block_ops.iter() { let serialized_op = serde_json::to_string(op).expect("Failed to serialize parsed BlockstackOp"); - let args: &[&dyn ToSql] = + let args = params![block_header.block_hash, op.txid_ref(), serialized_op]; stmt.execute(args)?; } @@ -960,7 +960,7 @@ impl<'a> BurnchainDBTransaction<'a> { assert_eq!((affirmation_map.len() as u64) + 1, reward_cycle); let qry = "INSERT OR REPLACE INTO overrides (reward_cycle, affirmation_map) VALUES (?1, ?2)"; - let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?, affirmation_map.encode()]; + let args = params![u64_to_sql(reward_cycle)?, affirmation_map.encode()]; let mut stmt = self.sql_tx.prepare(qry)?; stmt.execute(args)?; @@ -969,7 +969,7 @@ impl<'a> BurnchainDBTransaction<'a> { pub fn clear_override_affirmation_map(&self, reward_cycle: u64) -> Result<(), DBError> { let qry = "DELETE FROM overrides WHERE reward_cycle = ?1"; - let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?]; + let args = params![u64_to_sql(reward_cycle)?]; let mut stmt = self.sql_tx.prepare(qry)?; stmt.execute(args)?; @@ -1166,7 +1166,7 @@ impl BurnchainDB { ) -> Option { let qry = "SELECT DISTINCT op FROM burnchain_db_block_ops WHERE txid = ?1 AND block_hash = ?2"; - let args: &[&dyn ToSql] = params![txid, burn_header_hash]; + let args = params![txid, burn_header_hash]; match query_row(conn, qry, args) { Ok(res) => res, @@ -1185,7 +1185,7 @@ impl BurnchainDB { txid: &Txid, ) -> Option { let qry = "SELECT DISTINCT op FROM burnchain_db_block_ops WHERE txid = ?1"; - let args: &[&dyn ToSql] = params![txid]; + let args = params![txid]; let ops: Vec = query_rows(&self.conn, qry, args).expect("FATAL: burnchain DB query error"); @@ -1256,7 +1256,7 @@ impl BurnchainDB { affirmation_id: u64, ) -> Result, DBError> { let sql = "SELECT affirmation_map FROM affirmation_maps WHERE affirmation_id = ?1"; - let args: &[&dyn ToSql] = params![&u64_to_sql(affirmation_id)?]; + let args = params![&u64_to_sql(affirmation_id)?]; query_row(conn, sql, args) } @@ -1265,7 +1265,7 @@ impl BurnchainDB { affirmation_id: u64, ) -> Result, DBError> { let sql = "SELECT weight FROM affirmation_maps WHERE affirmation_id = ?1"; - let args: &[&dyn ToSql] = params![&u64_to_sql(affirmation_id)?]; + let args = params![&u64_to_sql(affirmation_id)?]; query_row(conn, sql, args) } @@ -1274,7 +1274,7 @@ impl BurnchainDB { affirmation_map: &AffirmationMap, ) -> Result, DBError> { let sql = "SELECT affirmation_id FROM affirmation_maps WHERE affirmation_map = ?1"; - let args: &[&dyn ToSql] = params![&affirmation_map.encode()]; + let args = params![&affirmation_map.encode()]; query_row(conn, sql, args) } @@ -1284,7 +1284,7 @@ impl BurnchainDB { txid: &Txid, ) -> Result, DBError> { let sql = "SELECT affirmation_id FROM block_commit_metadata WHERE burn_block_hash = ?1 AND txid = ?2"; - let args: &[&dyn ToSql] = params![burn_header_hash, txid]; + let args = params![burn_header_hash, txid]; query_row(conn, sql, args) } @@ -1305,13 +1305,13 @@ impl BurnchainDB { txid: &Txid, ) -> Result { let sql = "SELECT 1 FROM block_commit_metadata WHERE anchor_block IS NOT NULL AND burn_block_hash = ?1 AND txid = ?2"; - let args: &[&dyn ToSql] = params![burn_header_hash, txid]; + let args = params![burn_header_hash, txid]; query_row(conn, sql, args)?.ok_or(DBError::NotFoundError) } pub fn has_anchor_block(conn: &DBConn, reward_cycle: u64) -> Result { let sql = "SELECT 1 FROM block_commit_metadata WHERE anchor_block = ?1"; - let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?]; + let args = params![u64_to_sql(reward_cycle)?]; Ok(query_row::(conn, sql, args)?.is_some()) } @@ -1320,7 +1320,7 @@ impl BurnchainDB { reward_cycle: u64, ) -> Result, DBError> { let sql = "SELECT * FROM block_commit_metadata WHERE anchor_block = ?1"; - let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?]; + let args = params![u64_to_sql(reward_cycle)?]; let metadatas: Vec = query_rows(conn, sql, args)?; Ok(metadatas) @@ -1332,7 +1332,7 @@ impl BurnchainDB { reward_cycle: u64, ) -> Result, DBError> { let sql = "SELECT * FROM block_commit_metadata WHERE anchor_block = ?1"; - let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?]; + let args = params![u64_to_sql(reward_cycle)?]; let metadatas: Vec = query_rows(conn, sql, args)?; for metadata in metadatas { @@ -1373,7 +1373,7 @@ impl BurnchainDB { ) -> Result, DBError> { let sql = "SELECT * FROM block_commit_metadata WHERE anchor_block = ?1 AND burn_block_hash = ?2"; - let args: &[&dyn ToSql] = params![u64_to_sql(reward_cycle)?, anchor_block_burn_header_hash]; + let args = params![u64_to_sql(reward_cycle)?, anchor_block_burn_header_hash]; if let Some(commit_metadata) = query_row::(conn, sql, args)? { let commit = BurnchainDB::get_block_commit( conn, @@ -1451,7 +1451,7 @@ impl BurnchainDB { vtxindex: u16, ) -> Result, DBError> { let qry = "SELECT txid FROM block_commit_metadata WHERE block_height = ?1 AND vtxindex = ?2 AND burn_block_hash = ?3"; - let args: &[&dyn ToSql] = params![block_ptr, vtxindex, header_hash]; + let args = params![block_ptr, vtxindex, header_hash]; let txid = match query_row(&conn, qry, args) { Ok(Some(txid)) => txid, Ok(None) => { @@ -1497,7 +1497,7 @@ impl BurnchainDB { burn_block_hash: &BurnchainHeaderHash, txid: &Txid, ) -> Result, DBError> { - let args: &[&dyn ToSql] = params![burn_block_hash, txid]; + let args = params![burn_block_hash, txid]; query_row_panic( conn, "SELECT * FROM block_commit_metadata WHERE burn_block_hash = ?1 AND txid = ?2", diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index 1ab35bb2c6..f14243d049 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -57,7 +57,7 @@ impl BurnchainDB { use rusqlite::params; let sql = "SELECT op FROM burnchain_db_block_ops WHERE block_hash = ?1"; - let args: &[&dyn ToSql] = params![block_hash]; + let args = params![block_hash]; let mut ops: Vec = query_rows(&self.conn, sql, args)?; ops.sort_by(|a, b| a.vtxindex().cmp(&b.vtxindex())); Ok(ops) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 0652af304c..c811c210ee 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -861,7 +861,7 @@ pub fn get_block_commit_by_txid( txid: &Txid, ) -> Result, db_error> { let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND txid = ?2 LIMIT 1"; - let args: &[&dyn ToSql] = params![sort_id, txid]; + let args = params![sort_id, txid]; query_row(conn, qry, args) } @@ -1206,7 +1206,7 @@ impl<'a> SortitionHandleTx<'a> { }; let qry = "SELECT * FROM leader_keys WHERE sortition_id = ?1 AND block_height = ?2 AND vtxindex = ?3 LIMIT 2"; - let args: &[&dyn ToSql] = params![ + let args = params![ ancestor_snapshot.sortition_id, u64_to_sql(key_block_height)?, key_vtxindex, @@ -1675,7 +1675,7 @@ impl<'a> SortitionHandleTx<'a> { sortition_id: &SortitionId, ) -> Result<(Vec, u128), db_error> { let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; - let args: &[&dyn ToSql] = params![sortition_id]; + let args = params![sortition_id]; let pox_addrs_json: String = query_row(self, sql, args)?.ok_or(db_error::NotFoundError)?; let pox_addrs: (Vec, u128) = @@ -1770,7 +1770,7 @@ impl<'a> SortitionHandleTx<'a> { stacks_block_height: u64, ) -> Result<(), db_error> { let sql = "INSERT OR REPLACE INTO stacks_chain_tips (sortition_id,consensus_hash,block_hash,block_height) VALUES (?1,?2,?3,?4)"; - let args: &[&dyn ToSql] = params![ + let args = params![ sort_id, consensus_hash, stacks_block_hash, @@ -1820,7 +1820,7 @@ impl<'a> SortitionHandleTx<'a> { // in epoch 2.x, where we track canonical stacks tip via the sortition DB let arrival_index = SortitionDB::get_max_arrival_index(self)?; - let args: &[&dyn ToSql] = params![ + let args = params![ u64_to_sql(stacks_block_height)?, u64_to_sql(arrival_index + 1)?, consensus_hash, @@ -2540,7 +2540,7 @@ impl<'a> SortitionHandleConn<'a> { sortition_id: &SortitionId, ) -> Result<(Vec, u128), db_error> { let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; - let args: &[&dyn ToSql] = params![sortition_id]; + let args = params![sortition_id]; let pox_addrs_json: String = query_row(self, sql, args)?.ok_or(db_error::NotFoundError)?; let pox_addrs: (Vec, u128) = @@ -2850,7 +2850,7 @@ impl SortitionDB { ) -> Result<(), db_error> { let epochs = StacksEpoch::validate_epochs(epochs); for epoch in epochs.into_iter() { - let args: &[&dyn ToSql] = params![ + let args = params![ (epoch.epoch_id as u32), u64_to_sql(epoch.start_height)?, u64_to_sql(epoch.end_height)?, @@ -2924,7 +2924,7 @@ impl SortitionDB { info!("Replace existing epochs with new epochs"); db_tx.execute("DELETE FROM epochs;", NO_PARAMS)?; for epoch in epochs.into_iter() { - let args: &[&dyn ToSql] = params![ + let args = params![ (epoch.epoch_id as u32), u64_to_sql(epoch.start_height)?, u64_to_sql(epoch.end_height)?, @@ -2946,7 +2946,7 @@ impl SortitionDB { sortition_id: &SortitionId, ) -> Result, db_error> { let qry = "SELECT * FROM block_commits WHERE txid = ?1 AND sortition_id = ?2"; - let args: &[&dyn ToSql] = params![txid, sortition_id]; + let args = params![txid, sortition_id]; query_row(conn, qry, args) } @@ -2958,7 +2958,7 @@ impl SortitionDB { sortition_id: &SortitionId, ) -> Result, db_error> { let qry = "SELECT parent_sortition_id AS sortition_id FROM block_commit_parents WHERE block_commit_parents.block_commit_txid = ?1 AND block_commit_parents.block_commit_sortition_id = ?2"; - let args: &[&dyn ToSql] = params![txid, sortition_id]; + let args = params![txid, sortition_id]; query_row(conn, qry, args) } @@ -3109,9 +3109,9 @@ impl SortitionDB { tx.execute_batch(sql_exec)?; } - let typical_rules: &[&dyn ToSql] = params![(ASTRules::Typical as u8), 0i64]; + let typical_rules = params![(ASTRules::Typical as u8), 0i64]; - let precheck_size_rules: &[&dyn ToSql] = params![ + let precheck_size_rules = params![ (ASTRules::PrecheckSize as u8), u64_to_sql(AST_RULES_PRECHECK_SIZE)?, ]; @@ -3209,7 +3209,7 @@ impl SortitionDB { // skip if this step was done if table_exists(&tx, "stacks_chain_tips")? { let sql = "SELECT 1 FROM stacks_chain_tips WHERE sortition_id = ?1"; - let args: &[&dyn ToSql] = params![canonical_tip.sortition_id]; + let args = params![canonical_tip.sortition_id]; if let Ok(Some(_)) = query_row::(&tx, sql, args) { info!("`stacks_chain_tips` appears to have been populated already; skipping this step"); return Ok(()); @@ -3225,7 +3225,7 @@ impl SortitionDB { ); for snapshot in snapshots.into_iter() { let sql = "INSERT OR REPLACE INTO stacks_chain_tips (sortition_id,consensus_hash,block_hash,block_height) VALUES (?1,?2,?3,?4)"; - let args: &[&dyn ToSql] = params![ + let args = params![ snapshot.sortition_id, snapshot.canonical_stacks_tip_consensus_hash, snapshot.canonical_stacks_tip_hash, @@ -3453,7 +3453,7 @@ impl SortitionDB { ast_rules: ASTRules, height: u64, ) -> Result<(), db_error> { - let rules: &[&dyn ToSql] = params![u64_to_sql(height)?, (ast_rules as u8)]; + let rules = params![u64_to_sql(height)?, (ast_rules as u8)]; tx.execute( "UPDATE ast_rule_heights SET block_height = ?1 WHERE ast_rule_id = ?2", @@ -3506,7 +3506,7 @@ impl SortitionDB { } let sql = "REPLACE INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; let rc_json = serde_json::to_string(rc_info).map_err(db_error::SerializationError)?; - let args: &[&dyn ToSql] = params![sortition_id, rc_json]; + let args = params![sortition_id, rc_json]; sort_tx.execute(sql, args)?; Ok(()) } @@ -3589,7 +3589,7 @@ impl SortitionDB { sortition_id: &SortitionId, ) -> Result, db_error> { let sql = "SELECT reward_set FROM preprocessed_reward_sets WHERE sortition_id = ?1"; - let args: &[&dyn ToSql] = params![sortition_id]; + let args = params![sortition_id]; let reward_set_opt: Option = sortdb.query_row(sql, args, |row| row.get(0)).optional()?; @@ -3821,7 +3821,7 @@ impl<'a> SortitionDBConn<'a> { sortition_id: &SortitionId, ) -> Result<(Vec, u128), db_error> { let sql = "SELECT pox_payouts FROM snapshots WHERE sortition_id = ?1"; - let args: &[&dyn ToSql] = params![sortition_id]; + let args = params![sortition_id]; let pox_addrs_json: String = query_row(self.conn(), sql, args)?.ok_or(db_error::NotFoundError)?; @@ -4028,7 +4028,7 @@ impl SortitionDB { stacks_block_accepted: Option, ) -> Result<(), BurnchainError> { if let Some(stacks_block_accepted) = stacks_block_accepted { - let args: &[&dyn ToSql] = params![ + let args = params![ sortition_id, u64_to_sql(canonical_stacks_height)?, canonical_stacks_bhh, @@ -4040,7 +4040,7 @@ impl SortitionDB { args )?; } else { - let args: &[&dyn ToSql] = params![ + let args = params![ sortition_id, u64_to_sql(canonical_stacks_height)?, canonical_stacks_bhh, @@ -4607,7 +4607,7 @@ impl SortitionDB { burnchain_header_hash: &BurnchainHeaderHash, ) -> Result, db_error> { let sql = "SELECT parent_burn_header_hash AS burn_header_hash FROM snapshots WHERE burn_header_hash = ?1"; - let args: &[&dyn ToSql] = params![burnchain_header_hash]; + let args = params![burnchain_header_hash]; let mut rows = query_rows::(conn, sql, args)?; // there can be more than one if there was a PoX reorg. If so, make sure they're _all the @@ -4882,7 +4882,7 @@ impl SortitionDB { conn: &Connection, ) -> Result<(u64, BurnchainHeaderHash), db_error> { let sql = "SELECT block_height, burn_header_hash FROM snapshots WHERE consensus_hash = ?1"; - let args: &[&dyn ToSql] = params![ConsensusHash::empty()]; + let args = params![ConsensusHash::empty()]; let mut stmt = conn.prepare(sql)?; let mut rows = stmt.query(args)?; while let Some(row) = rows.next()? { @@ -4970,7 +4970,7 @@ impl SortitionDB { sortition: &SortitionId, ) -> Result, db_error> { let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 ORDER BY vtxindex ASC"; - let args: &[&dyn ToSql] = params![sortition]; + let args = params![sortition]; query_rows(conn, qry, args) } @@ -4982,7 +4982,7 @@ impl SortitionDB { sortition: &SortitionId, ) -> Result, db_error> { let qry = "SELECT * FROM missed_commits WHERE intended_sortition_id = ?1"; - let args: &[&dyn ToSql] = params![sortition]; + let args = params![sortition]; query_rows(conn, qry, args) } @@ -4994,7 +4994,7 @@ impl SortitionDB { sortition: &SortitionId, ) -> Result, db_error> { let qry = "SELECT * FROM leader_keys WHERE sortition_id = ?1 ORDER BY vtxindex ASC"; - let args: &[&dyn ToSql] = params![sortition]; + let args = params![sortition]; query_rows(conn, qry, args) } @@ -5008,7 +5008,7 @@ impl SortitionDB { let qry = "SELECT vtxindex FROM block_commits WHERE sortition_id = ?1 AND txid = ( SELECT winning_block_txid FROM snapshots WHERE sortition_id = ?2 LIMIT 1) LIMIT 1"; - let args: &[&dyn ToSql] = params![sortition, sortition]; + let args = params![sortition, sortition]; conn.query_row(qry, args, |row| row.get(0)) .optional() .map_err(db_error::from) @@ -5090,7 +5090,7 @@ impl SortitionDB { assert!(block_height < BLOCK_HEIGHT_MAX); let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND block_height = ?2 AND vtxindex = ?3 LIMIT 2"; - let args: &[&dyn ToSql] = params![sortition, u64_to_sql(block_height)?, vtxindex]; + let args = params![sortition, u64_to_sql(block_height)?, vtxindex]; query_row_panic(conn, qry, args, || { format!( "Multiple parent blocks at {},{} in {}", @@ -5119,7 +5119,7 @@ impl SortitionDB { }; let qry = "SELECT * FROM leader_keys WHERE sortition_id = ?1 AND block_height = ?2 AND vtxindex = ?3 LIMIT 2"; - let args: &[&dyn ToSql] = params![ + let args = params![ ancestor_snapshot.sortition_id, u64_to_sql(key_block_height)?, key_vtxindex, @@ -5157,7 +5157,7 @@ impl SortitionDB { }; let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND block_header_hash = ?2 AND txid = ?3"; - let args: &[&dyn ToSql] = params![sortition_id, block_hash, winning_txid]; + let args = params![sortition_id, block_hash, winning_txid]; query_row_panic(conn, qry, args, || { format!("FATAL: multiple block commits for {}", &block_hash) }) @@ -5213,7 +5213,7 @@ impl SortitionDB { ) -> Result, db_error> { let sql = "SELECT * FROM epochs WHERE start_block_height <= ?1 AND ?2 < end_block_height LIMIT 1"; - let args: &[&dyn ToSql] = params![ + let args = params![ u64_to_sql(burn_block_height)?, u64_to_sql(burn_block_height)?, ]; @@ -5244,7 +5244,7 @@ impl SortitionDB { epoch_id: &StacksEpochId, ) -> Result, db_error> { let sql = "SELECT * FROM epochs WHERE epoch_id = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = params![*epoch_id as u32]; + let args = params![*epoch_id as u32]; query_row(conn, sql, args) } @@ -5481,7 +5481,7 @@ impl<'a> SortitionHandleTx<'a> { let create = "CREATE TABLE IF NOT EXISTS snapshot_burn_distributions (sortition_id TEXT PRIMARY KEY, data TEXT NOT NULL);"; self.execute(create, NO_PARAMS).unwrap(); let sql = "INSERT INTO snapshot_burn_distributions (sortition_id, data) VALUES (?, ?)"; - let args: &[&dyn ToSql] = params![ + let args = params![ new_sortition, serde_json::to_string(&transition.burn_dist).unwrap(), ]; @@ -5502,7 +5502,7 @@ impl<'a> SortitionHandleTx<'a> { transition: &BurnchainStateTransition, ) -> Result<(), db_error> { let sql = "INSERT INTO snapshot_transition_ops (sortition_id, accepted_ops, consumed_keys) VALUES (?, ?, ?)"; - let args: &[&dyn ToSql] = params![ + let args = params![ new_sortition, serde_json::to_string(&transition.accepted_ops).unwrap(), serde_json::to_string(&transition.consumed_leader_keys).unwrap(), @@ -5593,7 +5593,7 @@ impl<'a> SortitionHandleTx<'a> { ) -> Result<(), db_error> { assert!(leader_key.block_height < BLOCK_HEIGHT_MAX); - let args: &[&dyn ToSql] = params![ + let args = params![ leader_key.txid, leader_key.vtxindex, u64_to_sql(leader_key.block_height)?, @@ -5611,7 +5611,7 @@ impl<'a> SortitionHandleTx<'a> { /// Insert a stack-stx op fn insert_stack_stx(&mut self, op: &StackStxOp) -> Result<(), db_error> { - let args: &[&dyn ToSql] = params![ + let args = params![ op.txid, op.vtxindex, u64_to_sql(op.block_height)?, @@ -5632,7 +5632,7 @@ impl<'a> SortitionHandleTx<'a> { /// Insert a delegate-stx op fn insert_delegate_stx(&mut self, op: &DelegateStxOp) -> Result<(), db_error> { - let args: &[&dyn ToSql] = params![ + let args = params![ op.txid, op.vtxindex, u64_to_sql(op.block_height)?, @@ -5654,7 +5654,7 @@ impl<'a> SortitionHandleTx<'a> { &mut self, op: &VoteForAggregateKeyOp, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = params![ + let args = params![ op.txid, op.vtxindex, u64_to_sql(op.block_height)?, @@ -5674,7 +5674,7 @@ impl<'a> SortitionHandleTx<'a> { /// Insert a transfer-stx op fn insert_transfer_stx(&mut self, op: &TransferStxOp) -> Result<(), db_error> { - let args: &[&dyn ToSql] = params![ + let args = params![ op.txid, op.vtxindex, u64_to_sql(op.block_height)?, @@ -5723,7 +5723,7 @@ impl<'a> SortitionHandleTx<'a> { } } - let args: &[&dyn ToSql] = params![ + let args = params![ block_commit.txid, block_commit.vtxindex, u64_to_sql(block_commit.block_height)?, @@ -5748,7 +5748,7 @@ impl<'a> SortitionHandleTx<'a> { self.execute("INSERT INTO block_commits (txid, vtxindex, block_height, burn_header_hash, block_header_hash, new_seed, parent_block_ptr, parent_vtxindex, key_block_ptr, key_vtxindex, memo, burn_fee, input, sortition_id, commit_outs, sunset_burn, apparent_sender, burn_parent_modulus, punished) \ VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19)", args)?; - let parent_args: &[&dyn ToSql] = params![sort_id, block_commit.txid, parent_sortition_id]; + let parent_args = params![sort_id, block_commit.txid, parent_sortition_id]; debug!( "Parent sortition of {},{},{} is {} (parent at {},{})", @@ -5776,7 +5776,7 @@ impl<'a> SortitionHandleTx<'a> { let tx_input_str = serde_json::to_string(&op.input).map_err(|e| db_error::SerializationError(e))?; - let args: &[&dyn ToSql] = params![op.txid, op.intended_sortition, tx_input_str]; + let args = params![op.txid, op.intended_sortition, tx_input_str]; self.execute( "INSERT OR REPLACE INTO missed_commits (txid, intended_sortition_id, input) \ @@ -5828,7 +5828,7 @@ impl<'a> SortitionHandleTx<'a> { } } - let args: &[&dyn ToSql] = params![ + let args = params![ u64_to_sql(snapshot.block_height)?, snapshot.burn_header_hash, u64_to_sql(snapshot.burn_header_timestamp)?, @@ -6458,7 +6458,7 @@ impl<'a> SortitionHandleTx<'a> { best_bhh: BlockHeaderHash, best_height: u64, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = params![ + let args = params![ best_chh, best_bhh, u64_to_sql(best_height)?, @@ -6730,7 +6730,7 @@ pub mod tests { let pox_payouts_json = serde_json::to_string(&pox_payout) .expect("FATAL: could not encode `total_pox_payouts` as JSON"); - let args: &[&dyn ToSql] = params![ + let args = params![ u64_to_sql(first_snapshot.block_height)?, first_snapshot.burn_header_hash, u64_to_sql(first_snapshot.burn_header_timestamp)?, @@ -6788,7 +6788,7 @@ pub mod tests { height: u64, ) -> Result<(), db_error> { let tip = SortitionDB::get_canonical_burn_chain_tip(conn)?; - let args: &[&dyn ToSql] = params![ch, bhh, u64_to_sql(height)?, tip.sortition_id]; + let args = params![ch, bhh, u64_to_sql(height)?, tip.sortition_id]; conn.execute("UPDATE snapshots SET canonical_stacks_tip_consensus_hash = ?1, canonical_stacks_tip_hash = ?2, canonical_stacks_tip_height = ?3 WHERE sortition_id = ?4", args) .map_err(db_error::SqliteError)?; @@ -6865,7 +6865,7 @@ pub mod tests { let apparent_sender_str = serde_json::to_string(sender).map_err(|e| db_error::SerializationError(e))?; let sql = "SELECT * FROM block_commits WHERE apparent_sender = ?1 ORDER BY block_height DESC LIMIT 1"; - let args: &[&dyn ToSql] = params![apparent_sender_str]; + let args = params![apparent_sender_str]; query_row(conn, sql, args) } } diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 0252e2ec19..a97b47dc88 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2289,7 +2289,7 @@ impl NakamotoChainState { block_hash: &BlockHeaderHash, ) -> Result, ChainstateError> { let sql = "SELECT processed, orphaned FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args: &[&dyn ToSql] = params![consensus_hash, block_hash]; + let args = params![consensus_hash, block_hash]; let Some((processed, orphaned)) = query_row_panic(&staging_blocks_conn, sql, args, || { "FATAL: multiple rows for the same consensus hash and block hash".to_string() }) @@ -2327,7 +2327,7 @@ impl NakamotoChainState { consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT vrf_proof FROM nakamoto_block_headers WHERE consensus_hash = ?1 AND tenure_changed = 1"; - let args: &[&dyn ToSql] = params![consensus_hash]; + let args = params![consensus_hash]; let proof_bytes: Option = query_row(chainstate_conn, sql, args)?; if let Some(bytes) = proof_bytes { let proof = VRFProof::from_hex(&bytes) @@ -2415,7 +2415,7 @@ impl NakamotoChainState { )) })?; - let args: &[&dyn ToSql] = params![ + let args = params![ u64_to_sql(*stacks_block_height)?, index_root, consensus_hash, @@ -2622,7 +2622,7 @@ impl NakamotoChainState { if applied_epoch_transition { debug!("Block {} applied an epoch transition", &index_block_hash); let sql = "INSERT INTO epoch_transitions (block_id) VALUES (?)"; - let args: &[&dyn ToSql] = params![index_block_hash]; + let args = params![index_block_hash]; headers_tx.deref_mut().execute(sql, args)?; } @@ -2639,7 +2639,7 @@ impl NakamotoChainState { reward_set: &RewardSet, ) -> Result<(), ChainstateError> { let sql = "INSERT INTO nakamoto_reward_sets (index_block_hash, reward_set) VALUES (?, ?)"; - let args: &[&dyn ToSql] = params![block_id, reward_set.metadata_serialize(),]; + let args = params![block_id, reward_set.metadata_serialize(),]; tx.execute(sql, args)?; Ok(()) } diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 59a82f3726..a9f3ea9703 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -188,7 +188,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { // this block must be a processed Nakamoto block let ibh = StacksBlockId::new(&ch, &bhh); let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE processed = 1 AND index_block_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = params![ibh]; + let args = params![ibh]; let res: Option = query_row(self, qry, args)?; Ok(res.is_some()) } @@ -202,7 +202,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result { let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = params![index_block_hash]; + let args = params![index_block_hash]; let res: Option = query_row(self, qry, args)?; Ok(res.is_some()) } @@ -213,7 +213,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let qry = "SELECT data FROM nakamoto_staging_blocks WHERE is_tenure_start = 1 AND consensus_hash = ?1"; - let args: &[&dyn ToSql] = params![consensus_hash]; + let args = params![consensus_hash]; let data: Option> = query_row(self, qry, args)?; let Some(block_bytes) = data else { return Ok(None); @@ -235,7 +235,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT rowid FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = params![index_block_hash]; + let args = params![index_block_hash]; let res: Option = query_row(self, sql, args)?; Ok(res) } @@ -250,7 +250,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let qry = "SELECT data FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = params![index_block_hash]; + let args = params![index_block_hash]; let res: Option> = query_row(self, qry, args)?; let Some(block_bytes) = res else { return Ok(None); @@ -279,7 +279,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let qry = "SELECT length(data) FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = params![index_block_hash]; + let args = params![index_block_hash]; let res = query_row(self, qry, args)? .map(|size: i64| u64::try_from(size).expect("FATAL: block size exceeds i64::MAX")); Ok(res) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 51b824077b..15698f160b 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -438,7 +438,7 @@ impl NakamotoChainState { ) -> Result { // a tenure will have been processed if any of its children have been processed let sql = "SELECT 1 FROM nakamoto_tenures WHERE prev_tenure_id_consensus_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = params![tenure_id_consensus_hash]; + let args = params![tenure_id_consensus_hash]; let found: Option = query_row(conn, sql, args)?; Ok(found.is_some()) } @@ -454,7 +454,7 @@ impl NakamotoChainState { ) -> Result<(), ChainstateError> { // NOTE: this is checked with check_nakamoto_tenure() assert_eq!(block_header.consensus_hash, tenure.tenure_consensus_hash); - let args: &[&dyn ToSql] = params![ + let args = params![ tenure.tenure_consensus_hash, tenure.prev_tenure_consensus_hash, tenure.burn_view_consensus_hash, @@ -511,7 +511,7 @@ impl NakamotoChainState { consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT prev_tenure_id_consensus_hash AS consensus_hash FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = params![consensus_hash]; + let args = params![consensus_hash]; query_row(chainstate_conn, sql, args).map_err(ChainstateError::DBError) } @@ -577,7 +577,7 @@ impl NakamotoChainState { tenure_consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = params![tenure_consensus_hash]; + let args = params![tenure_consensus_hash]; let tenure_opt: Option = query_row(headers_conn, sql, args)?; Ok(tenure_opt) } @@ -588,7 +588,7 @@ impl NakamotoChainState { burn_view: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_tenures WHERE burn_view_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = params![burn_view]; + let args = params![burn_view]; let tenure_opt: Option = query_row(headers_conn, sql, args)?; Ok(tenure_opt) } @@ -601,7 +601,7 @@ impl NakamotoChainState { tenure_id_consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 AND cause = ?2 ORDER BY tenure_index DESC LIMIT 1"; - let args: &[&dyn ToSql] = params![ + let args = params![ tenure_id_consensus_hash, TenureChangeCause::BlockFound.as_u8(), ]; diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index b2a2127b40..f72d44c808 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -181,7 +181,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { tenure_id_consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let qry = "SELECT data FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 ORDER BY height ASC"; - let args: &[&dyn ToSql] = params![tenure_id_consensus_hash]; + let args = params![tenure_id_consensus_hash]; let block_data: Vec> = query_rows(self, qry, args)?; let mut blocks = Vec::with_capacity(block_data.len()); for data in block_data.into_iter() { diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 4d15c001a2..e81bcbf581 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -414,7 +414,7 @@ impl StacksChainState { } }; - let args: &[&dyn ToSql] = params![ + let args = params![ block_reward.address.to_string(), block_reward.recipient.to_string(), block_reward.block_hash, @@ -504,7 +504,7 @@ impl StacksChainState { child_index_block_hash ) VALUES (?1,?2,?3,?4,?5,?6,?7,?8,?9)"; - let args: &[&dyn ToSql] = params![ + let args = params![ reward.address.to_string(), reward.recipient.to_string(), reward.vtxindex, @@ -613,7 +613,7 @@ impl StacksChainState { child_block_id: &TenureBlockId, ) -> Result, Error> { let sql = "SELECT * FROM matured_rewards WHERE parent_index_block_hash = ?1 AND child_index_block_hash = ?2 AND vtxindex = 0"; - let args: &[&dyn ToSql] = params![parent_block_id.0, child_block_id.0]; + let args = params![parent_block_id.0, child_block_id.0]; let ret: Vec = query_rows(conn, sql, args).map_err(|e| Error::DBError(e))?; Ok(ret) } @@ -676,7 +676,7 @@ impl StacksChainState { ) -> Result, Error> { let qry = "SELECT * FROM payments WHERE index_block_hash = ?1 ORDER BY vtxindex ASC".to_string(); - let args: &[&dyn ToSql] = params![index_block_hash]; + let args = params![index_block_hash]; let rows = query_rows::(conn, &qry, args).map_err(Error::DBError)?; test_debug!("{} rewards in {}", rows.len(), index_block_hash); @@ -698,7 +698,7 @@ impl StacksChainState { }; let qry = "SELECT * FROM payments WHERE block_hash = ?1 AND consensus_hash = ?2 ORDER BY vtxindex ASC".to_string(); - let args: &[&dyn ToSql] = params![ + let args = params![ ancestor_info.anchored_header.block_hash(), ancestor_info.consensus_hash, ]; @@ -734,7 +734,7 @@ impl StacksChainState { let qry = "SELECT * FROM payments WHERE consensus_hash = ?1 AND block_hash = ?2 AND miner = 1" .to_string(); - let args: &[&dyn ToSql] = params![consensus_hash, stacks_block_hash,]; + let args = params![consensus_hash, stacks_block_hash,]; let mut rows = query_rows::(conn, &qry, args).map_err(Error::DBError)?; let len = rows.len(); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index d03941464f..1710bfe360 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -770,7 +770,7 @@ impl StacksChainState { for (consensus_hash, block_hash) in blocks.drain(..) { let list_microblock_sql = "SELECT * FROM staging_microblocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 ORDER BY sequence".to_string(); - let list_microblock_args: &[&dyn ToSql] = params![block_hash, consensus_hash]; + let list_microblock_args = params![block_hash, consensus_hash]; let mut microblocks = query_rows::( blocks_conn, &list_microblock_sql, @@ -964,7 +964,7 @@ impl StacksChainState { minimum_block_height: i64, ) -> bool { let sql = "SELECT 1 FROM staging_blocks WHERE microblock_pubkey_hash = ?1 AND height >= ?2"; - let args: &[&dyn ToSql] = params![pubkey_hash, minimum_block_height]; + let args = params![pubkey_hash, minimum_block_height]; block_conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -980,7 +980,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND orphaned = 0 AND processed = 0".to_string(); - let args: &[&dyn ToSql] = params![block_hash, consensus_hash]; + let args = params![block_hash, consensus_hash]; let mut rows = query_rows::(block_conn, &sql, args).map_err(Error::DBError)?; let len = rows.len(); @@ -1009,7 +1009,7 @@ impl StacksChainState { index_block_hash: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE index_block_hash = ?1 AND orphaned = 0"; - let args: &[&dyn ToSql] = params![index_block_hash]; + let args = params![index_block_hash]; query_row::(block_conn, sql, args).map_err(Error::DBError) } @@ -1059,7 +1059,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT microblock_pubkey_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND processed = 0 AND orphaned = 0"; - let args: &[&dyn ToSql] = params![block_hash, consensus_hash]; + let args = params![block_hash, consensus_hash]; let rows = query_row_columns::(block_conn, sql, args, "microblock_pubkey_hash") .map_err(Error::DBError)?; match rows.len() { @@ -1114,7 +1114,7 @@ impl StacksChainState { microblock_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 AND orphaned = 0 LIMIT 1"; - let args: &[&dyn ToSql] = params![parent_index_block_hash, microblock_hash]; + let args = params![parent_index_block_hash, microblock_hash]; query_row::(blocks_conn, sql, args).map_err(Error::DBError) } @@ -1127,7 +1127,7 @@ impl StacksChainState { index_microblock_hash: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE index_microblock_hash = ?1 AND orphaned = 0 LIMIT 1"; - let args: &[&dyn ToSql] = params![index_microblock_hash]; + let args = params![index_microblock_hash]; query_row::(blocks_conn, sql, args).map_err(Error::DBError) } @@ -1332,7 +1332,7 @@ impl StacksChainState { "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 AND sequence >= ?2 AND sequence < ?3 AND orphaned = 0 ORDER BY sequence ASC".to_string() }; - let args: &[&dyn ToSql] = params![parent_index_block_hash, start_seq, last_seq]; + let args = params![parent_index_block_hash, start_seq, last_seq]; let staging_microblocks = query_rows::(blocks_conn, &sql, args).map_err(Error::DBError)?; @@ -1565,7 +1565,7 @@ impl StacksChainState { // if this block has an unprocessed staging parent, then it's not attachable until its parent is. let has_unprocessed_parent_sql = "SELECT anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND processed = 0 AND orphaned = 0 LIMIT 1"; let has_parent_sql = "SELECT anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 LIMIT 1"; - let has_parent_args: &[&dyn ToSql] = + let has_parent_args = params![block.header.parent_block, parent_consensus_hash]; let has_unprocessed_parent_rows = query_row_columns::( &tx, @@ -1618,7 +1618,7 @@ impl StacksChainState { download_time) \ VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17)"; - let args: &[&dyn ToSql] = params![ + let args = params![ block_hash, block.header.parent_block, consensus_hash, @@ -1691,7 +1691,7 @@ impl StacksChainState { // store microblock metadata let sql = "INSERT OR REPLACE INTO staging_microblocks (anchored_block_hash, consensus_hash, index_block_hash, microblock_hash, parent_hash, index_microblock_hash, sequence, processed, orphaned) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)"; - let args: &[&dyn ToSql] = params![ + let args = params![ parent_anchored_block_hash, parent_consensus_hash, index_block_hash, @@ -1710,7 +1710,7 @@ impl StacksChainState { let block_sql = "INSERT OR REPLACE INTO staging_microblocks_data \ (block_hash, block_data) VALUES (?1, ?2)"; - let block_args: &[&dyn ToSql] = params![microblock.block_hash(), microblock_bytes]; + let block_args = params![microblock.block_hash(), microblock_bytes]; tx.execute(&block_sql, block_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -1855,7 +1855,7 @@ impl StacksChainState { }; let sql = "SELECT 1 FROM staging_microblocks WHERE index_block_hash = ?1 AND microblock_hash = ?2 AND processed = 1 AND orphaned = 0"; - let args: &[&dyn ToSql] = params![parent_index_block_hash, parent_microblock_hash]; + let args = params![parent_index_block_hash, parent_microblock_hash]; let res = self .db() .query_row(sql, args, |_r| Ok(())) @@ -2028,7 +2028,7 @@ impl StacksChainState { ); let sql = "SELECT COALESCE(MIN(block_height), 0), COALESCE(MAX(block_height), 0) FROM block_headers WHERE burn_header_height >= ?1 AND burn_header_height < ?2"; - let args: &[&dyn ToSql] = + let args = params![u64_to_sql(burn_height_start)?, u64_to_sql(burn_height_end)?,]; self.db() @@ -2077,7 +2077,7 @@ impl StacksChainState { FROM staging_blocks LEFT JOIN staging_microblocks \ ON staging_blocks.parent_microblock_hash = staging_microblocks.microblock_hash \ WHERE staging_blocks.height >= ?1 AND staging_blocks.height <= ?2"; - let args: &[&dyn ToSql] = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; + let args = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; let mut stmt = self.db().prepare(sql)?; @@ -2154,7 +2154,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let qry = "SELECT consensus_hash FROM staging_blocks WHERE anchored_block_hash = ?1"; - let args: &[&dyn ToSql] = params![block_hash]; + let args = params![block_hash]; query_rows(conn, qry, args).map_err(|e| e.into()) } @@ -2300,16 +2300,16 @@ impl StacksChainState { ) -> Result<(), Error> { // This block is orphaned let update_block_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 1, attachable = 0 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let update_block_args: &[&dyn ToSql] = params![consensus_hash, anchored_block_hash]; + let update_block_args = params![consensus_hash, anchored_block_hash]; // All descendants of this processed block are never attachable. // Indicate this by marking all children as orphaned (but not procesed), across all burnchain forks. let update_children_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 0, attachable = 0 WHERE parent_consensus_hash = ?1 AND parent_anchored_block_hash = ?2"; - let update_children_args: &[&dyn ToSql] = params![consensus_hash, anchored_block_hash]; + let update_children_args = params![consensus_hash, anchored_block_hash]; // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let find_orphaned_microblocks_args: &[&dyn ToSql] = + let find_orphaned_microblocks_args = params![consensus_hash, anchored_block_hash]; let orphaned_microblock_hashes = query_row_columns::( tx, @@ -2320,7 +2320,7 @@ impl StacksChainState { // drop microblocks (this processes them) let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let update_microblock_children_args: &[&dyn ToSql] = + let update_microblock_children_args = params![consensus_hash, anchored_block_hash]; tx.execute(update_block_sql, update_block_args)?; @@ -2368,7 +2368,7 @@ impl StacksChainState { ); let sql = "DELETE FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 1 AND processed = 1"; - let args: &[&dyn ToSql] = params![consensus_hash, anchored_block_hash]; + let args = params![consensus_hash, anchored_block_hash]; tx.execute(sql, args)?; @@ -2392,7 +2392,7 @@ impl StacksChainState { accept: bool, ) -> Result<(), Error> { let sql = "SELECT * FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 0".to_string(); - let args: &[&dyn ToSql] = params![consensus_hash, anchored_block_hash]; + let args = params![consensus_hash, anchored_block_hash]; let has_stored_block = StacksChainState::has_stored_block( tx, @@ -2406,7 +2406,7 @@ impl StacksChainState { 0 => { // not an error if this block was already orphaned let orphan_sql = "SELECT * FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND orphaned = 1".to_string(); - let orphan_args: &[&dyn ToSql] = params![consensus_hash, anchored_block_hash]; + let orphan_args = params![consensus_hash, anchored_block_hash]; let orphan_rows = query_rows::(tx, &orphan_sql, orphan_args) .map_err(Error::DBError)?; if orphan_rows.len() == 1 { @@ -2460,7 +2460,7 @@ impl StacksChainState { } let update_sql = "UPDATE staging_blocks SET processed = 1, processed_time = ?1 WHERE consensus_hash = ?2 AND anchored_block_hash = ?3".to_string(); - let update_args: &[&dyn ToSql] = params![ + let update_args = params![ u64_to_sql(get_epoch_time_secs())?, consensus_hash, anchored_block_hash, @@ -2528,11 +2528,11 @@ impl StacksChainState { &index_block_hash ); let update_block_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 1, attachable = 0 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string(); - let update_block_args: &[&dyn ToSql] = params![consensus_hash, anchored_block_hash]; + let update_block_args = params![consensus_hash, anchored_block_hash]; // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let find_orphaned_microblocks_args: &[&dyn ToSql] = + let find_orphaned_microblocks_args = params![consensus_hash, anchored_block_hash]; let orphaned_microblock_hashes = query_row_columns::( tx, @@ -2550,7 +2550,7 @@ impl StacksChainState { &index_block_hash ); let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string(); - let update_microblock_children_args: &[&dyn ToSql] = + let update_microblock_children_args = params![consensus_hash, anchored_block_hash]; tx.execute(&update_block_sql, update_block_args) @@ -2587,7 +2587,7 @@ impl StacksChainState { ) -> Result<(), Error> { // find offending sequence let seq_sql = "SELECT sequence FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND microblock_hash = ?3 AND processed = 0 AND orphaned = 0".to_string(); - let seq_args: &[&dyn ToSql] = + let seq_args = params![consensus_hash, anchored_block_hash, invalid_block_hash]; let seq = match query_int::<_>(tx, &seq_sql, seq_args) { Ok(seq) => seq, @@ -2609,7 +2609,7 @@ impl StacksChainState { // drop staging children at and beyond the invalid block let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE anchored_block_hash = ?1 AND sequence >= ?2".to_string(); - let update_microblock_children_args: &[&dyn ToSql] = params![anchored_block_hash, seq]; + let update_microblock_children_args = params![anchored_block_hash, seq]; tx.execute( &update_microblock_children_sql, @@ -2619,7 +2619,7 @@ impl StacksChainState { // find all orphaned microblocks hashes, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE anchored_block_hash = ?1 AND sequence >= ?2"; - let find_orphaned_microblocks_args: &[&dyn ToSql] = params![anchored_block_hash, seq]; + let find_orphaned_microblocks_args = params![anchored_block_hash, seq]; let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, @@ -2674,7 +2674,7 @@ impl StacksChainState { test_debug!("Set {}-{} processed", &parent_index_hash, &mblock_hash); // confirm this microblock - let args: &[&dyn ToSql] = + let args = params![parent_consensus_hash, parent_block_hash, mblock_hash]; tx.execute(sql, args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -2746,7 +2746,7 @@ impl StacksChainState { index_microblock_hash: &StacksBlockId, ) -> Result { let sql = "SELECT 1 FROM staging_microblocks WHERE index_microblock_hash = ?1 AND processed = 1 AND orphaned = 0"; - let args: &[&dyn ToSql] = params![index_microblock_hash]; + let args = params![index_microblock_hash]; let res = conn .query_row(&sql, args, |_r| Ok(())) .optional() @@ -2840,7 +2840,7 @@ impl StacksChainState { "SELECT {},{} FROM staging_blocks WHERE index_block_hash = ?1", consensus_hash_col, anchored_block_col ); - let args: &[&dyn ToSql] = params![index_block_hash]; + let args = params![index_block_hash]; blocks_db .query_row(&sql, args, |row| { @@ -2892,7 +2892,7 @@ impl StacksChainState { staging_microblocks JOIN staging_microblocks_data \ ON staging_microblocks.microblock_hash = staging_microblocks_data.block_hash \ WHERE staging_microblocks.index_block_hash = ?1 AND staging_microblocks.microblock_hash = ?2"; - let args: &[&dyn ToSql] = params![parent_index_block_hash, microblock_hash,]; + let args = params![parent_index_block_hash, microblock_hash,]; query_row(blocks_conn, sql, args).map_err(Error::DBError) } @@ -2905,7 +2905,7 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT * FROM staging_microblocks WHERE index_block_hash = ?1 ORDER BY sequence" .to_string(); - let args: &[&dyn ToSql] = params![parent_index_block_hash]; + let args = params![parent_index_block_hash]; let microblock_info = query_rows::(blocks_conn, &sql, args).map_err(Error::DBError)?; Ok(microblock_info) @@ -2947,7 +2947,7 @@ impl StacksChainState { ) -> Result { let sql = "SELECT 1 FROM staging_blocks WHERE orphaned = 0 AND processed = 0 AND height >= ?1 AND arrival_time >= ?2"; - let args: &[&dyn ToSql] = params![u64_to_sql(height)?, u64_to_sql(deadline)?]; + let args = params![u64_to_sql(height)?, u64_to_sql(deadline)?]; let res = conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -3177,7 +3177,7 @@ impl StacksChainState { parent_block_hash: &BlockHeaderHash, ) -> Result { let sql = "SELECT 1 FROM epoch_transitions WHERE block_id = ?1"; - let args: &[&dyn ToSql] = params![StacksBlockHeader::make_index_block_hash( + let args = params![StacksBlockHeader::make_index_block_hash( parent_consensus_hash, parent_block_hash, )]; @@ -3841,7 +3841,7 @@ impl StacksChainState { end_height: u64, ) -> Result, Error> { let sql = "SELECT processed_time - arrival_time FROM staging_blocks WHERE processed = 1 AND height >= ?1 AND height < ?2"; - let args: &[&dyn ToSql] = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; + let args = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; let list = query_rows::(blocks_conn, &sql, args)?; Ok(list) } @@ -3854,7 +3854,7 @@ impl StacksChainState { end_height: u64, ) -> Result, Error> { let sql = "SELECT download_time FROM staging_blocks WHERE height >= ?1 AND height < ?2"; - let args: &[&dyn ToSql] = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; + let args = params![u64_to_sql(start_height)?, u64_to_sql(end_height)?]; let list = query_rows::(blocks_conn, &sql, args)?; Ok(list) } @@ -3936,7 +3936,7 @@ impl StacksChainState { // not the first-ever block. Does this connect to a previously-accepted // block in the headers database? let hdr_sql = "SELECT * FROM block_headers WHERE block_hash = ?1 AND consensus_hash = ?2".to_string(); - let hdr_args: &[&dyn ToSql] = params![ + let hdr_args = params![ candidate.parent_anchored_block_hash, candidate.parent_consensus_hash, ]; @@ -6537,7 +6537,7 @@ impl StacksChainState { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = params![consensus_hash, block_bhh]; + let args = params![consensus_hash, block_bhh]; query_row(&self.db(), sql, args).map_err(Error::DBError) } @@ -6546,7 +6546,7 @@ impl StacksChainState { let (consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = params![consensus_hash, block_bhh]; + let args = params![consensus_hash, block_bhh]; let Some(staging_block): Option = query_row(&self.db(), sql, args).map_err(Error::DBError)? else { @@ -6559,7 +6559,7 @@ impl StacksChainState { pub fn get_stacks_chain_tips_at_height(&self, height: u64) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND height = ?1"; - let args: &[&dyn ToSql] = params![u64_to_sql(height)?]; + let args = params![u64_to_sql(height)?]; query_rows(&self.db(), sql, args).map_err(Error::DBError) } @@ -6569,7 +6569,7 @@ impl StacksChainState { staging_block: &StagingBlock, ) -> Result, Error> { let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = params![ + let args = params![ staging_block.parent_consensus_hash, staging_block.parent_anchored_block_hash, ]; @@ -6583,7 +6583,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT height FROM staging_blocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = params![consensus_hash, block_hash]; + let args = params![consensus_hash, block_hash]; query_row(&self.db(), sql, args).map_err(Error::DBError) } diff --git a/stackslib/src/chainstate/stacks/db/headers.rs b/stackslib/src/chainstate/stacks/db/headers.rs index b42b9545d0..98f41bf9c7 100644 --- a/stackslib/src/chainstate/stacks/db/headers.rs +++ b/stackslib/src/chainstate/stacks/db/headers.rs @@ -139,7 +139,7 @@ impl StacksChainState { assert!(block_height < (i64::MAX as u64)); - let args: &[&dyn ToSql] = params![ + let args = params![ header.version, total_burn_str, total_work_str, @@ -209,7 +209,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result { let sql = "SELECT 1 FROM block_headers WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args: &[&dyn ToSql] = params![consensus_hash, block_hash]; + let args = params![consensus_hash, block_hash]; match conn.query_row(sql, args, |_| Ok(true)) { Ok(_) => Ok(true), Err(rusqlite::Error::QueryReturnedNoRows) => Ok(false), @@ -225,7 +225,7 @@ impl StacksChainState { block_hash: &BlockHeaderHash, ) -> Result, Error> { let sql = "SELECT * FROM block_headers WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args: &[&dyn ToSql] = params![consensus_hash, block_hash]; + let args = params![consensus_hash, block_hash]; query_row_panic(conn, sql, args, || { "FATAL: multiple rows for the same block hash".to_string() }) @@ -319,7 +319,7 @@ impl StacksChainState { pub fn get_genesis_header_info(conn: &Connection) -> Result { // by construction, only one block can have height 0 in this DB let sql = "SELECT * FROM block_headers WHERE consensus_hash = ?1 AND block_height = 0"; - let args: &[&dyn ToSql] = params![FIRST_BURNCHAIN_CONSENSUS_HASH]; + let args = params![FIRST_BURNCHAIN_CONSENSUS_HASH]; let row_opt = query_row(conn, sql, args)?; Ok(row_opt.expect("BUG: no genesis header info")) } @@ -330,7 +330,7 @@ impl StacksChainState { block_id: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT parent_block_id FROM block_headers WHERE index_block_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = params![block_id]; + let args = params![block_id]; let mut rows = query_row_columns::(conn, sql, args, "parent_block_id")?; Ok(rows.pop()) } @@ -338,7 +338,7 @@ impl StacksChainState { /// Is this block present and processed? pub fn has_stacks_block(conn: &Connection, block_id: &StacksBlockId) -> Result { let sql = "SELECT 1 FROM block_headers WHERE index_block_hash = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = params![block_id]; + let args = params![block_id]; Ok(conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -383,7 +383,7 @@ impl StacksChainState { ) -> Result, Error> { let qry = "SELECT * FROM block_headers WHERE block_height = ?1 AND affirmation_weight = ?2 ORDER BY burn_header_height DESC"; - let args: &[&dyn ToSql] = params![u64_to_sql(height)?, u64_to_sql(affirmation_weight)?]; + let args = params![u64_to_sql(height)?, u64_to_sql(affirmation_weight)?]; query_rows(conn, qry, args).map_err(|e| e.into()) } diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 9d5979466f..2fc111d702 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -669,7 +669,7 @@ impl<'a> ChainstateTx<'a> { let txid = tx_event.transaction.txid(); let tx_hex = tx_event.transaction.serialize_to_dbstring(); let result = tx_event.result.to_string(); - let params: &[&dyn ToSql] = params![txid, block_id, tx_hex, result]; + let params = params![txid, block_id, tx_hex, result]; if let Err(e) = self.tx.tx().execute(insert, params) { warn!("Failed to log TX: {}", e); } @@ -2472,7 +2472,7 @@ impl StacksChainState { index_block_hash: &StacksBlockId, ) -> Result, Error> { let sql = "SELECT txids FROM burnchain_txids WHERE index_block_hash = ?1"; - let args: &[&dyn ToSql] = &[index_block_hash]; + let args = params![index_block_hash]; let txids = conn .query_row(sql, args, |r| { @@ -2552,7 +2552,7 @@ impl StacksChainState { let txids_json = serde_json::to_string(&txids).expect("FATAL: could not serialize Vec"); let sql = "INSERT INTO burnchain_txids (index_block_hash, txids) VALUES (?1, ?2)"; - let args: &[&dyn ToSql] = &[index_block_hash, &txids_json]; + let args = params![index_block_hash, &txids_json]; tx.execute(sql, args)?; Ok(()) } @@ -2682,7 +2682,7 @@ impl StacksChainState { if applied_epoch_transition { debug!("Block {} applied an epoch transition", &index_block_hash); let sql = "INSERT INTO epoch_transitions (block_id) VALUES (?)"; - let args: &[&dyn ToSql] = &[&index_block_hash]; + let args = params![&index_block_hash]; headers_tx.deref_mut().execute(sql, args)?; } diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index 75abe038fa..c9d3b40dce 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -239,7 +239,7 @@ pub fn write_trie_blob( block_hash: &T, data: &[u8], ) -> Result { - let args: &[&dyn ToSql] = params![block_hash, data, 0, 0, 0,]; + let args = params![block_hash, data, 0, 0, 0,]; let mut s = conn.prepare("INSERT INTO marf_data (block_hash, data, unconfirmed, external_offset, external_length) VALUES (?, ?, ?, ?, ?)")?; let block_id = s @@ -266,7 +266,7 @@ fn inner_write_external_trie_blob( let block_id = if let Some(block_id) = block_id { // existing entry (i.e. a migration) let empty_blob: &[u8] = &[]; - let args: &[&dyn ToSql] = params![ + let args = params![ block_hash, empty_blob, 0, @@ -286,7 +286,7 @@ fn inner_write_external_trie_blob( } else { // new entry let empty_blob: &[u8] = &[]; - let args: &[&dyn ToSql] = params![ + let args = params![ block_hash, empty_blob, 0, @@ -342,13 +342,13 @@ pub fn write_trie_blob_to_mined( ) -> Result { if let Ok(block_id) = get_mined_block_identifier(conn, block_hash) { // already exists; update - let args: &[&dyn ToSql] = params![data, block_id]; + let args = params![data, block_id]; let mut s = conn.prepare("UPDATE mined_blocks SET data = ? WHERE block_id = ?")?; s.execute(args) .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks"); } else { // doesn't exist yet; insert - let args: &[&dyn ToSql] = params![block_hash, data]; + let args = params![block_hash, data]; let mut s = conn.prepare("INSERT INTO mined_blocks (block_hash, data) VALUES (?, ?)")?; s.execute(args) .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks"); @@ -375,13 +375,13 @@ pub fn write_trie_blob_to_unconfirmed( if let Ok(Some(block_id)) = get_unconfirmed_block_identifier(conn, block_hash) { // already exists; update - let args: &[&dyn ToSql] = params![data, block_id]; + let args = params![data, block_id]; let mut s = conn.prepare("UPDATE marf_data SET data = ? WHERE block_id = ?")?; s.execute(args) .expect("EXHAUSTION: MARF cannot track more than 2**31 - 1 blocks"); } else { // doesn't exist yet; insert - let args: &[&dyn ToSql] = params![block_hash, data, 1]; + let args = params![block_hash, data, 1]; let mut s = conn.prepare("INSERT INTO marf_data (block_hash, data, unconfirmed, external_offset, external_length) VALUES (?, ?, ?, 0, 0)")?; s.execute(args) @@ -515,7 +515,7 @@ pub fn get_external_trie_offset_length( block_id: u32, ) -> Result<(u64, u64), Error> { let qry = "SELECT external_offset, external_length FROM marf_data WHERE block_id = ?1"; - let args: &[&dyn ToSql] = params![block_id]; + let args = params![block_id]; let (offset, length) = query_row(conn, qry, args)?.ok_or(Error::NotFoundError)?; Ok((offset, length)) } @@ -526,7 +526,7 @@ pub fn get_external_trie_offset_length_by_bhh( bhh: &T, ) -> Result<(u64, u64), Error> { let qry = "SELECT external_offset, external_length FROM marf_data WHERE block_hash = ?1"; - let args: &[&dyn ToSql] = params![bhh]; + let args = params![bhh]; let (offset, length) = query_row(conn, qry, args)?.ok_or(Error::NotFoundError)?; Ok((offset, length)) } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index eb05d31193..281a0d784f 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -513,7 +513,7 @@ pub fn get_stacks_header_column_from_table( where F: Fn(&Row) -> R, { - let args: &[&dyn ToSql] = params![id_bhh]; + let args = params![id_bhh]; let table_name = if nakamoto { "nakamoto_block_headers" } else { @@ -590,7 +590,7 @@ fn get_first_block_in_tenure( }; let ch = consensus_hash .expect("Unexpected SQL failure querying block header table for 'consensus_hash'"); - let args: &[&dyn ToSql] = params![ch]; + let args = params![ch]; conn.query_row( " SELECT index_block_hash @@ -619,7 +619,7 @@ fn get_miner_column( where F: FnOnce(&Row) -> R, { - let args: &[&dyn ToSql] = params![id_bhh.0]; + let args = params![id_bhh.0]; conn.query_row( &format!( "SELECT {} FROM payments WHERE index_block_hash = ? AND miner = 1", diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index f024a95d3b..9cd8539a77 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -859,7 +859,7 @@ impl<'a> MemPoolTx<'a> { /// Used to clear out txids that are now outside the bloom counter's depth. fn prune_bloom_counter(&mut self, target_height: u64) -> Result<(), MemPoolRejection> { let sql = "SELECT a.txid FROM mempool AS a LEFT OUTER JOIN removed_txids AS b ON a.txid = b.txid WHERE b.txid IS NULL AND a.height = ?1"; - let args: &[&dyn ToSql] = params![u64_to_sql(target_height)?]; + let args = params![u64_to_sql(target_height)?]; let txids: Vec = query_rows(&self.tx, sql, args)?; let _num_txs = txids.len(); @@ -871,7 +871,7 @@ impl<'a> MemPoolTx<'a> { bloom_counter.remove_raw(dbtx, &txid.0)?; let sql = "INSERT OR REPLACE INTO removed_txids (txid) VALUES (?1)"; - let args: &[&dyn ToSql] = params![txid]; + let args = params![txid]; dbtx.execute(sql, args).map_err(db_error::SqliteError)?; } // help the type inference out @@ -902,7 +902,7 @@ impl<'a> MemPoolTx<'a> { ) -> Result, MemPoolRejection> { // is this the first-ever txid at this height? let sql = "SELECT 1 FROM mempool WHERE height = ?1"; - let args: &[&dyn ToSql] = params![u64_to_sql(height)?]; + let args = params![u64_to_sql(height)?]; let present: Option = query_row(&self.tx, sql, args)?; if present.is_none() && height > (BLOOM_COUNTER_DEPTH as u64) { // this is the first-ever tx at this height. @@ -925,7 +925,7 @@ impl<'a> MemPoolTx<'a> { // remove lowest-fee tx (they're paying the least, so replication is // deprioritized) let sql = "SELECT a.txid FROM mempool AS a LEFT OUTER JOIN removed_txids AS b ON a.txid = b.txid WHERE b.txid IS NULL AND a.height > ?1 ORDER BY a.tx_fee ASC LIMIT 1"; - let args: &[&dyn ToSql] = params![u64_to_sql( + let args = params![u64_to_sql( height.saturating_sub(BLOOM_COUNTER_DEPTH as u64), )?]; let evict_txid: Option = query_row(&dbtx, sql, args)?; @@ -933,7 +933,7 @@ impl<'a> MemPoolTx<'a> { bloom_counter.remove_raw(dbtx, &evict_txid.0)?; let sql = "INSERT OR REPLACE INTO removed_txids (txid) VALUES (?1)"; - let args: &[&dyn ToSql] = params![evict_txid]; + let args = params![evict_txid]; dbtx.execute(sql, args).map_err(db_error::SqliteError)?; Some(evict_txid) @@ -963,7 +963,7 @@ impl<'a> MemPoolTx<'a> { let hashed_txid = Txid(Sha512Trunc256Sum::from_data(&randomized_buff).0); let sql = "INSERT OR REPLACE INTO randomized_txids (txid,hashed_txid) VALUES (?1,?2)"; - let args: &[&dyn ToSql] = params![txid, hashed_txid]; + let args = params![txid, hashed_txid]; self.execute(sql, args).map_err(db_error::SqliteError)?; @@ -1504,7 +1504,7 @@ impl MemPoolDB { ) -> Result, db_error> { let sql = "SELECT DISTINCT origin_address FROM mempool WHERE height > ?1 AND height <= ?2 AND tx_fee >= ?3 ORDER BY tx_fee DESC LIMIT ?4 OFFSET ?5"; - let args: &[&dyn ToSql] = params![ + let args = params![ start_height, end_height, u64_to_sql(min_fees)?, @@ -1966,7 +1966,7 @@ impl MemPoolDB { block_header_hash: &BlockHeaderHash, ) -> Result { let sql = "SELECT * FROM mempool WHERE consensus_hash = ?1 AND block_header_hash = ?2"; - let args: &[&dyn ToSql] = params![consensus_hash, block_header_hash]; + let args = params![consensus_hash, block_header_hash]; let rows = query_rows::(conn, &sql, args)?; Ok(rows.len()) } @@ -1980,7 +1980,7 @@ impl MemPoolDB { timestamp: u64, ) -> Result, db_error> { let sql = "SELECT * FROM mempool WHERE accept_time = ?1 AND consensus_hash = ?2 AND block_header_hash = ?3 ORDER BY origin_nonce ASC"; - let args: &[&dyn ToSql] = + let args = params![u64_to_sql(timestamp)?, consensus_hash, block_header_hash]; let rows = query_rows::(conn, &sql, args)?; Ok(rows) @@ -1989,7 +1989,7 @@ impl MemPoolDB { /// Given a chain tip, find the highest block-height from _before_ this tip pub fn get_previous_block_height(conn: &DBConn, height: u64) -> Result, db_error> { let sql = "SELECT height FROM mempool WHERE height < ?1 ORDER BY height DESC LIMIT 1"; - let args: &[&dyn ToSql] = params![u64_to_sql(height)?]; + let args = params![u64_to_sql(height)?]; query_row(conn, sql, args) } @@ -2002,7 +2002,7 @@ impl MemPoolDB { count: u64, ) -> Result, db_error> { let sql = "SELECT * FROM mempool WHERE accept_time >= ?1 AND consensus_hash = ?2 AND block_header_hash = ?3 ORDER BY tx_fee DESC LIMIT ?4"; - let args: &[&dyn ToSql] = params![ + let args = params![ u64_to_sql(timestamp)?, consensus_hash, block_header_hash, @@ -2039,7 +2039,7 @@ impl MemPoolDB { FROM mempool WHERE {0}_address = ?1 AND {0}_nonce = ?2", if is_origin { "origin" } else { "sponsor" } ); - let args: &[&dyn ToSql] = params![addr.to_string(), u64_to_sql(nonce)?]; + let args = params![addr.to_string(), u64_to_sql(nonce)?]; query_row(conn, &sql, args) } @@ -2174,7 +2174,7 @@ impl MemPoolDB { tx) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)"; - let args: &[&dyn ToSql] = params![ + let args = params![ txid, origin_address.to_string(), u64_to_sql(origin_nonce)?, @@ -2236,7 +2236,7 @@ impl MemPoolDB { event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result<(), db_error> { let threshold_time = get_epoch_time_secs().saturating_sub(age.as_secs()); - let args: &[&dyn ToSql] = params![u64_to_sql(threshold_time)?]; + let args = params![u64_to_sql(threshold_time)?]; if let Some(event_observer) = event_observer { let sql = "SELECT txid FROM mempool WHERE accept_time < ?1"; let txids = query_rows(tx, sql, args)?; @@ -2257,7 +2257,7 @@ impl MemPoolDB { min_height: u64, event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = params![u64_to_sql(min_height)?]; + let args = params![u64_to_sql(min_height)?]; if let Some(event_observer) = event_observer { let sql = "SELECT txid FROM mempool WHERE height < ?1"; @@ -2555,7 +2555,7 @@ impl MemPoolDB { ) -> Result<(), db_error> { for txid in txids { let sql = "INSERT OR REPLACE INTO tx_blacklist (txid, arrival_time) VALUES (?1, ?2)"; - let args: &[&dyn ToSql] = params![txid, &u64_to_sql(now)?]; + let args = params![txid, &u64_to_sql(now)?]; tx.execute(sql, args)?; } Ok(()) @@ -2570,7 +2570,7 @@ impl MemPoolDB { max_size: u64, ) -> Result<(), db_error> { let sql = "DELETE FROM tx_blacklist WHERE arrival_time + ?1 < ?2"; - let args: &[&dyn ToSql] = params![u64_to_sql(timeout)?, u64_to_sql(now)?]; + let args = params![u64_to_sql(timeout)?, u64_to_sql(now)?]; tx.execute(sql, args)?; // if we get too big, then drop some txs at random @@ -2596,7 +2596,7 @@ impl MemPoolDB { txid: &Txid, ) -> Result, db_error> { let sql = "SELECT arrival_time FROM tx_blacklist WHERE txid = ?1"; - let args: &[&dyn ToSql] = params![txid]; + let args = params![txid]; query_row(conn, sql, args) } @@ -2719,7 +2719,7 @@ impl MemPoolDB { }; let min_height = max_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64); let sql = "SELECT mempool.txid FROM mempool WHERE height > ?1 AND height <= ?2 AND NOT EXISTS (SELECT 1 FROM removed_txids WHERE txid = mempool.txid)"; - let args: &[&dyn ToSql] = params![u64_to_sql(min_height)?, u64_to_sql(max_height)?]; + let args = params![u64_to_sql(min_height)?, u64_to_sql(max_height)?]; query_rows(&self.conn(), sql, args) } @@ -2747,7 +2747,7 @@ impl MemPoolDB { }; let min_height = max_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64); let sql = "SELECT COUNT(txid) FROM mempool WHERE height > ?1 AND height <= ?2"; - let args: &[&dyn ToSql] = params![u64_to_sql(min_height)?, u64_to_sql(max_height)?]; + let args = params![u64_to_sql(min_height)?, u64_to_sql(max_height)?]; query_int(conn, sql, args).map(|cnt| cnt as u64) } @@ -2768,7 +2768,7 @@ impl MemPoolDB { /// Get the hashed txid for a txid pub fn get_randomized_txid(&self, txid: &Txid) -> Result, db_error> { let sql = "SELECT hashed_txid FROM randomized_txids WHERE txid = ?1 LIMIT 1"; - let args: &[&dyn ToSql] = params![txid]; + let args = params![txid]; query_row(&self.conn(), sql, args) } @@ -2815,7 +2815,7 @@ impl MemPoolDB { (SELECT 1 FROM removed_txids WHERE txid = mempool.txid) \ ORDER BY randomized_txids.hashed_txid ASC LIMIT ?3"; - let args: &[&dyn ToSql] = params![ + let args = params![ last_randomized_txid, u64_to_sql(height.saturating_sub(BLOOM_COUNTER_DEPTH as u64))?, u64_to_sql(max_run)?, diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index bd7cebc581..8660e0e9a7 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -769,7 +769,7 @@ simulating a miner. if let Some(value) = value_opt { let conn = sqlite_open(&db_path, OpenFlags::SQLITE_OPEN_READ_ONLY, false) .expect("Failed to open DB"); - let args: &[&dyn ToSql] = params![&value.to_hex()]; + let args = params![&value.to_hex()]; let res: Result = conn.query_row_and_then( "SELECT value FROM __fork_storage WHERE value_hash = ?1", args, diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index dc6d90a9c5..f971344a28 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -461,7 +461,7 @@ impl AtlasDB { let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let max = (page_index + 1) * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let qry = "SELECT MIN(block_height) as min, MAX(block_height) as max FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2"; - let args: &[&dyn ToSql] = params![min, max]; + let args = params![min, max]; let mut stmt = self.conn.prepare(&qry)?; let mut rows = stmt.query(args)?; @@ -497,7 +497,7 @@ impl AtlasDB { let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let max = min + AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; let qry = "SELECT attachment_index, is_available FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2 AND index_block_hash = ?3 ORDER BY attachment_index ASC"; - let args: &[&dyn ToSql] = params![min, max, block_id,]; + let args = params![min, max, block_id,]; let rows = query_rows::<(u32, u32), _>(&self.conn, &qry, args)?; let mut bool_vector = vec![true; AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE as usize]; @@ -598,7 +598,7 @@ impl AtlasDB { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT content, hash FROM attachments WHERE hash = ?1 AND was_instantiated = 0" .to_string(); - let args: &[&dyn ToSql] = params![hex_content_hash]; + let args = params![hex_content_hash]; let row = query_row::(&self.conn, &qry, args)?; Ok(row) } @@ -633,7 +633,7 @@ impl AtlasDB { ) -> Result, db_error> { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT * FROM attachment_instances WHERE content_hash = ?1 AND status = ?2"; - let args: &[&dyn ToSql] = params![hex_content_hash, AttachmentInstanceStatus::Checked]; + let args = params![hex_content_hash, AttachmentInstanceStatus::Checked]; let rows = query_rows(&self.conn, qry, args)?; Ok(rows) } @@ -642,7 +642,7 @@ impl AtlasDB { let hex_content_hash = to_hex(&content_hash.0[..]); let qry = "SELECT content, hash FROM attachments WHERE hash = ?1 AND was_instantiated = 1" .to_string(); - let args: &[&dyn ToSql] = params![hex_content_hash]; + let args = params![hex_content_hash]; let row = query_row::(&self.conn, &qry, args)?; Ok(row) } diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 086e3b65a2..07f0bb5d74 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -446,7 +446,7 @@ impl PeerDB { PeerDB::apply_schema_migrations(&tx)?; - let local_peer_args: &[&dyn ToSql] = params![ + let local_peer_args = params![ network_id, parent_network_id, to_hex(&localpeer.nonce), @@ -556,7 +556,7 @@ impl PeerDB { p2p_port: u16, stacker_dbs: &[QualifiedContractIdentifier], ) -> Result<(), db_error> { - let local_peer_args: &[&dyn ToSql] = params![ + let local_peer_args = params![ p2p_port, data_url.as_str(), serde_json::to_string(stacker_dbs) @@ -839,7 +839,7 @@ impl PeerDB { privkey: &Secp256k1PrivateKey, expire_block: u64, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = params![to_hex(&privkey.to_bytes()), u64_to_sql(expire_block)?]; + let args = params![to_hex(&privkey.to_bytes()), u64_to_sql(expire_block)?]; tx.execute( "UPDATE local_peer SET private_key = ?1, private_key_expire = ?2", args, @@ -912,7 +912,7 @@ impl PeerDB { peer_port: u16, ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; - let args: &[&dyn ToSql] = params![network_id, peer_addr.to_bin(), peer_port,]; + let args = params![network_id, peer_addr.to_bin(), peer_port,]; query_row::(conn, qry, args) } @@ -923,7 +923,7 @@ impl PeerDB { peer_port: u16, ) -> Result { let qry = "SELECT 1 FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; - let args: &[&dyn ToSql] = params![network_id, peer_addr.to_bin(), peer_port]; + let args = params![network_id, peer_addr.to_bin(), peer_port]; Ok(query_row::(conn, &qry, args)? .map(|x| x == 1) .unwrap_or(false)) @@ -937,7 +937,7 @@ impl PeerDB { peer_port: u16, ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND port = ?2"; - let args: &[&dyn ToSql] = params![network_id, peer_port]; + let args = params![network_id, peer_port]; query_row::(conn, &qry, args) } @@ -948,14 +948,14 @@ impl PeerDB { slot: u32, ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND slot = ?2"; - let args: &[&dyn ToSql] = params![network_id, slot]; + let args = params![network_id, slot]; query_row::(conn, &qry, args) } /// Is there any peer at a particular slot? pub fn has_peer_at(conn: &DBConn, network_id: u32, slot: u32) -> Result { let qry = "SELECT 1 FROM frontier WHERE network_id = ?1 AND slot = ?2"; - let args: &[&dyn ToSql] = params![network_id, slot]; + let args = params![network_id, slot]; Ok(query_row::(conn, &qry, args)? .map(|x| x == 1) .unwrap_or(false)) @@ -1032,7 +1032,7 @@ impl PeerDB { ) -> Result<(), db_error> { for cid in smart_contracts { test_debug!("Add Stacker DB contract to slot {}: {}", slot, cid); - let args: &[&dyn ToSql] = params![cid.to_string(), slot]; + let args = params![cid.to_string(), slot]; tx.execute("INSERT OR REPLACE INTO stackerdb_peers (smart_contract_id,peer_slot) VALUES (?1,?2)", args) .map_err(db_error::SqliteError)?; } @@ -1054,7 +1054,7 @@ impl PeerDB { ) -> Result<(), db_error> { let old_peer_opt = PeerDB::get_peer_at(tx, neighbor.addr.network_id, slot)?; - let neighbor_args: &[&dyn ToSql] = params![ + let neighbor_args = params![ neighbor.addr.peer_version, neighbor.addr.network_id, to_bin(neighbor.addr.addrbytes.as_bytes()), @@ -1201,7 +1201,7 @@ impl PeerDB { peer_port: u16, deny_deadline: u64, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = params![ + let args = params![ u64_to_sql(deny_deadline)?, network_id, peer_addr.to_bin(), @@ -1247,7 +1247,7 @@ impl PeerDB { neighbor.addr.port, )?; - let args: &[&dyn ToSql] = params![ + let args = params![ neighbor.addr.peer_version, to_hex(&neighbor.public_key.to_bytes_compressed()), u64_to_sql(neighbor.expire_block)?, @@ -1299,7 +1299,7 @@ impl PeerDB { ) -> Result, db_error> { let qry = "SELECT slot FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; - let args: &[&dyn ToSql] = params![network_id, addrbytes.to_bin(), port]; + let args = params![network_id, addrbytes.to_bin(), port]; Ok(query_row::(conn, qry, args)?) } @@ -1325,7 +1325,7 @@ impl PeerDB { smart_contract: &QualifiedContractIdentifier, ) -> Result, db_error> { let qry = "SELECT peer_slot FROM stackerdb_peers WHERE smart_contract_id = ?1"; - let args: &[&dyn ToSql] = params![smart_contract.to_string()]; + let args = params![smart_contract.to_string()]; query_rows(conn, qry, args) } @@ -1385,7 +1385,7 @@ impl PeerDB { let sql = "DELETE FROM stackerdb_peers WHERE smart_contract_id = ?1 AND peer_slot = ?2"; for cid in to_delete.into_iter() { test_debug!("Delete Stacker DB for {:?}: {}", &neighbor.addr, &cid); - let args: &[&dyn ToSql] = params![cid.to_string(), slot]; + let args = params![cid.to_string(), slot]; tx.execute(sql, args).map_err(db_error::SqliteError)?; } @@ -1393,7 +1393,7 @@ impl PeerDB { "INSERT OR REPLACE INTO stackerdb_peers (smart_contract_id,peer_slot) VALUES (?1,?2)"; for cid in to_insert.iter() { test_debug!("Add Stacker DB for {:?}: {}", &neighbor.addr, &cid); - let args: &[&dyn ToSql] = params![cid.to_string(), slot]; + let args = params![cid.to_string(), slot]; tx.execute(sql, args).map_err(db_error::SqliteError)?; } @@ -1450,7 +1450,7 @@ impl PeerDB { prefix: &PeerAddress, mask: u32, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = params![prefix.to_bin(), mask]; + let args = params![prefix.to_bin(), mask]; tx.execute( &format!( "INSERT OR REPLACE INTO {} (prefix, mask) VALUES (?1, ?2)", @@ -1469,7 +1469,7 @@ impl PeerDB { prefix: &PeerAddress, mask: u32, ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = params![prefix.to_bin(), mask]; + let args = params![prefix.to_bin(), mask]; tx.execute( &format!("DELETE FROM {} WHERE prefix = ?1 AND mask = ?2", table), args, @@ -1546,7 +1546,7 @@ impl PeerDB { ) -> Result<(), db_error> { assert!(mask > 0 && mask <= 128); let prefix_txt = PeerDB::cidr_prefix_to_string(prefix, mask); - let args: &[&dyn ToSql] = params![value, mask, prefix_txt]; + let args = params![value, mask, prefix_txt]; tx.execute( &format!( "UPDATE frontier SET {} = ?1 WHERE SUBSTR(addrbytes,1,?2) = SUBSTR(?3,1,?2)", @@ -1624,7 +1624,7 @@ impl PeerDB { if always_include_allowed { // always include allowed neighbors, freshness be damned let allow_qry = "SELECT * FROM frontier WHERE network_id = ?1 AND denied < ?2 AND (allowed < 0 OR ?3 < allowed) AND (peer_version & 0x000000ff) >= ?4"; - let allow_args: &[&dyn ToSql] = params![ + let allow_args = params![ network_id, u64_to_sql(now_secs)?, u64_to_sql(now_secs)?, @@ -1654,7 +1654,7 @@ impl PeerDB { (allowed < 0 OR (allowed >= 0 AND allowed <= ?5)) AND (peer_version & 0x000000ff) >= ?6 ORDER BY RANDOM() LIMIT ?7" }; - let random_peers_args: &[&dyn ToSql] = params![ + let random_peers_args = params![ network_id, u64_to_sql(min_age)?, u64_to_sql(block_height)?, @@ -1730,7 +1730,7 @@ impl PeerDB { let addr_u32 = addrbits.ipv4_bits().unwrap(); let qry = "SELECT * FROM asn4 WHERE prefix = (?1 & ~((1 << (32 - mask)) - 1)) ORDER BY prefix DESC LIMIT 1"; - let args: &[&dyn ToSql] = params![addr_u32]; + let args = params![addr_u32]; let rows = query_rows::(conn, &qry, args)?; match rows.len() { 0 => Ok(None), @@ -1753,7 +1753,7 @@ impl PeerDB { #[cfg_attr(test, mutants::skip)] pub fn asn_count(conn: &DBConn, asn: u32) -> Result { let qry = "SELECT COUNT(*) FROM frontier WHERE asn = ?1"; - let args: &[&dyn ToSql] = params![asn]; + let args = params![asn]; let count = query_count(conn, &qry, args)?; Ok(count as u64) } @@ -1786,7 +1786,7 @@ impl PeerDB { } let qry = "SELECT DISTINCT frontier.* FROM frontier JOIN stackerdb_peers ON stackerdb_peers.peer_slot = frontier.slot WHERE stackerdb_peers.smart_contract_id = ?1 AND frontier.network_id = ?2 AND frontier.last_contact_time >= ?3 ORDER BY RANDOM() LIMIT ?4"; let max_count_u32 = u32::try_from(max_count).unwrap_or(u32::MAX); - let args: &[&dyn ToSql] = params![ + let args = params![ smart_contract.to_string(), network_id, u64_to_sql(min_age)?, diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 6a24afd990..d95d3ebbdb 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -158,7 +158,7 @@ fn inner_get_stackerdb_id( smart_contract: &QualifiedContractIdentifier, ) -> Result { let sql = "SELECT rowid FROM databases WHERE smart_contract_id = ?1"; - let args: &[&dyn ToSql] = params![smart_contract.to_string()]; + let args = params![smart_contract.to_string()]; Ok(query_row(conn, sql, args)?.ok_or(net_error::NoSuchStackerDB(smart_contract.clone()))?) } @@ -172,7 +172,7 @@ fn inner_get_slot_metadata( ) -> Result, net_error> { let stackerdb_id = inner_get_stackerdb_id(conn, smart_contract)?; let sql = "SELECT slot_id,version,data_hash,signature FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = params![stackerdb_id, slot_id]; + let args = params![stackerdb_id, slot_id]; query_row(conn, &sql, args).map_err(|e| e.into()) } @@ -187,7 +187,7 @@ fn inner_get_slot_validation( let stackerdb_id = inner_get_stackerdb_id(conn, smart_contract)?; let sql = "SELECT signer,write_time,version FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = params![stackerdb_id, slot_id]; + let args = params![stackerdb_id, slot_id]; query_row(conn, &sql, args).map_err(|e| e.into()) } @@ -207,7 +207,7 @@ impl<'a> StackerDBTx<'a> { smart_contract_id: &QualifiedContractIdentifier, ) -> Result<(), net_error> { let qry = "DELETE FROM databases WHERE smart_contract_id = ?1"; - let args: &[&dyn ToSql] = params![smart_contract_id.to_string()]; + let args = params![smart_contract_id.to_string()]; let mut stmt = self.sql_tx.prepare(qry)?; stmt.execute(args)?; Ok(()) @@ -247,7 +247,7 @@ impl<'a> StackerDBTx<'a> { let qry = "INSERT OR REPLACE INTO databases (smart_contract_id) VALUES (?1)"; let mut stmt = self.sql_tx.prepare(&qry)?; - let args: &[&dyn ToSql] = params![smart_contract.to_string()]; + let args = params![smart_contract.to_string()]; stmt.execute(args)?; let stackerdb_id = self.get_stackerdb_id(smart_contract)?; @@ -259,7 +259,7 @@ impl<'a> StackerDBTx<'a> { for (principal, slot_count) in slots.iter() { test_debug!("Create StackerDB slots: ({}, {})", &principal, slot_count); for _ in 0..*slot_count { - let args: &[&dyn ToSql] = params![ + let args = params![ stackerdb_id, principal.to_string(), slot_id, @@ -287,7 +287,7 @@ impl<'a> StackerDBTx<'a> { ) -> Result<(), net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "DELETE FROM chunks WHERE stackerdb_id = ?1"; - let args: &[&dyn ToSql] = params![stackerdb_id]; + let args = params![stackerdb_id]; let mut stmt = self.sql_tx.prepare(&qry)?; stmt.execute(args)?; Ok(()) @@ -327,7 +327,7 @@ impl<'a> StackerDBTx<'a> { // new slot, or existing slot with a different signer let qry = "INSERT OR REPLACE INTO chunks (stackerdb_id,signer,slot_id,version,write_time,data,data_hash,signature) VALUES (?1,?2,?3,?4,?5,?6,?7,?8)"; let mut stmt = self.sql_tx.prepare(&qry)?; - let args: &[&dyn ToSql] = params![ + let args = params![ stackerdb_id, principal.to_string(), slot_id, @@ -375,7 +375,7 @@ impl<'a> StackerDBTx<'a> { let sql = "UPDATE chunks SET version = ?1, data_hash = ?2, signature = ?3, data = ?4, write_time = ?5 WHERE stackerdb_id = ?6 AND slot_id = ?7"; let mut stmt = self.sql_tx.prepare(&sql)?; - let args: &[&dyn ToSql] = params![ + let args = params![ slot_desc.slot_version, Sha512Trunc256Sum::from_data(chunk), slot_desc.signature, @@ -549,7 +549,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT signer FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = params![stackerdb_id, slot_id]; + let args = params![stackerdb_id, slot_id]; query_row(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -562,7 +562,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT signer FROM chunks WHERE stackerdb_id = ?1 GROUP BY signer"; - let args: &[&dyn ToSql] = params![stackerdb_id]; + let args = params![stackerdb_id]; query_rows(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -583,7 +583,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = inner_get_stackerdb_id(&self.conn, smart_contract)?; let sql = "SELECT slot_id,version,data_hash,signature FROM chunks WHERE stackerdb_id = ?1 ORDER BY slot_id ASC"; - let args: &[&dyn ToSql] = params![stackerdb_id]; + let args = params![stackerdb_id]; query_rows(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -607,7 +607,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "SELECT version FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = params![stackerdb_id, slot_id]; + let args = params![stackerdb_id, slot_id]; self.conn .query_row(qry, args, |row| row.get(0)) @@ -622,7 +622,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT version FROM chunks WHERE stackerdb_id = ?1 ORDER BY slot_id"; - let args: &[&dyn ToSql] = params![stackerdb_id]; + let args = params![stackerdb_id]; query_rows(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -633,7 +633,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let sql = "SELECT write_time FROM chunks WHERE stackerdb_id = ?1 ORDER BY slot_id"; - let args: &[&dyn ToSql] = params![stackerdb_id]; + let args = params![stackerdb_id]; query_rows(&self.conn, &sql, args).map_err(|e| e.into()) } @@ -648,7 +648,7 @@ impl StackerDBs { ) -> Result>, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "SELECT data FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2"; - let args: &[&dyn ToSql] = params![stackerdb_id, slot_id]; + let args = params![stackerdb_id, slot_id]; self.conn .query_row(qry, args, |row| row.get(0)) @@ -681,7 +681,7 @@ impl StackerDBs { ) -> Result, net_error> { let stackerdb_id = self.get_stackerdb_id(smart_contract)?; let qry = "SELECT slot_id,version,signature,data FROM chunks WHERE stackerdb_id = ?1 AND slot_id = ?2 AND version = ?3"; - let args: &[&dyn ToSql] = params![stackerdb_id, slot_id, slot_version]; + let args = params![stackerdb_id, slot_id, slot_version]; query_row(&self.conn, &qry, args).map_err(|e| e.into()) } } diff --git a/stackslib/src/util_lib/bloom.rs b/stackslib/src/util_lib/bloom.rs index b418addfde..d1632f0b14 100644 --- a/stackslib/src/util_lib/bloom.rs +++ b/stackslib/src/util_lib/bloom.rs @@ -362,7 +362,7 @@ impl BloomCounter { "INSERT INTO {} (counts, num_bins, num_hashes, hasher) VALUES (?1, ?2, ?3, ?4)", table_name ); - let args: &[&dyn ToSql] = params![counts_vec, num_bins, num_hashes, hasher_vec]; + let args = params![counts_vec, num_bins, num_hashes, hasher_vec]; tx.execute(&sql, args).map_err(db_error::SqliteError)?; From 1be684729011bb68305a07245309c78d29633a10 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 8 Jul 2024 18:02:14 +0300 Subject: [PATCH 0482/1400] change u128 casting to `u128::from(value)` --- stackslib/src/core/mempool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 9cd8539a77..bffd4fcdf3 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1922,7 +1922,7 @@ impl MemPoolDB { debug!( "Mempool iteration finished"; - "considered_txs" => total_considered as u128, + "considered_txs" => u128::from(total_considered), "elapsed_ms" => start_time.elapsed().as_millis() ); Ok(total_considered) From 443fd1108dab49146970c0c6187f09557b4fc13b Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 8 Jul 2024 18:05:26 +0300 Subject: [PATCH 0483/1400] `fmt-stacks` format code --- stackslib/src/burnchains/db.rs | 3 +-- stackslib/src/chainstate/stacks/db/blocks.rs | 24 +++++++------------- stackslib/src/core/mempool.rs | 3 +-- 3 files changed, 10 insertions(+), 20 deletions(-) diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index aa3725677d..66b4753fde 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -904,8 +904,7 @@ impl<'a> BurnchainDBTransaction<'a> { for op in block_ops.iter() { let serialized_op = serde_json::to_string(op).expect("Failed to serialize parsed BlockstackOp"); - let args = - params![block_header.block_hash, op.txid_ref(), serialized_op]; + let args = params![block_header.block_hash, op.txid_ref(), serialized_op]; stmt.execute(args)?; } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 1710bfe360..2cdb192bf4 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -1565,8 +1565,7 @@ impl StacksChainState { // if this block has an unprocessed staging parent, then it's not attachable until its parent is. let has_unprocessed_parent_sql = "SELECT anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 AND processed = 0 AND orphaned = 0 LIMIT 1"; let has_parent_sql = "SELECT anchored_block_hash FROM staging_blocks WHERE anchored_block_hash = ?1 AND consensus_hash = ?2 LIMIT 1"; - let has_parent_args = - params![block.header.parent_block, parent_consensus_hash]; + let has_parent_args = params![block.header.parent_block, parent_consensus_hash]; let has_unprocessed_parent_rows = query_row_columns::( &tx, has_unprocessed_parent_sql, @@ -2028,8 +2027,7 @@ impl StacksChainState { ); let sql = "SELECT COALESCE(MIN(block_height), 0), COALESCE(MAX(block_height), 0) FROM block_headers WHERE burn_header_height >= ?1 AND burn_header_height < ?2"; - let args = - params![u64_to_sql(burn_height_start)?, u64_to_sql(burn_height_end)?,]; + let args = params![u64_to_sql(burn_height_start)?, u64_to_sql(burn_height_end)?,]; self.db() .query_row(sql, args, |row| { @@ -2309,8 +2307,7 @@ impl StacksChainState { // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let find_orphaned_microblocks_args = - params![consensus_hash, anchored_block_hash]; + let find_orphaned_microblocks_args = params![consensus_hash, anchored_block_hash]; let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, @@ -2320,8 +2317,7 @@ impl StacksChainState { // drop microblocks (this processes them) let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let update_microblock_children_args = - params![consensus_hash, anchored_block_hash]; + let update_microblock_children_args = params![consensus_hash, anchored_block_hash]; tx.execute(update_block_sql, update_block_args)?; @@ -2532,8 +2528,7 @@ impl StacksChainState { // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; - let find_orphaned_microblocks_args = - params![consensus_hash, anchored_block_hash]; + let find_orphaned_microblocks_args = params![consensus_hash, anchored_block_hash]; let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, @@ -2550,8 +2545,7 @@ impl StacksChainState { &index_block_hash ); let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string(); - let update_microblock_children_args = - params![consensus_hash, anchored_block_hash]; + let update_microblock_children_args = params![consensus_hash, anchored_block_hash]; tx.execute(&update_block_sql, update_block_args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; @@ -2587,8 +2581,7 @@ impl StacksChainState { ) -> Result<(), Error> { // find offending sequence let seq_sql = "SELECT sequence FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2 AND microblock_hash = ?3 AND processed = 0 AND orphaned = 0".to_string(); - let seq_args = - params![consensus_hash, anchored_block_hash, invalid_block_hash]; + let seq_args = params![consensus_hash, anchored_block_hash, invalid_block_hash]; let seq = match query_int::<_>(tx, &seq_sql, seq_args) { Ok(seq) => seq, Err(e) => match e { @@ -2674,8 +2667,7 @@ impl StacksChainState { test_debug!("Set {}-{} processed", &parent_index_hash, &mblock_hash); // confirm this microblock - let args = - params![parent_consensus_hash, parent_block_hash, mblock_hash]; + let args = params![parent_consensus_hash, parent_block_hash, mblock_hash]; tx.execute(sql, args) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index bffd4fcdf3..47be6855ba 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1980,8 +1980,7 @@ impl MemPoolDB { timestamp: u64, ) -> Result, db_error> { let sql = "SELECT * FROM mempool WHERE accept_time = ?1 AND consensus_hash = ?2 AND block_header_hash = ?3 ORDER BY origin_nonce ASC"; - let args = - params![u64_to_sql(timestamp)?, consensus_hash, block_header_hash]; + let args = params![u64_to_sql(timestamp)?, consensus_hash, block_header_hash]; let rows = query_rows::(conn, &sql, args)?; Ok(rows) } From f63fd9522bc51573df265e7a76baf55e2488d41b Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Mon, 8 Jul 2024 16:35:23 +0100 Subject: [PATCH 0484/1400] docs: specify allowed character set for branch names As specified by docker tags requirements [1] and `docker-metadata` [2]. Prevents issues like [3] on parenthesis (not actually related to tag name, but to Docker failing CI configuration). [1]: https://docs.docker.com/reference/cli/docker/image/tag/#description [2]: https://github.com/docker/metadata-action?tab=readme-ov-file#image-name-and-tag-sanitization [3]: https://github.com/stacks-network/stacks-core/actions/runs/9766520161/job/27087431713?pr=4921 --- CONTRIBUTING.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9ffcfb80f7..22507d6f33 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -43,6 +43,11 @@ Branch names should use a prefix that conveys the overall goal of the branch: - `test/more-coverage` for branches that only add more tests - `refactor/formatting-fix` for refactors +The branch suffix must only include ASCII lowercase and uppercase letters, +digits, underscores, periods and dashes. + +The full branch name must be max 128 characters long. + ### Merging PRs from Forks PRs from forks or opened by contributors without commit access require From 830c6361142bbc4f93a18d7be2d66d8a2814ff5a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 8 Jul 2024 14:50:31 -0400 Subject: [PATCH 0485/1400] Do not terminate tenure early and process block proposals that are from prior cycles. Adds test. Signed-off-by: Jacinta Ferrant --- stacks-signer/src/lib.rs | 2 + stacks-signer/src/runloop.rs | 10 +- stacks-signer/src/signerdb.rs | 38 +++++++ stacks-signer/src/v0/signer.rs | 17 ++- stacks-signer/src/v1/signer.rs | 10 ++ testnet/stacks-node/src/tests/signer/mod.rs | 13 ++- testnet/stacks-node/src/tests/signer/v0.rs | 117 +++++++++++++++----- testnet/stacks-node/src/tests/signer/v1.rs | 14 +-- 8 files changed, 178 insertions(+), 43 deletions(-) diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 47367c9bd5..927f6e2300 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -81,6 +81,8 @@ pub trait Signer: Debug + Display { current_reward_cycle: u64, command: Option, ); + /// Check if the signer is stale, i.e. its tenure is complete and it has no pending blocks to process + fn has_pending_blocks(&self) -> bool; } /// A wrapper around the running signer type for the signer diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index c257bb624c..625a3996cd 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -375,13 +375,21 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo fn cleanup_stale_signers(&mut self, current_reward_cycle: u64) { let mut to_delete = Vec::new(); for (idx, signer) in &mut self.stacks_signers { - if signer.reward_cycle() < current_reward_cycle { + let reward_cycle = signer.reward_cycle(); + let next_reward_cycle = reward_cycle.wrapping_add(1); + let stale = match next_reward_cycle.cmp(¤t_reward_cycle) { + std::cmp::Ordering::Less => true, // We are more than one reward cycle behind, so we are stale + std::cmp::Ordering::Equal => !signer.has_pending_blocks(), // We are the next reward cycle, so check if we have any pending blocks to process + std::cmp::Ordering::Greater => false, // We are the current reward cycle, so we are not stale + }; + if stale { debug!("{signer}: Signer's tenure has completed."); to_delete.push(*idx); continue; } } for idx in to_delete { + println!("DELETING"); self.stacks_signers.remove(&idx); } } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 5c6b1a8870..738eb11e75 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -100,6 +100,7 @@ CREATE TABLE IF NOT EXISTS blocks ( const CREATE_INDEXES: &str = " CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (signed_over); CREATE INDEX IF NOT EXISTS blocks_consensus_hash ON blocks (consensus_hash); +CREATE INDEX IF NOT EXISTS blocks_valid ON blocks ((json_extract(block_info, '$.valid'))); "; const CREATE_SIGNER_STATE_TABLE: &str = " @@ -230,6 +231,15 @@ impl SignerDb { Ok(()) } + + /// Determine if there are any pending blocks that have not yet been processed by checking the block_info.valid field + pub fn has_pending_blocks(&self, reward_cycle: u64) -> Result { + let query = "SELECT block_info FROM blocks WHERE reward_cycle = ? AND json_extract(block_info, '$.valid') IS NULL LIMIT 1"; + let result: Option = + query_row(&self.db, query, params!(&u64_to_sql(reward_cycle)?))?; + + Ok(result.is_some()) + } } fn try_deserialize(s: Option) -> Result, DBError> @@ -420,6 +430,34 @@ mod tests { .is_none()); } + #[test] + fn test_has_pending_blocks() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let (mut block_info_1, _block_proposal) = create_block(); + let (block_info_2, _block_proposal) = create_block(); + let (block_info_3, _block_proposal) = create_block(); + let (block_info_4, _block_proposal) = create_block(); + + db.insert_block(&block_info_1) + .expect("Unable to insert block into db"); + db.insert_block(&block_info_2) + .expect("Unable to insert block into db"); + db.insert_block(&block_info_3) + .expect("Unable to insert block into db"); + db.insert_block(&block_info_4) + .expect("Unable to insert block into db"); + + assert!(db.has_pending_blocks(block_info_1.reward_cycle).unwrap()); + + block_info_1.valid = Some(true); + + db.insert_block(&block_info_1) + .expect("Unable to update block in db"); + + assert!(!db.has_pending_blocks(block_info_1.reward_cycle).unwrap()); + } + #[test] fn test_sqlite_version() { let db_path = tmp_db_path(); diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 6d55833e88..ce37c0a522 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -82,13 +82,13 @@ impl SignerTrait for Signer { sortition_state: &mut Option, event: Option<&SignerEvent>, _res: Sender>, - current_reward_cycle: u64, + _current_reward_cycle: u64, ) { let event_parity = match event { - Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2), // Block proposal events do have reward cycles, but each proposal has its own cycle, // and the vec could be heterogenous, so, don't differentiate. - Some(SignerEvent::MinerMessages(..)) + Some(SignerEvent::BlockValidationResponse(_)) + | Some(SignerEvent::MinerMessages(..)) | Some(SignerEvent::NewBurnBlock(_)) | Some(SignerEvent::StatusCheck) | None => None, @@ -163,6 +163,16 @@ impl SignerTrait for Signer { warn!("{self}: Received a command: {command:?}. V0 Signers do not support commands. Ignoring...") } } + + fn has_pending_blocks(&self) -> bool { + self.signer_db + .has_pending_blocks(self.reward_cycle) + .unwrap_or_else(|e| { + error!("{self}: Failed to check for pending blocks: {e:?}",); + // Assume we have pending blocks to prevent premature cleanup + true + }) + } } impl From for Signer { @@ -381,7 +391,6 @@ impl Signer { } }; block_info.valid = Some(true); - // TODO: do not sign the block if it fails signer state checks (forks, etc.) let signature = self .private_key .sign(&signer_signature_hash.0) diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index f89fc031ae..8eff7f5fbe 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -267,6 +267,16 @@ impl SignerTrait for Signer { } self.process_next_command(stacks_client, current_reward_cycle); } + + fn has_pending_blocks(&self) -> bool { + self.signer_db + .has_pending_blocks(self.reward_cycle) + .unwrap_or_else(|e| { + error!("{self}: Failed to check if there are pending blocks: {e:?}"); + // Assume there are pending blocks to prevent premature cleanup + true + }) + } } impl Signer { diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 0ac82f4365..e2bc3dc30c 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -100,7 +100,11 @@ pub struct SignerTest { } impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest> { - fn new(num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>) -> Self { + fn new( + num_signers: usize, + initial_balances: Vec<(StacksAddress, u64)>, + wait_on_signers: Option, + ) -> Self { // Generate Signer Data let signer_stacks_private_keys = (0..num_signers) .map(|_| StacksPrivateKey::new()) @@ -118,7 +122,11 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = SignerTest::new(num_signers, vec![]); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); signer_test.boot_to_epoch_3(); let short_timeout = Duration::from_secs(30); @@ -360,7 +362,7 @@ fn miner_gather_signatures() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); signer_test.boot_to_epoch_3(); let timeout = Duration::from_secs(30); @@ -412,7 +414,7 @@ fn mine_2_nakamoto_reward_cycles() { info!("------------------------- Test Setup -------------------------"); let nmb_reward_cycles = 2; let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); let timeout = Duration::from_secs(200); signer_test.boot_to_epoch_3(); let curr_reward_cycle = signer_test.get_current_reward_cycle(); @@ -468,13 +470,36 @@ fn end_of_tenure() { let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(500)), ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let long_timeout = Duration::from_secs(200); + let short_timeout = Duration::from_secs(20); signer_test.boot_to_epoch_3(); + let curr_reward_cycle = signer_test.get_current_reward_cycle(); + // Advance to one before the next reward cycle to ensure we are on the reward cycle boundary + let final_reward_cycle = curr_reward_cycle + 1; + let final_reward_cycle_height_boundary = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(final_reward_cycle) + - 2; - signer_test.mine_nakamoto_block(Duration::from_secs(30)); + info!("------------------------- Test Mine to Next Reward Cycle Boundary -------------------------"); + signer_test.run_until_burnchain_height_nakamoto( + long_timeout, + final_reward_cycle_height_boundary, + num_signers, + ); + println!("Advanced to nexct reward cycle boundary: {final_reward_cycle_height_boundary}"); + assert_eq!( + signer_test.get_current_reward_cycle(), + final_reward_cycle - 1 + ); + info!("------------------------- Test Block Validation Stalled -------------------------"); TEST_VALIDATE_STALL.lock().unwrap().replace(true); let proposals_before = signer_test @@ -486,6 +511,8 @@ fn end_of_tenure() { .nakamoto_blocks_mined .load(Ordering::SeqCst); + let info = get_chain_info(&signer_test.running_nodes.conf); + let start_height = info.stacks_tip_height; // submit a tx so that the miner will mine an extra block let sender_nonce = 0; let transfer_tx = @@ -493,14 +520,17 @@ fn end_of_tenure() { submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block proposal"); - loop { - let blocks_proposed = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - if blocks_proposed > proposals_before { - break; - } + let start_time = Instant::now(); + while signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst) + <= proposals_before + { + assert!( + start_time.elapsed() <= short_timeout, + "Timed out waiting for block proposal" + ); std::thread::sleep(Duration::from_millis(100)); } @@ -516,14 +546,13 @@ fn end_of_tenure() { blocks_before ); + info!("Triggering a new block to be mined"); + + // Mine a couple blocks into the next reward cycle let commits_before = signer_test .running_nodes .commits_submitted .load(Ordering::SeqCst); - - info!("Triggering a new block to be mined"); - - // Trigger the next block to be mined and commit submitted next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 10, @@ -536,23 +565,52 @@ fn end_of_tenure() { }, ) .unwrap(); + for _ in 0..2 { + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 10, + || Ok(true), + ) + .unwrap(); + } + assert_eq!(signer_test.get_current_reward_cycle(), final_reward_cycle); + + while test_observer::get_burn_blocks() + .last() + .unwrap() + .get("burn_block_height") + .unwrap() + .as_u64() + .unwrap() + >= final_reward_cycle_height_boundary + 3 + { + std::thread::sleep(Duration::from_secs(1)); + assert!( + start_time.elapsed() <= short_timeout, + "Timed out waiting for bun block events" + ); + } - info!("Disabling the stall and waiting for the blocks to be processed"); + std::thread::sleep(short_timeout); + info!("Unpausing block validation and waiting for block to be processed"); // Disable the stall and wait for the block to be processed TEST_VALIDATE_STALL.lock().unwrap().replace(false); - loop { - let blocks_mined = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - if blocks_mined > blocks_before + 1 { - break; - } + let start_time = Instant::now(); + while signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst) + <= blocks_before + { + assert!( + start_time.elapsed() <= short_timeout, + "Timed out waiting for block to be mined" + ); std::thread::sleep(Duration::from_millis(100)); } let info = get_chain_info(&signer_test.running_nodes.conf); - assert_eq!(info.stacks_tip_height, 30); + assert_eq!(info.stacks_tip_height, start_height + 1); signer_test.shutdown(); } @@ -580,6 +638,7 @@ fn retry_on_timeout() { let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(5)), ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); @@ -619,8 +678,8 @@ fn retry_on_timeout() { info!("Block proposed, verifying that it is not processed"); - // Wait 20 seconds to be sure that the timeout has occurred - std::thread::sleep(Duration::from_secs(20)); + // Wait 10 seconds to be sure that the timeout has occurred + std::thread::sleep(Duration::from_secs(10)); assert_eq!( signer_test .running_nodes diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index ae5696e4ea..9a3af13081 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -485,7 +485,7 @@ fn dkg() { info!("------------------------- Test Setup -------------------------"); let timeout = Duration::from_secs(200); - let mut signer_test = SignerTest::new(10, vec![]); + let mut signer_test = SignerTest::new(10, vec![], None); info!("Boot to epoch 3.0 reward calculation..."); boot_to_epoch_3_reward_set( &signer_test.running_nodes.conf, @@ -594,7 +594,7 @@ fn sign_request_rejected() { block2.header.tx_merkle_root = tx_merkle_root2; let timeout = Duration::from_secs(200); - let mut signer_test: SignerTest = SignerTest::new(10, vec![]); + let mut signer_test: SignerTest = SignerTest::new(10, vec![], None); let _key = signer_test.boot_to_epoch_3(timeout); info!("------------------------- Test Sign -------------------------"); @@ -689,7 +689,7 @@ fn delayed_dkg() { info!("------------------------- Test Setup -------------------------"); let timeout = Duration::from_secs(200); let num_signers = 3; - let mut signer_test = SignerTest::new(num_signers, vec![]); + let mut signer_test = SignerTest::new(num_signers, vec![], None); boot_to_epoch_3_reward_set_calculation_boundary( &signer_test.running_nodes.conf, &signer_test.running_nodes.blocks_processed, @@ -881,7 +881,7 @@ fn block_proposal() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); let timeout = Duration::from_secs(30); let short_timeout = Duration::from_secs(30); @@ -940,7 +940,7 @@ fn mine_2_nakamoto_reward_cycles() { info!("------------------------- Test Setup -------------------------"); let nmb_reward_cycles = 2; - let mut signer_test: SignerTest = SignerTest::new(5, vec![]); + let mut signer_test: SignerTest = SignerTest::new(5, vec![], None); let timeout = Duration::from_secs(200); let first_dkg = signer_test.boot_to_epoch_3(timeout); let curr_reward_cycle = signer_test.get_current_reward_cycle(); @@ -1015,7 +1015,7 @@ fn filter_bad_transactions() { info!("------------------------- Test Setup -------------------------"); // Advance to the prepare phase of a post epoch 3.0 reward cycle to force signers to look at the next signer transactions to compare against a proposed block - let mut signer_test: SignerTest = SignerTest::new(5, vec![]); + let mut signer_test: SignerTest = SignerTest::new(5, vec![], None); let timeout = Duration::from_secs(200); let current_signers_dkg = signer_test.boot_to_epoch_3(timeout); let next_signers_dkg = signer_test @@ -1103,7 +1103,7 @@ fn sign_after_signer_reboot() { info!("------------------------- Test Setup -------------------------"); let num_signers = 3; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); let timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(30); From f90bb2955c21f9b548c55f21e4e2a12d27af81c4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 8 Jul 2024 14:52:40 -0400 Subject: [PATCH 0486/1400] Cleanup logging and cleanup_stale_signers Signed-off-by: Jacinta Ferrant --- stacks-signer/src/runloop.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 625a3996cd..b8396525f8 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -385,11 +385,9 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo if stale { debug!("{signer}: Signer's tenure has completed."); to_delete.push(*idx); - continue; } } for idx in to_delete { - println!("DELETING"); self.stacks_signers.remove(&idx); } } From bce9dd2bd086d2a7f787dcb1dd59f326bd3e3e1c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 9 Jul 2024 08:07:31 -0400 Subject: [PATCH 0487/1400] Cleanup has_pending_blocks documentation Signed-off-by: Jacinta Ferrant --- stacks-signer/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 927f6e2300..2310fce938 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -81,7 +81,7 @@ pub trait Signer: Debug + Display { current_reward_cycle: u64, command: Option, ); - /// Check if the signer is stale, i.e. its tenure is complete and it has no pending blocks to process + /// Check if the signer is in the middle of processing blocks fn has_pending_blocks(&self) -> bool; } From 224273b752aca83d97e0d2221827e6bb08d528ed Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 9 Jul 2024 08:48:15 -0400 Subject: [PATCH 0488/1400] Add end_of_tenure test to CI Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 1bb4bd17f4..f7b96b96c7 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -86,6 +86,7 @@ jobs: - tests::signer::v0::block_proposal_rejection - tests::signer::v0::miner_gather_signatures - tests::signer::v0::mine_2_nakamoto_reward_cycles + - tests::signer::v0::end_of_tenure - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state From 86d6d226f693f8a585eed589d760b204475a1fc7 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 9 Jul 2024 10:52:48 -0400 Subject: [PATCH 0489/1400] Remove sleeps in end_of_tenure test Signed-off-by: Jacinta Ferrant --- stacks-signer/src/runloop.rs | 24 ++++++-- testnet/stacks-node/src/tests/signer/mod.rs | 11 ++-- testnet/stacks-node/src/tests/signer/v0.rs | 61 ++++++++++++--------- 3 files changed, 60 insertions(+), 36 deletions(-) diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index b8396525f8..67dfbdd626 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -34,10 +34,19 @@ use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID, S use crate::config::{GlobalConfig, SignerConfig}; use crate::Signer as SignerTrait; +/// The internal signer state info +#[derive(PartialEq, Clone, Debug)] +pub struct StateInfo { + /// the runloop state + pub runloop_state: State, + /// the current reward cycle info + pub reward_cycle_info: Option, +} + /// The signer result that can be sent across threads pub enum SignerResult { /// The signer has received a status check - StatusCheck(State), + StatusCheck(StateInfo), /// The signer has completed an operation OperationResult(OperationResult), } @@ -48,9 +57,9 @@ impl From for SignerResult { } } -impl From for SignerResult { - fn from(state: State) -> Self { - SignerResult::StatusCheck(state) +impl From for SignerResult { + fn from(state_info: StateInfo) -> Self { + SignerResult::StatusCheck(state_info) } } @@ -458,7 +467,12 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> // This is the only event that we respond to from the outer signer runloop if let Some(SignerEvent::StatusCheck) = event { info!("Signer status check requested: {:?}.", self.state); - if let Err(e) = res.send(vec![self.state.into()]) { + if let Err(e) = res.send(vec![StateInfo { + runloop_state: self.state, + reward_cycle_info: self.current_reward_cycle_info, + } + .into()]) + { error!("Failed to send status check result: {e}."); } } diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index e2bc3dc30c..18a4ea40f3 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -51,7 +51,7 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum}; use stacks_signer::client::{SignerSlotID, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::runloop::{SignerResult, State}; +use stacks_signer::runloop::{SignerResult, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; use wsts::state_machine::PublicKeys; @@ -168,7 +168,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest Vec { + pub fn wait_for_states(&mut self, timeout: Duration) -> Vec { debug!("Waiting for Status..."); let now = std::time::Instant::now(); let mut states = Vec::with_capacity(self.spawned_signers.len()); @@ -202,8 +203,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { panic!("Recieved an operation result."); } - SignerResult::StatusCheck(state) => { - states.push(state); + SignerResult::StatusCheck(state_info) => { + states.push(state_info); } } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7e86767c63..93e7da29d2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -93,7 +93,7 @@ impl SignerTest { let states = self.wait_for_states(short_timeout); if states .iter() - .all(|state| state == &State::RegisteredSigners) + .all(|state_info| state_info.runloop_state == State::RegisteredSigners) { break; } @@ -534,21 +534,9 @@ fn end_of_tenure() { std::thread::sleep(Duration::from_millis(100)); } - info!("Block proposed, verifying that it is not processed"); - - // Wait 10 seconds and verify that the block has not been processed - std::thread::sleep(Duration::from_secs(10)); - assert_eq!( - signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst), - blocks_before - ); - info!("Triggering a new block to be mined"); - // Mine a couple blocks into the next reward cycle + // Mine a block into the next reward cycle let commits_before = signer_test .running_nodes .commits_submitted @@ -565,14 +553,6 @@ fn end_of_tenure() { }, ) .unwrap(); - for _ in 0..2 { - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 10, - || Ok(true), - ) - .unwrap(); - } assert_eq!(signer_test.get_current_reward_cycle(), final_reward_cycle); while test_observer::get_burn_blocks() @@ -582,16 +562,45 @@ fn end_of_tenure() { .unwrap() .as_u64() .unwrap() - >= final_reward_cycle_height_boundary + 3 + >= final_reward_cycle_height_boundary + 1 { - std::thread::sleep(Duration::from_secs(1)); assert!( start_time.elapsed() <= short_timeout, - "Timed out waiting for bun block events" + "Timed out waiting for burn block events" ); + std::thread::sleep(Duration::from_millis(100)); } - std::thread::sleep(short_timeout); + let now = std::time::Instant::now(); + // Wait for the signer to process the burn blocks and fully enter the next reward cycle + loop { + signer_test.send_status_request(); + let states = signer_test.wait_for_states(short_timeout); + if states.iter().all(|state_info| { + state_info + .reward_cycle_info + .map(|info| info.reward_cycle == final_reward_cycle) + .unwrap_or(false) + }) { + break; + } + assert!( + now.elapsed() < short_timeout, + "Timed out waiting for signers to be in the next reward cycle" + ); + std::thread::sleep(Duration::from_millis(100)); + } + + info!("Block proposed and burn blocks consumed. Verifying that stacks block is still not processed"); + + assert_eq!( + signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst), + blocks_before + ); + info!("Unpausing block validation and waiting for block to be processed"); // Disable the stall and wait for the block to be processed TEST_VALIDATE_STALL.lock().unwrap().replace(false); From b231ee912cb3e7f3d0e8da9de3fc07d21e6360ea Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 9 Jul 2024 12:30:53 -0400 Subject: [PATCH 0490/1400] Fix waiting for burn blocks logic Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 93e7da29d2..6c589d9b41 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -553,6 +553,16 @@ fn end_of_tenure() { }, ) .unwrap(); + + // Mine a few blocks so we are well into the next reward cycle + for _ in 0..2 { + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 10, + || Ok(true), + ) + .unwrap(); + } assert_eq!(signer_test.get_current_reward_cycle(), final_reward_cycle); while test_observer::get_burn_blocks() @@ -562,7 +572,7 @@ fn end_of_tenure() { .unwrap() .as_u64() .unwrap() - >= final_reward_cycle_height_boundary + 1 + < final_reward_cycle_height_boundary + 1 { assert!( start_time.elapsed() <= short_timeout, From fefac85d8242e79225503a8cee59fec710c45c6f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 9 Jul 2024 14:16:10 -0400 Subject: [PATCH 0491/1400] fix: typo --- stackslib/src/chainstate/nakamoto/coordinator/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 3732df86c8..0ef2f5b135 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -1097,7 +1097,7 @@ impl< // and will remain canonical with respect to its tenure's Bitcoin fork forever. // Here, we're loading a reward set calculated between H and H+99 from H+100, where // H is the start of the prepare phase. So if we get any reward set from our - // canonical tip, it's guaranteed to be te canonical one. + // canonical tip, it's guaranteed to be the canonical one. let canonical_sortition_tip = self.canonical_sortition_tip.clone().unwrap_or( // should be unreachable SortitionDB::get_canonical_burn_chain_tip(&self.sortition_db.conn())? From 85492aa005aedf85afccd7b30346bbfd54d10d9f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 9 Jul 2024 23:30:26 -0400 Subject: [PATCH 0492/1400] chore: fix unit tests --- .../src/net/download/nakamoto/download_state_machine.rs | 4 ++++ .../src/net/download/nakamoto/tenure_downloader_set.rs | 4 ++-- stackslib/src/net/httpcore.rs | 9 ++++++++- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index c3af881516..3865e8ee39 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -1263,6 +1263,10 @@ impl NakamotoDownloadStateMachine { if is_available && !wt.processed { // a tenure is available but not yet processed, so we can't yet transition to // fetching unconfirmed tenures (we'd have no way to validate them). + test_debug!( + "Tenure {} is available but not yet processed", + &wt.tenure_id_consensus_hash + ); return false; } } diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index a75fb9dc8d..8a154637cf 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -203,9 +203,9 @@ impl NakamotoTenureDownloaderSet { continue; } test_debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); - return true; + return false; } - false + true } /// Try to resume processing a download state machine with a given peer. Since a peer is diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 0bb725bcc9..dec51df42a 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -804,7 +804,14 @@ impl StacksHttpRecvStream { blocked = true; (0, num_consumed) } - Ok((num_read, num_consumed)) => (num_read, num_consumed), + Ok((num_read, num_consumed)) => { + test_debug!( + "consume_data read {} bytes ({} consumed)", + num_read, + num_consumed + ); + (num_read, num_consumed) + } Err(e) => { if e.kind() == io::ErrorKind::WouldBlock || e.kind() == io::ErrorKind::TimedOut { From 25ca77e526e4d4018b4dbbc37c6778740786de94 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 10 Jul 2024 09:27:41 -0400 Subject: [PATCH 0493/1400] Fix has_pending_blocks test Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 738eb11e75..f4e5c7f8dc 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -192,7 +192,7 @@ impl SignerDb { tenure: &ConsensusHash, ) -> Result, DBError> { let query = "SELECT block_info FROM blocks WHERE consensus_hash = ? AND signed_over = 1 ORDER BY stacks_height DESC LIMIT 1"; - let result: Option = query_row(&self.db, query, &[tenure])?; + let result: Option = query_row(&self.db, query, [tenure])?; try_deserialize(result) } @@ -270,6 +270,7 @@ mod tests { use blockstack_lib::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockHeader, NakamotoBlockVote, }; + use clarity::util::secp256k1::MessageSignature; use libsigner::BlockProposal; use super::*; @@ -434,19 +435,19 @@ mod tests { fn test_has_pending_blocks() { let db_path = tmp_db_path(); let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); - let (mut block_info_1, _block_proposal) = create_block(); - let (block_info_2, _block_proposal) = create_block(); - let (block_info_3, _block_proposal) = create_block(); - let (block_info_4, _block_proposal) = create_block(); + let (mut block_info_1, _block_proposal) = create_block_override(|b| { + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.burn_height = 1; + }); + let (mut block_info_2, _block_proposal) = create_block_override(|b| { + b.block.header.miner_signature = MessageSignature([0x02; 65]); + b.burn_height = 2; + }); db.insert_block(&block_info_1) .expect("Unable to insert block into db"); db.insert_block(&block_info_2) .expect("Unable to insert block into db"); - db.insert_block(&block_info_3) - .expect("Unable to insert block into db"); - db.insert_block(&block_info_4) - .expect("Unable to insert block into db"); assert!(db.has_pending_blocks(block_info_1.reward_cycle).unwrap()); @@ -455,6 +456,13 @@ mod tests { db.insert_block(&block_info_1) .expect("Unable to update block in db"); + assert!(db.has_pending_blocks(block_info_1.reward_cycle).unwrap()); + + block_info_2.valid = Some(true); + + db.insert_block(&block_info_2) + .expect("Unable to update block in db"); + assert!(!db.has_pending_blocks(block_info_1.reward_cycle).unwrap()); } From 1cca58e9bfb543f65d84ba2ee9bda021d3613e9c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 10 Jul 2024 16:08:27 -0400 Subject: [PATCH 0494/1400] test: find exact value for expected block rewards --- .github/workflows/bitcoin-tests.yml | 1 + .../src/tests/nakamoto_integrations.rs | 102 ++++++++++++++---- 2 files changed, 84 insertions(+), 19 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 1bb4bd17f4..36744126aa 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -91,6 +91,7 @@ jobs: - tests::nakamoto_integrations::clarity_burn_state - tests::nakamoto_integrations::check_block_times - tests::nakamoto_integrations::check_block_info + - tests::nakamoto_integrations::check_block_info_rewards - tests::nakamoto_integrations::continue_tenure_extend # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f541b842b8..64b3476c4d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6131,6 +6131,67 @@ fn check_block_info() { run_loop_thread.join().unwrap(); } +fn get_expected_reward_for_height(blocks: &Vec, block_height: u128) -> u128 { + // Find the target block + let target_block = blocks + .iter() + .find(|b| b["block_height"].as_u64().unwrap() == block_height as u64) + .unwrap(); + + // Find the tenure change block (the first block with this burn block hash) + let tenure_burn_block_hash = target_block["burn_block_hash"].as_str().unwrap(); + let tenure_block = blocks + .iter() + .find(|b| b["burn_block_hash"].as_str().unwrap() == tenure_burn_block_hash) + .unwrap(); + let matured_block_hash = tenure_block["block_hash"].as_str().unwrap(); + + let mut expected_reward_opt = None; + for block in blocks.iter().rev() { + for rewards in block["matured_miner_rewards"].as_array().unwrap() { + if rewards.as_object().unwrap()["from_stacks_block_hash"] + .as_str() + .unwrap() + == matured_block_hash + { + let reward_object = rewards.as_object().unwrap(); + let coinbase_amount: u128 = reward_object["coinbase_amount"] + .as_str() + .unwrap() + .parse() + .unwrap(); + let tx_fees_anchored: u128 = reward_object["tx_fees_anchored"] + .as_str() + .unwrap() + .parse() + .unwrap(); + let tx_fees_streamed_confirmed: u128 = reward_object["tx_fees_streamed_confirmed"] + .as_str() + .unwrap() + .parse() + .unwrap(); + let tx_fees_streamed_produced: u128 = reward_object["tx_fees_streamed_produced"] + .as_str() + .unwrap() + .parse() + .unwrap(); + expected_reward_opt = Some( + expected_reward_opt.unwrap_or(0) + + coinbase_amount + + tx_fees_anchored + + tx_fees_streamed_confirmed + + tx_fees_streamed_produced, + ); + } + } + + if let Some(expected_reward) = expected_reward_opt { + return expected_reward; + } + } + panic!("Expected reward not found"); +} + #[test] #[ignore] /// Verify `block-reward` property in `get-block-info?` and `get-tenure-info?`. @@ -6352,18 +6413,18 @@ fn check_block_info_rewards() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; - - // Mining 100 blocks takes a while, so only run this test if the env var is set let last_nakamoto_block = last_stacks_block_height; - // Mine 100+ burn blocks to get the block reward matured - info!("Mining 102 tenures to mature the block reward"); - for i in 0..102 { - let commits_before = commits_submitted.load(Ordering::SeqCst); - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= commits_before + 1) - }) + // Mine more than 2 burn blocks to get the last block's reward matured + // (only 2 blocks maturation time in tests) + info!("Mining 6 tenures to mature the block reward"); + for i in 0..6 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 20, + &coord_channel, + &commits_submitted, + ) .unwrap(); info!("Mined a block ({i})"); } @@ -6371,14 +6432,17 @@ fn check_block_info_rewards() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; + let blocks = test_observer::get_blocks(); // Check the block reward is now matured in one of the tenure-change blocks + let mature_height = last_stacks_block_height - 4; + let expected_reward = get_expected_reward_for_height(&blocks, mature_height); let result0 = call_read_only( &naka_conf, &sender_addr, contract0_name, "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 100)], + vec![&clarity::vm::Value::UInt(mature_height)], ); let tuple0 = result0.expect_tuple().unwrap().data_map; assert_eq!( @@ -6389,7 +6453,7 @@ fn check_block_info_rewards() { .expect_optional() .unwrap() .unwrap(), - Value::UInt(2040806360) + Value::UInt(expected_reward as u128) ); let result1 = call_read_only( @@ -6397,7 +6461,7 @@ fn check_block_info_rewards() { &sender_addr, contract1_name, "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 100)], + vec![&clarity::vm::Value::UInt(mature_height)], ); let tuple1 = result1.expect_tuple().unwrap().data_map; assert_eq!(tuple0, tuple1); @@ -6407,7 +6471,7 @@ fn check_block_info_rewards() { &sender_addr, contract3_name, "get-tenure-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 100)], + vec![&clarity::vm::Value::UInt(mature_height)], ); let tuple3_tenure = result3_tenure.expect_tuple().unwrap().data_map; assert_eq!( @@ -6416,6 +6480,8 @@ fn check_block_info_rewards() { ); // Check the block reward is now matured in one of the Nakamoto blocks + let expected_reward = get_expected_reward_for_height(&blocks, last_nakamoto_block); + let result0 = call_read_only( &naka_conf, &sender_addr, @@ -6424,17 +6490,15 @@ fn check_block_info_rewards() { vec![&clarity::vm::Value::UInt(last_nakamoto_block)], ); let tuple0 = result0.expect_tuple().unwrap().data_map; - assert!( + assert_eq!( tuple0 .get("block-reward") .unwrap() .clone() .expect_optional() .unwrap() - .unwrap() - .expect_u128() - .unwrap() - > 0 + .unwrap(), + Value::UInt(expected_reward as u128) ); let result1 = call_read_only( From c524b3a2c6e8cf1feab069bd3f028a38f12d06de Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 9 Jul 2024 15:44:10 -0500 Subject: [PATCH 0495/1400] feat: track timing of sortitions and allowance for reorgs * track sortition timing * track proposal timing * make proposal / sortition timing configurable --- libsigner/src/events.rs | 24 ++- stacks-signer/src/chainstate.rs | 128 +++++++++++++++- stacks-signer/src/client/mod.rs | 1 + stacks-signer/src/config.rs | 12 ++ stacks-signer/src/runloop.rs | 5 +- stacks-signer/src/signerdb.rs | 81 +++++++++- stacks-signer/src/tests/chainstate.rs | 143 +++++++++++++++++- stacks-signer/src/v0/signer.rs | 47 ++++-- stacks-signer/src/v1/signer.rs | 21 ++- stackslib/src/net/api/getsortition.rs | 3 + .../stacks-node/src/tests/bitcoin_regtest.rs | 8 + .../src/tests/nakamoto_integrations.rs | 27 +++- testnet/stacks-node/src/tests/signer/v0.rs | 7 +- 13 files changed, 465 insertions(+), 42 deletions(-) diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index c9927b577e..9a59221b14 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -20,6 +20,7 @@ use std::net::{SocketAddr, TcpListener, TcpStream}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender; use std::sync::Arc; +use std::time::SystemTime; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{MINERS_NAME, SIGNERS_NAME}; @@ -111,7 +112,14 @@ pub enum SignerEvent { /// Status endpoint request StatusCheck, /// A new burn block event was received with the given burnchain block height - NewBurnBlock(u64), + NewBurnBlock { + /// the burn height for the newly processed burn block + burn_height: u64, + /// the burn hash for the newly processed burn block + burn_header_hash: BurnchainHeaderHash, + /// the time at which this event was received by the signer's event processor + received_time: SystemTime, + }, } /// Trait to implement a stop-signaler for the event receiver thread. @@ -516,7 +524,19 @@ fn process_new_burn_block_event( } let temp: TempBurnBlockEvent = serde_json::from_slice(body.as_bytes()) .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let event = SignerEvent::NewBurnBlock(temp.burn_block_height); + let burn_header_hash = temp + .burn_block_hash + .get(2..) + .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into())) + .and_then(|hex| { + BurnchainHeaderHash::from_hex(hex) + .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}"))) + })?; + let event = SignerEvent::NewBurnBlock { + burn_height: temp.burn_block_height, + received_time: SystemTime::now(), + burn_header_hash, + }; if let Err(e) = request.respond(HttpResponse::empty(200u16)) { error!("Failed to respond to request: {:?}", &e); } diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index ef7ae1aa03..b6872b8079 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -13,17 +13,45 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::time::Duration; + use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::TenureChangePayload; use blockstack_lib::net::api::getsortition::SortitionInfo; +use blockstack_lib::util_lib::db::Error as DBError; +use clarity::types::chainstate::BurnchainHeaderHash; use slog::{slog_info, slog_warn}; use stacks_common::types::chainstate::{ConsensusHash, StacksPublicKey}; use stacks_common::util::hash::Hash160; use stacks_common::{info, warn}; use crate::client::{ClientError, StacksClient}; +use crate::config::SignerConfig; use crate::signerdb::SignerDb; +#[derive(thiserror::Error, Debug)] +/// Error type for the signer chainstate module +pub enum SignerChainstateError { + /// Error resulting from database interactions + #[error("Database error: {0}")] + DBError(DBError), + /// Error resulting from crate::client interactions + #[error("Client error: {0}")] + ClientError(ClientError), +} + +impl From for SignerChainstateError { + fn from(value: ClientError) -> Self { + Self::ClientError(value) + } +} + +impl From for SignerChainstateError { + fn from(value: DBError) -> Self { + Self::DBError(value) + } +} + /// Captures this signer's current view of a sortition's miner. #[derive(PartialEq, Eq, Debug)] pub enum SortitionMinerStatus { @@ -56,6 +84,26 @@ pub struct SortitionState { pub consensus_hash: ConsensusHash, /// what is this signer's view of the this sortition's miner? did they misbehave? pub miner_status: SortitionMinerStatus, + /// the timestamp in the burn block that performed this sortition + pub burn_header_timestamp: u64, + /// the burn header hash of the burn block that performed this sortition + pub burn_block_hash: BurnchainHeaderHash, +} + +/// Captures the configuration settings used by the signer when evaluating block proposals. +#[derive(Debug, Clone)] +pub struct ProposalEvalConfig { + /// How much time between the first block proposal in a tenure and the next bitcoin block + /// must pass before a subsequent miner isn't allowed to reorg the tenure + pub first_proposal_burn_block_timing: Duration, +} + +impl From<&SignerConfig> for ProposalEvalConfig { + fn from(value: &SignerConfig) -> Self { + Self { + first_proposal_burn_block_timing: value.first_proposal_burn_block_timing.clone(), + } + } } /// The signer's current view of the stacks chain's sortition @@ -68,6 +116,8 @@ pub struct SortitionsView { pub cur_sortition: SortitionState, /// the hash at which the sortitions view was fetched pub latest_consensus_hash: ConsensusHash, + /// configuration settings for evaluating proposals + pub config: ProposalEvalConfig, } impl TryFrom for SortitionState { @@ -85,6 +135,8 @@ impl TryFrom for SortitionState { parent_tenure_id: value .stacks_parent_ch .ok_or_else(|| ClientError::UnexpectedSortitionInfo)?, + burn_header_timestamp: value.burn_header_timestamp, + burn_block_hash: value.burn_block_hash, miner_status: SortitionMinerStatus::Valid, }) } @@ -112,7 +164,7 @@ impl SortitionsView { signer_db: &SignerDb, block: &NakamotoBlock, block_pk: &StacksPublicKey, - ) -> Result { + ) -> Result { let bitvec_all_1s = block.header.pox_treatment.iter().all(|entry| entry); if !bitvec_all_1s { warn!( @@ -203,8 +255,13 @@ impl SortitionsView { return Ok(false); } // now, we have to check if the parent tenure was a valid choice. - let is_valid_parent_tenure = - Self::check_parent_tenure_choice(proposed_by.state(), block, client)?; + let is_valid_parent_tenure = Self::check_parent_tenure_choice( + proposed_by.state(), + block, + signer_db, + client, + &self.config.first_proposal_burn_block_timing, + )?; if !is_valid_parent_tenure { return Ok(false); } @@ -251,8 +308,10 @@ impl SortitionsView { fn check_parent_tenure_choice( sortition_state: &SortitionState, block: &NakamotoBlock, + signer_db: &SignerDb, client: &StacksClient, - ) -> Result { + first_proposal_burn_block_timing: &Duration, + ) -> Result { // if the parent tenure is the last sortition, it is a valid choice. // if the parent tenure is a reorg, then all of the reorged sortitions // must either have produced zero blocks _or_ produced their first block @@ -277,9 +336,61 @@ impl SortitionsView { ); return Ok(false); } + + // this value *should* always be some, but try to do the best we can if it isn't + let sortition_state_received_time = + signer_db.get_burn_block_receive_time(&sortition_state.burn_block_hash)?; + for tenure in tenures_reorged.iter() { + if tenure.consensus_hash == sortition_state.parent_tenure_id { + // this was a built-upon tenure, no need to check this tenure as part of the reorg. + continue; + } + if tenure.first_block_mined.is_some() { - // TODO: must check if the first block was poorly timed. + let Some(local_block_info) = + signer_db.get_first_signed_block_in_tenure(&tenure.consensus_hash)? + else { + warn!( + "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already mined blocks, and there is no local knowledge for that tenure's block timing."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "parent_tenure" => %sortition_state.parent_tenure_id, + "last_sortition" => %sortition_state.prior_sortition, + "violating_tenure_id" => %tenure.consensus_hash, + "violating_tenure_first_block_id" => ?tenure.first_block_mined, + ); + return Ok(false); + }; + + let checked_proposal_timing = if let Some(sortition_state_received_time) = + sortition_state_received_time + { + // how long was there between when the proposal was received and the next sortition started? + let proposal_to_sortition = sortition_state_received_time + .saturating_sub(local_block_info.proposed_time); + if Duration::from_secs(proposal_to_sortition) + <= *first_proposal_burn_block_timing + { + info!( + "Miner is not building off of most recent tenure. A tenure they reorg has already mined blocks, but the block was poorly timed, allowing the reorg."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "parent_tenure" => %sortition_state.parent_tenure_id, + "last_sortition" => %sortition_state.prior_sortition, + "violating_tenure_id" => %tenure.consensus_hash, + "violating_tenure_first_block_id" => ?tenure.first_block_mined, + "violating_tenure_proposed_time" => local_block_info.proposed_time, + "new_tenure_received_time" => sortition_state_received_time, + "new_tenure_burn_timestamp" => sortition_state.burn_header_timestamp, + ); + continue; + } + true + } else { + false + }; + warn!( "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already mined blocks."; "proposed_block_consensus_hash" => %block.header.consensus_hash, @@ -288,6 +399,7 @@ impl SortitionsView { "last_sortition" => %sortition_state.prior_sortition, "violating_tenure_id" => %tenure.consensus_hash, "violating_tenure_first_block_id" => ?tenure.first_block_mined, + "checked_proposal_timing" => checked_proposal_timing, ); return Ok(false); } @@ -346,7 +458,10 @@ impl SortitionsView { } /// Fetch a new view of the recent sortitions - pub fn fetch_view(client: &StacksClient) -> Result { + pub fn fetch_view( + config: ProposalEvalConfig, + client: &StacksClient, + ) -> Result { let latest_state = client.get_latest_sortition()?; let latest_ch = latest_state.consensus_hash; @@ -383,6 +498,7 @@ impl SortitionsView { cur_sortition, last_sortition, latest_consensus_hash, + config, }) } } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 0bab72b287..e3e3dd1dc5 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -565,6 +565,7 @@ pub(crate) mod tests { tx_fee_ustx: config.tx_fee_ustx, max_tx_fee_ustx: config.max_tx_fee_ustx, db_path: config.db_path.clone(), + first_proposal_burn_block_timing: Duration::from_secs(30), } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index a7a92a5ad5..4b9970e38f 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -151,6 +151,9 @@ pub struct SignerConfig { pub max_tx_fee_ustx: Option, /// The path to the signer's database file pub db_path: PathBuf, + /// How much time between the first block proposal in a tenure and the next bitcoin block + /// must pass before a subsequent miner isn't allowed to reorg the tenure + pub first_proposal_burn_block_timing: Duration, } /// The parsed configuration for the signer @@ -190,6 +193,9 @@ pub struct GlobalConfig { pub db_path: PathBuf, /// Metrics endpoint pub metrics_endpoint: Option, + /// How much time between the first block proposal in a tenure and the next bitcoin block + /// must pass before a subsequent miner isn't allowed to reorg the tenure + pub first_proposal_burn_block_timing: Duration, } /// Internal struct for loading up the config file @@ -227,6 +233,9 @@ struct RawConfigFile { pub db_path: String, /// Metrics endpoint pub metrics_endpoint: Option, + /// How much time between the first block proposal in a tenure and the next bitcoin block + /// must pass before a subsequent miner isn't allowed to reorg the tenure + pub first_proposal_burn_block_timing_secs: Option, } impl RawConfigFile { @@ -298,6 +307,8 @@ impl TryFrom for GlobalConfig { let dkg_private_timeout = raw_data.dkg_private_timeout_ms.map(Duration::from_millis); let nonce_timeout = raw_data.nonce_timeout_ms.map(Duration::from_millis); let sign_timeout = raw_data.sign_timeout_ms.map(Duration::from_millis); + let first_proposal_burn_block_timing = + Duration::from_secs(raw_data.first_proposal_burn_block_timing_secs.unwrap_or(30)); let db_path = raw_data.db_path.into(); let metrics_endpoint = match raw_data.metrics_endpoint { @@ -331,6 +342,7 @@ impl TryFrom for GlobalConfig { auth_password: raw_data.auth_password, db_path, metrics_endpoint, + first_proposal_burn_block_timing, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 67dfbdd626..6795f0cfee 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -257,6 +257,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo key_ids, signer_entries, signer_slot_ids: signer_slot_ids.into_values().collect(), + first_proposal_burn_block_timing: self.config.first_proposal_burn_block_timing, ecdsa_private_key: self.config.ecdsa_private_key, stacks_private_key: self.config.stacks_private_key, node_host: self.config.node_host.to_string(), @@ -434,8 +435,8 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> } return None; } - } else if let Some(SignerEvent::NewBurnBlock(current_burn_block_height)) = event { - if let Err(e) = self.refresh_runloop(current_burn_block_height) { + } else if let Some(SignerEvent::NewBurnBlock { burn_height, .. }) = event { + if let Err(e) = self.refresh_runloop(burn_height) { error!("Failed to refresh signer runloop: {e}."); warn!("Signer may have an outdated view of the network."); } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index f4e5c7f8dc..582e2027d2 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -15,19 +15,22 @@ // along with this program. If not, see . use std::path::Path; +use std::time::SystemTime; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote}; use blockstack_lib::util_lib::db::{ query_row, sqlite_open, table_exists, u64_to_sql, Error as DBError, }; +use clarity::types::chainstate::BurnchainHeaderHash; +use clarity::util::get_epoch_time_secs; use libsigner::BlockProposal; use rusqlite::{params, Connection, Error as SqliteError, OpenFlags}; use serde::{Deserialize, Serialize}; -use slog::slog_debug; -use stacks_common::debug; +use slog::{slog_debug, slog_error}; use stacks_common::types::chainstate::ConsensusHash; use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::{debug, error}; use wsts::net::NonceRequest; /// Additional Info about a proposed block @@ -47,6 +50,12 @@ pub struct BlockInfo { pub nonce_request: Option, /// Whether this block is already being signed over pub signed_over: bool, + /// Time at which the proposal was received by this signer (epoch time in seconds) + pub proposed_time: u64, + /// Time at which the proposal was signed by this signer (epoch time in seconds) + pub signed_self: Option, + /// Time at which the proposal was signed by a threshold in the signer set (epoch time in seconds) + pub signed_group: Option, } impl From for BlockInfo { @@ -59,6 +68,9 @@ impl From for BlockInfo { valid: None, nonce_request: None, signed_over: false, + proposed_time: get_epoch_time_secs(), + signed_self: None, + signed_group: None, } } } @@ -101,6 +113,7 @@ const CREATE_INDEXES: &str = " CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (signed_over); CREATE INDEX IF NOT EXISTS blocks_consensus_hash ON blocks (consensus_hash); CREATE INDEX IF NOT EXISTS blocks_valid ON blocks ((json_extract(block_info, '$.valid'))); +CREATE INDEX IF NOT EXISTS burn_blocks_height ON burn_blocks (block_height); "; const CREATE_SIGNER_STATE_TABLE: &str = " @@ -109,6 +122,13 @@ CREATE TABLE IF NOT EXISTS signer_states ( encrypted_state BLOB NOT NULL )"; +const CREATE_BURN_STATE_TABLE: &str = " +CREATE TABLE IF NOT EXISTS burn_blocks ( + block_hash TEXT PRIMARY KEY, + block_height INTEGER, + received_time INTEGER +)"; + impl SignerDb { /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path @@ -132,6 +152,10 @@ impl SignerDb { self.db.execute(CREATE_SIGNER_STATE_TABLE, NO_PARAMS)?; } + if !table_exists(&self.db, "burn_blocks")? { + self.db.execute(CREATE_BURN_STATE_TABLE, NO_PARAMS)?; + } + self.db.execute_batch(CREATE_INDEXES)?; Ok(()) @@ -197,7 +221,58 @@ impl SignerDb { try_deserialize(result) } - /// Insert a block into the database. + /// Return the first signed block in a tenure (identified by its consensus hash) + pub fn get_first_signed_block_in_tenure( + &self, + tenure: &ConsensusHash, + ) -> Result, DBError> { + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ? AND signed_over = 1 ORDER BY stacks_height ASC LIMIT 1"; + let result: Option = query_row(&self.db, query, &[tenure])?; + + try_deserialize(result) + } + + /// Insert or replace a burn block into the database + pub fn insert_burn_block( + &mut self, + burn_hash: &BurnchainHeaderHash, + burn_height: u64, + received_time: &SystemTime, + ) -> Result<(), DBError> { + let received_ts = received_time + .duration_since(std::time::UNIX_EPOCH) + .map_err(|e| DBError::Other(format!("Bad system time: {e}")))? + .as_secs(); + debug!("Inserting burn block info"; "burn_block_height" => burn_height, "burn_hash" => %burn_hash, "received" => received_ts); + self.db.execute( + "INSERT OR REPLACE INTO burn_blocks (block_hash, block_height, received_time) VALUES (?1, ?2, ?3)", + params![ + burn_hash, + u64_to_sql(burn_height)?, + u64_to_sql(received_ts)?, + ], + )?; + Ok(()) + } + + /// Get timestamp (epoch seconds) at which a burn block was received over the event dispatcheer by this signer + /// if that burn block has been received. + pub fn get_burn_block_receive_time( + &self, + burn_hash: &BurnchainHeaderHash, + ) -> Result, DBError> { + let query = "SELECT received_time FROM burn_blocks WHERE block_hash = ? LIMIT 1"; + let Some(receive_time_i64) = query_row::(&self.db, query, &[burn_hash])? else { + return Ok(None); + }; + let receive_time = u64::try_from(receive_time_i64).map_err(|e| { + error!("Failed to parse db received_time as u64: {e}"); + DBError::Corruption + })?; + Ok(Some(receive_time)) + } + + /// Insert or replace a block into the database. /// `hash` is the `signer_signature_hash` of the block. pub fn insert_block(&mut self, block_info: &BlockInfo) -> Result<(), DBError> { let block_json = diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 67fa55ace5..f99fde9952 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -15,15 +15,22 @@ use std::fs; use std::net::{Ipv4Addr, SocketAddrV4}; +use std::time::{Duration, SystemTime}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::chainstate::stacks::{ - SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangeCause, - TenureChangePayload, TransactionAnchorMode, TransactionAuth, TransactionPayload, - TransactionPostConditionMode, TransactionPublicKeyEncoding, TransactionSpendingCondition, - TransactionVersion, + CoinbasePayload, SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, + TenureChangeCause, TenureChangePayload, TransactionAnchorMode, TransactionAuth, + TransactionPayload, TransactionPostConditionMode, TransactionPublicKeyEncoding, + TransactionSpendingCondition, TransactionVersion, }; +use blockstack_lib::net::api::get_tenures_fork_info::TenureForkingInfo; +use clarity::types::chainstate::{BurnchainHeaderHash, SortitionId}; +use clarity::util::vrf::VRFProof; +use libsigner::BlockProposal; +use slog::slog_info; use stacks_common::bitvec::BitVec; +use stacks_common::info; use stacks_common::types::chainstate::{ ConsensusHash, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, }; @@ -31,9 +38,14 @@ use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; -use crate::chainstate::{SortitionMinerStatus, SortitionState, SortitionsView}; +use crate::chainstate::{ + ProposalEvalConfig, SignerChainstateError, SortitionMinerStatus, SortitionState, SortitionsView, +}; +use crate::client::tests::MockServerClient; use crate::client::StacksClient; -use crate::signerdb::SignerDb; +use crate::signerdb::{BlockInfo, SignerDb}; + +static TEST_FIRST_PROPOSAL_BURN_BLOCK_TIMING: Duration = Duration::from_secs(30); fn setup_test_environment( fn_name: &str, @@ -55,6 +67,8 @@ fn setup_test_environment( parent_tenure_id: ConsensusHash([0; 20]), consensus_hash: ConsensusHash([1; 20]), miner_status: SortitionMinerStatus::Valid, + burn_header_timestamp: 2, + burn_block_hash: BurnchainHeaderHash([1; 32]), }; let last_sortition = Some(SortitionState { @@ -64,12 +78,17 @@ fn setup_test_environment( parent_tenure_id: ConsensusHash([128; 20]), consensus_hash: ConsensusHash([0; 20]), miner_status: SortitionMinerStatus::Valid, + burn_header_timestamp: 1, + burn_block_hash: BurnchainHeaderHash([0; 32]), }); let view = SortitionsView { latest_consensus_hash: cur_sortition.consensus_hash, cur_sortition, last_sortition, + config: ProposalEvalConfig { + first_proposal_burn_block_timing: TEST_FIRST_PROPOSAL_BURN_BLOCK_TIMING.clone(), + }, }; let stacks_client = StacksClient::new( @@ -136,6 +155,118 @@ fn check_proposal_miner_pkh_mismatch() { .unwrap()); } +fn reorg_timing_testing( + test_name: &str, + sortition_timing_secs: u64, +) -> Result { + let (_stacks_client, mut signer_db, block_pk, mut view, mut block) = + setup_test_environment(test_name); + + view.cur_sortition.parent_tenure_id = view.last_sortition.as_ref().unwrap().parent_tenure_id; + block.header.consensus_hash = view.cur_sortition.consensus_hash; + block.txs.push(StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::Standard(TransactionSpendingCondition::new_initial_sighash()), + TransactionPayload::TenureChange(TenureChangePayload { + tenure_consensus_hash: view.cur_sortition.consensus_hash, + prev_tenure_consensus_hash: view.cur_sortition.parent_tenure_id, + burn_view_consensus_hash: view.cur_sortition.consensus_hash, + previous_tenure_end: block.header.parent_block_id, + previous_tenure_blocks: 10, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160::from_node_public_key(&block_pk), + }), + )); + block.txs.push(StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::Standard(TransactionSpendingCondition::new_initial_sighash()), + TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, Some(VRFProof::empty())), + )); + + let last_sortition = view.last_sortition.as_ref().unwrap(); + + let expected_result = vec![ + TenureForkingInfo { + burn_block_hash: last_sortition.burn_block_hash, + burn_block_height: 2, + sortition_id: SortitionId([2; 32]), + parent_sortition_id: SortitionId([1; 32]), + consensus_hash: last_sortition.consensus_hash, + was_sortition: true, + first_block_mined: Some(StacksBlockId([1; 32])), + }, + TenureForkingInfo { + burn_block_hash: BurnchainHeaderHash([128; 32]), + burn_block_height: 1, + sortition_id: SortitionId([1; 32]), + parent_sortition_id: SortitionId([0; 32]), + consensus_hash: view.cur_sortition.parent_tenure_id, + was_sortition: true, + first_block_mined: Some(StacksBlockId([2; 32])), + }, + ]; + + let block_proposal_1 = BlockProposal { + block: NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 10, + burn_spent: 10, + consensus_hash: last_sortition.consensus_hash, + parent_block_id: StacksBlockId([0; 32]), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: TrieHash([0; 32]), + timestamp: 11, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs: vec![], + }, + burn_height: 2, + reward_cycle: 1, + }; + let mut block_info_1 = BlockInfo::from(block_proposal_1); + block_info_1.signed_over = true; + signer_db.insert_block(&block_info_1).unwrap(); + + let sortition_time = SystemTime::UNIX_EPOCH + + Duration::from_secs(block_info_1.proposed_time + sortition_timing_secs); + signer_db + .insert_burn_block(&view.cur_sortition.burn_block_hash, 3, &sortition_time) + .unwrap(); + + let MockServerClient { server, client, .. } = MockServerClient::new(); + let h = std::thread::spawn(move || view.check_proposal(&client, &signer_db, &block, &block_pk)); + + crate::client::tests::write_response( + server, + format!("HTTP/1.1 200 Ok\n\n{}", serde_json::json!(expected_result)).as_bytes(), + ); + + let result = h.join().unwrap(); + info!("Result: {result:?}"); + result +} + +#[test] +fn check_proposal_reorg_timing_bad() { + let result = reorg_timing_testing( + "reorg_timing_bad", + TEST_FIRST_PROPOSAL_BURN_BLOCK_TIMING.as_secs() + 1, + ); + assert!(!result.unwrap(), "Proposal should not validate, because the reorg occurred in a block whose proposed time was long enough before the sortition"); +} + +#[test] +fn check_proposal_reorg_timing_ok() { + let result = reorg_timing_testing( + "reorg_timing_okay", + TEST_FIRST_PROPOSAL_BURN_BLOCK_TIMING.as_secs(), + ); + assert!(result.unwrap(), "Proposal should validate okay, because the reorg occurred in a block whose proposed time was close to the sortition"); +} + #[test] fn check_proposal_invalid_status() { let (stacks_client, signer_db, block_pk, mut view, mut block) = diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index ce37c0a522..661e34d5ea 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -26,7 +26,7 @@ use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::{debug, error, info, warn}; -use crate::chainstate::SortitionsView; +use crate::chainstate::{ProposalEvalConfig, SortitionsView}; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; use crate::runloop::{RunLoopCommand, SignerResult}; @@ -52,6 +52,8 @@ pub struct Signer { pub reward_cycle: u64, /// SignerDB for state management pub signer_db: SignerDb, + /// Configuration for proposal evaluation + pub proposal_config: ProposalEvalConfig, } impl std::fmt::Display for Signer { @@ -89,7 +91,7 @@ impl SignerTrait for Signer { // and the vec could be heterogenous, so, don't differentiate. Some(SignerEvent::BlockValidationResponse(_)) | Some(SignerEvent::MinerMessages(..)) - | Some(SignerEvent::NewBurnBlock(_)) + | Some(SignerEvent::NewBurnBlock { .. }) | Some(SignerEvent::StatusCheck) | None => None, Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), @@ -146,8 +148,23 @@ impl SignerTrait for Signer { SignerEvent::StatusCheck => { debug!("{self}: Received a status check event."); } - SignerEvent::NewBurnBlock(height) => { - debug!("{self}: Receved a new burn block event for block height {height}"); + SignerEvent::NewBurnBlock { + burn_height, + burn_header_hash, + received_time, + } => { + debug!("{self}: Receved a new burn block event for block height {burn_height}"); + if let Err(e) = + self.signer_db + .insert_burn_block(burn_header_hash, *burn_height, received_time) + { + warn!( + "Failed to write burn block event to signerdb"; + "err" => ?e, + "burn_header_hash" => %burn_header_hash, + "burn_height" => burn_height + ); + } *sortition_state = None; } } @@ -184,7 +201,7 @@ impl From for Signer { ); let signer_db = SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); - + let proposal_config = ProposalEvalConfig::from(&signer_config); Self { private_key: signer_config.stacks_private_key, stackerdb, @@ -198,6 +215,7 @@ impl From for Signer { signer_slot_ids: signer_config.signer_slot_ids.clone(), reward_cycle: signer_config.reward_cycle, signer_db, + proposal_config, } } } @@ -279,15 +297,16 @@ impl Signer { // Get sortition view if we don't have it if sortition_state.is_none() { - *sortition_state = SortitionsView::fetch_view(stacks_client) - .inspect_err(|e| { - warn!( - "{self}: Failed to update sortition view: {e:?}"; - "signer_sighash" => %signer_signature_hash, - "block_id" => %block_proposal.block.block_id(), - ) - }) - .ok(); + *sortition_state = + SortitionsView::fetch_view(self.proposal_config.clone(), stacks_client) + .inspect_err(|e| { + warn!( + "{self}: Failed to update sortition view: {e:?}"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ) + }) + .ok(); } // Check if proposal can be rejected now if not valid against sortition view diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 8eff7f5fbe..4b9f2b18f1 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -172,7 +172,7 @@ impl SignerTrait for Signer { // Block proposal events do have reward cycles, but each proposal has its own cycle, // and the vec could be heterogenous, so, don't differentiate. Some(SignerEvent::MinerMessages(..)) - | Some(SignerEvent::NewBurnBlock(_)) + | Some(SignerEvent::NewBurnBlock { .. }) | Some(SignerEvent::StatusCheck) | None => None, Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), @@ -239,8 +239,23 @@ impl SignerTrait for Signer { SignerEvent::StatusCheck => { debug!("{self}: Received a status check event.") } - SignerEvent::NewBurnBlock(height) => { - debug!("{self}: Receved a new burn block event for block height {height}"); + SignerEvent::NewBurnBlock { + burn_height, + burn_header_hash, + received_time, + } => { + debug!("{self}: Receved a new burn block event for block height {burn_height}"); + if let Err(e) = + self.signer_db + .insert_burn_block(burn_header_hash, *burn_height, received_time) + { + warn!( + "Failed to write burn block event to signerdb"; + "err" => ?e, + "burn_header_hash" => %burn_header_hash, + "burn_height" => burn_height + ); + } } } } diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index d7600a7c4f..ce17b5427b 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -64,6 +64,8 @@ pub struct SortitionInfo { pub burn_block_hash: BurnchainHeaderHash, /// The burn height of the block that triggered this event. pub burn_block_height: u64, + /// The burn block time of the sortition + pub burn_header_timestamp: u64, /// This sortition ID of the block that triggered this event. This incorporates /// PoX forking information and the burn block hash to obtain an identifier that is /// unique across PoX forks and burnchain forks. @@ -256,6 +258,7 @@ impl RPCRequestHandler for GetSortitionHandler { Ok(SortitionInfo { burn_block_hash: sortition_sn.burn_header_hash, burn_block_height: sortition_sn.block_height, + burn_header_timestamp: sortition_sn.burn_header_timestamp, sortition_id: sortition_sn.sortition_id, parent_sortition_id: sortition_sn.parent_sortition_id, consensus_hash: sortition_sn.consensus_hash, diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 5f8b1aabd3..3fbfa51986 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -21,6 +21,14 @@ pub enum BitcoinCoreError { SpawnFailed(String), } +impl std::fmt::Display for BitcoinCoreError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::SpawnFailed(msg) => write!(f, "bitcoind spawn failed: {msg}"), + } + } +} + type BitcoinResult = Result; pub struct BitcoinCoreController { diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 8570b0aeff..4a42220663 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -71,6 +71,7 @@ use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, }; +use stacks::util::get_epoch_time_secs; use stacks::util::hash::hex_bytes; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -88,7 +89,7 @@ use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; -use stacks_signer::chainstate::SortitionsView; +use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, SignerDb}; use wsts::net::Message; @@ -4542,7 +4543,11 @@ fn signer_chainstate() { ) .unwrap(); - let sortitions_view = SortitionsView::fetch_view(&signer_client).unwrap(); + // this config disallows any reorg due to poorly timed block commits + let proposal_conf = ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(0), + }; + let sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); // check the prior tenure's proposals again, confirming that the sortitions_view // will reject them. @@ -4604,6 +4609,9 @@ fn signer_chainstate() { valid: Some(true), nonce_request: None, signed_over: true, + proposed_time: get_epoch_time_secs(), + signed_self: None, + signed_group: None, }) .unwrap(); @@ -4648,7 +4656,11 @@ fn signer_chainstate() { ); // force the view to refresh and check again - let sortitions_view = SortitionsView::fetch_view(&signer_client).unwrap(); + // this config disallows any reorg due to poorly timed block commits + let proposal_conf = ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(0), + }; + let sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let valid = sortitions_view .check_proposal( &signer_client, @@ -4672,6 +4684,9 @@ fn signer_chainstate() { valid: Some(true), nonce_request: None, signed_over: true, + proposed_time: get_epoch_time_secs(), + signed_self: None, + signed_group: None, }) .unwrap(); @@ -4707,7 +4722,11 @@ fn signer_chainstate() { txs: vec![], }; - let mut sortitions_view = SortitionsView::fetch_view(&signer_client).unwrap(); + // this config disallows any reorg due to poorly timed block commits + let proposal_conf = ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(0), + }; + let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); assert!( !sortitions_view diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6c589d9b41..3e3e37e9d7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -32,7 +32,7 @@ use stacks::types::PublicKey; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks_common::bitvec::BitVec; -use stacks_signer::chainstate::SortitionsView; +use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::runloop::State; use stacks_signer::v0::SpawnedSigner; @@ -262,7 +262,10 @@ fn block_proposal_rejection() { info!("------------------------- Send Block Proposal To Signers -------------------------"); let reward_cycle = signer_test.get_current_reward_cycle(); - let view = SortitionsView::fetch_view(&signer_test.stacks_client).unwrap(); + let proposal_conf = ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(0), + }; + let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), txs: vec![], From 9578d5e89f6948afbb9aa6a293c747d250f59346 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 10 Jul 2024 16:56:25 -0400 Subject: [PATCH 0496/1400] chore: address PR feedback and get remaining integration tests to pass --- stackslib/src/chainstate/burn/db/sortdb.rs | 38 +++++++++++- .../stacks-node/src/nakamoto_node/miner.rs | 50 ++++++++-------- .../stacks-node/src/nakamoto_node/relayer.rs | 59 +------------------ .../src/tests/nakamoto_integrations.rs | 29 ++++++++- 4 files changed, 91 insertions(+), 85 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 6616ed6183..741cc2ec6b 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -1845,7 +1845,43 @@ impl<'a> SortitionHandleTx<'a> { ).optional()?; if let Some((cur_ch, cur_bhh, cur_height)) = current_sortition_tip { - let will_replace = cur_height < stacks_block_height; + let will_replace = if cur_height < stacks_block_height { + true + } else if cur_height > stacks_block_height { + false + } else { + if &cur_ch == consensus_hash { + // same sortition (i.e. nakamoto block) + // pick the one with the lexicographically-lesser block hash + cur_bhh + .0 + .iter() + .zip(stacks_block_hash.0.iter()) + .find_map(|(cur_bhh_byte, accepted_bhh_byte)| { + if cur_bhh_byte < accepted_bhh_byte { + // current is "earlier", so don't replace + Some(false) + } else if cur_bhh_byte > accepted_bhh_byte { + // current is "later", so replace + Some(true) + } else { + None + } + }) + // if somehow the block hashes are also the same, then don't replace + .unwrap_or(false) + } else { + // tips come from different sortitions + // break ties by going with the latter-signed block + let sn_current = SortitionDB::get_block_snapshot_consensus(self, &cur_ch)? + .ok_or(db_error::NotFoundError)?; + let sn_accepted = + SortitionDB::get_block_snapshot_consensus(self, &consensus_hash)? + .ok_or(db_error::NotFoundError)?; + sn_current.block_height < sn_accepted.block_height + } + }; + debug!("Setting Stacks tip as accepted"; "replace?" => will_replace, "current_tip_consensus_hash" => %cur_ch, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ba3a2d3be7..372372c51f 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -725,8 +725,7 @@ impl BlockMinerThread { // TODO: add tests from mutation testing results #4869 #[cfg_attr(test, mutants::skip)] /// Load up the parent block info for mining. - /// If there's no parent because this is the first block, then return the genesis block's info. - /// If we can't find the parent in the DB but we expect one, return None. + /// If we can't find the parent in the DB but we expect one, return Err(ParentNotFound). fn load_block_parent_info( &self, burn_db: &mut SortitionDB, @@ -741,27 +740,27 @@ impl BlockMinerThread { "Stacks block parent ID is last mined block {}", &block.block_id() ); - NakamotoChainState::get_block_header(chain_state.db(), &block.block_id()) + let stacks_block_id = block.block_id(); + NakamotoChainState::get_block_header(chain_state.db(), &stacks_block_id) .map_err(|e| { error!( "Could not query header info for last-mined block ID {}: {:?}", - &block.block_id(), - &e + &stacks_block_id, &e ); NakamotoNodeError::ParentNotFound })? .ok_or_else(|| { - error!( - "No header info for last-mined block ID {}", - &block.block_id() - ); + error!("No header for parent tenure ID {}", &stacks_block_id); NakamotoNodeError::ParentNotFound })? } else { + // no mined blocks yet test_debug!( "Stacks block parent ID is last block in parent tenure ID {}", &self.parent_tenure_id ); + + // find the last block in the parent tenure, since this is the tip we'll build atop let parent_tenure_header = NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) .map_err(|e| { @@ -802,21 +801,24 @@ impl BlockMinerThread { "Stacks block parent ID may be an epoch2x block: {}", &self.parent_tenure_id ); - NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) - .map_err(|e| { - error!( - "Could not query header info for epoch2x tenure block ID {}: {:?}", - &self.parent_tenure_id, &e - ); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!( - "No header info for epoch2x tenure block ID {}", - &self.parent_tenure_id - ); - NakamotoNodeError::ParentNotFound - })? + let epoch2_header = + NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) + .map_err(|e| { + error!( + "Could not query header info for epoch2x tenure block ID {}: {:?}", + &self.parent_tenure_id, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No header info for epoch2x tenure block ID {}", + &self.parent_tenure_id + ); + NakamotoNodeError::ParentNotFound + })?; + + epoch2_header } }; diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 807eb797cf..a614043472 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -390,62 +390,6 @@ impl RelayerThread { .expect("FATAL: failed to query sortition DB") .expect("FATAL: unknown consensus hash"); - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).map_err(|e| { - error!("Failed to load canonical stacks tip: {:?}", &e); - NakamotoNodeError::ParentNotFound - })?; - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); - - let ongoing_tenure_consensus_hash = if let Some(ongoing_tenure) = - NakamotoChainState::get_ongoing_tenure(&mut self.chainstate.index_conn(), &stacks_tip) - .map_err(|e| { - error!( - "Failed to get ongoing tenure off of {}: {:?}", - &stacks_tip, &e - ); - NakamotoNodeError::ParentNotFound - })? { - ongoing_tenure.tenure_id_consensus_hash - } else if let Some(header) = - StacksChainState::get_stacks_block_header_info_by_index_block_hash( - self.chainstate.db(), - &stacks_tip, - ) - .map_err(|e| { - error!( - "Failed to get stacks 2.x block header for {}: {:?}", - &stacks_tip, &e - ); - NakamotoNodeError::ParentNotFound - })? - { - header.consensus_hash - } else { - error!("Could not deduce ongoing tenure"); - return Err(NakamotoNodeError::ParentNotFound); - }; - - let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( - &mut self.chainstate.index_conn(), - &stacks_tip, - &ongoing_tenure_consensus_hash, - ) - .map_err(|e| { - error!( - "Relayer: Failed to get tenure-start block header for stacks tip {}: {:?}", - &stacks_tip, &e - ); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!( - "Relayer: Failed to find tenure-start block header for stacks tip {}", - &stacks_tip - ); - NakamotoNodeError::ParentNotFound - })?; - self.globals.set_last_sortition(sn.clone()); let won_sortition = sn.sortition && self.last_commits.remove(&sn.winning_block_txid); @@ -457,7 +401,6 @@ impl RelayerThread { "burn_height" => sn.block_height, "winning_txid" => %sn.winning_block_txid, "committed_parent" => %committed_index_hash, - "last_tenure_start_id" => %highest_tenure_start_block_header.index_block_hash(), "won_sortition?" => won_sortition, ); @@ -468,7 +411,7 @@ impl RelayerThread { let directive = if sn.sortition { if won_sortition { MinerDirective::BeginTenure { - parent_tenure_start: highest_tenure_start_block_header.index_block_hash(), + parent_tenure_start: committed_index_hash, burnchain_tip: sn, } } else { diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 562e7c9fa8..50f40a514b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3443,6 +3443,7 @@ fn forked_tenure_is_ignored() { // Now let's produce a second block for tenure C and ensure it builds off of block C. let blocks_before = mined_blocks.load(Ordering::SeqCst); let start_time = Instant::now(); + // submit a tx so that the miner will mine an extra block let sender_nonce = 0; let transfer_tx = @@ -4073,10 +4074,11 @@ fn nakamoto_attempt_time() { // ----- Setup boilerplate finished, test block proposal API endpoint ----- - let mut sender_nonce = 0; let tenure_count = 2; let inter_blocks_per_tenure = 3; + info!("Begin subtest 1"); + // Subtest 1 // Mine nakamoto tenures with a few transactions // Blocks should be produced at least every 20 seconds @@ -4089,7 +4091,9 @@ fn nakamoto_attempt_time() { let mut last_tip_height = 0; // mine the interim blocks - for _ in 0..inter_blocks_per_tenure { + for tenure_count in 0..inter_blocks_per_tenure { + debug!("nakamoto_attempt_time: begin tenure {}", tenure_count); + let blocks_processed_before = coord_channel .lock() .expect("Mutex poisoned") @@ -4099,6 +4103,17 @@ fn nakamoto_attempt_time() { let tx_fee = 500; let amount = 500; + let account = loop { + // submit a tx so that the miner will mine an extra block + let Ok(account) = get_account_result(&http_origin, &sender_addr) else { + debug!("nakamoto_attempt_time: Failed to load miner account"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + break account; + }; + + let mut sender_nonce = account.nonce; for _ in 0..txs_per_block { let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, tx_fee, &recipient, amount); @@ -4142,6 +4157,8 @@ fn nakamoto_attempt_time() { } } + info!("Begin subtest 2"); + // Subtest 2 // Confirm that no blocks are mined if there are no transactions for _ in 0..2 { @@ -4168,6 +4185,8 @@ fn nakamoto_attempt_time() { assert_eq!(info.stacks_tip_height, info_before.stacks_tip_height); } + info!("Begin subtest 3"); + // Subtest 3 // Add more than `nakamoto_attempt_time_ms` worth of transactions into mempool // Multiple blocks should be mined @@ -4198,10 +4217,16 @@ fn nakamoto_attempt_time() { if tx_count >= tx_limit { break 'submit_txs; } + info!( + "nakamoto_times_ms: on account {}; sent {} txs so far (out of {})", + acct_idx, tx_count, tx_limit + ); } acct_idx += 1; } + info!("Subtest 3 sent all transactions"); + // Make sure that these transactions *could* fit into a single block assert!(tx_total_size < MAX_BLOCK_LEN as usize); From a6a1439d68a7b16149be3741db76e2facbff996c Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Thu, 11 Jul 2024 12:20:59 +0900 Subject: [PATCH 0497/1400] chore: update lib.rs libary -> library --- stacks-signer/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 2310fce938..15c0a25c3d 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -1,6 +1,6 @@ #![forbid(missing_docs)] /*! -# stacks-signer: a libary for creating a Stacks compliant signer. A default implementation binary is also provided. +# stacks-signer: a library for creating a Stacks compliant signer. A default implementation binary is also provided. Usage documentation can be found in the [README](https://github.com/Trust-Machines/core-eng/stacks-signer-api/README.md). */ From 4af9dc0d92f7b765f4cc62c7ffef6a670c5b75e1 Mon Sep 17 00:00:00 2001 From: haouvw Date: Thu, 11 Jul 2024 15:07:43 +0800 Subject: [PATCH 0498/1400] chore: fix some comments Signed-off-by: haouvw --- clarity/src/vm/docs/mod.rs | 2 +- stacks-signer/src/v0/signer.rs | 2 +- stacks-signer/src/v1/signer.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 0c660d7e67..a2d12e2128 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2187,7 +2187,7 @@ const MINT_TOKEN: SpecialAPI = SpecialAPI { type defined using `define-fungible-token`. The increased token balance is _not_ transfered from another principal, but rather minted. -If a non-positive amount is provided to mint, this function returns `(err 1)`. Otherwise, on successfuly mint, it +If a non-positive amount is provided to mint, this function returns `(err 1)`. Otherwise, on successfully mint, it returns `(ok true)`. If this call would result in more supplied tokens than defined by the total supply in `define-fungible-token`, then a `SupplyOverflow` runtime error is thrown. ", diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index ce37c0a522..d42e73377e 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -86,7 +86,7 @@ impl SignerTrait for Signer { ) { let event_parity = match event { // Block proposal events do have reward cycles, but each proposal has its own cycle, - // and the vec could be heterogenous, so, don't differentiate. + // and the vec could be heterogeneous, so, don't differentiate. Some(SignerEvent::BlockValidationResponse(_)) | Some(SignerEvent::MinerMessages(..)) | Some(SignerEvent::NewBurnBlock(_)) diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 8eff7f5fbe..b2d06bf7f0 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -170,7 +170,7 @@ impl SignerTrait for Signer { let event_parity = match event { Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2), // Block proposal events do have reward cycles, but each proposal has its own cycle, - // and the vec could be heterogenous, so, don't differentiate. + // and the vec could be heterogeneous, so, don't differentiate. Some(SignerEvent::MinerMessages(..)) | Some(SignerEvent::NewBurnBlock(_)) | Some(SignerEvent::StatusCheck) From 96eb3229f69445a09ccdab60a7a9a51bf6807807 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 11 Jul 2024 10:24:26 -0400 Subject: [PATCH 0499/1400] fix: don't tie-break nakamoto blocks using block hash; just go with whatever was stored first (doing anything else will break malleablized block tests) --- stackslib/src/chainstate/burn/db/sortdb.rs | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 741cc2ec6b..15a3bf5641 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -1852,24 +1852,8 @@ impl<'a> SortitionHandleTx<'a> { } else { if &cur_ch == consensus_hash { // same sortition (i.e. nakamoto block) - // pick the one with the lexicographically-lesser block hash - cur_bhh - .0 - .iter() - .zip(stacks_block_hash.0.iter()) - .find_map(|(cur_bhh_byte, accepted_bhh_byte)| { - if cur_bhh_byte < accepted_bhh_byte { - // current is "earlier", so don't replace - Some(false) - } else if cur_bhh_byte > accepted_bhh_byte { - // current is "later", so replace - Some(true) - } else { - None - } - }) - // if somehow the block hashes are also the same, then don't replace - .unwrap_or(false) + // no replacement + false } else { // tips come from different sortitions // break ties by going with the latter-signed block From 4f757a4b9c0df210e2097d5670a3a624804fdacb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 11 Jul 2024 10:34:36 -0400 Subject: [PATCH 0500/1400] chore: remove ways to address blocks by sighash, and use block hash instead --- stackslib/src/chainstate/nakamoto/mod.rs | 14 ++++++------ .../src/chainstate/nakamoto/staging_blocks.rs | 19 +++++++--------- .../src/chainstate/nakamoto/tests/mod.rs | 22 +++++++++---------- 3 files changed, 26 insertions(+), 29 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 493ff0bca1..a7c1e2ba19 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2185,7 +2185,7 @@ impl NakamotoChainState { obtain_method: NakamotoBlockObtainMethod, ) -> Result { let block_id = block.block_id(); - let sighash = block.header.signer_signature_hash(); + let block_hash = block.header.block_hash(); // case 1 -- no block with this sighash exists. if staging_db_tx.try_store_block_with_new_signer_sighash( @@ -2196,22 +2196,22 @@ impl NakamotoChainState { )? { debug!("Stored block with new sighash"; "block_id" => %block_id, - "sighash" => %sighash); + "block_hash" => %block_hash); return Ok(true); } // case 2 -- the block exists. Consider replacing it, but only if its // signing weight is higher. - let (existing_block_id, _processed, orphaned, existing_signing_weight) = staging_db_tx.conn().get_block_processed_and_signed_weight(&block.header.consensus_hash, &sighash)? + let (existing_block_id, _processed, orphaned, existing_signing_weight) = staging_db_tx.conn().get_block_processed_and_signed_weight(&block.header.consensus_hash, &block_hash)? .ok_or_else(|| { // this should be unreachable -- there's no record of this block - error!("Could not store block {} ({}) with sighash {} -- no record of its processed status or signing weight!", &block_id, &block.header.consensus_hash, &sighash); + error!("Could not store block {} ({}) with block hash {} -- no record of its processed status or signing weight!", &block_id, &block.header.consensus_hash, &block_hash); ChainstateError::NoSuchBlockError })?; if orphaned { // nothing to do - debug!("Will not store alternative copy of block {} ({}) with sighash {}, since a block with the same sighash was orphaned", &block_id, &block.header.consensus_hash, &sighash); + debug!("Will not store alternative copy of block {} ({}) with block hash {}, since a block with the same block hash was orphaned", &block_id, &block.header.consensus_hash, &block_hash); return Ok(false); } @@ -2220,12 +2220,12 @@ impl NakamotoChainState { debug!("Replaced block"; "existing_block_id" => %existing_block_id, "block_id" => %block_id, - "sighash" => %sighash, + "block_hash" => %block_hash, "existing_signing_weight" => existing_signing_weight, "signing_weight" => signing_weight); true } else { - debug!("Will not store alternative copy of block {} ({}) with sighash {}, since it has less signing power", &block_id, &block.header.consensus_hash, &sighash); + debug!("Will not store alternative copy of block {} ({}) with block hash {}, since it has less signing power", &block_id, &block.header.consensus_hash, &block_hash); false }; diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 0185c608d3..88e1744bb6 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -22,9 +22,8 @@ use lazy_static::lazy_static; use rusqlite::blob::Blob; use rusqlite::types::{FromSql, FromSqlError, ToSql}; use rusqlite::{params, Connection, OpenFlags, OptionalExtension}; -use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; +use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash, StacksBlockId}; use stacks_common::types::sqlite::NO_PARAMS; -use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; @@ -252,10 +251,10 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { pub(crate) fn get_block_processed_and_signed_weight( &self, consensus_hash: &ConsensusHash, - signer_sighash: &Sha512Trunc256Sum, + block_hash: &BlockHeaderHash, ) -> Result, ChainstateError> { let sql = "SELECT index_block_hash,processed,orphaned,signing_weight FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2 ORDER BY signing_weight DESC, index_block_hash LIMIT 1"; - let args = params![consensus_hash, signer_sighash]; + let args = params![consensus_hash, block_hash]; let mut stmt = self.deref().prepare(sql)?; Ok(stmt @@ -528,14 +527,14 @@ impl<'a> NakamotoStagingBlocksTx<'a> { /// Do we have a block with the given signer sighash? /// NOTE: the block hash and sighash are the same for Nakamoto blocks - pub(crate) fn has_nakamoto_block_with_signer_sighash( + pub(crate) fn has_nakamoto_block_with_block_hash( &self, consensus_hash: &ConsensusHash, - signer_sighash: &Sha512Trunc256Sum, + block_hash: &BlockHeaderHash, ) -> Result { let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2"; - let args = rusqlite::params![consensus_hash, signer_sighash]; + let args = rusqlite::params![consensus_hash, block_hash]; let present: Option = query_row(self, qry, args)?; Ok(present.is_some()) } @@ -551,10 +550,8 @@ impl<'a> NakamotoStagingBlocksTx<'a> { signing_weight: u32, obtain_method: NakamotoBlockObtainMethod, ) -> Result { - let signer_sighash = block.header.signer_signature_hash(); - if self - .has_nakamoto_block_with_signer_sighash(&block.header.consensus_hash, &signer_sighash)? - { + let block_hash = block.header.block_hash(); + if self.has_nakamoto_block_with_block_hash(&block.header.consensus_hash, &block_hash)? { return Ok(false); } self.store_block(block, burn_attachable, signing_weight, obtain_method)?; diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 6890eb0ec9..b96ed86f03 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -1111,7 +1111,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .conn() .get_block_processed_and_signed_weight( &nakamoto_header_3.consensus_hash, - &nakamoto_header_3.signer_signature_hash() + &nakamoto_header_3.block_hash(), ) .unwrap() .unwrap(), @@ -1141,7 +1141,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .conn() .get_block_processed_and_signed_weight( &nakamoto_header_3.consensus_hash, - &nakamoto_header_3.signer_signature_hash() + &nakamoto_header_3.block_hash(), ) .unwrap() .unwrap(), @@ -1201,7 +1201,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .nakamoto_blocks_db() .get_block_processed_and_signed_weight( &nakamoto_header.consensus_hash, - &nakamoto_header.signer_signature_hash() + &nakamoto_header.block_hash(), ) .unwrap() .unwrap(), @@ -1225,7 +1225,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .nakamoto_blocks_db() .get_block_processed_and_signed_weight( &nakamoto_header.consensus_hash, - &nakamoto_header_2.signer_signature_hash() + &nakamoto_header_2.block_hash(), ) .unwrap() .unwrap(), @@ -1250,7 +1250,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .nakamoto_blocks_db() .get_block_processed_and_signed_weight( &nakamoto_header_3_weight_2.consensus_hash, - &nakamoto_header_3_weight_2.signer_signature_hash() + &nakamoto_header_3_weight_2.block_hash() ) .unwrap() .unwrap(), @@ -1292,7 +1292,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .conn() .get_block_processed_and_signed_weight( &nakamoto_header_3_weight_2.consensus_hash, - &nakamoto_header_3_weight_2.signer_signature_hash() + &nakamoto_header_3_weight_2.block_hash() ) .unwrap() .unwrap(), @@ -1323,7 +1323,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .conn() .get_block_processed_and_signed_weight( &nakamoto_header_3.consensus_hash, - &nakamoto_header_3.signer_signature_hash() + &nakamoto_header_3.block_hash() ) .unwrap() .unwrap(), @@ -1356,7 +1356,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .conn() .get_block_processed_and_signed_weight( &nakamoto_header_3_weight_2.consensus_hash, - &nakamoto_header_3_weight_2.signer_signature_hash() + &nakamoto_header_3_weight_2.block_hash() ) .unwrap() .unwrap(), @@ -1438,7 +1438,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .conn() .get_block_processed_and_signed_weight( &nakamoto_header_3_weight_2.consensus_hash, - &nakamoto_header_3_weight_2.signer_signature_hash() + &nakamoto_header_3_weight_2.block_hash() ) .unwrap() .unwrap(), @@ -1481,7 +1481,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .conn() .get_block_processed_and_signed_weight( &nakamoto_header_3.consensus_hash, - &nakamoto_header_3.signer_signature_hash() + &nakamoto_header_3.block_hash() ) .unwrap() .unwrap(), @@ -1510,7 +1510,7 @@ pub fn test_load_store_update_nakamoto_blocks() { .conn() .get_block_processed_and_signed_weight( &nakamoto_header.consensus_hash, - &nakamoto_header.signer_signature_hash() + &nakamoto_header.block_hash() ) .unwrap() .unwrap(), From 74cc300d1ef2f293b00b0a065ac6939c744f9d15 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 11 Jul 2024 13:24:16 -0400 Subject: [PATCH 0501/1400] fix: temporarily skip some mutant checks --- stackslib/src/chainstate/nakamoto/mod.rs | 1 + stackslib/src/net/p2p.rs | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index a7c1e2ba19..0dfcedb10d 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -741,6 +741,7 @@ impl NakamotoBlockHeader { /// /// Returns the signing weight on success. /// Returns ChainstateError::InvalidStacksBlock on error + #[cfg_attr(test, mutants::skip)] pub fn verify_signer_signatures(&self, reward_set: &RewardSet) -> Result { let message = self.signer_signature_hash(); let Some(signers) = &reward_set.signers else { diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 6cd160f9e5..ac4bbe28e4 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -4772,6 +4772,11 @@ impl PeerNetwork { } /// Refresh our view of the last three reward cycles + /// This ensures that the PeerNetwork has cached copies of the reward cycle data (including the + /// signing set) for the current, previous, and previous-previous reward cycles. This data is + /// in turn consumed by the Nakamoto block downloader, which must validate blocks signed from + /// any of these reward cycles. + #[cfg_attr(test, mutants::skip)] fn refresh_reward_cycles( &mut self, sortdb: &SortitionDB, From cf6b8fa4bb8382978c3a2751c16fa39486b74080 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 11 Jul 2024 16:50:02 -0400 Subject: [PATCH 0502/1400] chore: remove unneeded test notes; permit DB version 6 in older chainstate DBs since a node may be stopped and restarted during genesis sync (and failure to accept DB version 6 would lead to a panic) --- stackslib/src/chainstate/stacks/db/mod.rs | 12 ++++++------ testnet/stacks-node/test-failure.txt | 1 - 2 files changed, 6 insertions(+), 7 deletions(-) delete mode 100644 testnet/stacks-node/test-failure.txt diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 7d60c36440..356b117b8b 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -298,12 +298,12 @@ impl DBConfig { }); match epoch_id { StacksEpochId::Epoch10 => true, - StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 5, - StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 5, - StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 5, - StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 5, - StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 5, - StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 5, + StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 6, + StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 6, + StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 6, + StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 6, + StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 6, + StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 6, StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 6, StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 6, } diff --git a/testnet/stacks-node/test-failure.txt b/testnet/stacks-node/test-failure.txt deleted file mode 100644 index c8d0258d46..0000000000 --- a/testnet/stacks-node/test-failure.txt +++ /dev/null @@ -1 +0,0 @@ -The problem is that one or both nodes determine that they each behaved badly somehow during tenure block sync, and they disconnect each other and blow each other's inventory data away faster than it can be resync'ed From 62aac67851b4104d2f12a549afaa00bbc82436c6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 11 Jul 2024 16:45:26 -0500 Subject: [PATCH 0503/1400] test: add integration tests for signer validation of reorgs --- .github/workflows/bitcoin-tests.yml | 2 + stacks-signer/src/chainstate.rs | 33 +- stacks-signer/src/config.rs | 8 +- stacks-signer/src/signerdb.rs | 10 + stacks-signer/src/v0/signer.rs | 2 +- stackslib/src/chainstate/nakamoto/miner.rs | 5 + stackslib/src/chainstate/nakamoto/mod.rs | 3 +- stackslib/src/net/api/getsortition.rs | 2 +- .../stacks-node/src/nakamoto_node/miner.rs | 43 +-- testnet/stacks-node/src/tests/signer/mod.rs | 15 +- testnet/stacks-node/src/tests/signer/v0.rs | 345 +++++++++++++++++- 11 files changed, 417 insertions(+), 51 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index f7b96b96c7..a6aa956391 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -87,6 +87,8 @@ jobs: - tests::signer::v0::miner_gather_signatures - tests::signer::v0::mine_2_nakamoto_reward_cycles - tests::signer::v0::end_of_tenure + - tests::signer::v0::forked_tenure_okay + - tests::signer::v0::forked_tenure_invalid - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index b6872b8079..95c60d3a3c 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -34,22 +34,10 @@ use crate::signerdb::SignerDb; pub enum SignerChainstateError { /// Error resulting from database interactions #[error("Database error: {0}")] - DBError(DBError), + DBError(#[from] DBError), /// Error resulting from crate::client interactions #[error("Client error: {0}")] - ClientError(ClientError), -} - -impl From for SignerChainstateError { - fn from(value: ClientError) -> Self { - Self::ClientError(value) - } -} - -impl From for SignerChainstateError { - fn from(value: DBError) -> Self { - Self::DBError(value) - } + ClientError(#[from] ClientError), } /// Captures this signer's current view of a sortition's miner. @@ -93,8 +81,8 @@ pub struct SortitionState { /// Captures the configuration settings used by the signer when evaluating block proposals. #[derive(Debug, Clone)] pub struct ProposalEvalConfig { - /// How much time between the first block proposal in a tenure and the next bitcoin block - /// must pass before a subsequent miner isn't allowed to reorg the tenure + /// How much time must pass between the first block proposal in a tenure and the next bitcoin block + /// before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing: Duration, } @@ -323,6 +311,9 @@ impl SortitionsView { "Most recent miner's tenure does not build off the prior sortition, checking if this is valid behavior"; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "sortition_state.consensus_hash" => %sortition_state.consensus_hash, + "sortition_state.prior_sortition" => %sortition_state.prior_sortition, + "sortition_state.parent_tenure_id" => %sortition_state.parent_tenure_id, ); let tenures_reorged = client.get_tenure_forking_info( @@ -367,8 +358,14 @@ impl SortitionsView { sortition_state_received_time { // how long was there between when the proposal was received and the next sortition started? - let proposal_to_sortition = sortition_state_received_time - .saturating_sub(local_block_info.proposed_time); + let proposal_to_sortition = if let Some(signed_at) = + local_block_info.signed_self + { + sortition_state_received_time.saturating_sub(signed_at) + } else { + info!("We did not sign over the reorged tenure's first block, considering it as a late-arriving proposal"); + 0 + }; if Duration::from_secs(proposal_to_sortition) <= *first_proposal_burn_block_timing { diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 4b9970e38f..4c7bc565d3 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -151,8 +151,8 @@ pub struct SignerConfig { pub max_tx_fee_ustx: Option, /// The path to the signer's database file pub db_path: PathBuf, - /// How much time between the first block proposal in a tenure and the next bitcoin block - /// must pass before a subsequent miner isn't allowed to reorg the tenure + /// How much time must pass between the first block proposal in a tenure and the next bitcoin block + /// before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing: Duration, } @@ -233,8 +233,8 @@ struct RawConfigFile { pub db_path: String, /// Metrics endpoint pub metrics_endpoint: Option, - /// How much time between the first block proposal in a tenure and the next bitcoin block - /// must pass before a subsequent miner isn't allowed to reorg the tenure + /// How much time must pass between the first block proposal in a tenure and the next bitcoin block + /// before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing_secs: Option, } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 582e2027d2..f6405b23ad 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -83,6 +83,16 @@ impl BlockInfo { block_info } + /// Mark this block as valid, signed over, and record a timestamp in the block info if it wasn't + /// already set. + pub fn mark_signed_and_valid(&mut self) { + self.valid = Some(true); + self.signed_over = true; + if self.signed_self.is_none() { + self.signed_self = Some(get_epoch_time_secs()); + } + } + /// Return the block's signer signature hash pub fn signer_signature_hash(&self) -> Sha512Trunc256Sum { self.block.header.signer_signature_hash() diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 661e34d5ea..51a6bf91ad 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -409,7 +409,7 @@ impl Signer { return; } }; - block_info.valid = Some(true); + block_info.mark_signed_and_valid(); let signature = self .private_key .sign(&signer_signature_hash.0) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 3c4990de4d..136e7b84dd 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -216,6 +216,11 @@ impl NakamotoBlockBuilder { tenure_id_consensus_hash.clone(), parent_stacks_header.index_block_hash(), bitvec_len, + parent_stacks_header + .anchored_header + .as_stacks_nakamoto() + .map(|b| b.timestamp) + .unwrap_or(0), ), }) } diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 53e4f58747..cd1be59349 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -635,6 +635,7 @@ impl NakamotoBlockHeader { consensus_hash: ConsensusHash, parent_block_id: StacksBlockId, bitvec_len: u16, + parent_timestamp: u64, ) -> NakamotoBlockHeader { NakamotoBlockHeader { version: NAKAMOTO_BLOCK_VERSION, @@ -644,7 +645,7 @@ impl NakamotoBlockHeader { parent_block_id, tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), - timestamp: get_epoch_time_secs(), + timestamp: std::cmp::max(parent_timestamp, get_epoch_time_secs()), miner_signature: MessageSignature::empty(), signer_signature: vec![], pox_treatment: BitVec::ones(bitvec_len) diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index ce17b5427b..5e0557ca26 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -247,7 +247,7 @@ impl RPCRequestHandler for GetSortitionHandler { stacks_parent_sn.consensus_hash.clone() } else { // we actually need to perform the marf lookup - let last_sortition = handle.get_last_snapshot_with_sortition(stacks_parent_sn.block_height)?; + let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height.saturating_sub(1))?; last_sortition.consensus_hash }; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 8cab796a65..436951c237 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -243,6 +243,27 @@ impl BlockMinerThread { }; if let Some(mut new_block) = new_block { + #[cfg(test)] + { + if *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Broadcasting is stalled due to testing directive."; + "stacks_block_id" => %new_block.block_id(), + "stacks_block_hash" => %new_block.header.block_hash(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + while *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Broadcasting is no longer stalled due to testing directive."; + "block_id" => %new_block.block_id(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + } + } + let (reward_set, signer_signature) = match self.gather_signatures( &mut new_block, self.burn_block.block_height, @@ -656,26 +677,6 @@ impl BlockMinerThread { reward_set: RewardSet, stackerdbs: &StackerDBs, ) -> Result<(), NakamotoNodeError> { - #[cfg(test)] - { - if *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Broadcasting is stalled due to testing directive."; - "stacks_block_id" => %block.block_id(), - "stacks_block_hash" => %block.header.block_hash(), - "height" => block.header.chain_length, - "consensus_hash" => %block.header.consensus_hash - ); - while *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Broadcasting is no longer stalled due to testing directive."; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - "consensus_hash" => %block.header.consensus_hash - ); - } - } let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); let sort_db = SortitionDB::open( @@ -1014,7 +1015,6 @@ impl BlockMinerThread { ChainstateError::NoTransactionsToMine, )); } - let mining_key = self.keychain.get_nakamoto_sk(); let miner_signature = mining_key .sign(block.header.miner_signature_hash().as_bytes()) @@ -1028,6 +1028,7 @@ impl BlockMinerThread { block.txs.len(); "signer_sighash" => %block.header.signer_signature_hash(), "consensus_hash" => %block.header.consensus_hash, + "timestamp" => block.header.timestamp, ); self.event_dispatcher.process_mined_nakamoto_block_event( diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 18a4ea40f3..12584ab89a 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -104,6 +104,15 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest, wait_on_signers: Option, + ) -> Self { + Self::new_with_config_modifications(num_signers, initial_balances, wait_on_signers, |_| {}) + } + + fn new_with_config_modifications ()>( + num_signers: usize, + initial_balances: Vec<(StacksAddress, u64)>, + wait_on_signers: Option, + modifier: F, ) -> Self { // Generate Signer Data let signer_stacks_private_keys = (0..num_signers) @@ -148,8 +157,9 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest. -use std::env; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; +use std::{env, thread}; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{ BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::boot::MINERS_NAME; +use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::codec::StacksMessageCodec; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; @@ -40,6 +41,9 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; +use crate::event_dispatcher::MinedNakamotoBlockEvent; +use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; +use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; use crate::tests::neon_integrations::{ get_chain_info, next_block_and_wait, submit_tx, test_observer, @@ -446,6 +450,343 @@ fn mine_2_nakamoto_reward_cycles() { signer_test.shutdown(); } +#[test] +#[ignore] +fn forked_tenure_invalid() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + let result = forked_tenure_testing(Duration::from_secs(5), Duration::from_secs(7), false); + + assert_ne!(result.tip_b, result.tip_a); + assert_eq!(result.tip_b, result.tip_c); + assert_ne!(result.tip_c, result.tip_a); + + // Block B was built atop block A + assert_eq!( + result.tip_b.stacks_block_height, + result.tip_a.stacks_block_height + 1 + ); + assert_eq!( + result.mined_b.parent_block_id, + result.tip_a.index_block_hash().to_string() + ); + + // Block C was built AFTER Block B was built, but BEFORE it was broadcasted, so it should be built off of Block A + assert_eq!( + result.mined_c.parent_block_id, + result.tip_a.index_block_hash().to_string() + ); + assert_ne!( + result + .tip_c + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .signer_signature_hash(), + result.mined_c.signer_signature_hash, + "Mined block during tenure C should not have become the chain tip" + ); + + assert!(result.tip_c_2.is_none()); + assert!(result.mined_c_2.is_none()); + + // Tenure D should continue progress + assert_ne!(result.tip_c, result.tip_d); + assert_ne!(result.tip_b, result.tip_d); + assert_ne!(result.tip_a, result.tip_d); + + // Tenure D builds off of Tenure B + assert_eq!( + result.tip_d.stacks_block_height, + result.tip_b.stacks_block_height + 1, + ); + assert_eq!( + result.mined_d.parent_block_id, + result.tip_b.index_block_hash().to_string() + ); +} + +#[test] +#[ignore] +fn forked_tenure_okay() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let result = forked_tenure_testing(Duration::from_secs(360), Duration::from_secs(0), true); + + assert_ne!(result.tip_b, result.tip_a); + assert_ne!(result.tip_b, result.tip_c); + assert_ne!(result.tip_c, result.tip_a); + + // Block B was built atop block A + assert_eq!( + result.tip_b.stacks_block_height, + result.tip_a.stacks_block_height + 1 + ); + assert_eq!( + result.mined_b.parent_block_id, + result.tip_a.index_block_hash().to_string() + ); + + // Block C was built AFTER Block B was built, but BEFORE it was broadcasted, so it should be built off of Block A + assert_eq!( + result.tip_c.stacks_block_height, + result.tip_a.stacks_block_height + 1 + ); + assert_eq!( + result.mined_c.parent_block_id, + result.tip_a.index_block_hash().to_string() + ); + + let tenure_c_2 = result.tip_c_2.unwrap(); + assert_ne!(result.tip_c, tenure_c_2); + assert_ne!(tenure_c_2, result.tip_d); + assert_ne!(result.tip_c, result.tip_d); + + // Second block of tenure C builds off of block C + assert_eq!( + tenure_c_2.stacks_block_height, + result.tip_c.stacks_block_height + 1, + ); + assert_eq!( + result.mined_c_2.unwrap().parent_block_id, + result.tip_c.index_block_hash().to_string() + ); + + // Tenure D builds off of the second block of tenure C + assert_eq!( + result.tip_d.stacks_block_height, + tenure_c_2.stacks_block_height + 1, + ); + assert_eq!( + result.mined_d.parent_block_id, + tenure_c_2.index_block_hash().to_string() + ); +} + +struct TenureForkingResult { + tip_a: StacksHeaderInfo, + tip_b: StacksHeaderInfo, + tip_c: StacksHeaderInfo, + tip_c_2: Option, + tip_d: StacksHeaderInfo, + mined_b: MinedNakamotoBlockEvent, + mined_c: MinedNakamotoBlockEvent, + mined_c_2: Option, + mined_d: MinedNakamotoBlockEvent, +} + +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// Miner A mines a regular tenure, its last block being block a_x. +/// Miner B starts its tenure, Miner B produces a Stacks block b_0, but miner C submits its block commit before b_0 is broadcasted. +/// Bitcoin block C, containing Miner C's block commit, is mined BEFORE miner C has a chance to update their block commit with b_0's information. +/// This test asserts: +/// * tenure C ignores b_0, and correctly builds off of block a_x. +fn forked_tenure_testing( + proposal_limit: Duration, + post_btc_block_pause: Duration, + expect_tenure_c: bool, +) -> TenureForkingResult { + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |config| { + // make the duration long enough that the reorg attempt will definitely be accepted + config.first_proposal_burn_block_timing = proposal_limit; + }, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let naka_conf = signer_test.running_nodes.conf.clone(); + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let proposed_blocks = signer_test.running_nodes.nakamoto_blocks_proposed.clone(); + + info!("Starting tenure A."); + // In the next block, the miner should win the tenure and submit a stacks block + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = mined_blocks.load(Ordering::SeqCst); + Ok(commits_count > commits_before && blocks_count > blocks_before) + }, + ) + .unwrap(); + + let tip_a = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let commits_before = commits_submitted.load(Ordering::SeqCst); + info!("Starting tenure B."); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!("Commit op is submitted; unpause tenure B's block"); + + // Unpause the broadcast of Tenure B's block, do not submit commits. + TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + // Wait for a stacks block to be broadcasted + let start_time = Instant::now(); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < Duration::from_secs(30), + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + info!("Tenure B broadcasted a block. Wait {post_btc_block_pause:?}, issue the next bitcon block, and un-stall block commits."); + thread::sleep(post_btc_block_pause); + let tip_b = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let mined_b = blocks.last().unwrap().clone(); + + info!("Starting tenure C."); + // Submit a block commit op for tenure C + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_before = if expect_tenure_c { + mined_blocks.load(Ordering::SeqCst) + } else { + proposed_blocks.load(Ordering::SeqCst) + }; + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + TEST_SKIP_COMMIT_OP.lock().unwrap().replace(false); + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = if expect_tenure_c { + mined_blocks.load(Ordering::SeqCst) + } else { + proposed_blocks.load(Ordering::SeqCst) + }; + Ok(commits_count > commits_before && blocks_count > blocks_before) + }, + ) + .unwrap(); + + info!("Tenure C produced (or proposed) a block!"); + let tip_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + let blocks = test_observer::get_mined_nakamoto_blocks(); + let mined_c = blocks.last().unwrap().clone(); + + let (tip_c_2, mined_c_2) = if !expect_tenure_c { + (None, None) + } else { + // Now let's produce a second block for tenure C and ensure it builds off of block C. + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let start_time = Instant::now(); + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in Tenure C to mine a second block"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < Duration::from_secs(30), + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + info!("Tenure C produced a second block!"); + + let block_2_tenure_c = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let block_2_c = blocks.last().cloned().unwrap(); + (Some(block_2_tenure_c), Some(block_2_c)) + }; + + info!("Starting tenure D."); + // Submit a block commit op for tenure D and mine a stacks block + let commits_before = commits_submitted.load(Ordering::SeqCst); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let blocks_count = mined_blocks.load(Ordering::SeqCst); + Ok(commits_count > commits_before && blocks_count > blocks_before) + }, + ) + .unwrap(); + + let tip_d = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let mined_d = blocks.last().unwrap().clone(); + signer_test.shutdown(); + TenureForkingResult { + tip_a, + tip_b, + tip_c, + tip_c_2, + tip_d, + mined_b, + mined_c, + mined_c_2, + mined_d, + } +} + #[test] #[ignore] /// This test checks the behavior at the end of a tenure. Specifically: From cb69287b9f20f5fe62453a51db618f56dff31af5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 12 Jul 2024 12:28:33 -0500 Subject: [PATCH 0504/1400] test: unit tests for signerdb changes --- stacks-signer/src/signerdb.rs | 41 +++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index f6405b23ad..b0f8703e74 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -481,6 +481,47 @@ mod tests { assert_eq!(block_info.vote, Some(vote)); } + #[test] + fn get_first_signed_block() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let (mut block_info, block_proposal) = create_block(); + db.insert_block(&block_info).unwrap(); + + assert!(db + .get_first_signed_block_in_tenure(&block_proposal.block.header.consensus_hash) + .unwrap() + .is_none()); + + block_info.mark_signed_and_valid(); + db.insert_block(&block_info).unwrap(); + + let fetched_info = db + .get_first_signed_block_in_tenure(&block_proposal.block.header.consensus_hash) + .unwrap() + .unwrap(); + assert_eq!(fetched_info, block_info); + } + + #[test] + fn insert_burn_block_get_time() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let test_burn_hash = BurnchainHeaderHash([10; 32]); + let stime = SystemTime::now(); + let time_to_epoch = stime + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + db.insert_burn_block(&test_burn_hash, 10, &stime).unwrap(); + + let stored_time = db + .get_burn_block_receive_time(&test_burn_hash) + .unwrap() + .unwrap(); + assert_eq!(stored_time, time_to_epoch); + } + #[test] fn test_write_signer_state() { let db_path = tmp_db_path(); From 58f8da1eeb70ccc00eee39e27105769295941b61 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 12 Jul 2024 12:36:45 -0500 Subject: [PATCH 0505/1400] test: update unit test for proposal reorg timing --- stacks-signer/src/tests/chainstate.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index f99fde9952..5662982116 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -45,8 +45,6 @@ use crate::client::tests::MockServerClient; use crate::client::StacksClient; use crate::signerdb::{BlockInfo, SignerDb}; -static TEST_FIRST_PROPOSAL_BURN_BLOCK_TIMING: Duration = Duration::from_secs(30); - fn setup_test_environment( fn_name: &str, ) -> ( @@ -87,7 +85,7 @@ fn setup_test_environment( cur_sortition, last_sortition, config: ProposalEvalConfig { - first_proposal_burn_block_timing: TEST_FIRST_PROPOSAL_BURN_BLOCK_TIMING.clone(), + first_proposal_burn_block_timing: Duration::from_secs(30), }, }; @@ -157,10 +155,12 @@ fn check_proposal_miner_pkh_mismatch() { fn reorg_timing_testing( test_name: &str, + first_proposal_burn_block_timing_secs: u64, sortition_timing_secs: u64, ) -> Result { let (_stacks_client, mut signer_db, block_pk, mut view, mut block) = setup_test_environment(test_name); + view.config.first_proposal_burn_block_timing = Duration::from_secs(first_proposal_burn_block_timing_secs); view.cur_sortition.parent_tenure_id = view.last_sortition.as_ref().unwrap().parent_tenure_id; block.header.consensus_hash = view.cur_sortition.consensus_hash; @@ -227,7 +227,7 @@ fn reorg_timing_testing( reward_cycle: 1, }; let mut block_info_1 = BlockInfo::from(block_proposal_1); - block_info_1.signed_over = true; + block_info_1.mark_signed_and_valid(); signer_db.insert_block(&block_info_1).unwrap(); let sortition_time = SystemTime::UNIX_EPOCH @@ -253,7 +253,8 @@ fn reorg_timing_testing( fn check_proposal_reorg_timing_bad() { let result = reorg_timing_testing( "reorg_timing_bad", - TEST_FIRST_PROPOSAL_BURN_BLOCK_TIMING.as_secs() + 1, + 30, + 31, ); assert!(!result.unwrap(), "Proposal should not validate, because the reorg occurred in a block whose proposed time was long enough before the sortition"); } @@ -262,7 +263,8 @@ fn check_proposal_reorg_timing_bad() { fn check_proposal_reorg_timing_ok() { let result = reorg_timing_testing( "reorg_timing_okay", - TEST_FIRST_PROPOSAL_BURN_BLOCK_TIMING.as_secs(), + 30, + 30, ); assert!(result.unwrap(), "Proposal should validate okay, because the reorg occurred in a block whose proposed time was close to the sortition"); } From 7c8b4fe101f90a363b0f85f7f00d156d3b07ea87 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Fri, 12 Jul 2024 11:32:08 -0700 Subject: [PATCH 0506/1400] Workflow to lock closed issues/pr/discussions --- .github/workflows/lock-threads.yml | 37 ++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 .github/workflows/lock-threads.yml diff --git a/.github/workflows/lock-threads.yml b/.github/workflows/lock-threads.yml new file mode 100644 index 0000000000..3892fb3b3e --- /dev/null +++ b/.github/workflows/lock-threads.yml @@ -0,0 +1,37 @@ +## Workflow to lock closed PRs/issues/discussions +## timeframe to lock defaults to: +## issues: 30 days +## prs: 30 days +## discussions: 365 days + +name: "Lock Threads" + +on: + schedule: + - cron: "0 0 * * *" + workflow_dispatch: + +permissions: + issues: write + pull-requests: write + discussions: write + +concurrency: + group: lock-threads + +jobs: + ## Lock closed issues/prs/discussions + lock: + name: Lock Threads + runs-on: ubuntu-latest + steps: + ## Perform a lookup to check if the cache already exists + - name: Lock Threads + id: lock_threads + uses: stacks-network/actions/lock-threads@main + with: + github-token: ${{ secrets.GH_TOKEN }} + issue-inactive-days: 7 + pr-inactive-days: 7 + discussion-inactive-days: 7 + From 4fa1bdcee82f446bb0417e4aa47d5f3fcdd7a8a3 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Fri, 12 Jul 2024 11:38:40 -0700 Subject: [PATCH 0507/1400] remove comment --- .github/workflows/lock-threads.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/lock-threads.yml b/.github/workflows/lock-threads.yml index 3892fb3b3e..83b5a6b63f 100644 --- a/.github/workflows/lock-threads.yml +++ b/.github/workflows/lock-threads.yml @@ -25,7 +25,6 @@ jobs: name: Lock Threads runs-on: ubuntu-latest steps: - ## Perform a lookup to check if the cache already exists - name: Lock Threads id: lock_threads uses: stacks-network/actions/lock-threads@main From d8151d5371f04ce53b7a1d4b40627fafd78bb909 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Fri, 12 Jul 2024 11:43:30 -0700 Subject: [PATCH 0508/1400] update codeowners for .github workflows --- CODEOWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index f410f142e1..b30973662f 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -16,5 +16,5 @@ libsigner/**/*.rs @stacks-network/blockchain-team-signer stacks-signer/**/*.rs @stacks-network/blockchain-team-signer # CI workflows -./github/workflows/ @stacks-network/blockchain-team-ci -./github/actions/ @stacks-network/blockchain-team-ci \ No newline at end of file +/.github/workflows/ @stacks-network/blockchain-team-ci +/.github/actions/ @stacks-network/blockchain-team-ci From eefedbb21e969f8f84f1d0e6a56c3b73b2b10504 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 12 Jul 2024 13:50:40 -0500 Subject: [PATCH 0509/1400] cargo fmt-stacks and db schema tweaks --- stacks-signer/src/signerdb.rs | 10 +++++----- stacks-signer/src/tests/chainstate.rs | 15 ++++----------- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index b0f8703e74..0a0eb6c7e1 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -117,7 +117,7 @@ CREATE TABLE IF NOT EXISTS blocks ( stacks_height INTEGER NOT NULL, burn_block_height INTEGER NOT NULL, PRIMARY KEY (reward_cycle, signer_signature_hash) -)"; +) STRICT"; const CREATE_INDEXES: &str = " CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (signed_over); @@ -130,14 +130,14 @@ const CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, encrypted_state BLOB NOT NULL -)"; +) STRICT"; const CREATE_BURN_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS burn_blocks ( block_hash TEXT PRIMARY KEY, - block_height INTEGER, - received_time INTEGER -)"; + block_height INTEGER NOT NULL, + received_time INTEGER NOT NULL +) STRICT"; impl SignerDb { /// Create a new `SignerState` instance. diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 5662982116..c2c65f265c 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -160,7 +160,8 @@ fn reorg_timing_testing( ) -> Result { let (_stacks_client, mut signer_db, block_pk, mut view, mut block) = setup_test_environment(test_name); - view.config.first_proposal_burn_block_timing = Duration::from_secs(first_proposal_burn_block_timing_secs); + view.config.first_proposal_burn_block_timing = + Duration::from_secs(first_proposal_burn_block_timing_secs); view.cur_sortition.parent_tenure_id = view.last_sortition.as_ref().unwrap().parent_tenure_id; block.header.consensus_hash = view.cur_sortition.consensus_hash; @@ -251,21 +252,13 @@ fn reorg_timing_testing( #[test] fn check_proposal_reorg_timing_bad() { - let result = reorg_timing_testing( - "reorg_timing_bad", - 30, - 31, - ); + let result = reorg_timing_testing("reorg_timing_bad", 30, 31); assert!(!result.unwrap(), "Proposal should not validate, because the reorg occurred in a block whose proposed time was long enough before the sortition"); } #[test] fn check_proposal_reorg_timing_ok() { - let result = reorg_timing_testing( - "reorg_timing_okay", - 30, - 30, - ); + let result = reorg_timing_testing("reorg_timing_okay", 30, 30); assert!(result.unwrap(), "Proposal should validate okay, because the reorg occurred in a block whose proposed time was close to the sortition"); } From e9b23a0f0bf4f836a17a2cdb125aeb640b6e6a53 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 12 Jul 2024 17:30:33 -0400 Subject: [PATCH 0510/1400] chore: move mempool sync state machine into its own file --- stackslib/src/net/mempool/mod.rs | 619 ++++++++++++++ stackslib/src/net/p2p.rs | 1357 +----------------------------- 2 files changed, 626 insertions(+), 1350 deletions(-) create mode 100644 stackslib/src/net/mempool/mod.rs diff --git a/stackslib/src/net/mempool/mod.rs b/stackslib/src/net/mempool/mod.rs new file mode 100644 index 0000000000..caaea16527 --- /dev/null +++ b/stackslib/src/net/mempool/mod.rs @@ -0,0 +1,619 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::SocketAddr; + +use rand::prelude::*; +use rand::thread_rng; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; +use url; + +use crate::burnchains::Txid; +use crate::chainstate::stacks::StacksTransaction; +use crate::core::MemPoolDB; +use crate::net::chat::ConversationP2P; +use crate::net::dns::{DNSClient, DNSRequest}; +use crate::net::httpcore::StacksHttpRequest; +use crate::net::inv::inv2x::*; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, HttpRequestContents}; +use crate::util_lib::strings::UrlString; + +/// The four states the mempool sync state machine can be in +#[derive(Debug, Clone, PartialEq)] +pub enum MempoolSyncState { + /// Picking an outbound peer + PickOutboundPeer, + /// Resolving its data URL to a SocketAddr. Contains the data URL, DNS request handle, and + /// mempool page ID + ResolveURL(UrlString, DNSRequest, Txid), + /// Sending the request for mempool transactions. Contains the data URL, resolved socket, and + /// mempool page. + SendQuery(UrlString, SocketAddr, Txid), + /// Receiving the mempool response. Contains the URL, socket address, and event ID + RecvResponse(UrlString, SocketAddr, usize), +} + +/// Mempool synchronization state machine +#[derive(Debug, Clone, PartialEq)] +pub struct MempoolSync { + /// what state are we in? + mempool_state: MempoolSyncState, + /// when's the next mempool sync start? + mempool_sync_deadline: u64, + /// how long can the sync go for? + mempool_sync_timeout: u64, + /// how many complete syncs have happened + mempool_sync_completions: u64, + /// how many txs have been sync'ed? + pub(crate) mempool_sync_txs: u64, + /// what's the API endpoint? + api_endpoint: String, +} + +impl MempoolSync { + pub fn new() -> Self { + Self { + mempool_state: MempoolSyncState::PickOutboundPeer, + mempool_sync_deadline: 0, + mempool_sync_timeout: 0, + mempool_sync_completions: 0, + mempool_sync_txs: 0, + api_endpoint: "/v2/mempool/query".to_string(), + } + } + + /// Do a mempool sync. Return any transactions we might receive. + #[cfg_attr(test, mutants::skip)] + pub fn run( + &mut self, + network: &mut PeerNetwork, + dns_client_opt: &mut Option<&mut DNSClient>, + mempool: &MemPoolDB, + ibd: bool, + ) -> Option> { + if ibd { + return None; + } + + return match self.do_mempool_sync(network, dns_client_opt, mempool) { + (true, txs_opt) => { + // did we run to completion? + if let Some(txs) = txs_opt { + debug!( + "{:?}: Mempool sync obtained {} transactions from mempool sync, and done receiving", + &network.get_local_peer(), + txs.len() + ); + + self.mempool_sync_deadline = + get_epoch_time_secs() + network.get_connection_opts().mempool_sync_interval; + self.mempool_sync_completions = self.mempool_sync_completions.saturating_add(1); + self.mempool_sync_txs = self.mempool_sync_txs.saturating_add(txs.len() as u64); + Some(txs) + } else { + None + } + } + (false, txs_opt) => { + // did we get some transactions, but have more to get? + if let Some(txs) = txs_opt { + debug!( + "{:?}: Mempool sync obtained {} transactions from mempool sync, but have more", + &network.get_local_peer(), + txs.len() + ); + + self.mempool_sync_txs = self.mempool_sync_txs.saturating_add(txs.len() as u64); + Some(txs) + } else { + None + } + } + }; + } + + /// Reset a mempool sync + fn mempool_sync_reset(&mut self) { + self.mempool_state = MempoolSyncState::PickOutboundPeer; + self.mempool_sync_timeout = 0; + } + + /// Pick a peer to mempool sync with. + /// Returns Ok(None) if we're done syncing the mempool. + /// Returns Ok(Some(..)) if we're not done, and can proceed + /// Returns the new sync state -- either ResolveURL if we need to resolve a data URL, + /// or SendQuery if we got the IP address and can just issue the query. + #[cfg_attr(test, mutants::skip)] + fn mempool_sync_pick_outbound_peer( + &mut self, + network: &mut PeerNetwork, + dns_client_opt: &mut Option<&mut DNSClient>, + page_id: &Txid, + ) -> Result, NetError> { + let num_peers = network.get_num_p2p_convos(); + if num_peers == 0 { + debug!("No peers connected; cannot do mempool sync"); + return Ok(None); + } + + let mut idx = thread_rng().gen::() % num_peers; + let mut mempool_sync_data_url = None; + let mut mempool_sync_data_url_and_sockaddr = None; + for _ in 0..num_peers { + let Some((_event_id, convo)) = network.iter_peer_convos().skip(idx).next() else { + idx = 0; + continue; + }; + idx = (idx + 1) % num_peers; + + // only talk to authenticated, outbound peers + if !convo.is_authenticated() || !convo.is_outbound() { + continue; + } + // peer must support mempool protocol + if !ConversationP2P::supports_mempool_query(convo.peer_services) { + continue; + } + // has a data URL? + if convo.data_url.len() == 0 { + continue; + } + // already resolved? + if let Some(sockaddr) = convo.data_ip.as_ref() { + mempool_sync_data_url_and_sockaddr = + Some((convo.data_url.clone(), sockaddr.clone())); + break; + } + // can we resolve the data URL? + let url = convo.data_url.clone(); + if dns_client_opt.is_none() { + if let Ok(Some(_)) = PeerNetwork::try_get_url_ip(&url) { + } else { + // need a DNS client for this one + continue; + } + } + + // will resolve + mempool_sync_data_url = Some(url); + break; + } + + if let Some((url_str, sockaddr)) = mempool_sync_data_url_and_sockaddr { + // already resolved + return Ok(Some(MempoolSyncState::SendQuery( + url_str, + sockaddr, + page_id.clone(), + ))); + } else if let Some(url) = mempool_sync_data_url { + // will need to resolve + self.mempool_sync_begin_resolve_data_url(network, url, dns_client_opt, page_id) + } else { + debug!("No peer has a data URL, so no mempool sync can happen"); + Ok(None) + } + } + + /// Begin resolving the DNS host of a data URL for mempool sync. + /// Returns Ok(None) if we're done syncing the mempool. + /// Returns Ok(Some(..)) if we're not done, and can proceed + /// Returns the new sync state -- either ResolveURL if we need to resolve a data URL, + /// or SendQuery if we got the IP address and can just issue the query. + #[cfg_attr(test, mutants::skip)] + fn mempool_sync_begin_resolve_data_url( + &self, + network: &PeerNetwork, + url_str: UrlString, + dns_client_opt: &mut Option<&mut DNSClient>, + page_id: &Txid, + ) -> Result, NetError> { + // start resolving + let url = url_str.parse_to_block_url()?; + let port = match url.port_or_known_default() { + Some(p) => p, + None => { + warn!("Unsupported URL {:?}: unknown port", &url); + return Ok(None); + } + }; + + // bare IP address? + if let Some(addr) = PeerNetwork::try_get_url_ip(&url_str)? { + return Ok(Some(MempoolSyncState::SendQuery( + url_str, + addr, + page_id.clone(), + ))); + } else if let Some(url::Host::Domain(domain)) = url.host() { + if let Some(ref mut dns_client) = dns_client_opt { + // begin DNS query + match dns_client.queue_lookup( + domain, + port, + get_epoch_time_ms() + network.get_connection_opts().dns_timeout, + ) { + Ok(_) => {} + Err(_) => { + warn!("Failed to queue DNS lookup on {}", &url_str); + return Ok(None); + } + } + return Ok(Some(MempoolSyncState::ResolveURL( + url_str, + DNSRequest::new(domain.to_string(), port, 0), + page_id.clone(), + ))); + } else { + // can't proceed -- no DNS client + return Ok(None); + } + } else { + // can't proceed + return Ok(None); + } + } + + /// Resolve our picked mempool sync peer's data URL. + /// Returns Ok(true, ..) if we're done syncing the mempool. + /// Returns Ok(false, ..) if there's more to do + /// Returns the socket addr if we ever succeed in resolving it. + #[cfg_attr(test, mutants::skip)] + fn mempool_sync_resolve_data_url( + url_str: &UrlString, + request: &DNSRequest, + dns_client_opt: &mut Option<&mut DNSClient>, + ) -> Result<(bool, Option), NetError> { + if let Ok(Some(addr)) = PeerNetwork::try_get_url_ip(url_str) { + // URL contains an IP address -- go with that + Ok((false, Some(addr))) + } else if let Some(dns_client) = dns_client_opt { + // keep trying to resolve + match dns_client.poll_lookup(&request.host, request.port) { + Ok(Some(dns_response)) => match dns_response.result { + Ok(mut addrs) => { + if let Some(addr) = addrs.pop() { + // resolved! + return Ok((false, Some(addr))); + } else { + warn!("DNS returned no results for {}", url_str); + return Ok((true, None)); + } + } + Err(msg) => { + warn!("DNS failed to look up {:?}: {}", &url_str, msg); + return Ok((true, None)); + } + }, + Ok(None) => { + // still in-flight + return Ok((false, None)); + } + Err(e) => { + warn!("DNS lookup failed on {:?}: {:?}", url_str, &e); + return Ok((true, None)); + } + } + } else { + // can't do anything + debug!("No DNS client, and URL contains a domain, so no mempool sync can happen"); + return Ok((true, None)); + } + } + + /// Ask the remote peer for its mempool, connecting to it in the process if need be. + /// Returns Ok((true, ..)) if we're done mempool syncing + /// Returns Ok((false, ..)) if there's more to do + /// Returns the event ID on success + #[cfg_attr(test, mutants::skip)] + fn mempool_sync_send_query( + &mut self, + network: &mut PeerNetwork, + url: &UrlString, + addr: &SocketAddr, + mempool: &MemPoolDB, + page_id: Txid, + ) -> Result<(bool, Option), NetError> { + let sync_data = mempool.make_mempool_sync_data()?; + let request = StacksHttpRequest::new_for_peer( + PeerHost::from_socketaddr(addr), + "POST".into(), + self.api_endpoint.clone(), + HttpRequestContents::new() + .query_arg("page_id".into(), format!("{}", &page_id)) + .payload_stacks(&sync_data), + )?; + + let event_id = network.connect_or_send_http_request(url.clone(), addr.clone(), request)?; + return Ok((false, Some(event_id))); + } + + /// Receive the mempool sync response. + /// Return Ok(true, ..) if we're done with the mempool sync. + /// Return Ok(false, ..) if we have more work to do. + /// Returns the page ID of the next request to make, and the list of transactions we got + #[cfg_attr(test, mutants::skip)] + fn mempool_sync_recv_response( + &mut self, + network: &mut PeerNetwork, + event_id: usize, + ) -> Result<(bool, Option, Option>), NetError> { + PeerNetwork::with_http(network, |network, http| { + match http.get_conversation(event_id) { + None => { + if http.is_connecting(event_id) { + debug!( + "{:?}: Mempool sync event {} is not connected yet", + &network.local_peer, event_id + ); + return Ok((false, None, None)); + } else { + // conversation died + debug!("{:?}: Mempool sync peer hung up", &network.local_peer); + return Ok((true, None, None)); + } + } + Some(ref mut convo) => { + match convo.try_get_response() { + None => { + // still waiting + debug!( + "{:?}: Mempool sync event {} still waiting for a response", + &network.get_local_peer(), + event_id + ); + return Ok((false, None, None)); + } + Some(http_response) => match http_response.decode_mempool_txs_page() { + Ok((txs, page_id_opt)) => { + debug!("{:?}: Mempool sync received response for {} txs, next page {:?}", &network.local_peer, txs.len(), &page_id_opt); + return Ok((true, page_id_opt, Some(txs))); + } + Err(e) => { + warn!( + "{:?}: Mempool sync request did not receive a txs page: {:?}", + &network.local_peer, &e + ); + return Ok((true, None, None)); + } + }, + } + } + } + }) + } + + /// Do a mempool sync + /// Return true if we're done and can advance to the next state. + /// Returns the transactions as well if the sync ran to completion. + #[cfg_attr(test, mutants::skip)] + fn do_mempool_sync( + &mut self, + network: &mut PeerNetwork, + dns_client_opt: &mut Option<&mut DNSClient>, + mempool: &MemPoolDB, + ) -> (bool, Option>) { + if get_epoch_time_secs() <= self.mempool_sync_deadline { + debug!( + "{:?}: Wait until {} to do a mempool sync", + &network.get_local_peer(), + self.mempool_sync_deadline + ); + return (true, None); + } + + if self.mempool_sync_timeout == 0 { + // begin new sync + self.mempool_sync_timeout = + get_epoch_time_secs() + network.get_connection_opts().mempool_sync_timeout; + } else { + if get_epoch_time_secs() > self.mempool_sync_timeout { + debug!( + "{:?}: Mempool sync took too long; terminating", + &network.get_local_peer() + ); + self.mempool_sync_reset(); + return (true, None); + } + } + + // try advancing states until we get blocked. + // Once we get blocked, return. + loop { + let cur_state = self.mempool_state.clone(); + debug!( + "{:?}: Mempool sync state is {:?}", + &network.get_local_peer(), + &cur_state + ); + match cur_state { + MempoolSyncState::PickOutboundPeer => { + // 1. pick a random outbound conversation. + match self.mempool_sync_pick_outbound_peer( + network, + dns_client_opt, + &Txid([0u8; 32]), + ) { + Ok(Some(next_state)) => { + // success! can advance to either resolve a URL or to send a query + self.mempool_state = next_state; + } + Ok(None) => { + // done + self.mempool_sync_reset(); + return (true, None); + } + Err(e) => { + // done; need reset + warn!("mempool_sync_pick_outbound_peer returned {:?}", &e); + self.mempool_sync_reset(); + return (true, None); + } + } + } + MempoolSyncState::ResolveURL(ref url_str, ref dns_request, ref page_id) => { + // 2. resolve its data URL + match Self::mempool_sync_resolve_data_url(url_str, dns_request, dns_client_opt) + { + Ok((false, Some(addr))) => { + // address must be resolvable + if PeerAddress::from_socketaddr(&addr).is_in_private_range() { + debug!( + "{:?}: Mempool sync skips {}, which has private IP {}", + network.get_local_peer(), + &url_str, + &addr + ); + self.mempool_sync_reset(); + return (true, None); + } + // success! advance + self.mempool_state = + MempoolSyncState::SendQuery(url_str.clone(), addr, page_id.clone()); + } + Ok((false, None)) => { + // try again later + return (false, None); + } + Ok((true, _)) => { + // done + self.mempool_sync_reset(); + return (true, None); + } + Err(e) => { + // failed + warn!( + "mempool_sync_resolve_data_url({}) failed: {:?}", + url_str, &e + ); + self.mempool_sync_reset(); + return (true, None); + } + } + } + MempoolSyncState::SendQuery(ref url, ref addr, ref page_id) => { + // 3. ask for the remote peer's mempool's novel txs + debug!( + "{:?}: Mempool sync will query {} for mempool transactions at {}", + &network.get_local_peer(), + url, + page_id + ); + match self.mempool_sync_send_query(network, url, addr, mempool, page_id.clone()) + { + Ok((false, Some(event_id))) => { + // success! advance + debug!("{:?}: Mempool sync query {} for mempool transactions at {} on event {}", &network.get_local_peer(), url, page_id, event_id); + self.mempool_state = + MempoolSyncState::RecvResponse(url.clone(), addr.clone(), event_id); + } + Ok((false, None)) => { + // try again later + return (false, None); + } + Ok((true, _)) => { + // done + self.mempool_sync_reset(); + return (true, None); + } + Err(e) => { + // done + warn!("mempool_sync_send_query({}) returned {:?}", url, &e); + self.mempool_sync_reset(); + return (true, None); + } + } + } + MempoolSyncState::RecvResponse(ref url, ref addr, ref event_id) => { + match self.mempool_sync_recv_response(network, *event_id) { + Ok((true, next_page_id_opt, Some(txs))) => { + debug!( + "{:?}: Mempool sync received {} transactions; next page is {:?}", + &network.get_local_peer(), + txs.len(), + &next_page_id_opt + ); + + // done! got data + let ret = match next_page_id_opt { + Some(next_page_id) => { + // get the next page + self.mempool_state = MempoolSyncState::SendQuery( + url.clone(), + addr.clone(), + next_page_id, + ); + false + } + None => { + // done + self.mempool_sync_reset(); + true + } + }; + return (ret, Some(txs)); + } + Ok((true, _, None)) => { + // done! did not get data + self.mempool_sync_reset(); + return (true, None); + } + Ok((false, _, None)) => { + // still receiving; try again later + return (false, None); + } + Ok((false, _, Some(_))) => { + // should never happen + if cfg!(test) { + panic!("Reached invalid state in {:?}, aborting...", &cur_state); + } + warn!("Reached invalid state in {:?}, resetting...", &cur_state); + self.mempool_sync_reset(); + return (true, None); + } + Err(e) => { + // likely a network error + warn!("mempool_sync_recv_response returned {:?}", &e); + self.mempool_sync_reset(); + return (true, None); + } + } + } + } + } + } +} + +impl PeerNetwork { + /// Run the internal mempool sync machine + pub fn run_mempool_sync( + &mut self, + dns_client: &mut Option<&mut DNSClient>, + mempool: &MemPoolDB, + ibd: bool, + ) -> Option> { + let Some(mut mempool_sync) = self.mempool_sync.take() else { + return None; + }; + + let res = mempool_sync.run(self, dns_client, mempool, ibd); + + self.mempool_sync = Some(mempool_sync); + res + } +} diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index ac4bbe28e4..861a6e6cfa 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -63,6 +63,7 @@ use crate::net::http::HttpRequestContents; use crate::net::httpcore::StacksHttpRequest; use crate::net::inv::inv2x::*; use crate::net::inv::nakamoto::{InvGenerator, NakamotoInvStateMachine}; +use crate::net::mempool::MempoolSync; use crate::net::neighbors::*; use crate::net::poll::{NetworkPollState, NetworkState}; use crate::net::prune::*; @@ -194,21 +195,6 @@ pub enum PeerNetworkWorkState { Prune, } -/// The four states the mempool sync state machine can be in -#[derive(Debug, Clone, PartialEq)] -pub enum MempoolSyncState { - /// Picking an outbound peer - PickOutboundPeer, - /// Resolving its data URL to a SocketAddr. Contains the data URL, DNS request handle, and - /// mempool page ID - ResolveURL(UrlString, DNSRequest, Txid), - /// Sending the request for mempool transactions. Contains the data URL, resolved socket, and - /// mempool page. - SendQuery(UrlString, SocketAddr, Txid), - /// Receiving the mempool response. Contains the URL, socket address, and event ID - RecvResponse(UrlString, SocketAddr, usize), -} - pub type PeerMap = HashMap; pub type PendingMessages = HashMap>; @@ -348,6 +334,9 @@ pub struct PeerNetwork { pub nakamoto_work_state: PeerNetworkWorkState, pub(crate) have_data_to_download: bool, + /// Mempool sync machine + pub mempool_sync: Option, + // neighbor walk state pub walk: Option>, pub walk_deadline: u64, @@ -389,15 +378,6 @@ pub struct PeerNetwork { // handle to all stacker DB state pub stackerdbs: StackerDBs, - // outstanding request to perform a mempool sync - // * mempool_sync_deadline is when the next mempool sync must start - // * mempool_sync_timeout is when the current mempool sync must stop - mempool_state: MempoolSyncState, - mempool_sync_deadline: u64, - mempool_sync_timeout: u64, - mempool_sync_completions: u64, - mempool_sync_txs: u64, - // how often we pruned a given inbound/outbound peer pub prune_outbound_counts: HashMap, pub prune_inbound_counts: HashMap, @@ -541,6 +521,8 @@ impl PeerNetwork { nakamoto_work_state: PeerNetworkWorkState::GetPublicIP, have_data_to_download: false, + mempool_sync: Some(MempoolSync::new()), + walk: None, walk_deadline: 0, walk_attempts: 0, @@ -565,12 +547,6 @@ impl PeerNetwork { stacker_db_configs: stacker_db_configs, stackerdbs: stackerdbs, - mempool_state: MempoolSyncState::PickOutboundPeer, - mempool_sync_deadline: 0, - mempool_sync_timeout: 0, - mempool_sync_completions: 0, - mempool_sync_txs: 0, - prune_outbound_counts: HashMap::new(), prune_inbound_counts: HashMap::new(), @@ -2624,55 +2600,6 @@ impl PeerNetwork { done } - /// Do a mempool sync. Return any transactions we might receive. - #[cfg_attr(test, mutants::skip)] - fn do_network_mempool_sync( - &mut self, - dns_client_opt: &mut Option<&mut DNSClient>, - mempool: &MemPoolDB, - ibd: bool, - ) -> Option> { - if ibd { - return None; - } - - return match self.do_mempool_sync(dns_client_opt, mempool) { - (true, txs_opt) => { - // did we run to completion? - if let Some(txs) = txs_opt { - debug!( - "{:?}: Mempool sync obtained {} transactions from mempool sync, and done receiving", - &self.local_peer, - txs.len() - ); - - self.mempool_sync_deadline = - get_epoch_time_secs() + self.connection_opts.mempool_sync_interval; - self.mempool_sync_completions = self.mempool_sync_completions.saturating_add(1); - self.mempool_sync_txs = self.mempool_sync_txs.saturating_add(txs.len() as u64); - Some(txs) - } else { - None - } - } - (false, txs_opt) => { - // did we get some transactions, but have more to get? - if let Some(txs) = txs_opt { - debug!( - "{:?}: Mempool sync obtained {} transactions from mempool sync, but have more", - &self.local_peer, - txs.len() - ); - - self.mempool_sync_txs = self.mempool_sync_txs.saturating_add(txs.len() as u64); - Some(txs) - } else { - None - } - } - }; - } - /// Begin the process of learning this peer's public IP address. /// Return Ok(finished with this step) /// Return Err(..) on failure @@ -3602,435 +3529,6 @@ impl PeerNetwork { } } - /// Reset a mempool sync - fn mempool_sync_reset(&mut self) { - self.mempool_state = MempoolSyncState::PickOutboundPeer; - self.mempool_sync_timeout = 0; - } - - /// Pick a peer to mempool sync with. - /// Returns Ok(None) if we're done syncing the mempool. - /// Returns Ok(Some(..)) if we're not done, and can proceed - /// Returns the new sync state -- either ResolveURL if we need to resolve a data URL, - /// or SendQuery if we got the IP address and can just issue the query. - #[cfg_attr(test, mutants::skip)] - fn mempool_sync_pick_outbound_peer( - &mut self, - dns_client_opt: &mut Option<&mut DNSClient>, - page_id: &Txid, - ) -> Result, net_error> { - if self.peers.len() == 0 { - debug!("No peers connected; cannot do mempool sync"); - return Ok(None); - } - - let mut idx = thread_rng().gen::() % self.peers.len(); - let mut mempool_sync_data_url = None; - for _ in 0..self.peers.len() + 1 { - let event_id = match self.peers.keys().skip(idx).next() { - Some(eid) => *eid, - None => { - idx = 0; - continue; - } - }; - idx = (idx + 1) % self.peers.len(); - - if let Some(convo) = self.peers.get(&event_id) { - if !convo.is_authenticated() || !convo.is_outbound() { - continue; - } - if !ConversationP2P::supports_mempool_query(convo.peer_services) { - continue; - } - if convo.data_url.len() == 0 { - continue; - } - let url = convo.data_url.clone(); - if dns_client_opt.is_none() { - if let Ok(Some(_)) = PeerNetwork::try_get_url_ip(&url) { - } else { - // need a DNS client for this one - continue; - } - } - - mempool_sync_data_url = Some(url); - break; - } - } - - if let Some(url) = mempool_sync_data_url { - self.mempool_sync_begin_resolve_data_url(url, dns_client_opt, page_id) - } else { - debug!("No peer has a data URL, so no mempool sync can happen"); - Ok(None) - } - } - - /// Begin resolving the DNS host of a data URL for mempool sync. - /// Returns Ok(None) if we're done syncing the mempool. - /// Returns Ok(Some(..)) if we're not done, and can proceed - /// Returns the new sync state -- either ResolveURL if we need to resolve a data URL, - /// or SendQuery if we got the IP address and can just issue the query. - #[cfg_attr(test, mutants::skip)] - fn mempool_sync_begin_resolve_data_url( - &self, - url_str: UrlString, - dns_client_opt: &mut Option<&mut DNSClient>, - page_id: &Txid, - ) -> Result, net_error> { - // start resolving - let url = url_str.parse_to_block_url()?; - let port = match url.port_or_known_default() { - Some(p) => p, - None => { - warn!("Unsupported URL {:?}: unknown port", &url); - return Ok(None); - } - }; - - // bare IP address? - if let Some(addr) = PeerNetwork::try_get_url_ip(&url_str)? { - return Ok(Some(MempoolSyncState::SendQuery( - url_str, - addr, - page_id.clone(), - ))); - } else if let Some(url::Host::Domain(domain)) = url.host() { - if let Some(ref mut dns_client) = dns_client_opt { - // begin DNS query - match dns_client.queue_lookup( - domain, - port, - get_epoch_time_ms() + self.connection_opts.dns_timeout, - ) { - Ok(_) => {} - Err(_) => { - warn!("Failed to queue DNS lookup on {}", &url_str); - return Ok(None); - } - } - return Ok(Some(MempoolSyncState::ResolveURL( - url_str, - DNSRequest::new(domain.to_string(), port, 0), - page_id.clone(), - ))); - } else { - // can't proceed -- no DNS client - return Ok(None); - } - } else { - // can't proceed - return Ok(None); - } - } - - /// Resolve our picked mempool sync peer's data URL. - /// Returns Ok(true, ..) if we're done syncing the mempool. - /// Returns Ok(false, ..) if there's more to do - /// Returns the socket addr if we ever succeed in resolving it. - #[cfg_attr(test, mutants::skip)] - fn mempool_sync_resolve_data_url( - &mut self, - url_str: &UrlString, - request: &DNSRequest, - dns_client_opt: &mut Option<&mut DNSClient>, - ) -> Result<(bool, Option), net_error> { - if let Ok(Some(addr)) = PeerNetwork::try_get_url_ip(url_str) { - // URL contains an IP address -- go with that - Ok((false, Some(addr))) - } else if let Some(dns_client) = dns_client_opt { - // keep trying to resolve - match dns_client.poll_lookup(&request.host, request.port) { - Ok(Some(dns_response)) => match dns_response.result { - Ok(mut addrs) => { - if let Some(addr) = addrs.pop() { - // resolved! - return Ok((false, Some(addr))); - } else { - warn!("DNS returned no results for {}", url_str); - return Ok((true, None)); - } - } - Err(msg) => { - warn!("DNS failed to look up {:?}: {}", &url_str, msg); - return Ok((true, None)); - } - }, - Ok(None) => { - // still in-flight - return Ok((false, None)); - } - Err(e) => { - warn!("DNS lookup failed on {:?}: {:?}", url_str, &e); - return Ok((true, None)); - } - } - } else { - // can't do anything - debug!("No DNS client, and URL contains a domain, so no mempool sync can happen"); - return Ok((true, None)); - } - } - - /// Ask the remote peer for its mempool, connecting to it in the process if need be. - /// Returns Ok((true, ..)) if we're done mempool syncing - /// Returns Ok((false, ..)) if there's more to do - /// Returns the event ID on success - #[cfg_attr(test, mutants::skip)] - fn mempool_sync_send_query( - &mut self, - url: &UrlString, - addr: &SocketAddr, - mempool: &MemPoolDB, - page_id: Txid, - ) -> Result<(bool, Option), net_error> { - let sync_data = mempool.make_mempool_sync_data()?; - let request = StacksHttpRequest::new_for_peer( - PeerHost::from_socketaddr(addr), - "POST".into(), - "/v2/mempool/query".into(), - HttpRequestContents::new() - .query_arg("page_id".into(), format!("{}", &page_id)) - .payload_stacks(&sync_data), - )?; - - let event_id = self.connect_or_send_http_request(url.clone(), addr.clone(), request)?; - return Ok((false, Some(event_id))); - } - - /// Receive the mempool sync response. - /// Return Ok(true, ..) if we're done with the mempool sync. - /// Return Ok(false, ..) if we have more work to do. - /// Returns the page ID of the next request to make, and the list of transactions we got - #[cfg_attr(test, mutants::skip)] - fn mempool_sync_recv_response( - &mut self, - event_id: usize, - ) -> Result<(bool, Option, Option>), net_error> { - PeerNetwork::with_http(self, |network, http| { - match http.get_conversation(event_id) { - None => { - if http.is_connecting(event_id) { - debug!( - "{:?}: Mempool sync event {} is not connected yet", - &network.local_peer, event_id - ); - return Ok((false, None, None)); - } else { - // conversation died - debug!("{:?}: Mempool sync peer hung up", &network.local_peer); - return Ok((true, None, None)); - } - } - Some(ref mut convo) => { - match convo.try_get_response() { - None => { - // still waiting - debug!( - "{:?}: Mempool sync event {} still waiting for a response", - &network.local_peer, event_id - ); - return Ok((false, None, None)); - } - Some(http_response) => match http_response.decode_mempool_txs_page() { - Ok((txs, page_id_opt)) => { - debug!("{:?}: Mempool sync received response for {} txs, next page {:?}", &network.local_peer, txs.len(), &page_id_opt); - return Ok((true, page_id_opt, Some(txs))); - } - Err(e) => { - warn!( - "{:?}: Mempool sync request did not receive a txs page: {:?}", - &network.local_peer, &e - ); - return Ok((true, None, None)); - } - }, - } - } - } - }) - } - - /// Do a mempool sync - /// Return true if we're done and can advance to the next state. - /// Returns the transactions as well if the sync ran to completion. - #[cfg_attr(test, mutants::skip)] - fn do_mempool_sync( - &mut self, - dns_client_opt: &mut Option<&mut DNSClient>, - mempool: &MemPoolDB, - ) -> (bool, Option>) { - if get_epoch_time_secs() <= self.mempool_sync_deadline { - debug!( - "{:?}: Wait until {} to do a mempool sync", - &self.local_peer, self.mempool_sync_deadline - ); - return (true, None); - } - - if self.mempool_sync_timeout == 0 { - // begin new sync - self.mempool_sync_timeout = - get_epoch_time_secs() + self.connection_opts.mempool_sync_timeout; - } else { - if get_epoch_time_secs() > self.mempool_sync_timeout { - debug!( - "{:?}: Mempool sync took too long; terminating", - &self.local_peer - ); - self.mempool_sync_reset(); - return (true, None); - } - } - - // try advancing states until we get blocked. - // Once we get blocked, return. - loop { - let cur_state = self.mempool_state.clone(); - debug!( - "{:?}: Mempool sync state is {:?}", - &self.local_peer, &cur_state - ); - match cur_state { - MempoolSyncState::PickOutboundPeer => { - // 1. pick a random outbound conversation. - match self.mempool_sync_pick_outbound_peer(dns_client_opt, &Txid([0u8; 32])) { - Ok(Some(next_state)) => { - // success! can advance to either resolve a URL or to send a query - self.mempool_state = next_state; - } - Ok(None) => { - // done - self.mempool_sync_reset(); - return (true, None); - } - Err(e) => { - // done; need reset - warn!("mempool_sync_pick_outbound_peer returned {:?}", &e); - self.mempool_sync_reset(); - return (true, None); - } - } - } - MempoolSyncState::ResolveURL(ref url_str, ref dns_request, ref page_id) => { - // 2. resolve its data URL - match self.mempool_sync_resolve_data_url(url_str, dns_request, dns_client_opt) { - Ok((false, Some(addr))) => { - // success! advance - self.mempool_state = - MempoolSyncState::SendQuery(url_str.clone(), addr, page_id.clone()); - } - Ok((false, None)) => { - // try again later - return (false, None); - } - Ok((true, _)) => { - // done - self.mempool_sync_reset(); - return (true, None); - } - Err(e) => { - // failed - warn!( - "mempool_sync_resolve_data_url({}) failed: {:?}", - url_str, &e - ); - self.mempool_sync_reset(); - return (true, None); - } - } - } - MempoolSyncState::SendQuery(ref url, ref addr, ref page_id) => { - // 3. ask for the remote peer's mempool's novel txs - debug!( - "{:?}: Mempool sync will query {} for mempool transactions at {}", - &self.local_peer, url, page_id - ); - match self.mempool_sync_send_query(url, addr, mempool, page_id.clone()) { - Ok((false, Some(event_id))) => { - // success! advance - debug!("{:?}: Mempool sync query {} for mempool transactions at {} on event {}", &self.local_peer, url, page_id, event_id); - self.mempool_state = - MempoolSyncState::RecvResponse(url.clone(), addr.clone(), event_id); - } - Ok((false, None)) => { - // try again later - return (false, None); - } - Ok((true, _)) => { - // done - self.mempool_sync_reset(); - return (true, None); - } - Err(e) => { - // done - warn!("mempool_sync_send_query({}) returned {:?}", url, &e); - self.mempool_sync_reset(); - return (true, None); - } - } - } - MempoolSyncState::RecvResponse(ref url, ref addr, ref event_id) => { - match self.mempool_sync_recv_response(*event_id) { - Ok((true, next_page_id_opt, Some(txs))) => { - debug!( - "{:?}: Mempool sync received {} transactions; next page is {:?}", - &self.local_peer, - txs.len(), - &next_page_id_opt - ); - - // done! got data - let ret = match next_page_id_opt { - Some(next_page_id) => { - // get the next page - self.mempool_state = MempoolSyncState::SendQuery( - url.clone(), - addr.clone(), - next_page_id, - ); - false - } - None => { - // done - self.mempool_sync_reset(); - true - } - }; - return (ret, Some(txs)); - } - Ok((true, _, None)) => { - // done! did not get data - self.mempool_sync_reset(); - return (true, None); - } - Ok((false, _, None)) => { - // still receiving; try again later - return (false, None); - } - Ok((false, _, Some(_))) => { - // should never happen - if cfg!(test) { - panic!("Reached invalid state in {:?}, aborting...", &cur_state); - } - warn!("Reached invalid state in {:?}, resetting...", &cur_state); - self.mempool_sync_reset(); - return (true, None); - } - Err(e) => { - // likely a network error - warn!("mempool_sync_recv_response returned {:?}", &e); - self.mempool_sync_reset(); - return (true, None); - } - } - } - } - } - } - /// Do the actual work in the state machine. /// Return true if we need to prune connections. /// This will call the epoch-appropriate network worker @@ -5191,7 +4689,7 @@ impl PeerNetwork { // In parallel, do a mempool sync. // Remember any txs we get, so we can feed them to the relayer thread. - if let Some(mut txs) = self.do_network_mempool_sync(&mut dns_client_opt, mempool, ibd) { + if let Some(mut txs) = self.run_mempool_sync(&mut dns_client_opt, mempool, ibd) { network_result.synced_transactions.append(&mut txs); } @@ -5950,847 +5448,6 @@ mod test { }) } - #[test] - fn test_mempool_sync_2_peers() { - // peer 1 gets some transactions; verify peer 2 gets the recent ones and not the old - // ones - let mut peer_1_config = TestPeerConfig::new(function_name!(), 2210, 2211); - let mut peer_2_config = TestPeerConfig::new(function_name!(), 2212, 2213); - - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - - peer_1_config.connection_opts.mempool_sync_interval = 1; - peer_2_config.connection_opts.mempool_sync_interval = 1; - - let num_txs = 10; - let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); - let initial_balances: Vec<_> = addrs - .iter() - .map(|a| (a.to_account_principal(), 1000000000)) - .collect(); - - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances.clone(); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); - - let num_blocks = 10; - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - for i in 0..(num_blocks / 2) { - let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); - - peer_1.next_burnchain_block(burn_ops.clone()); - peer_2.next_burnchain_block(burn_ops.clone()); - - peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); - peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - } - - let addr = StacksAddress { - version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - bytes: Hash160([0xff; 20]), - }; - - // old transactions - let num_txs = 10; - let mut old_txs = HashMap::new(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); - for i in 0..num_txs { - let pk = &pks[i]; - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(0); - - let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); - - let tx = tx_signer.get_tx().unwrap(); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - old_txs.insert(tx.txid(), tx.clone()); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - peer_1.chainstate(), - &ConsensusHash([0x1 + (num_blocks as u8); 20]), - &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), - txid.clone(), - tx_bytes, - tx_fee, - (num_blocks / 2) as u64, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); - - eprintln!("Added {} {}", i, &txid); - } - mempool_tx.commit().unwrap(); - peer_1.mempool = Some(peer_1_mempool); - - // keep mining to make these txs old - for i in (num_blocks / 2)..num_blocks { - let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); - - peer_1.next_burnchain_block(burn_ops.clone()); - peer_2.next_burnchain_block(burn_ops.clone()); - - peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); - peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - } - - let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - let mut txs = HashMap::new(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); - for i in 0..num_txs { - let pk = &pks[i]; - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(1); - - let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); - - let tx = tx_signer.get_tx().unwrap(); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - txs.insert(tx.txid(), tx.clone()); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - peer_1.chainstate(), - &ConsensusHash([0x1 + (num_blocks as u8); 20]), - &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), - txid.clone(), - tx_bytes, - tx_fee, - num_blocks as u64, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); - - eprintln!("Added {} {}", i, &txid); - } - mempool_tx.commit().unwrap(); - peer_1.mempool = Some(peer_1_mempool); - - let mut round = 0; - let mut peer_1_mempool_txs = 0; - let mut peer_2_mempool_txs = 0; - - while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { - if let Ok(mut result) = peer_1.step_with_ibd(false) { - let lp = peer_1.network.local_peer.clone(); - let burnchain = peer_1.network.burnchain.clone(); - peer_1 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - &burnchain, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - if let Ok(mut result) = peer_2.step_with_ibd(false) { - let lp = peer_2.network.local_peer.clone(); - let burnchain = peer_2.network.burnchain.clone(); - peer_2 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - &burnchain, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - round += 1; - - let mp = peer_1.mempool.take().unwrap(); - peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_1.mempool.replace(mp); - - let mp = peer_2.mempool.take().unwrap(); - peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_2.mempool.replace(mp); - - info!( - "Peer 1: {}, Peer 2: {}", - peer_1_mempool_txs, peer_2_mempool_txs - ); - } - - info!("Completed mempool sync in {} step(s)", round); - - let mp = peer_2.mempool.take().unwrap(); - let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); - peer_2.mempool.replace(mp); - - // peer 2 has all the recent txs - // peer 2 has none of the old ones - for tx in peer_2_mempool_txs { - assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); - assert!(old_txs.get(&tx.tx.txid()).is_none()); - } - } - - #[test] - fn test_mempool_sync_2_peers_paginated() { - // peer 1 gets some transactions; verify peer 2 gets them all - let mut peer_1_config = TestPeerConfig::new(function_name!(), 2214, 2215); - let mut peer_2_config = TestPeerConfig::new(function_name!(), 2216, 2217); - - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - - peer_1_config.connection_opts.mempool_sync_interval = 1; - peer_2_config.connection_opts.mempool_sync_interval = 1; - - let num_txs = 1024; - let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); - let initial_balances: Vec<_> = addrs - .iter() - .map(|a| (a.to_account_principal(), 1000000000)) - .collect(); - - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances.clone(); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); - - let num_blocks = 10; - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - for i in 0..num_blocks { - let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); - - peer_1.next_burnchain_block(burn_ops.clone()); - peer_2.next_burnchain_block(burn_ops.clone()); - - peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); - peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - } - - let addr = StacksAddress { - version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - bytes: Hash160([0xff; 20]), - }; - - // fill peer 1 with lots of transactions - let mut txs = HashMap::new(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); - for i in 0..num_txs { - let pk = &pks[i]; - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(0); - - let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); - - let tx = tx_signer.get_tx().unwrap(); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - txs.insert(tx.txid(), tx.clone()); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - peer_1.chainstate(), - &ConsensusHash([0x1 + (num_blocks as u8); 20]), - &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), - txid.clone(), - tx_bytes, - tx_fee, - num_blocks, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); - - eprintln!("Added {} {}", i, &txid); - } - mempool_tx.commit().unwrap(); - peer_1.mempool = Some(peer_1_mempool); - - let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - let mut round = 0; - let mut peer_1_mempool_txs = 0; - let mut peer_2_mempool_txs = 0; - - while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { - if let Ok(mut result) = peer_1.step_with_ibd(false) { - let lp = peer_1.network.local_peer.clone(); - let burnchain = peer_1.network.burnchain.clone(); - peer_1 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - &burnchain, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - if let Ok(mut result) = peer_2.step_with_ibd(false) { - let lp = peer_2.network.local_peer.clone(); - let burnchain = peer_2.network.burnchain.clone(); - peer_2 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - &burnchain, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - round += 1; - - let mp = peer_1.mempool.take().unwrap(); - peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_1.mempool.replace(mp); - - let mp = peer_2.mempool.take().unwrap(); - peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_2.mempool.replace(mp); - - info!( - "Peer 1: {}, Peer 2: {}", - peer_1_mempool_txs, peer_2_mempool_txs - ); - } - - info!("Completed mempool sync in {} step(s)", round); - - let mp = peer_2.mempool.take().unwrap(); - let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); - peer_2.mempool.replace(mp); - - for tx in peer_2_mempool_txs { - assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); - } - } - - #[test] - fn test_mempool_sync_2_peers_blacklisted() { - // peer 1 gets some transactions; peer 2 blacklists some of them; - // verify peer 2 gets only the non-blacklisted ones. - let mut peer_1_config = TestPeerConfig::new(function_name!(), 2218, 2219); - let mut peer_2_config = TestPeerConfig::new(function_name!(), 2220, 2221); - - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - - peer_1_config.connection_opts.mempool_sync_interval = 1; - peer_2_config.connection_opts.mempool_sync_interval = 1; - - let num_txs = 1024; - let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); - let initial_balances: Vec<_> = addrs - .iter() - .map(|a| (a.to_account_principal(), 1000000000)) - .collect(); - - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances.clone(); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); - - let num_blocks = 10; - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - for i in 0..num_blocks { - let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); - - peer_1.next_burnchain_block(burn_ops.clone()); - peer_2.next_burnchain_block(burn_ops.clone()); - - peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); - peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - } - - let addr = StacksAddress { - version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - bytes: Hash160([0xff; 20]), - }; - - // fill peer 1 with lots of transactions - let mut txs = HashMap::new(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); - let mut peer_2_blacklist = vec![]; - for i in 0..num_txs { - let pk = &pks[i]; - let mut tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0x80000000, - auth: TransactionAuth::from_p2pkh(&pk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::TokenTransfer( - addr.to_account_principal(), - 123, - TokenTransferMemo([0u8; 34]), - ), - }; - tx.set_tx_fee(1000); - tx.set_origin_nonce(0); - - let mut tx_signer = StacksTransactionSigner::new(&tx); - tx_signer.sign_origin(&pk).unwrap(); - - let tx = tx_signer.get_tx().unwrap(); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - txs.insert(tx.txid(), tx.clone()); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - peer_1.chainstate(), - &ConsensusHash([0x1 + (num_blocks as u8); 20]), - &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), - txid.clone(), - tx_bytes, - tx_fee, - num_blocks, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); - - eprintln!("Added {} {}", i, &txid); - - if i % 2 == 0 { - // peer 2 blacklists even-numbered txs - peer_2_blacklist.push(txid); - } - } - mempool_tx.commit().unwrap(); - peer_1.mempool = Some(peer_1_mempool); - - // peer 2 blacklists them all - let mut peer_2_mempool = peer_2.mempool.take().unwrap(); - - // blacklisted txs never time out - peer_2_mempool.blacklist_timeout = u64::MAX / 2; - - let mempool_tx = peer_2_mempool.tx_begin().unwrap(); - MemPoolDB::inner_blacklist_txs(&mempool_tx, &peer_2_blacklist, get_epoch_time_secs()) - .unwrap(); - mempool_tx.commit().unwrap(); - - peer_2.mempool = Some(peer_2_mempool); - - let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - let mut round = 0; - let mut peer_1_mempool_txs = 0; - let mut peer_2_mempool_txs = 0; - - while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs / 2 { - if let Ok(mut result) = peer_1.step_with_ibd(false) { - let lp = peer_1.network.local_peer.clone(); - let burnchain = peer_1.network.burnchain.clone(); - peer_1 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - &burnchain, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - if let Ok(mut result) = peer_2.step_with_ibd(false) { - let lp = peer_2.network.local_peer.clone(); - let burnchain = peer_2.network.burnchain.clone(); - peer_2 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - &burnchain, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - round += 1; - - let mp = peer_1.mempool.take().unwrap(); - peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_1.mempool.replace(mp); - - let mp = peer_2.mempool.take().unwrap(); - peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_2.mempool.replace(mp); - - info!( - "Peer 1: {}, Peer 2: {}", - peer_1_mempool_txs, peer_2_mempool_txs - ); - } - - info!("Completed mempool sync in {} step(s)", round); - - let mp = peer_2.mempool.take().unwrap(); - let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); - peer_2.mempool.replace(mp); - - for tx in peer_2_mempool_txs { - assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); - assert!(!peer_2_blacklist.contains(&tx.tx.txid())); - } - } - - /// Make sure mempool sync never stores problematic transactions - #[test] - fn test_mempool_sync_2_peers_problematic() { - // peer 1 gets some transactions; peer 2 blacklists them all due to being invalid. - // verify peer 2 stores nothing. - let mut peer_1_config = TestPeerConfig::new(function_name!(), 2218, 2219); - let mut peer_2_config = TestPeerConfig::new(function_name!(), 2220, 2221); - - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - - peer_1_config.connection_opts.mempool_sync_interval = 1; - peer_2_config.connection_opts.mempool_sync_interval = 1; - - let num_txs = 128; - let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); - let initial_balances: Vec<_> = addrs - .iter() - .map(|a| (a.to_account_principal(), 1000000000)) - .collect(); - - peer_1_config.initial_balances = initial_balances.clone(); - peer_2_config.initial_balances = initial_balances.clone(); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); - - let num_blocks = 10; - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - for i in 0..num_blocks { - let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); - - peer_1.next_burnchain_block(burn_ops.clone()); - peer_2.next_burnchain_block(burn_ops.clone()); - - peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); - peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - } - - let addr = StacksAddress { - version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - bytes: Hash160([0xff; 20]), - }; - - // fill peer 1 with lots of transactions - let mut txs = HashMap::new(); - let mut peer_1_mempool = peer_1.mempool.take().unwrap(); - let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); - for i in 0..num_txs { - let pk = &pks[i]; - - let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); - let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); - - let tx = make_contract_tx( - &pk, - 0, - (tx_exceeds_body.len() * 100) as u64, - "test-exceeds", - &tx_exceeds_body, - ); - - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let origin_addr = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); - let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); - let tx_fee = tx.get_tx_fee(); - - txs.insert(tx.txid(), tx.clone()); - - // should succeed - MemPoolDB::try_add_tx( - &mut mempool_tx, - peer_1.chainstate(), - &ConsensusHash([0x1 + (num_blocks as u8); 20]), - &BlockHeaderHash([0x2 + (num_blocks as u8); 32]), - txid.clone(), - tx_bytes, - tx_fee, - num_blocks, - &origin_addr, - origin_nonce, - &sponsor_addr, - sponsor_nonce, - None, - ) - .unwrap(); - - eprintln!("Added {} {}", i, &txid); - } - mempool_tx.commit().unwrap(); - peer_1.mempool = Some(peer_1_mempool); - - // blacklisted txs never time out - let mut peer_2_mempool = peer_2.mempool.take().unwrap(); - peer_2_mempool.blacklist_timeout = u64::MAX / 2; - peer_2.mempool = Some(peer_2_mempool); - - let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height + 1 - }; - - let mut round = 0; - let mut peer_1_mempool_txs = 0; - - while peer_1_mempool_txs < num_txs || peer_2.network.mempool_sync_txs < (num_txs as u64) { - if let Ok(mut result) = peer_1.step_with_ibd(false) { - let lp = peer_1.network.local_peer.clone(); - let burnchain = peer_1.network.burnchain.clone(); - peer_1 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - &burnchain, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - if let Ok(mut result) = peer_2.step_with_ibd(false) { - let lp = peer_2.network.local_peer.clone(); - let burnchain = peer_2.network.burnchain.clone(); - peer_2 - .with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - &burnchain, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - } - - round += 1; - - let mp = peer_1.mempool.take().unwrap(); - peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); - peer_1.mempool.replace(mp); - - info!( - "Peer 1: {}, Peer 2: {}", - peer_1_mempool_txs, peer_2.network.mempool_sync_txs - ); - } - - info!("Completed mempool sync in {} step(s)", round); - - let mp = peer_2.mempool.take().unwrap(); - let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); - peer_2.mempool.replace(mp); - - assert_eq!(peer_2_mempool_txs.len(), 128); - } - #[test] fn test_is_connecting() { let peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); From 8b2a5cf442ffcc4ba8cc7bb5a25ce713ff34e5d2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 12 Jul 2024 17:30:46 -0400 Subject: [PATCH 0511/1400] chore: API sync --- stackslib/src/chainstate/stacks/miner.rs | 2 -- stackslib/src/chainstate/stacks/tests/block_construction.rs | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 41ae4a2646..4495fb36b9 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1203,7 +1203,6 @@ impl<'a> StacksMicroblockBuilder<'a> { intermediate_result = mem_pool.iterate_candidates( &mut clarity_tx, &mut tx_events, - self.anchor_block_height, mempool_settings.clone(), |clarity_tx, to_consider, estimator| { let mempool_tx = &to_consider.tx; @@ -2210,7 +2209,6 @@ impl StacksBlockBuilder { intermediate_result = mempool.iterate_candidates( epoch_tx, &mut tx_events, - tip_height, mempool_settings.clone(), |epoch_tx, to_consider, estimator| { // first, have we been preempted? diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 56ae3d8d52..4194207840 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -3060,7 +3060,7 @@ fn test_build_microblock_stream_forks_with_descendants() { // erase any pending transactions -- this is a "worse" poison-microblock, // and we want to avoid mining the "better" one - mempool.clear_before_height(10).unwrap(); + mempool.clear_before_coinbase_height(10).unwrap(); let mut tx_bytes = vec![]; poison_microblock_tx @@ -4784,6 +4784,7 @@ fn paramaterized_mempool_walk_test( &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -4832,7 +4833,6 @@ fn paramaterized_mempool_walk_test( .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; From b93b103ecc491a500bdef3eb351cbba59a078c8e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 12 Jul 2024 17:30:57 -0400 Subject: [PATCH 0512/1400] feat: make mempool state compatible with nakamoto by treating `height` as the coinbase height, and `consensus_hash` and `block_hash` as the tenure-start consensus hash and block hash --- stackslib/src/core/mempool.rs | 300 +++++++++++++++++----------------- 1 file changed, 149 insertions(+), 151 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 8e8912f569..a85ffd7327 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -301,7 +301,7 @@ pub struct MemPoolAdmitter { enum MemPoolWalkResult { Chainstate(ConsensusHash, BlockHeaderHash, u64, u64), - NoneAtHeight(ConsensusHash, BlockHeaderHash, u64), + NoneAtCoinbaseHeight(ConsensusHash, BlockHeaderHash, u64), Done, } @@ -432,9 +432,19 @@ pub struct MemPoolTxMetadata { pub txid: Txid, pub len: u64, pub tx_fee: u64, - pub consensus_hash: ConsensusHash, - pub block_header_hash: BlockHeaderHash, - pub block_height: u64, + /// The tenure ID in which this transaction was accepted. + /// In epoch 2.x, this is the consensus hash of the sortition that chose the Stacks block + /// In Nakamoto, this is the consensus hash of the ongoing tenure. + pub tenure_consensus_hash: ConsensusHash, + /// The tenure block in which this transaction was accepted. + /// In epoch 2.x, this is the hash of the Stacks block produced in the sortition. + /// In Nakamoto, this is the hash of the tenure-start block. + pub tenure_block_header_hash: BlockHeaderHash, + /// The number of coinbases that have transpired at the time of this transaction's acceptance. + /// In epoch 2.x, this is the same as the Stacks block height + /// In Nakamoto, this is the simply the number of coinbases produced in the history tipped at + /// `tenure_consensus_hash` and `tenure_block_header_hash` + pub coinbase_height: u64, pub origin_address: StacksAddress, pub origin_nonce: u64, pub sponsor_address: StacksAddress, @@ -564,10 +574,10 @@ impl FromRow for Txid { impl FromRow for MemPoolTxMetadata { fn from_row<'a>(row: &'a Row) -> Result { let txid = Txid::from_column(row, "txid")?; - let consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?; - let block_header_hash = BlockHeaderHash::from_column(row, "block_header_hash")?; + let tenure_consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?; + let tenure_block_header_hash = BlockHeaderHash::from_column(row, "block_header_hash")?; let tx_fee = u64::from_column(row, "tx_fee")?; - let block_height = u64::from_column(row, "height")?; + let coinbase_height = u64::from_column(row, "height")?; let len = u64::from_column(row, "length")?; let accept_time = u64::from_column(row, "accept_time")?; let origin_address = StacksAddress::from_column(row, "origin_address")?; @@ -581,9 +591,9 @@ impl FromRow for MemPoolTxMetadata { txid, len, tx_fee, - consensus_hash, - block_header_hash, - block_height, + tenure_consensus_hash, + tenure_block_header_hash, + coinbase_height, origin_address, origin_nonce, sponsor_address, @@ -657,8 +667,13 @@ const MEMPOOL_INITIAL_SCHEMA: &'static [&'static str] = &[r#" tx_fee INTEGER NOT NULL, length INTEGER NOT NULL, consensus_hash TEXT NOT NULL, + -- In epoch2x, this is the Stacks tip block hash at the time of this tx's arrival. + -- In Nakamoto, this is the tenure-start block hash of the ongoing tenure at the time of this tx's arrival. block_header_hash TEXT NOT NULL, - height INTEGER NOT NULL, -- stacks block height + -- This is the *coinbase height* of the chain tip above. + -- In epoch2x (when this schema was written), this also happened to be the block height; hence the name. + -- In Nakamoto, this is not a block height any longer. + height INTEGER NOT NULL, accept_time INTEGER NOT NULL, tx BLOB NOT NULL, PRIMARY KEY (txid), @@ -855,15 +870,18 @@ impl<'a> MemPoolTx<'a> { self.tx.commit().map_err(db_error::SqliteError) } - /// Remove all txids at the given height from the bloom counter. + /// Remove all txids at the given coinbase height from the bloom counter. /// Used to clear out txids that are now outside the bloom counter's depth. - fn prune_bloom_counter(&mut self, target_height: u64) -> Result<(), MemPoolRejection> { + fn prune_bloom_counter(&mut self, target_coinbase_height: u64) -> Result<(), MemPoolRejection> { let sql = "SELECT a.txid FROM mempool AS a LEFT OUTER JOIN removed_txids AS b ON a.txid = b.txid WHERE b.txid IS NULL AND a.height = ?1"; - let args = params![u64_to_sql(target_height)?]; + let args = params![u64_to_sql(target_coinbase_height)?]; let txids: Vec = query_rows(&self.tx, sql, args)?; let _num_txs = txids.len(); - test_debug!("Prune bloom counter from height {}", target_height); + test_debug!( + "Prune bloom counter from coinbase height {}", + target_coinbase_height + ); // keep borrow-checker happy MemPoolTx::with_bloom_state(self, |ref mut dbtx, ref mut bloom_counter| { @@ -880,8 +898,8 @@ impl<'a> MemPoolTx<'a> { })?; test_debug!( - "Pruned bloom filter at height {}: removed {} txs", - target_height, + "Pruned bloom filter at coinbase height {}: removed {} txs", + target_coinbase_height, _num_txs ); Ok(()) @@ -889,26 +907,26 @@ impl<'a> MemPoolTx<'a> { /// Add the txid to the bloom counter in the mempool DB, optionally replacing a prior /// transaction (identified by prior_txid) if the bloom counter is full. - /// If this is the first txid at this block height, then also garbage-collect the bloom counter to remove no-longer-recent transactions. + /// If this is the first txid at this coinbase height, then also garbage-collect the bloom counter to remove no-longer-recent transactions. /// If the bloom counter is saturated -- i.e. it represents more than MAX_BLOOM_COUNTER_TXS /// transactions -- then pick another transaction to evict from the bloom filter and return its txid. /// (Note that no transactions are ever removed from the mempool; we just don't prioritize them /// in the bloom filter). fn update_bloom_counter( &mut self, - height: u64, + coinbase_height: u64, txid: &Txid, prior_txid: Option, ) -> Result, MemPoolRejection> { - // is this the first-ever txid at this height? + // is this the first-ever txid at this coinbase height? let sql = "SELECT 1 FROM mempool WHERE height = ?1"; - let args = params![u64_to_sql(height)?]; + let args = params![u64_to_sql(coinbase_height)?]; let present: Option = query_row(&self.tx, sql, args)?; - if present.is_none() && height > (BLOOM_COUNTER_DEPTH as u64) { - // this is the first-ever tx at this height. + if present.is_none() && coinbase_height > (BLOOM_COUNTER_DEPTH as u64) { + // this is the first-ever tx at this coinbase height. // which means, the bloom filter window has advanced. // which means, we need to remove all the txs that are now out of the window. - self.prune_bloom_counter(height - (BLOOM_COUNTER_DEPTH as u64))?; + self.prune_bloom_counter(coinbase_height - (BLOOM_COUNTER_DEPTH as u64))?; } MemPoolTx::with_bloom_state(self, |ref mut dbtx, ref mut bloom_counter| { @@ -926,7 +944,7 @@ impl<'a> MemPoolTx<'a> { // deprioritized) let sql = "SELECT a.txid FROM mempool AS a LEFT OUTER JOIN removed_txids AS b ON a.txid = b.txid WHERE b.txid IS NULL AND a.height > ?1 ORDER BY a.tx_fee ASC LIMIT 1"; let args = params![u64_to_sql( - height.saturating_sub(BLOOM_COUNTER_DEPTH as u64), + coinbase_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64), )?]; let evict_txid: Option = query_row(&dbtx, sql, args)?; if let Some(evict_txid) = evict_txid { @@ -971,46 +989,6 @@ impl<'a> MemPoolTx<'a> { } } -impl MemPoolTxInfo { - pub fn from_tx( - tx: StacksTransaction, - consensus_hash: ConsensusHash, - block_header_hash: BlockHeaderHash, - block_height: u64, - ) -> MemPoolTxInfo { - let txid = tx.txid(); - let mut tx_data = vec![]; - tx.consensus_serialize(&mut tx_data) - .expect("BUG: failed to serialize to vector"); - - let origin_address = tx.origin_address(); - let origin_nonce = tx.get_origin_nonce(); - let (sponsor_address, sponsor_nonce) = - if let (Some(addr), Some(nonce)) = (tx.sponsor_address(), tx.get_sponsor_nonce()) { - (addr, nonce) - } else { - (origin_address.clone(), origin_nonce) - }; - - let metadata = MemPoolTxMetadata { - txid, - len: tx_data.len() as u64, - tx_fee: tx.get_tx_fee(), - consensus_hash, - block_header_hash, - block_height, - origin_address, - origin_nonce, - sponsor_address, - sponsor_nonce, - accept_time: get_epoch_time_secs(), - last_known_origin_nonce: None, - last_known_sponsor_nonce: None, - }; - MemPoolTxInfo { tx, metadata } - } -} - /// Used to locally cache nonces to avoid repeatedly looking them up in the nonce. struct NonceCache { cache: HashMap, @@ -1496,8 +1474,8 @@ impl MemPoolDB { /// Find the origin addresses who have sent the highest-fee transactions fn find_origin_addresses_by_descending_fees( &self, - start_height: i64, - end_height: i64, + start_coinbase_height: i64, + end_coinbase_height: i64, min_fees: u64, offset: u32, count: u32, @@ -1505,8 +1483,8 @@ impl MemPoolDB { let sql = "SELECT DISTINCT origin_address FROM mempool WHERE height > ?1 AND height <= ?2 AND tx_fee >= ?3 ORDER BY tx_fee DESC LIMIT ?4 OFFSET ?5"; let args = params![ - start_height, - end_height, + start_coinbase_height, + end_coinbase_height, u64_to_sql(min_fees)?, count, offset, @@ -1612,7 +1590,6 @@ impl MemPoolDB { &mut self, clarity_tx: &mut C, output_events: &mut Vec, - _tip_height: u64, settings: MemPoolWalkSettings, mut todo: F, ) -> Result @@ -1977,28 +1954,8 @@ impl MemPoolDB { Ok(rows.len()) } - /// Get all transactions at a particular timestamp on a given chain tip. - /// Order them by origin nonce. - pub fn get_txs_at( - conn: &DBConn, - consensus_hash: &ConsensusHash, - block_header_hash: &BlockHeaderHash, - timestamp: u64, - ) -> Result, db_error> { - let sql = "SELECT * FROM mempool WHERE accept_time = ?1 AND consensus_hash = ?2 AND block_header_hash = ?3 ORDER BY origin_nonce ASC"; - let args = params![u64_to_sql(timestamp)?, consensus_hash, block_header_hash]; - let rows = query_rows::(conn, &sql, args)?; - Ok(rows) - } - - /// Given a chain tip, find the highest block-height from _before_ this tip - pub fn get_previous_block_height(conn: &DBConn, height: u64) -> Result, db_error> { - let sql = "SELECT height FROM mempool WHERE height < ?1 ORDER BY height DESC LIMIT 1"; - let args = params![u64_to_sql(height)?]; - query_row(conn, sql, args) - } - /// Get a number of transactions after a given timestamp on a given chain tip. + #[cfg(test)] pub fn get_txs_after( conn: &DBConn, consensus_hash: &ConsensusHash, @@ -2048,6 +2005,9 @@ impl MemPoolDB { query_row(conn, &sql, args) } + /// Are the given fully-qualified blocks, identified by their (consensus-hash, block-header-hash) pairs, in the same fork? + /// That is, is one block an ancestor of another? + /// TODO: Nakamoto-ize fn are_blocks_in_same_fork( chainstate: &mut StacksChainState, first_consensus_hash: &ConsensusHash, @@ -2080,17 +2040,33 @@ impl MemPoolDB { /// Add a transaction to the mempool. If it already exists, then replace it if the given fee /// is higher than the one that's already there. /// Carry out the mempool admission test before adding. + /// + /// `tip_consensus_hash`, `tip_block_header_hash`, and `coinbase_height` describe the fork that + /// was canonical when this transaction is added. While `coinbase_height` would be derived + /// from these first two fields, it is supplied independently to facilitate testing. + /// + /// If this is called in the Nakamoto epoch -- i.e. if `tip_consensus_hash` is in the Nakamoto + /// epoch -- then these tip hashes will be resolved to the tenure-start hashes first. This is + /// because in Nakamoto, we index transactions by tenure-start blocks since they directly + /// correspond to epoch 2.x Stacks blocks (meaning, the semantics of mempool sync are preserved + /// across epoch 2.x and Nakamoto as long as we treat transactions this way). In both epochs, + /// transactions arrive during a miner's tenure, not during a particular block's status as + /// the canonical chain tip. + /// + /// The tenure resolution behavior can be short-circuited with `resolve_tenure = false`. + /// However, this is only used in testing. + /// /// Don't call directly; use submit(). - /// This is `pub` only for testing. - pub fn try_add_tx( + pub(crate) fn try_add_tx( tx: &mut MemPoolTx, chainstate: &mut StacksChainState, - consensus_hash: &ConsensusHash, - block_header_hash: &BlockHeaderHash, + tip_consensus_hash: &ConsensusHash, + tip_block_header_hash: &BlockHeaderHash, + resolve_tenure: bool, txid: Txid, tx_bytes: Vec, tx_fee: u64, - height: u64, + coinbase_height: u64, origin_address: &StacksAddress, origin_nonce: u64, sponsor_address: &StacksAddress, @@ -2099,6 +2075,32 @@ impl MemPoolDB { ) -> Result<(), MemPoolRejection> { let length = tx_bytes.len() as u64; + // this transaction is said to arrive during this _tenure_, not during this _block_. + // In epoch 2.x, these are the same as `tip_consensus_hash` and `tip_block_header_hash`. + // In Nakamoto, they may be different. + // + // The only exception to this rule is if `tip_consensus_hash` and `tip_block_header_hash` + // are `FIRST_BURNCHAIN_CONSENSUS_HASH` and `FIRST_STACKS_BLOCK_HASH` -- in this case, + // there's no need to find the tenure-start header + let (consensus_hash, block_header_hash) = if resolve_tenure { + let tenure_start_header = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &StacksBlockId::new(tip_consensus_hash, tip_block_header_hash), + tip_consensus_hash, + ) + .map_err(|e| MemPoolRejection::FailedToValidate(e))? + .ok_or(MemPoolRejection::NoSuchChainTip( + tip_consensus_hash.clone(), + tip_block_header_hash.clone(), + ))?; + + let consensus_hash = tenure_start_header.consensus_hash; + let block_header_hash = tenure_start_header.anchored_header.block_hash(); + (consensus_hash, block_header_hash) + } else { + (tip_consensus_hash.clone(), tip_block_header_hash.clone()) + }; + // do we already have txs with either the same origin nonce or sponsor nonce ? let prior_tx = { match MemPoolDB::get_tx_metadata_by_address(tx, true, origin_address, origin_nonce)? { @@ -2126,10 +2128,10 @@ impl MemPoolDB { true } else if !MemPoolDB::are_blocks_in_same_fork( chainstate, - &prior_tx.consensus_hash, - &prior_tx.block_header_hash, - consensus_hash, - block_header_hash, + &prior_tx.tenure_consensus_hash, + &prior_tx.tenure_block_header_hash, + &consensus_hash, + &block_header_hash, )? { // is this a replace-across-fork ? debug!( @@ -2160,7 +2162,11 @@ impl MemPoolDB { return Err(MemPoolRejection::ConflictingNonceInMempool); } - tx.update_bloom_counter(height, &txid, prior_tx.as_ref().map(|tx| tx.txid.clone()))?; + tx.update_bloom_counter( + coinbase_height, + &txid, + prior_tx.as_ref().map(|tx| tx.txid.clone()), + )?; let sql = "INSERT OR REPLACE INTO mempool ( txid, @@ -2187,7 +2193,7 @@ impl MemPoolDB { u64_to_sql(length)?, consensus_hash, block_header_hash, - u64_to_sql(height)?, + u64_to_sql(coinbase_height)?, u64_to_sql(get_epoch_time_secs())?, tx_bytes, ]; @@ -2215,10 +2221,12 @@ impl MemPoolDB { let tx = self.tx_begin()?; match behavior { MempoolCollectionBehavior::ByStacksHeight => { + // NOTE: this is the epoch2x behavior, so `chain_height` is 1-to-1 with coinbase + // height. This will not be true in Nakamoto! let Some(min_height) = chain_height.checked_sub(MEMPOOL_MAX_TRANSACTION_AGE) else { return Ok(()); }; - Self::garbage_collect_by_height(&tx, min_height, event_observer)?; + Self::garbage_collect_by_coinbase_height(&tx, min_height, event_observer)?; } MempoolCollectionBehavior::ByReceiveTime => { Self::garbage_collect_by_time( @@ -2253,14 +2261,14 @@ impl MemPoolDB { Ok(()) } - /// Garbage-collect the mempool. Remove transactions that were received `min_height` + /// Garbage-collect the mempool. Remove transactions that were received `min_coinbase_height` /// blocks ago. - pub fn garbage_collect_by_height( + pub fn garbage_collect_by_coinbase_height( tx: &MemPoolTx, - min_height: u64, + min_coinbase_height: u64, event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result<(), db_error> { - let args = params![u64_to_sql(min_height)?]; + let args = params![u64_to_sql(min_coinbase_height)?]; if let Some(event_observer) = event_observer { let sql = "SELECT txid FROM mempool WHERE height < ?1"; @@ -2276,41 +2284,17 @@ impl MemPoolDB { } #[cfg(test)] - pub fn clear_before_height(&mut self, min_height: u64) -> Result<(), db_error> { + pub fn clear_before_coinbase_height( + &mut self, + min_coinbase_height: u64, + ) -> Result<(), db_error> { let tx = self.tx_begin()?; - MemPoolDB::garbage_collect_by_height(&tx, min_height, None)?; + MemPoolDB::garbage_collect_by_coinbase_height(&tx, min_coinbase_height, None)?; tx.commit() } - /// Scan the chain tip for all available transactions (but do not remove them!) - pub fn poll( - &mut self, - consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - ) -> Vec { - test_debug!("Mempool poll at {}/{}", consensus_hash, block_hash); - MemPoolDB::get_txs_after( - &self.db, - consensus_hash, - block_hash, - 0, - (i64::MAX - 1) as u64, - ) - .unwrap_or(vec![]) - .into_iter() - .map(|tx_info| { - test_debug!( - "Mempool poll {} at {}/{}", - &tx_info.tx.txid(), - consensus_hash, - block_hash - ); - tx_info.tx - }) - .collect() - } - /// Submit a transaction to the mempool at a particular chain tip. + /// TODO: Nakamoto-ize fn tx_submit( mempool_tx: &mut MemPoolTx, chainstate: &mut StacksChainState, @@ -2330,7 +2314,8 @@ impl MemPoolDB { ); let block_id = StacksBlockId::new(consensus_hash, block_hash); - let height = match NakamotoChainState::get_block_header(chainstate.db(), &block_id) { + let coinbase_height = match NakamotoChainState::get_block_header(chainstate.db(), &block_id) + { Ok(Some(header)) => header.stacks_block_height, Ok(None) => { if *consensus_hash == FIRST_BURNCHAIN_CONSENSUS_HASH { @@ -2380,10 +2365,11 @@ impl MemPoolDB { chainstate, &consensus_hash, &block_hash, + true, txid.clone(), tx_data, tx_fee, - height, + coinbase_height, &origin_address, origin_nonce, &sponsor_address, @@ -2405,7 +2391,20 @@ impl MemPoolDB { Ok(()) } - /// One-shot submit + /// One-shot transaction submit. + /// + /// Transactions are indexed relative to a chain tip, identified by `consensus_hash` and + /// `block_hash`. These fields have slightly different interpretations depending on what epoch + /// we're in: + /// * In epoch 2.x, these are the Stacks chain tip. + /// * In Nakamoto, these will be resolved to the tenure-start block of the tenure in which this + /// Stacks block lies. The reason for this is because of how the mempool performs + /// garbage collection in its DB and bloom filter -- the latter of which is used for mempool + /// sync. + /// + /// No action is required by te caller to handle this discrepancy; the caller should just submit + /// the canonical Stacks tip. If the current epoch is a Nakamoto epoch, it will be resolved to + /// the tenure-start block internally. pub fn submit( &mut self, chainstate: &mut StacksChainState, @@ -2490,8 +2489,7 @@ impl MemPoolDB { } /// Directly submit to the mempool, and don't do any admissions checks. - /// This method is only used during testing, but because it is used by the - /// integration tests, it cannot be marked #[cfg(test)]. + #[cfg(any(test, feature = "testing"))] pub fn submit_raw( &mut self, chainstate: &mut StacksChainState, @@ -2698,8 +2696,8 @@ impl MemPoolDB { self.bloom_counter.to_bloom_filter(&self.conn()) } - /// Find maximum height represented in the mempool - pub fn get_max_height(conn: &DBConn) -> Result, db_error> { + /// Find maximum Stacks coinbase height represented in the mempool. + pub fn get_max_coinbase_height(conn: &DBConn) -> Result, db_error> { let sql = "SELECT 1 FROM mempool WHERE height >= 0"; let count = query_rows::(conn, sql, NO_PARAMS)?.len(); if count == 0 { @@ -2713,7 +2711,7 @@ impl MemPoolDB { /// Get the transaction ID list that represents the set of transactions that are represented in /// the bloom counter. pub fn get_bloom_txids(&self) -> Result, db_error> { - let max_height = match MemPoolDB::get_max_height(&self.conn())? { + let max_height = match MemPoolDB::get_max_coinbase_height(&self.conn())? { Some(h) => h, None => { // mempool is empty @@ -2738,10 +2736,10 @@ impl MemPoolDB { }) } - /// How many recent transactions are there -- i.e. within BLOOM_COUNTER_DEPTH block heights of + /// How many recent transactions are there -- i.e. within BLOOM_COUNTER_DEPTH coinbase heights of /// the chain tip? pub fn get_num_recent_txs(conn: &DBConn) -> Result { - let max_height = match MemPoolDB::get_max_height(conn)? { + let max_height = match MemPoolDB::get_max_coinbase_height(conn)? { Some(h) => h, None => { // mempool is empty @@ -2778,7 +2776,7 @@ impl MemPoolDB { pub fn find_next_missing_transactions( &self, data: &MemPoolSyncData, - height: u64, + coinbase_height: u64, last_randomized_txid: &Txid, max_txs: u64, max_run: u64, @@ -2786,7 +2784,7 @@ impl MemPoolDB { Self::static_find_next_missing_transactions( self.conn(), data, - height, + coinbase_height, last_randomized_txid, max_txs, max_run, @@ -2803,7 +2801,7 @@ impl MemPoolDB { pub fn static_find_next_missing_transactions( conn: &DBConn, data: &MemPoolSyncData, - height: u64, + coinbase_height: u64, last_randomized_txid: &Txid, max_txs: u64, max_run: u64, @@ -2820,7 +2818,7 @@ impl MemPoolDB { let args = params![ last_randomized_txid, - u64_to_sql(height.saturating_sub(BLOOM_COUNTER_DEPTH as u64))?, + u64_to_sql(coinbase_height.saturating_sub(BLOOM_COUNTER_DEPTH as u64))?, u64_to_sql(max_run)?, ]; From c1993125baa52212146a04f05d9a67173069ef0b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 12 Jul 2024 17:31:30 -0400 Subject: [PATCH 0513/1400] chore: API sync --- stackslib/src/core/tests/mod.rs | 58 ++++++++++++++++++++------------- 1 file changed, 36 insertions(+), 22 deletions(-) diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 8e70f4dcbd..3b74325f3b 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -240,6 +240,7 @@ fn mempool_walk_over_fork() { &mut chainstate, &block.0, &block.1, + true, txid, tx_bytes, tx_fee, @@ -275,7 +276,6 @@ fn mempool_walk_over_fork() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -314,7 +314,6 @@ fn mempool_walk_over_fork() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -352,7 +351,6 @@ fn mempool_walk_over_fork() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 3, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -395,7 +393,6 @@ fn mempool_walk_over_fork() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -436,7 +433,6 @@ fn mempool_walk_over_fork() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 3, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -498,6 +494,7 @@ fn mempool_walk_over_fork() { &mut chainstate, &block.0, &block.1, + true, txid, tx_bytes, tx_fee, @@ -551,6 +548,7 @@ fn mempool_walk_over_fork() { &mut chainstate, &block.0, &block.1, + true, txid, tx_bytes, tx_fee, @@ -630,6 +628,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -672,7 +671,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -710,7 +708,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -748,7 +745,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -826,6 +822,7 @@ fn test_iterate_candidates_skipped_transaction() { &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -850,7 +847,6 @@ fn test_iterate_candidates_skipped_transaction() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -939,6 +935,7 @@ fn test_iterate_candidates_processing_error_transaction() { &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -963,7 +960,6 @@ fn test_iterate_candidates_processing_error_transaction() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -1054,6 +1050,7 @@ fn test_iterate_candidates_problematic_transaction() { &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -1078,7 +1075,6 @@ fn test_iterate_candidates_problematic_transaction() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -1183,6 +1179,7 @@ fn test_iterate_candidates_concurrent_write_lock() { &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -1239,7 +1236,6 @@ fn test_iterate_candidates_concurrent_write_lock() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -1342,6 +1338,7 @@ fn mempool_do_not_replace_tx() { &mut chainstate, &b_1.0, &b_1.1, + true, txid, tx_bytes, tx_fee, @@ -1370,6 +1367,7 @@ fn mempool_do_not_replace_tx() { &mut chainstate, &b_2.0, &b_2.1, + true, txid, tx_bytes, tx_fee, @@ -1446,6 +1444,7 @@ fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), + false, txid, tx_bytes, tx_fee, @@ -1471,12 +1470,15 @@ fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) assert_eq!(tx_info.metadata.origin_nonce, origin_nonce); assert_eq!(tx_info.metadata.sponsor_address, sponsor_address); assert_eq!(tx_info.metadata.sponsor_nonce, sponsor_nonce); - assert_eq!(tx_info.metadata.consensus_hash, ConsensusHash([0x1; 20])); assert_eq!( - tx_info.metadata.block_header_hash, + tx_info.metadata.tenure_consensus_hash, + ConsensusHash([0x1; 20]) + ); + assert_eq!( + tx_info.metadata.tenure_block_header_hash, BlockHeaderHash([0x2; 32]) ); - assert_eq!(tx_info.metadata.block_height, height); + assert_eq!(tx_info.metadata.coinbase_height, height); // test replace-by-fee with a higher fee let old_txid = txid; @@ -1503,6 +1505,7 @@ fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), + false, txid, tx_bytes, tx_fee, @@ -1539,12 +1542,15 @@ fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) assert_eq!(tx_info.metadata.origin_nonce, origin_nonce); assert_eq!(tx_info.metadata.sponsor_address, sponsor_address); assert_eq!(tx_info.metadata.sponsor_nonce, sponsor_nonce); - assert_eq!(tx_info.metadata.consensus_hash, ConsensusHash([0x1; 20])); assert_eq!( - tx_info.metadata.block_header_hash, + tx_info.metadata.tenure_consensus_hash, + ConsensusHash([0x1; 20]) + ); + assert_eq!( + tx_info.metadata.tenure_block_header_hash, BlockHeaderHash([0x2; 32]) ); - assert_eq!(tx_info.metadata.block_height, height); + assert_eq!(tx_info.metadata.coinbase_height, height); // test replace-by-fee with a lower fee let old_txid = txid; @@ -1563,6 +1569,7 @@ fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), + false, txid, tx_bytes, tx_fee, @@ -1622,7 +1629,7 @@ fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) let mut mempool_tx = mempool.tx_begin().unwrap(); match behavior { MempoolCollectionBehavior::ByStacksHeight => { - MemPoolDB::garbage_collect_by_height(&mut mempool_tx, 101, None) + MemPoolDB::garbage_collect_by_coinbase_height(&mut mempool_tx, 101, None) } MempoolCollectionBehavior::ByReceiveTime => { let test_max_age = Duration::from_secs(1); @@ -1712,6 +1719,7 @@ fn mempool_db_test_rbf() { &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), + false, txid, tx_bytes, tx_fee, @@ -1761,6 +1769,7 @@ fn mempool_db_test_rbf() { &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), + false, txid, tx_bytes, tx_fee, @@ -1843,6 +1852,7 @@ fn test_add_txs_bloom_filter() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, txid, tx_bytes, tx_fee, @@ -1953,6 +1963,7 @@ fn test_txtags() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, txid, tx_bytes, tx_fee, @@ -2046,6 +2057,7 @@ fn test_make_mempool_sync_data() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, txid.clone(), tx_bytes, tx_fee, @@ -2084,7 +2096,7 @@ fn test_make_mempool_sync_data() { let recent_txids = mempool.get_bloom_txids().unwrap(); assert!(recent_txids.len() <= MAX_BLOOM_COUNTER_TXS as usize); - let max_height = MemPoolDB::get_max_height(mempool.conn()) + let max_height = MemPoolDB::get_max_coinbase_height(mempool.conn()) .unwrap() .unwrap_or(0); eprintln!( @@ -2223,6 +2235,7 @@ fn test_find_next_missing_transactions() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, txid.clone(), tx_bytes, tx_fee, @@ -2492,6 +2505,7 @@ fn test_drop_and_blacklist_txs_by_time() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, txid.clone(), tx_bytes, tx_fee, @@ -2611,6 +2625,7 @@ fn test_drop_and_blacklist_txs_by_size() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, txid.clone(), tx_bytes, tx_fee, @@ -2728,6 +2743,7 @@ fn test_filter_txs_by_type() { &mut chainstate, &b_2.0, &b_2.1, + true, txid.clone(), tx_bytes, tx_fee, @@ -2763,7 +2779,6 @@ fn test_filter_txs_by_type() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; @@ -2799,7 +2814,6 @@ fn test_filter_txs_by_type() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - 2, mempool_settings.clone(), |_, available_tx, _| { count_txs += 1; From 22ecca98f103774df60ff58babd8a85a2aba7d45 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 12 Jul 2024 17:31:42 -0400 Subject: [PATCH 0514/1400] feat: make /v2/mempool/query compatible with Nakamoto by querying Nakamoto headers for the tip --- stackslib/src/net/api/postmempoolquery.rs | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index 1e8caa1843..2155863220 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -29,6 +29,7 @@ use url::form_urlencoded; use {serde, serde_json}; use crate::burnchains::Txid; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{Error as ChainError, StacksTransaction}; use crate::core::mempool::{decode_tx_stream, MemPoolDB, MemPoolSyncData}; @@ -89,8 +90,8 @@ pub struct StacksMemPoolStream { pub num_txs: u64, /// maximum we can visit in the query pub max_txs: u64, - /// height of the chain at time of query - pub height: u64, + /// coinbase height of the chain at time of query + pub coinbase_height: u64, /// Are we done sending transactions, and are now in the process of sending the trailing page /// ID? pub corked: bool, @@ -105,7 +106,7 @@ impl StacksMemPoolStream { mempool_db: DBConn, tx_query: MemPoolSyncData, max_txs: u64, - height: u64, + coinbase_height: u64, page_id_opt: Option, ) -> Self { let last_randomized_txid = page_id_opt.unwrap_or_else(|| { @@ -118,7 +119,7 @@ impl StacksMemPoolStream { last_randomized_txid: last_randomized_txid, num_txs: 0, max_txs: max_txs, - height: height, + coinbase_height, corked: false, finished: false, mempool_db, @@ -159,7 +160,7 @@ impl HttpChunkGenerator for StacksMemPoolStream { MemPoolDB::static_find_next_missing_transactions( &self.mempool_db, &self.tx_query, - self.height, + self.coinbase_height, &self.last_randomized_txid, 1, remaining, @@ -275,12 +276,18 @@ impl RPCRequestHandler for RPCMempoolQueryRequestHandler { let page_id = self.page_id.take(); let stream_res = node.with_node_state(|network, sortdb, chainstate, mempool, _rpc_args| { - let height = self.get_stacks_chain_tip(&preamble, sortdb, chainstate).map(|hdr| hdr.anchored_header.height()).unwrap_or(0); + let header = self.get_stacks_chain_tip(&preamble, sortdb, chainstate) + .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load chain tip: {:?}", &e))))?; + + let coinbase_height = NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &header.index_block_hash()) + .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load coinbase height: {:?}", &e))))? + .unwrap_or(0); + let max_txs = network.connection_opts.mempool_max_tx_query; debug!( "Begin mempool query"; "page_id" => %page_id.map(|txid| format!("{}", &txid)).unwrap_or("(none".to_string()), - "block_height" => height, + "coinbase_height" => coinbase_height, "max_txs" => max_txs ); @@ -291,7 +298,7 @@ impl RPCRequestHandler for RPCMempoolQueryRequestHandler { } }; - Ok(StacksMemPoolStream::new(mempool_db, mempool_query, max_txs, height, page_id)) + Ok(StacksMemPoolStream::new(mempool_db, mempool_query, max_txs, coinbase_height, page_id)) }); let stream = match stream_res { From a999518b6e56c606e884e5c66ae2d8e7b4c7c8bf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 12 Jul 2024 17:32:14 -0400 Subject: [PATCH 0515/1400] chore: move mempool tests into their own file and API sync --- stackslib/src/net/api/tests/mod.rs | 1 + .../src/net/api/tests/postmempoolquery.rs | 1 + stackslib/src/net/mod.rs | 1 + stackslib/src/net/tests/mempool/mod.rs | 906 ++++++++++++++++++ stackslib/src/net/tests/mod.rs | 1 + 5 files changed, 910 insertions(+) create mode 100644 stackslib/src/net/tests/mempool/mod.rs diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index fffce02e1a..f0a537d045 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -600,6 +600,7 @@ impl<'a> TestRPC<'a> { peer_1.chainstate(), &consensus_hash, &stacks_block.block_hash(), + true, txid.clone(), tx_bytes, tx_fee, diff --git a/stackslib/src/net/api/tests/postmempoolquery.rs b/stackslib/src/net/api/tests/postmempoolquery.rs index b669beb2e4..ba122be2e8 100644 --- a/stackslib/src/net/api/tests/postmempoolquery.rs +++ b/stackslib/src/net/api/tests/postmempoolquery.rs @@ -174,6 +174,7 @@ fn test_stream_mempool_txs() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), + false, txid.clone(), tx_bytes, tx_fee, diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index b865476fd2..e836bdfec2 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -124,6 +124,7 @@ pub mod http; /// Links http crate to Stacks pub mod httpcore; pub mod inv; +pub mod mempool; pub mod neighbors; pub mod p2p; /// Implements wrapper around `mio` crate, which itself is a wrapper around Linux's `epoll(2)` syscall. diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs new file mode 100644 index 0000000000..d44f21e8ff --- /dev/null +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -0,0 +1,906 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::{thread, time}; + +use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; +use clarity::vm::types::StacksAddressExtensions; +use clarity::vm::MAX_CALL_STACK_DEPTH; +use rand; +use rand::RngCore; +use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::util::secp256k1::Secp256k1PrivateKey; +use stacks_common::util::{log, sleep_ms}; + +use super::*; +use crate::burnchains::burnchain::*; +use crate::burnchains::*; +use crate::chainstate::stacks::test::*; +use crate::chainstate::stacks::*; +use crate::core::StacksEpochExtension; +use crate::net::atlas::*; +use crate::net::codec::*; +use crate::net::db::*; +use crate::net::test::*; +use crate::net::tests::relay::epoch2x::make_contract_tx; +use crate::net::*; +use crate::util_lib::test::*; + +#[test] +fn test_mempool_sync_2_peers() { + // peer 1 gets some transactions; verify peer 2 gets the recent ones and not the old + // ones + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); + + peer_1_config.connection_opts.mempool_sync_interval = 1; + peer_2_config.connection_opts.mempool_sync_interval = 1; + + let num_txs = 10; + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let initial_balances: Vec<_> = addrs + .iter() + .map(|a| (a.to_account_principal(), 1000000000)) + .collect(); + + peer_1_config.initial_balances = initial_balances.clone(); + peer_2_config.initial_balances = initial_balances.clone(); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + + let num_blocks = 10; + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + for i in 0..(num_blocks / 2) { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let addr = StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + bytes: Hash160([0xff; 20]), + }; + + let stacks_tip_ch = peer_1.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); + + // old transactions + let num_txs = 10; + let mut old_txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + old_txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &stacks_tip_ch, + &stacks_tip_bhh, + true, + txid.clone(), + tx_bytes, + tx_fee, + (num_blocks / 2) as u64, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + // keep mining to make these txs old + for i in (num_blocks / 2)..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + let stacks_tip_ch = peer_1.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); + + let mut txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(1); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &stacks_tip_ch, + &stacks_tip_bhh, + true, + txid.clone(), + tx_bytes, + tx_fee, + num_blocks as u64, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + let mut round = 0; + let mut peer_1_mempool_txs = 0; + let mut peer_2_mempool_txs = 0; + + while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { + if let Ok(mut result) = peer_1.step_with_ibd(false) { + let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); + peer_1 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + if let Ok(mut result) = peer_2.step_with_ibd(false) { + let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); + peer_2 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + round += 1; + + let mp = peer_1.mempool.take().unwrap(); + peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_1.mempool.replace(mp); + + let mp = peer_2.mempool.take().unwrap(); + peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_2.mempool.replace(mp); + + info!( + "Peer 1: {}, Peer 2: {}", + peer_1_mempool_txs, peer_2_mempool_txs + ); + } + + info!("Completed mempool sync in {} step(s)", round); + + let mp = peer_2.mempool.take().unwrap(); + let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); + peer_2.mempool.replace(mp); + + // peer 2 has all the recent txs + // peer 2 has none of the old ones + for tx in peer_2_mempool_txs { + assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); + assert!(old_txs.get(&tx.tx.txid()).is_none()); + } +} + +#[test] +fn test_mempool_sync_2_peers_paginated() { + // peer 1 gets some transactions; verify peer 2 gets them all + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); + + peer_1_config.connection_opts.mempool_sync_interval = 1; + peer_2_config.connection_opts.mempool_sync_interval = 1; + + let num_txs = 1024; + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let initial_balances: Vec<_> = addrs + .iter() + .map(|a| (a.to_account_principal(), 1000000000)) + .collect(); + + peer_1_config.initial_balances = initial_balances.clone(); + peer_2_config.initial_balances = initial_balances.clone(); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + + let num_blocks = 10; + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + for i in 0..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let addr = StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + bytes: Hash160([0xff; 20]), + }; + + let stacks_tip_ch = peer_1.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); + + // fill peer 1 with lots of transactions + let mut txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &stacks_tip_ch, + &stacks_tip_bhh, + true, + txid.clone(), + tx_bytes, + tx_fee, + num_blocks, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + let mut round = 0; + let mut peer_1_mempool_txs = 0; + let mut peer_2_mempool_txs = 0; + + while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { + if let Ok(mut result) = peer_1.step_with_ibd(false) { + let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); + peer_1 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + if let Ok(mut result) = peer_2.step_with_ibd(false) { + let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); + peer_2 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + round += 1; + + let mp = peer_1.mempool.take().unwrap(); + peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_1.mempool.replace(mp); + + let mp = peer_2.mempool.take().unwrap(); + peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_2.mempool.replace(mp); + + info!( + "Peer 1: {}, Peer 2: {}", + peer_1_mempool_txs, peer_2_mempool_txs + ); + } + + info!("Completed mempool sync in {} step(s)", round); + + let mp = peer_2.mempool.take().unwrap(); + let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); + peer_2.mempool.replace(mp); + + for tx in peer_2_mempool_txs { + assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); + } +} + +#[test] +fn test_mempool_sync_2_peers_blacklisted() { + // peer 1 gets some transactions; peer 2 blacklists some of them; + // verify peer 2 gets only the non-blacklisted ones. + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); + + peer_1_config.connection_opts.mempool_sync_interval = 1; + peer_2_config.connection_opts.mempool_sync_interval = 1; + + let num_txs = 1024; + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let initial_balances: Vec<_> = addrs + .iter() + .map(|a| (a.to_account_principal(), 1000000000)) + .collect(); + + peer_1_config.initial_balances = initial_balances.clone(); + peer_2_config.initial_balances = initial_balances.clone(); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + + let num_blocks = 10; + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + for i in 0..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let addr = StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + bytes: Hash160([0xff; 20]), + }; + + let stacks_tip_ch = peer_1.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); + + // fill peer 1 with lots of transactions + let mut txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + let mut peer_2_blacklist = vec![]; + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &stacks_tip_ch, + &stacks_tip_bhh, + true, + txid.clone(), + tx_bytes, + tx_fee, + num_blocks, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + + if i % 2 == 0 { + // peer 2 blacklists even-numbered txs + peer_2_blacklist.push(txid); + } + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + // peer 2 blacklists them all + let mut peer_2_mempool = peer_2.mempool.take().unwrap(); + + // blacklisted txs never time out + peer_2_mempool.blacklist_timeout = u64::MAX / 2; + + let mempool_tx = peer_2_mempool.tx_begin().unwrap(); + MemPoolDB::inner_blacklist_txs(&mempool_tx, &peer_2_blacklist, get_epoch_time_secs()).unwrap(); + mempool_tx.commit().unwrap(); + + peer_2.mempool = Some(peer_2_mempool); + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + let mut round = 0; + let mut peer_1_mempool_txs = 0; + let mut peer_2_mempool_txs = 0; + + while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs / 2 { + if let Ok(mut result) = peer_1.step_with_ibd(false) { + let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); + peer_1 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + if let Ok(mut result) = peer_2.step_with_ibd(false) { + let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); + peer_2 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + round += 1; + + let mp = peer_1.mempool.take().unwrap(); + peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_1.mempool.replace(mp); + + let mp = peer_2.mempool.take().unwrap(); + peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_2.mempool.replace(mp); + + info!( + "Peer 1: {}, Peer 2: {}", + peer_1_mempool_txs, peer_2_mempool_txs + ); + } + + info!("Completed mempool sync in {} step(s)", round); + + let mp = peer_2.mempool.take().unwrap(); + let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); + peer_2.mempool.replace(mp); + + for tx in peer_2_mempool_txs { + assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); + assert!(!peer_2_blacklist.contains(&tx.tx.txid())); + } +} + +/// Make sure mempool sync never stores problematic transactions +#[test] +fn test_mempool_sync_2_peers_problematic() { + // peer 1 gets some transactions; peer 2 blacklists them all due to being invalid. + // verify peer 2 stores nothing. + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); + + peer_1_config.connection_opts.mempool_sync_interval = 1; + peer_2_config.connection_opts.mempool_sync_interval = 1; + + let num_txs = 128; + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let initial_balances: Vec<_> = addrs + .iter() + .map(|a| (a.to_account_principal(), 1000000000)) + .collect(); + + peer_1_config.initial_balances = initial_balances.clone(); + peer_2_config.initial_balances = initial_balances.clone(); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + + let num_blocks = 10; + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + for i in 0..num_blocks { + let (burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + peer_1.next_burnchain_block(burn_ops.clone()); + peer_2.next_burnchain_block(burn_ops.clone()); + + peer_1.process_stacks_epoch_at_tip(&stacks_block, µblocks); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + } + + let addr = StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + bytes: Hash160([0xff; 20]), + }; + + let stacks_tip_ch = peer_1.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); + + // fill peer 1 with lots of transactions + let mut txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + + let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); + let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + + let tx = make_contract_tx( + &pk, + 0, + (tx_exceeds_body.len() * 100) as u64, + "test-exceeds", + &tx_exceeds_body, + ); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &stacks_tip_ch, + &stacks_tip_bhh, + true, + txid.clone(), + tx_bytes, + tx_fee, + num_blocks, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + // blacklisted txs never time out + let mut peer_2_mempool = peer_2.mempool.take().unwrap(); + peer_2_mempool.blacklist_timeout = u64::MAX / 2; + peer_2.mempool = Some(peer_2_mempool); + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + let mut round = 0; + let mut peer_1_mempool_txs = 0; + + while peer_1_mempool_txs < num_txs + || peer_2 + .network + .mempool_sync + .as_ref() + .unwrap() + .mempool_sync_txs + < (num_txs as u64) + { + if let Ok(mut result) = peer_1.step_with_ibd(false) { + let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); + peer_1 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + if let Ok(mut result) = peer_2.step_with_ibd(false) { + let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); + peer_2 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + round += 1; + + let mp = peer_1.mempool.take().unwrap(); + peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_1.mempool.replace(mp); + + info!( + "Peer 1: {}, Peer 2: {}", + peer_1_mempool_txs, + peer_2 + .network + .mempool_sync + .as_ref() + .unwrap() + .mempool_sync_txs + ); + } + + info!("Completed mempool sync in {} step(s)", round); + + let mp = peer_2.mempool.take().unwrap(); + let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); + peer_2.mempool.replace(mp); + + assert_eq!(peer_2_mempool_txs.len(), 128); +} diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 63e19a50c7..05477bb08c 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -17,6 +17,7 @@ pub mod download; pub mod httpcore; pub mod inv; +pub mod mempool; pub mod neighbors; pub mod relay; From 0a73ad090c01ece6d12d4eb042b0ef664cdaaa9f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 12 Jul 2024 14:53:21 -0500 Subject: [PATCH 0516/1400] chore: add migration logic for signerdb --- stacks-signer/src/signerdb.rs | 95 ++++++++++++++++++++++++++++------- 1 file changed, 76 insertions(+), 19 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 0a0eb6c7e1..5ecef398d4 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -19,16 +19,17 @@ use std::time::SystemTime; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote}; use blockstack_lib::util_lib::db::{ - query_row, sqlite_open, table_exists, u64_to_sql, Error as DBError, + query_row, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql, Error as DBError, }; use clarity::types::chainstate::BurnchainHeaderHash; use clarity::util::get_epoch_time_secs; use libsigner::BlockProposal; -use rusqlite::{params, Connection, Error as SqliteError, OpenFlags}; +use rusqlite::{ + params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Transaction, +}; use serde::{Deserialize, Serialize}; use slog::{slog_debug, slog_error}; use stacks_common::types::chainstate::ConsensusHash; -use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::{debug, error}; use wsts::net::NonceRequest; @@ -107,7 +108,7 @@ pub struct SignerDb { db: Connection, } -const CREATE_BLOCKS_TABLE: &str = " +static CREATE_BLOCKS_TABLE: &str = " CREATE TABLE IF NOT EXISTS blocks ( reward_cycle INTEGER NOT NULL, signer_signature_hash TEXT NOT NULL, @@ -119,55 +120,111 @@ CREATE TABLE IF NOT EXISTS blocks ( PRIMARY KEY (reward_cycle, signer_signature_hash) ) STRICT"; -const CREATE_INDEXES: &str = " +static CREATE_INDEXES: &str = " CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (signed_over); CREATE INDEX IF NOT EXISTS blocks_consensus_hash ON blocks (consensus_hash); CREATE INDEX IF NOT EXISTS blocks_valid ON blocks ((json_extract(block_info, '$.valid'))); CREATE INDEX IF NOT EXISTS burn_blocks_height ON burn_blocks (block_height); "; -const CREATE_SIGNER_STATE_TABLE: &str = " +static CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, encrypted_state BLOB NOT NULL ) STRICT"; -const CREATE_BURN_STATE_TABLE: &str = " +static CREATE_BURN_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS burn_blocks ( block_hash TEXT PRIMARY KEY, block_height INTEGER NOT NULL, received_time INTEGER NOT NULL ) STRICT"; +static CREATE_DB_CONFIG: &str = " + CREATE TABLE db_config( + version INTEGER NOT NULL + ) STRICT +"; + +static DROP_SCHEMA_0: &str = " + DROP TABLE IF EXISTS burn_blocks; + DROP TABLE IF EXISTS signer_states; + DROP TABLE IF EXISTS blocks; + DROP TABLE IF EXISTS db_config;"; + +static SCHEMA_1: &[&str] = &[ + DROP_SCHEMA_0, + CREATE_DB_CONFIG, + CREATE_BURN_STATE_TABLE, + CREATE_BLOCKS_TABLE, + CREATE_SIGNER_STATE_TABLE, + CREATE_INDEXES, + "INSERT INTO db_config (version) VALUES (1);", +]; + impl SignerDb { + /// The current schema version used in this build of the signer binary. + pub const SCHEMA_VERSION: u32 = 1; + /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path /// or an in-memory database if the path is ":memory:" pub fn new(db_path: impl AsRef) -> Result { let connection = Self::connect(db_path)?; - let signer_db = Self { db: connection }; - - signer_db.instantiate_db()?; + let mut signer_db = Self { db: connection }; + signer_db.create_or_migrate()?; Ok(signer_db) } - fn instantiate_db(&self) -> Result<(), DBError> { - if !table_exists(&self.db, "blocks")? { - self.db.execute(CREATE_BLOCKS_TABLE, NO_PARAMS)?; + /// Returns the schema version of the database + fn get_schema_version(conn: &Connection) -> Result { + if !table_exists(conn, "db_config")? { + return Ok(0); } + let result = conn + .query_row("SELECT version FROM db_config LIMIT 1", [], |row| { + row.get(0) + }) + .optional(); + match result { + Ok(x) => Ok(x.unwrap_or_else(|| 0)), + Err(e) => Err(DBError::from(e)), + } + } - if !table_exists(&self.db, "signer_states")? { - self.db.execute(CREATE_SIGNER_STATE_TABLE, NO_PARAMS)?; + /// Migrate from schema 0 to schema 1 + fn schema_1_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 1 { + // no migration necessary + return Ok(()); } - if !table_exists(&self.db, "burn_blocks")? { - self.db.execute(CREATE_BURN_STATE_TABLE, NO_PARAMS)?; + for statement in SCHEMA_1.iter() { + tx.execute_batch(statement)?; } - self.db.execute_batch(CREATE_INDEXES)?; + Ok(()) + } + /// Either instantiate a new database, or migrate an existing one + /// If the detected version of the existing database is 0 (i.e., a pre-migration + /// logic DB, the DB will be dropped). + fn create_or_migrate(&mut self) -> Result<(), DBError> { + let sql_tx = tx_begin_immediate(&mut self.db)?; + loop { + let version = Self::get_schema_version(&sql_tx)?; + match version { + 0 => Self::schema_1_migration(&sql_tx)?, + 1 => break, + x => return Err(DBError::Other(format!( + "Database schema is newer than supported by this binary. Expected version = {}, Database version = {x}", + Self::SCHEMA_VERSION, + ))), + } + } + sql_tx.commit()?; Ok(()) } @@ -597,7 +654,7 @@ mod tests { let db_path = tmp_db_path(); let db = SignerDb::new(db_path).expect("Failed to create signer db"); assert_eq!( - query_row(&db.db, "SELECT sqlite_version()", NO_PARAMS).unwrap(), + query_row(&db.db, "SELECT sqlite_version()", []).unwrap(), Some("3.45.0".to_string()) ); } From 7ab23506e971e47dd6a31d180a579be7daab6ec6 Mon Sep 17 00:00:00 2001 From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com> Date: Mon, 15 Jul 2024 20:56:58 +0300 Subject: [PATCH 0517/1400] feat: modify workflow to also build signer releases --- .../dockerfiles/Dockerfile.alpine-binary | 17 +++- .../dockerfiles/Dockerfile.debian-binary | 17 +++- .github/workflows/ci.yml | 81 +++++++++++++------ .github/workflows/github-release.yml | 9 ++- .github/workflows/image-build-binary.yml | 71 +++++++++++++--- 5 files changed, 153 insertions(+), 42 deletions(-) diff --git a/.github/actions/dockerfiles/Dockerfile.alpine-binary b/.github/actions/dockerfiles/Dockerfile.alpine-binary index 1915a13d11..d185121e73 100644 --- a/.github/actions/dockerfiles/Dockerfile.alpine-binary +++ b/.github/actions/dockerfiles/Dockerfile.alpine-binary @@ -23,5 +23,18 @@ RUN case ${TARGETPLATFORM} in \ && unzip ${BIN_ARCH}.zip -d /out FROM --platform=${TARGETPLATFORM} alpine -COPY --from=builder /out/stacks-node /out/stacks-signer /bin/ -CMD ["stacks-node", "mainnet"] +COPY --from=builder /out/* /bin/ +ARG TAG + +RUN case "${TAG}" in \ + signer-*) \ + echo "/bin/stacks-signer run --config /signer-config.toml" > /tmp/command.sh \ + ;; \ + *) \ + echo "/bin/stacks-node mainnet" > /tmp/command.sh && \ + rm /bin/blockstack-cli /bin/clarity-cli /bin/relay-server /bin/stacks-events /bin/stacks-inspect \ + ;; \ + esac && \ + chmod +x /tmp/command.sh + +CMD ["sh", "-c", "/tmp/command.sh"] diff --git a/.github/actions/dockerfiles/Dockerfile.debian-binary b/.github/actions/dockerfiles/Dockerfile.debian-binary index 5432e92377..757379095c 100644 --- a/.github/actions/dockerfiles/Dockerfile.debian-binary +++ b/.github/actions/dockerfiles/Dockerfile.debian-binary @@ -23,5 +23,18 @@ RUN case ${TARGETPLATFORM} in \ && unzip ${BIN_ARCH}.zip -d /out FROM --platform=${TARGETPLATFORM} debian:bookworm -COPY --from=builder /out/stacks-node /out/stacks-signer /bin/ -CMD ["stacks-node", "mainnet"] +COPY --from=builder /out/* /bin/ +ARG TAG + +RUN case "${TAG}" in \ + signer-*) \ + echo "/bin/stacks-signer run --config /signer-config.toml" > /tmp/command.sh \ + ;; \ + *) \ + echo "/bin/stacks-node mainnet" > /tmp/command.sh && \ + rm /bin/blockstack-cli /bin/clarity-cli /bin/relay-server /bin/stacks-events /bin/stacks-inspect \ + ;; \ + esac && \ + chmod +x /tmp/command.sh + +CMD ["sh", "-c", "/tmp/command.sh"] diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a566eb9e41..0b3ab46bf1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,10 +14,6 @@ on: - "**.md" - "**.yml" workflow_dispatch: - inputs: - tag: - description: "The tag to create (optional)" - required: false pull_request: types: - opened @@ -34,7 +30,7 @@ concurrency: ## Always cancel duplicate jobs cancel-in-progress: true -run-name: ${{ inputs.tag }} +run-name: ${{ github.ref_name }} jobs: ## @@ -55,48 +51,76 @@ jobs: alias: "fmt-stacks" ###################################################################################### - ## Create a tagged github release + ## Check if the head branch of the PR is a release branch ## ## Runs when the following is true: - ## - tag is provided + ## - The workflow was triggered by a `workflow_dispatch` action + check-release: + if: | + ( + github.event_name == 'workflow_dispatch' + ) + name: Check Release + needs: + - rustfmt + runs-on: ubuntu-latest + outputs: + tag: ${{ steps.check_release.outputs.tag }} + docker_tag: ${{ steps.check_release.outputs.docker_tag }} + is_release: ${{ steps.check_release.outputs.is_release }} + steps: + - name: Check Release + id: check_release + uses: stacks-network/actions/stacks-core/check-release@main + with: + tag: ${{ github.ref_name }} + + ###################################################################################### + ## Create a tagged github release + ## + ## Runs when: + ## - it is a release run create-release: if: | - inputs.tag != '' + needs.check-release.outputs.is_release == 'true' name: Create Release needs: - rustfmt + - check-release uses: ./.github/workflows/github-release.yml with: - tag: ${{ inputs.tag }} + tag: ${{ needs.check-release.outputs.tag }} + docker_tag: ${{ needs.check-release.outputs.docker_tag }} secrets: inherit ## Build and push Debian image built from source ## ## Runs when: - ## - tag is not provided + ## - it is not a release run docker-image: if: | - inputs.tag == '' + needs.check-release.outputs.is_release == 'false' name: Docker Image (Source) uses: ./.github/workflows/image-build-source.yml needs: - rustfmt + - check-release secrets: inherit ## Create a reusable cache for tests ## ## Runs when: - ## - tag is provided + ## - it is a release run ## or: - ## - no tag provided + ## - it is not a release run ## and any of: ## - this workflow is called manually ## - PR is opened ## - commit to either (development, master) branch create-cache: if: | - inputs.tag != '' || ( - inputs.tag == '' && ( + needs.check-release.outputs.is_release == 'true' || ( + needs.check-release.outputs.is_release == 'false' && ( github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || github.event_name == 'merge_group' || @@ -113,14 +137,15 @@ jobs: name: Create Test Cache needs: - rustfmt + - check-release uses: ./.github/workflows/create-cache.yml ## Tests to run regularly ## ## Runs when: - ## - tag is provided + ## - it is a release run ## or: - ## - no tag provided + ## - it is not a release run ## and any of: ## - this workflow is called manually ## - PR is opened @@ -128,8 +153,8 @@ jobs: ## - commit to either (development, next, master) branch stacks-core-tests: if: | - inputs.tag != '' || ( - inputs.tag == '' && ( + needs.check-release.outputs.is_release == 'true' || ( + needs.check-release.outputs.is_release == 'false' && ( github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || github.event_name == 'merge_group' || @@ -147,12 +172,13 @@ jobs: needs: - rustfmt - create-cache + - check-release uses: ./.github/workflows/stacks-core-tests.yml bitcoin-tests: if: | - inputs.tag != '' || ( - inputs.tag == '' && ( + needs.check-release.outputs.is_release == 'true' || ( + needs.check-release.outputs.is_release == 'false' && ( github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || github.event_name == 'merge_group' || @@ -170,33 +196,36 @@ jobs: needs: - rustfmt - create-cache + - check-release uses: ./.github/workflows/bitcoin-tests.yml ## Test to run on a tagged release ## ## Runs when: - ## - tag is provided + ## - it is a release run atlas-tests: - if: inputs.tag != '' + if: needs.check-release.outputs.is_release == 'true' name: Atlas Tests needs: - rustfmt - create-cache + - check-release uses: ./.github/workflows/atlas-tests.yml epoch-tests: - if: inputs.tag != '' + if: needs.check-release.outputs.is_release == 'true' name: Epoch Tests needs: - rustfmt - create-cache + - check-release uses: ./.github/workflows/epoch-tests.yml slow-tests: - if: inputs.tag != '' + if: needs.check-release.outputs.is_release == 'true' name: Slow Tests needs: - rustfmt - create-cache + - check-release uses: ./.github/workflows/slow-tests.yml - diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml index 0c8c19176a..9d4e18c665 100644 --- a/.github/workflows/github-release.yml +++ b/.github/workflows/github-release.yml @@ -9,6 +9,10 @@ on: description: "Release Tag" required: true type: string + docker_tag: + description: "Docker Release Tag" + required: true + type: string secrets: GH_TOKEN: required: true @@ -68,8 +72,8 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} with: - name: Release ${{ github.event.inputs.tag || github.ref }} - tag_name: ${{ github.event.inputs.tag || github.ref }} + name: Release ${{ inputs.tag || github.ref }} + tag_name: ${{ inputs.tag || github.ref }} draft: false prerelease: true fail_on_unmatched_files: true @@ -94,4 +98,5 @@ jobs: - create-release with: tag: ${{ inputs.tag }} + docker_tag: ${{ inputs.docker_tag }} secrets: inherit diff --git a/.github/workflows/image-build-binary.yml b/.github/workflows/image-build-binary.yml index 23e75892fe..5966d7e68a 100644 --- a/.github/workflows/image-build-binary.yml +++ b/.github/workflows/image-build-binary.yml @@ -6,6 +6,10 @@ on: workflow_call: inputs: tag: + required: true + type: string + description: "Version tag of release" + docker_tag: required: true type: string description: "Version tag for docker images" @@ -57,11 +61,39 @@ jobs: run: | echo "docker-org=${{ github.repository_owner }}" >> "$GITHUB_ENV" + - name: Check Signer Release + id: check_signer_release + run: | + case "${{ inputs.tag }}" in + signer-*) + echo "is-signer-release=true" >> $GITHUB_ENV + ;; + *) + echo "is-signer-release=false" >> $GITHUB_ENV + ;; + esac + ## Set docker metatdata ## - depending on the matrix.dist, different tags will be enabled ## ex. debian will have this tag: `type=ref,event=tag,enable=${{ matrix.dist == 'debian' }}` - name: Docker Metadata ( ${{matrix.dist}} ) - id: docker_metadata + if: ${{ env.is-signer-release == 'true' }} + id: docker_metadata_signer + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 #v5.5.1 + with: + images: | + ${{env.docker-org}}/stacks-signer + tags: | + type=raw,value=latest,enable=${{ inputs.docker_tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'debian' }} + type=raw,value=${{ inputs.docker_tag }}-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'debian'}} + type=raw,value=${{ inputs.docker_tag }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'debian' }} + type=ref,event=tag,enable=${{ matrix.dist == 'debian' }} + type=raw,value=latest-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'alpine' }} + type=raw,value=${{ inputs.docker_tag }}-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'alpine' }} + + - name: Docker Metadata ( ${{matrix.dist}} ) + if: ${{ env.is-signer-release == 'false' }} + id: docker_metadata_node uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 #v5.5.1 with: ## tag images with current repo name `stacks-core` as well as legacy `stacks-blockchain` @@ -69,22 +101,41 @@ jobs: ${{env.docker-org}}/${{ github.event.repository.name }} ${{env.docker-org}}/stacks-blockchain tags: | - type=raw,value=latest,enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'debian' }} - type=raw,value=${{ inputs.tag }}-${{ matrix.dist }},enable=${{ inputs.tag != '' && matrix.dist == 'debian'}} - type=raw,value=${{ inputs.tag }},enable=${{ inputs.tag != '' && matrix.dist == 'debian' }} + type=raw,value=latest,enable=${{ inputs.docker_tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'debian' }} + type=raw,value=${{ inputs.docker_tag }}-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'debian'}} + type=raw,value=${{ inputs.docker_tag }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'debian' }} type=ref,event=tag,enable=${{ matrix.dist == 'debian' }} - type=raw,value=latest-${{ matrix.dist }},enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'alpine' }} - type=raw,value=${{ inputs.tag }}-${{ matrix.dist }},enable=${{ inputs.tag != '' && matrix.dist == 'alpine' }} + type=raw,value=latest-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'alpine' }} + type=raw,value=${{ inputs.docker_tag }}-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'alpine' }} + + ## Build docker image for signer release + - name: Build and Push ( ${{matrix.dist}} ) + if: ${{ env.is-signer-release == 'true' }} + id: docker_build_signer + uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 + with: + file: ./.github/actions/dockerfiles/Dockerfile.${{ matrix.dist }}-binary + platforms: ${{ env.docker_platforms }} + tags: ${{ steps.docker_metadata_signer.outputs.tags }} + labels: ${{ steps.docker_metadata_signer.outputs.labels }} + build-args: | + TAG=${{ inputs.tag }} + REPO=${{ github.repository_owner }}/${{ github.event.repository.name }} + STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} + GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} + GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} + push: ${{ env.DOCKER_PUSH }} - ## Build docker image for release + ## Build docker image for node release - name: Build and Push ( ${{matrix.dist}} ) - id: docker_build + if: ${{ env.is-signer-release == 'false' }} + id: docker_build_node uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 with: file: ./.github/actions/dockerfiles/Dockerfile.${{ matrix.dist }}-binary platforms: ${{ env.docker_platforms }} - tags: ${{ steps.docker_metadata.outputs.tags }} - labels: ${{ steps.docker_metadata.outputs.labels }} + tags: ${{ steps.docker_metadata_node.outputs.tags }} + labels: ${{ steps.docker_metadata_node.outputs.labels }} build-args: | TAG=${{ inputs.tag }} REPO=${{ github.repository_owner }}/${{ github.event.repository.name }} From e04c089696326c317f7ef7a5014f3f060f5d3965 Mon Sep 17 00:00:00 2001 From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com> Date: Mon, 15 Jul 2024 23:46:44 +0300 Subject: [PATCH 0518/1400] fix: always run check-release job --- .github/workflows/ci.yml | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0b3ab46bf1..2303e8865e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -51,15 +51,8 @@ jobs: alias: "fmt-stacks" ###################################################################################### - ## Check if the head branch of the PR is a release branch - ## - ## Runs when the following is true: - ## - The workflow was triggered by a `workflow_dispatch` action + ## Check if the branch that this workflow is being run against is a release branch check-release: - if: | - ( - github.event_name == 'workflow_dispatch' - ) name: Check Release needs: - rustfmt From 5efaf3e9eea999d8a17ddc6ff448d4f384f6ea44 Mon Sep 17 00:00:00 2001 From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com> Date: Tue, 16 Jul 2024 15:31:32 +0300 Subject: [PATCH 0519/1400] update: ci - remove redundant lines, negate expression --- .github/workflows/ci.yml | 68 ++++++++++++++++++---------------------- 1 file changed, 31 insertions(+), 37 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2303e8865e..d1ae652266 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -92,7 +92,7 @@ jobs: ## - it is not a release run docker-image: if: | - needs.check-release.outputs.is_release == 'false' + needs.check-release.outputs.is_release != 'true' name: Docker Image (Source) uses: ./.github/workflows/image-build-source.yml needs: @@ -113,18 +113,16 @@ jobs: create-cache: if: | needs.check-release.outputs.is_release == 'true' || ( - needs.check-release.outputs.is_release == 'false' && ( - github.event_name == 'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' || - ( - contains(' - refs/heads/master - refs/heads/develop - refs/heads/next - ', github.event.pull_request.head.ref) && - github.event_name == 'push' - ) + github.event_name == 'workflow_dispatch' || + github.event_name == 'pull_request' || + github.event_name == 'merge_group' || + ( + contains(' + refs/heads/master + refs/heads/develop + refs/heads/next + ', github.event.pull_request.head.ref) && + github.event_name == 'push' ) ) name: Create Test Cache @@ -147,18 +145,16 @@ jobs: stacks-core-tests: if: | needs.check-release.outputs.is_release == 'true' || ( - needs.check-release.outputs.is_release == 'false' && ( - github.event_name == 'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' || - ( - contains(' - refs/heads/master - refs/heads/develop - refs/heads/next - ', github.event.pull_request.head.ref) && - github.event_name == 'push' - ) + github.event_name == 'workflow_dispatch' || + github.event_name == 'pull_request' || + github.event_name == 'merge_group' || + ( + contains(' + refs/heads/master + refs/heads/develop + refs/heads/next + ', github.event.pull_request.head.ref) && + github.event_name == 'push' ) ) name: Stacks Core Tests @@ -171,18 +167,16 @@ jobs: bitcoin-tests: if: | needs.check-release.outputs.is_release == 'true' || ( - needs.check-release.outputs.is_release == 'false' && ( - github.event_name == 'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' || - ( - contains(' - refs/heads/master - refs/heads/develop - refs/heads/next - ', github.event.pull_request.head.ref) && - github.event_name == 'push' - ) + github.event_name == 'workflow_dispatch' || + github.event_name == 'pull_request' || + github.event_name == 'merge_group' || + ( + contains(' + refs/heads/master + refs/heads/develop + refs/heads/next + ', github.event.pull_request.head.ref) && + github.event_name == 'push' ) ) name: Bitcoin Tests From ad97432177b73d8416150962b8c4e4ff99c8670d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 16 Jul 2024 11:36:06 -0400 Subject: [PATCH 0520/1400] chore: add log when unblocking miner thread --- stackslib/src/chainstate/stacks/miner.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 41ae4a2646..bf81293a27 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -133,6 +133,7 @@ pub fn signal_mining_blocked(miner_status: Arc>) { /// resume mining if we blocked it earlier pub fn signal_mining_ready(miner_status: Arc>) { + debug!("Signaling miner to resume"; "thread_id" => ?std::thread::current().id()); match miner_status.lock() { Ok(mut status) => { status.remove_blocked(); From c03704c69a9c150cd0c6f88a9cd54ee01af6e4f1 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 16 Jul 2024 11:41:53 -0400 Subject: [PATCH 0521/1400] chore: formatting --- stackslib/src/chainstate/nakamoto/coordinator/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 27309ce3a3..17cfed5cd6 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -758,7 +758,7 @@ impl< match self.handle_new_nakamoto_burnchain_block() { Ok(can_proceed) => { if !can_proceed { - error!("Missing canonical anchor block",); + error!("Missing canonical anchor block"); } } Err(e) => { From 85309c31591d9d992ff4a29bea1faa1a26c05d96 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 16 Jul 2024 16:39:34 -0400 Subject: [PATCH 0522/1400] chore: don't talk to unroutable nodes if not configured to do so --- stackslib/src/net/mempool/mod.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/mempool/mod.rs b/stackslib/src/net/mempool/mod.rs index caaea16527..2a4232ad2f 100644 --- a/stackslib/src/net/mempool/mod.rs +++ b/stackslib/src/net/mempool/mod.rs @@ -471,17 +471,6 @@ impl MempoolSync { match Self::mempool_sync_resolve_data_url(url_str, dns_request, dns_client_opt) { Ok((false, Some(addr))) => { - // address must be resolvable - if PeerAddress::from_socketaddr(&addr).is_in_private_range() { - debug!( - "{:?}: Mempool sync skips {}, which has private IP {}", - network.get_local_peer(), - &url_str, - &addr - ); - self.mempool_sync_reset(); - return (true, None); - } // success! advance self.mempool_state = MempoolSyncState::SendQuery(url_str.clone(), addr, page_id.clone()); @@ -508,6 +497,18 @@ impl MempoolSync { } MempoolSyncState::SendQuery(ref url, ref addr, ref page_id) => { // 3. ask for the remote peer's mempool's novel txs + // address must be resolvable + if !network.get_connection_opts().private_neighbors + && PeerAddress::from_socketaddr(&addr).is_in_private_range() + { + debug!( + "{:?}: Mempool sync skips {}, which has private IP", + network.get_local_peer(), + &addr + ); + self.mempool_sync_reset(); + return (true, None); + } debug!( "{:?}: Mempool sync will query {} for mempool transactions at {}", &network.get_local_peer(), From 5ee77d14ab234b87d989a57153751965e942e6bc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 16 Jul 2024 16:39:52 -0400 Subject: [PATCH 0523/1400] fix: a request is inflight if the response is inflight as well --- stackslib/src/net/rpc.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index 6f0e1db01b..efa0484d4b 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -194,7 +194,7 @@ impl ConversationHttp { /// Is a request in-progress? pub fn is_request_inflight(&self) -> bool { - self.pending_request.is_some() + self.pending_request.is_some() || self.pending_response.is_some() } /// Start a HTTP request from this peer, and expect a response. From d0f056b664589499c784301c607c1978b2e0feb1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 16 Jul 2024 16:40:15 -0400 Subject: [PATCH 0524/1400] feat: take additional balances --- stackslib/src/net/tests/inv/nakamoto.rs | 47 ++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index 539233812f..fd9f1dcc1f 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -20,6 +20,7 @@ use std::sync::mpsc::sync_channel; use std::thread; use std::thread::JoinHandle; +use clarity::vm::types::PrincipalData; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; @@ -336,6 +337,49 @@ pub fn make_nakamoto_peers_from_invs<'a>( prepare_len: u32, bitvecs: Vec>, num_peers: usize, +) -> (TestPeer<'a>, Vec>) { + inner_make_nakamoto_peers_from_invs( + test_name, + observer, + rc_len, + prepare_len, + bitvecs, + num_peers, + vec![], + ) +} + +/// NOTE: The second return value does _not_ need `<'a>`, since `observer` is never installed into +/// the peers here. However, it appears unavoidable to the borrow-checker. +pub fn make_nakamoto_peers_from_invs_and_balances<'a>( + test_name: &str, + observer: &'a TestEventObserver, + rc_len: u32, + prepare_len: u32, + bitvecs: Vec>, + num_peers: usize, + initial_balances: Vec<(PrincipalData, u64)>, +) -> (TestPeer<'a>, Vec>) { + inner_make_nakamoto_peers_from_invs( + test_name, + observer, + rc_len, + prepare_len, + bitvecs, + num_peers, + initial_balances, + ) +} + +/// Make peers from inventories and balances +fn inner_make_nakamoto_peers_from_invs<'a>( + test_name: &str, + observer: &'a TestEventObserver, + rc_len: u32, + prepare_len: u32, + bitvecs: Vec>, + num_peers: usize, + mut initial_balances: Vec<(PrincipalData, u64)>, ) -> (TestPeer<'a>, Vec>) { for bitvec in bitvecs.iter() { assert_eq!(bitvec.len() as u32, rc_len); @@ -415,10 +459,11 @@ pub fn make_nakamoto_peers_from_invs<'a>( 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, ]); + initial_balances.push((addr.into(), 1_000_000)); let plan = NakamotoBootPlan::new(test_name) .with_private_key(private_key) .with_pox_constants(rc_len, prepare_len) - .with_initial_balances(vec![(addr.into(), 1_000_000)]) + .with_initial_balances(initial_balances) .with_extra_peers(num_peers) .with_test_signers(test_signers) .with_test_stackers(test_stackers); From 1d2dff0b2e90ff5a541054bb0c59ea820c5f519f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 16 Jul 2024 16:40:24 -0400 Subject: [PATCH 0525/1400] chore: nakamoto-specific mempool testing --- stackslib/src/net/tests/mempool/mod.rs | 396 +++++++++++++++++++++++++ 1 file changed, 396 insertions(+) diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index d44f21e8ff..7a44a56788 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::cell::RefCell; use std::{thread, time}; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; @@ -28,6 +29,7 @@ use stacks_common::util::{log, sleep_ms}; use super::*; use crate::burnchains::burnchain::*; use crate::burnchains::*; +use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; use crate::chainstate::stacks::test::*; use crate::chainstate::stacks::*; use crate::core::StacksEpochExtension; @@ -35,6 +37,7 @@ use crate::net::atlas::*; use crate::net::codec::*; use crate::net::db::*; use crate::net::test::*; +use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs_and_balances; use crate::net::tests::relay::epoch2x::make_contract_tx; use crate::net::*; use crate::util_lib::test::*; @@ -904,3 +907,396 @@ fn test_mempool_sync_2_peers_problematic() { assert_eq!(peer_2_mempool_txs.len(), 128); } + +/// Verify that when transactions get stored into the mempool, they are always keyed to the +/// tenure-start block and its coinbase height +#[test] +pub fn test_mempool_storage_nakamoto() { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + let (mut test_signers, test_stackers) = TestStacker::common_signing_set(); + let mut peer = boot_nakamoto( + function_name!(), + vec![(addr.into(), 100_000_000)], + &mut test_signers, + &test_stackers, + None, + ); + + let mut total_blocks = 0; + let mut all_txs = vec![]; + let stx_miner_key = peer.miner.nakamoto_miner_key(); + let stx_miner_addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + // duplicate handles to the chainstates so we can submit txs + let mut mempool = + MemPoolDB::open_test(false, peer.config.network_id, &peer.chainstate_path).unwrap(); + let (mut chainstate, _) = peer.chainstate().reopen().unwrap(); + let sortdb = peer.sortdb().reopen().unwrap(); + + for i in 0..10 { + debug!("Tenure {}", i); + let (burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + debug!("Next burnchain block: {}", &consensus_hash); + + let num_blocks: usize = (thread_rng().gen::() % 10) + 1; + + let block_height = peer.get_burn_block_height(); + + // do a stx transfer in each block to a given recipient + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + + let mempool_txs = RefCell::new(vec![]); + let blocks_and_sizes = peer.make_nakamoto_tenure_and( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |_| {}, + |miner, chainstate, sortdb, blocks_so_far| { + let mut txs = vec![]; + if blocks_so_far.len() < num_blocks { + let account = get_account(chainstate, sortdb, &addr); + + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 200, + 200, + &recipient_addr, + ); + txs.push(stx_transfer.clone()); + (*mempool_txs.borrow_mut()).push(stx_transfer.clone()); + all_txs.push(stx_transfer.clone()); + } + txs + }, + |_| { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let sort_tip = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tip.consensus_hash) + .unwrap() + .unwrap(); + let epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sort_tip.block_height) + .unwrap() + .unwrap(); + + // submit each transaction to the mempool + for mempool_tx in (*mempool_txs.borrow()).as_slice() { + mempool + .submit( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + &mempool_tx, + None, + &epoch.block_limit, + &epoch.epoch_id, + ) + .unwrap(); + } + + (*mempool_txs.borrow_mut()).clear(); + true + }, + ); + + total_blocks += num_blocks; + } + + let tip = { + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + // each transaction is present, and is paired with a tenure-start block + let mut recovered_txs = HashSet::new(); + let tip_block_id = tip.index_block_hash(); + let mut tenure_id = tip.consensus_hash; + loop { + let tenure_start = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &tip_block_id, + &tenure_id, + ) + .unwrap() + .unwrap(); + + let all_txdata = MemPoolDB::get_txs_after( + mempool.conn(), + &tenure_start.consensus_hash, + &tenure_start.anchored_header.block_hash(), + 0, + u64::try_from(i64::MAX - 1).unwrap(), + ) + .unwrap(); + for txdata in all_txdata { + recovered_txs.insert(txdata.tx.txid()); + } + + let Some(parent_tenure_id) = + NakamotoChainState::get_nakamoto_parent_tenure_id_consensus_hash( + &mut chainstate.index_conn(), + &tip_block_id, + &tenure_id, + ) + .unwrap() + else { + break; + }; + tenure_id = parent_tenure_id; + } + + let all_txs_set: HashSet<_> = all_txs.into_iter().map(|tx| tx.txid()).collect(); + assert_eq!(all_txs_set, recovered_txs); +} + +#[test] +fn test_mempool_sync_2_peers_nakamoto_paginated() { + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full rc + vec![true, true, true, true, true, true, true, true, true, true], + ]; + let num_txs = 1024; + let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); + let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let initial_balances: Vec<_> = addrs + .iter() + .map(|a| (a.to_account_principal(), 1000000000)) + .collect(); + + let (mut peer_1, mut other_peers) = make_nakamoto_peers_from_invs_and_balances( + function_name!(), + &observer, + 10, + 3, + bitvecs.clone(), + 1, + initial_balances, + ); + let mut peer_2 = other_peers.pop().unwrap(); + + let nakamoto_start = + NakamotoBootPlan::nakamoto_first_tenure_height(&peer_1.config.burnchain.pox_constants); + + let tip = { + let sort_db = peer_1.sortdb.as_mut().unwrap(); + SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() + }; + let total_rcs = peer_1 + .config + .burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // run peer and other_peer until they connect + loop { + let _ = peer_1.step_with_ibd(false); + let _ = peer_2.step_with_ibd(false); + + let event_ids: Vec = peer_1 + .network + .iter_peer_event_ids() + .map(|e_id| *e_id) + .collect(); + let other_event_ids: Vec = peer_2 + .network + .iter_peer_event_ids() + .map(|e_id| *e_id) + .collect(); + + if event_ids.len() > 0 && other_event_ids.len() > 0 { + break; + } + } + + debug!("Peers are connected"); + + let addr = StacksAddress { + version: C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + bytes: Hash160([0xff; 20]), + }; + + let stacks_tip_ch = peer_1.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); + + // find coinbase height + let coinbase_height = NakamotoChainState::get_coinbase_height( + &mut peer_1.chainstate().index_conn(), + &StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bhh), + ) + .unwrap() + .unwrap(); + + // fill peer 1 with lots of transactions + let mut txs = HashMap::new(); + let mut peer_1_mempool = peer_1.mempool.take().unwrap(); + let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); + for i in 0..num_txs { + let pk = &pks[i]; + let mut tx = StacksTransaction { + version: TransactionVersion::Testnet, + chain_id: 0x80000000, + auth: TransactionAuth::from_p2pkh(&pk).unwrap(), + anchor_mode: TransactionAnchorMode::Any, + post_condition_mode: TransactionPostConditionMode::Allow, + post_conditions: vec![], + payload: TransactionPayload::TokenTransfer( + addr.to_account_principal(), + 123, + TokenTransferMemo([0u8; 34]), + ), + }; + tx.set_tx_fee(1000); + tx.set_origin_nonce(0); + + let mut tx_signer = StacksTransactionSigner::new(&tx); + tx_signer.sign_origin(&pk).unwrap(); + + let tx = tx_signer.get_tx().unwrap(); + + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let origin_addr = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_addr = tx.sponsor_address().unwrap_or(origin_addr.clone()); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + let tx_fee = tx.get_tx_fee(); + + txs.insert(tx.txid(), tx.clone()); + + // should succeed + MemPoolDB::try_add_tx( + &mut mempool_tx, + peer_1.chainstate(), + &stacks_tip_ch, + &stacks_tip_bhh, + true, + txid.clone(), + tx_bytes, + tx_fee, + coinbase_height, + &origin_addr, + origin_nonce, + &sponsor_addr, + sponsor_nonce, + None, + ) + .unwrap(); + + eprintln!("Added {} {}", i, &txid); + } + mempool_tx.commit().unwrap(); + peer_1.mempool = Some(peer_1_mempool); + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + 1 + }; + + let mut round = 0; + let mut peer_1_mempool_txs = 0; + let mut peer_2_mempool_txs = 0; + + while peer_1_mempool_txs < num_txs || peer_2_mempool_txs < num_txs { + if let Ok(mut result) = peer_1.step_with_ibd(false) { + let lp = peer_1.network.local_peer.clone(); + let burnchain = peer_1.network.burnchain.clone(); + peer_1 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + if let Ok(mut result) = peer_2.step_with_ibd(false) { + let lp = peer_2.network.local_peer.clone(); + let burnchain = peer_2.network.burnchain.clone(); + peer_2 + .with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + &burnchain, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + } + + round += 1; + + let mp = peer_1.mempool.take().unwrap(); + peer_1_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_1.mempool.replace(mp); + + let mp = peer_2.mempool.take().unwrap(); + peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap().len(); + peer_2.mempool.replace(mp); + + info!( + "Peer 1: {}, Peer 2: {}", + peer_1_mempool_txs, peer_2_mempool_txs + ); + } + + info!("Completed mempool sync in {} step(s)", round); + + let mp = peer_2.mempool.take().unwrap(); + let peer_2_mempool_txs = MemPoolDB::get_all_txs(mp.conn()).unwrap(); + peer_2.mempool.replace(mp); + + for tx in peer_2_mempool_txs { + assert_eq!(&tx.tx, txs.get(&tx.tx.txid()).unwrap()); + } +} From 8182591367acded6e3d4055ed539ce02e432d4a9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 17 Jul 2024 10:40:36 -0400 Subject: [PATCH 0526/1400] fix: document `false` usage in `MemPoolDB::try_add_tx()` and remove TODO --- stackslib/src/core/mempool.rs | 1 - stackslib/src/core/tests/mod.rs | 22 +++++++++---------- .../src/net/api/tests/postmempoolquery.rs | 2 +- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index a85ffd7327..fe75d62bd2 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -2294,7 +2294,6 @@ impl MemPoolDB { } /// Submit a transaction to the mempool at a particular chain tip. - /// TODO: Nakamoto-ize fn tx_submit( mempool_tx: &mut MemPoolTx, chainstate: &mut StacksChainState, diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 3b74325f3b..158feeeba5 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -1444,7 +1444,7 @@ fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), - false, + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -1505,7 +1505,7 @@ fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), - false, + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -1569,7 +1569,7 @@ fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), - false, + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -1719,7 +1719,7 @@ fn mempool_db_test_rbf() { &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), - false, + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -1769,7 +1769,7 @@ fn mempool_db_test_rbf() { &mut chainstate, &ConsensusHash([0x1; 20]), &BlockHeaderHash([0x2; 32]), - false, + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -1852,7 +1852,7 @@ fn test_add_txs_bloom_filter() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), - false, + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -1963,7 +1963,7 @@ fn test_txtags() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), - false, + false, // don't resolve the above chain tip since it doesn't exist txid, tx_bytes, tx_fee, @@ -2057,7 +2057,7 @@ fn test_make_mempool_sync_data() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), - false, + false, // don't resolve the above chain tip since it doesn't exist txid.clone(), tx_bytes, tx_fee, @@ -2235,7 +2235,7 @@ fn test_find_next_missing_transactions() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), - false, + false, // don't resolve the above chain tip since it doesn't exist txid.clone(), tx_bytes, tx_fee, @@ -2505,7 +2505,7 @@ fn test_drop_and_blacklist_txs_by_time() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), - false, + false, // don't resolve the above chain tip since it doesn't exist txid.clone(), tx_bytes, tx_fee, @@ -2625,7 +2625,7 @@ fn test_drop_and_blacklist_txs_by_size() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), - false, + false, // don't resolve the above chain tip since it doesn't exist txid.clone(), tx_bytes, tx_fee, diff --git a/stackslib/src/net/api/tests/postmempoolquery.rs b/stackslib/src/net/api/tests/postmempoolquery.rs index ba122be2e8..6954024844 100644 --- a/stackslib/src/net/api/tests/postmempoolquery.rs +++ b/stackslib/src/net/api/tests/postmempoolquery.rs @@ -174,7 +174,7 @@ fn test_stream_mempool_txs() { &mut chainstate, &ConsensusHash([0x1 + (block_height as u8); 20]), &BlockHeaderHash([0x2 + (block_height as u8); 32]), - false, + false, // don't resolve the above chain tip since it doesn't exist txid.clone(), tx_bytes, tx_fee, From e2818d9d956d63cef92fc5a2acc94099ddea2d57 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 16 Jul 2024 13:46:56 -0700 Subject: [PATCH 0527/1400] Creating release process docs for signer --- libsigner/CHANGELOG.md | 9 ++++ libsigner/release-process.md | 85 ++++++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 libsigner/CHANGELOG.md create mode 100644 libsigner/release-process.md diff --git a/libsigner/CHANGELOG.md b/libsigner/CHANGELOG.md new file mode 100644 index 0000000000..fcc7ab17f5 --- /dev/null +++ b/libsigner/CHANGELOG.md @@ -0,0 +1,9 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to the versioning scheme outlined in the [README.md](README.md). + +## [Unreleased] + diff --git a/libsigner/release-process.md b/libsigner/release-process.md new file mode 100644 index 0000000000..57ebf2c4b1 --- /dev/null +++ b/libsigner/release-process.md @@ -0,0 +1,85 @@ +# Release Process + +## Platform support + +| Platform | Supported | +| --------------------------- | ------------------------------------ | +| Linux 64-bit | :white_check_mark: | +| MacOS 64-bit | :white_check_mark: | +| Windows 64-bit | :white_check_mark: | +| MacOS Apple Silicon (ARM64) | _builds are provided but not tested_ | +| Linux ARMv7 | _builds are provided but not tested_ | +| Linux ARM64 | _builds are provided but not tested_ | + + +## Release Schedule and Hotfixes + +Normal releases in this repository that add new or updated features shall be released in an ad-hoc manner. The currently staged changes for such releases +are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). It is generally safe to run a `stacks-signer` from that branch, though it has received less rigorous testing than release branches. If bugs are found in the `develop` branch, please do [report them as issues](https://github.com/stacks-network/stacks-core/issues) on this repository. + +For fixes that impact the correct functioning or liveness of the signer, _hotfixes_ may be issued. These hotfixes are categorized by priority +according to the following rubric: + +- **High Priority**. Any fix for an issue that could deny service to the network as a whole, e.g., an issue where a particular kind of invalid transaction would cause nodes to stop processing requests or shut down unintentionally. +- **Medium Priority**. ny fix for an issue that could deny service to individual nodes. +- **Low Priority**. Any fix for an issue that is not high or medium priority. + +## Versioning + +This project uses a 6 part version number. When there is a stacks-core release, `stacks-signer` will assume the same version as the tagged `stacks-core` release (5 part version). When there are changes in-between stacks-core releases, the signer binary will assume a 6 part version. + +``` +X.Y.Z.A.n.x + +X = 2 and does not change in practice unless there’s another Stacks 2.0 type event +Y increments on consensus-breaking changes +Z increments on non-consensus-breaking changes that require a fresh chainstate (akin to semantic MAJOR) +A increments on non-consensus-breaking changes that do not require a fresh chainstate, but introduce new features (akin to semantic MINOR) +n increments on patches and hot-fixes (akin to semantic PATCH) +x increments on the current stacks-core release version +``` + +For example, if there is a stacks-core release of 2.6.0.0.0, `stacks-signer` will also be versioned as 2.6.0.0.0. If a change is needed in the signer, it may be released apart from the stacks-core as version 2.6.0.0.0.1 and will increment until the next stacks-core release. + +## Release Process + + +1. The release must be timed so that it does not interfere with a _prepare + phase_. The timing of the next Stacking cycle can be found + [here](https://stx.eco/dao/tools). A release should happen + at least 48 hours before the start of a new cycle, to avoid interfering + with the prepare phase. + +2. Before creating the release, the release manager must determine the _version + number_ for this release, and create a release branch in the format: `release/signer-X.Y.Z.A.n.x`. + The factors that determine the version number are discussed in [Versioning](#versioning). + +3. _Blocking_ PRs or issues are enumerated and a label should be applied to each + issue/PR such as `signer-X.Y.Z.A.n.x-blocker`. The Issue/PR owners for each should be pinged + for updates on whether or not those issues/PRs have any blockers or are waiting on feedback. + __Note__: It may be necessary to cherry-pick these PR's into the target branch `release/signer-X.Y.Z.A.n.x` + +4. The [CHANGELOG.md](./CHANGELOG.md) file shall be updated with summaries of what + was `Added`, `Changed`, and `Fixed` in the base branch. For example, pull requests + merged into `develop` can be found [here](https://github.com/stacks-network/stacks-blockchain/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). + Note, however, that GitHub apparently does not allow sorting by _merge time_, + so, when sorting by some proxy criterion, some care should be used to understand + which PR's were _merged_ after the last release. + +5. Once any blocker PRs have merged, a new tag will be created + by manually triggering the [`CI` Github Actions workflow](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml) + against the `release/signer-X.Y.Z.A.n.x` branch. + +6. Ecosystem participants will be notified of the release candidate in order + to test the release on various staging infrastructure. + +7. If bugs or issues emerge from the rollout on staging infrastructure, the release + will be delayed until those regressions are resolved. As regressions are resolved, + additional release candidates shall be tagged. + +8. Once the final release candidate has rolled out successfully without issue on staging + infrastructure, the tagged release shall no longer marked as Pre-Release on the [Github releases](https://github.com/stacks-network/stacks-blockchain/releases/) + page. Announcements will then be shared in the `#stacks-core-devs` channel in the + Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce). + +9. Finally, the release branch `release/signer-X.Y.Z.A.n.x` will be PR'ed into the `master` branch, and once merged, a PR for `master->develop` will be opened. \ No newline at end of file From ee2a2c204c0e9945accff19b3b4d625c99711096 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 17 Jul 2024 07:49:56 -0700 Subject: [PATCH 0528/1400] Update stacking cycle link --- libsigner/release-process.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libsigner/release-process.md b/libsigner/release-process.md index 57ebf2c4b1..599d8c7af4 100644 --- a/libsigner/release-process.md +++ b/libsigner/release-process.md @@ -46,7 +46,7 @@ For example, if there is a stacks-core release of 2.6.0.0.0, `stacks-signer` wil 1. The release must be timed so that it does not interfere with a _prepare phase_. The timing of the next Stacking cycle can be found - [here](https://stx.eco/dao/tools). A release should happen + [here](https://stx.eco/dao/tools?tool=2). A release should happen at least 48 hours before the start of a new cycle, to avoid interfering with the prepare phase. @@ -82,4 +82,4 @@ For example, if there is a stacks-core release of 2.6.0.0.0, `stacks-signer` wil page. Announcements will then be shared in the `#stacks-core-devs` channel in the Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce). -9. Finally, the release branch `release/signer-X.Y.Z.A.n.x` will be PR'ed into the `master` branch, and once merged, a PR for `master->develop` will be opened. \ No newline at end of file +9. Finally, the release branch `release/signer-X.Y.Z.A.n.x` will be PR'ed into the `master` branch, and once merged, a PR for `master->develop` will be opened. From bef210f09078e299245de021bf4aad6941baf5b3 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 17 Jul 2024 10:28:44 -0700 Subject: [PATCH 0529/1400] Updating stacks-core release process --- docs/release-process.md | 51 +++++++++++++++-------------------------- 1 file changed, 18 insertions(+), 33 deletions(-) diff --git a/docs/release-process.md b/docs/release-process.md index 1e833caf66..5e2be08b5d 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -11,13 +11,12 @@ | Linux ARMv7 | _builds are provided but not tested_ | | Linux ARM64 | _builds are provided but not tested_ | -For help cross-compiling on memory-constrained devices (such as a Raspberry Pi), please see the community supported documentation here: [Cross Compiling](https://github.com/dantrevino/cross-compiling-stacks-blockchain/blob/master/README.md). ## Release Schedule and Hotfixes Normal releases in this repository that add features such as improved RPC endpoints, improved boot-up time, new event observer fields or event types, etc., are released on a monthly schedule. The currently staged changes for such releases -are in the [develop branch](https://github.com/stacks-network/stacks-blockchain/tree/develop). It is generally safe to run +are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). It is generally safe to run a `stacks-node` from that branch, though it has received less rigorous testing than release tags. If bugs are found in the `develop` branch, please do report them as issues on this repository. @@ -52,13 +51,14 @@ For non-consensus breaking releases, this project uses the following release pro 1. The release must be timed so that it does not interfere with a _prepare phase_. The timing of the next Stacking cycle can be found - [here](https://stacking.club/cycles/next). A release to `mainnet` should happen + [here](https://stx.eco/dao/tools?tool=2). A release should happen at least 24 hours before the start of a new cycle, to avoid interfering with the prepare phase. So, start by being aware of when the release can happen. 1. Before creating the release, the release manager must determine the _version - number_ for this release. The factors that determine the version number are + number_ for this release, and create a release branch in the format: `release/X.Y.Z.A.n`. + The factors that determine the version number are discussed in [Versioning](#versioning). We assume, in this section, that the change is not consensus-breaking. So, the release manager must first determine whether there are any "non-consensus-breaking changes that require a @@ -66,32 +66,24 @@ For non-consensus breaking releases, this project uses the following release pro changed, but an automatic migration was not implemented. Then, the release manager should determine whether this is a feature release, as opposed to a hotfix or a patch. Given the answers to these questions, the version number can be computed. - + 1. The release manager enumerates the PRs or issues that would _block_ the release. A label should be applied to each such issue/PR as - `2.0.x.y.z-blocker`. The release manager should ping these + `X.Y.Z.A.n-blocker`. The release manager should ping these issue/PR owners for updates on whether or not those issues/PRs have any blockers or are waiting on feedback. -1. The release manager should open a `develop -> master` PR. This can be done before - all the blocker PRs have merged, as it is helpful for the manager and others - to see the staged changes. - 1. The release manager must update the `CHANGELOG.md` file with summaries what was `Added`, `Changed`, and `Fixed`. The pull requests merged into `develop` can be found - [here](https://github.com/stacks-network/stacks-blockchain/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). Note, however, that GitHub apparently does not allow sorting by + [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). Note, however, that GitHub apparently does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should - be used to understand which PR's were _merged_ after the last `develop -> -master` release PR. This `CHANGELOG.md` should also be used as the description - of the `develop -> master` so that it acts as _release notes_ when the branch - is tagged. + be used to understand which PR's were _merged_ after the last release. 1. Once the blocker PRs have merged, the release manager will create a new tag - by manually triggering the [`stacks-blockchain` Github Actions workflow](https://github.com/stacks-network/stacks-blockchain/actions/workflows/stacks-blockchain.yml) - against the `develop` branch, inputting the release candidate tag, `2.0.x.y.z-rc0`, - in the Action's input textbox. - + by manually triggering the [`CI` Github Actions workflow](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml) + against the `release/X.Y.Z.A.n` branch. + 1. Once the release candidate has been built, and docker images, etc. are available, the release manager will notify various ecosystem participants to test the release candidate on various staging infrastructure: @@ -104,7 +96,7 @@ master` release PR. This `CHANGELOG.md` should also be used as the description Stacks Discord. For coordinating rollouts on specific infrastructure, the release manager should contact the above participants directly either through e-mail or Discord DM. The release manager should also confirm that the built release on the - [Github releases](https://github.com/stacks-network/stacks-blockchain/releases/) + [Github releases](https://github.com/stacks-network/stacks-core/releases/) page is marked as `Pre-Release`. 1. The release manager will test that the release candidate successfully syncs with @@ -119,16 +111,9 @@ master` release PR. This `CHANGELOG.md` should also be used as the description even if other community members and developers may be addressing the discovered issues. -1. Once the final release candidate has rolled out successfully without issue on the - above staging infrastructure, the release manager tags 2 additional `stacks-blockchain` - team members to review the `develop -> master` PR. If there is a merge conflict in this - PR, this is the protocol: open a branch off of develop, merge master into that branch, - and then open a PR from this side branch to develop. The merge conflicts will be - resolved. - -1. Once reviewed and approved, the release manager merges the PR, and tags the release - via the [`stacks-blockchain` Github action](https://github.com/stacks-network/stacks-blockchain/actions/workflows/stacks-blockchain.yml) - by clicking "Run workflow" and providing the release version as the tag (e.g., - `2.0.11.1.0`) This creates a release and release images. Once the release has been - created, the release manager should update the Github release text with the - `CHANGELOG.md` "top-matter" for the release. +1. Once the final release candidate has rolled out successfully without issue on staging + infrastructure, the tagged release shall no longer marked as Pre-Release on the [Github releases](https://github.com/stacks-network/stacks-core/releases/) page. + Announcements will then be shared in the `#stacks-core-devs` channel in the + Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce). + +1. Finally, the release branch `release/X.Y.Z.A.n` will be PR'ed into the `master` branch, and once merged, a PR for `master->develop` will be opened. From e01e40d41d975b9019b82896c229998083a0a48c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 18 Jul 2024 21:09:12 -0400 Subject: [PATCH 0530/1400] fix: add default for `treatment` to support existing chainstate --- stackslib/src/chainstate/burn/operations/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stackslib/src/chainstate/burn/operations/mod.rs b/stackslib/src/chainstate/burn/operations/mod.rs index 41e9f8fc0c..0843e03b1e 100644 --- a/stackslib/src/chainstate/burn/operations/mod.rs +++ b/stackslib/src/chainstate/burn/operations/mod.rs @@ -249,6 +249,7 @@ pub struct LeaderBlockCommitOp { /// of the PoX addresses active during the block commit. /// /// This value is set by the check() call, not during parsing. + #[serde(default = "default_treatment")] pub treatment: Vec, // PoX sunset burn @@ -261,6 +262,10 @@ pub struct LeaderBlockCommitOp { pub burn_header_hash: BurnchainHeaderHash, // hash of the burn chain block header } +fn default_treatment() -> Vec { + Vec::new() +} + #[derive(Debug, PartialEq, Clone, Eq, Serialize, Deserialize)] pub struct LeaderKeyRegisterOp { pub consensus_hash: ConsensusHash, // consensus hash at time of issuance From b3043a21fbce987c524af2db2c02a9ae0350083d Mon Sep 17 00:00:00 2001 From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com> Date: Fri, 19 Jul 2024 19:16:34 +0300 Subject: [PATCH 0531/1400] move changelog and release-process docs from `libsigner` to `stacks-signer` --- {libsigner => stacks-signer}/CHANGELOG.md | 0 {libsigner => stacks-signer}/release-process.md | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename {libsigner => stacks-signer}/CHANGELOG.md (100%) rename {libsigner => stacks-signer}/release-process.md (100%) diff --git a/libsigner/CHANGELOG.md b/stacks-signer/CHANGELOG.md similarity index 100% rename from libsigner/CHANGELOG.md rename to stacks-signer/CHANGELOG.md diff --git a/libsigner/release-process.md b/stacks-signer/release-process.md similarity index 100% rename from libsigner/release-process.md rename to stacks-signer/release-process.md From 2d83847eb9e4ea6baea22ec7d7326cf885fb64ce Mon Sep 17 00:00:00 2001 From: CharlieC3 <2747302+CharlieC3@users.noreply.github.com> Date: Fri, 19 Jul 2024 12:07:42 -0400 Subject: [PATCH 0532/1400] chore: update bitcoin peer and ports in example config files --- docs/profiling.md | 2 +- testnet/stacks-node/conf/mainnet-follower-conf.toml | 6 +++--- testnet/stacks-node/conf/mainnet-mockminer-conf.toml | 6 +++--- testnet/stacks-node/conf/regtest-follower-conf.toml | 6 +++--- testnet/stacks-node/conf/testnet-follower-conf.toml | 10 +++++----- testnet/stacks-node/conf/testnet-miner-conf.toml | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/docs/profiling.md b/docs/profiling.md index 832b3d4457..3e43cf9b63 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -28,7 +28,7 @@ Enabling debug logging using environment variable `STACKS_LOG_DEBUG=1`: $ STACKS_LOG_DEBUG=1 cargo run -r -p stacks-node --bin stacks-node check-config --config testnet/stacks-node/conf/mainnet-mockminer-conf.toml INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64]) INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml -DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("bitcoind.stacks.co"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, username: Some("blockstack"), password: Some("blockstacksystem"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } +DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("bitcoin.hiro.so"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, username: Some("blockstack"), password: Some("blockstacksystem"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid config! ``` diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 2ecbc80686..02379c65d9 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -7,9 +7,9 @@ bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a60592 [burnchain] chain = "bitcoin" mode = "mainnet" -peer_host = "bitcoind.stacks.co" -username = "blockstack" -password = "blockstacksystem" +peer_host = "bitcoin.hiro.so" +username = "hirosystems" +password = "hirosystems" rpc_port = 8332 peer_port = 8333 diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index aed3e9874c..e3c93bfd2b 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -9,9 +9,9 @@ bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a60592 [burnchain] chain = "bitcoin" mode = "mainnet" -peer_host = "bitcoind.stacks.co" -username = "blockstack" -password = "blockstacksystem" +peer_host = "bitcoin.hiro.so" +username = "hirosystems" +password = "hirosystems" rpc_port = 8332 peer_port = 8333 burn_fee_cap = 1 diff --git a/testnet/stacks-node/conf/regtest-follower-conf.toml b/testnet/stacks-node/conf/regtest-follower-conf.toml index a2a71c8acb..5677551264 100644 --- a/testnet/stacks-node/conf/regtest-follower-conf.toml +++ b/testnet/stacks-node/conf/regtest-follower-conf.toml @@ -8,9 +8,9 @@ wait_time_for_microblocks = 10000 [burnchain] chain = "bitcoin" mode = "krypton" -peer_host = "bitcoind.regtest.stacks.co" -username = "blockstack" -password = "blockstacksystem" +peer_host = "bitcoin.regtest.hiro.so" +username = "hirosystems" +password = "hirosystems" rpc_port = 18443 peer_port = 18444 diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index cb23477b27..46c70a0198 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -8,11 +8,11 @@ wait_time_for_microblocks = 10000 [burnchain] chain = "bitcoin" mode = "xenon" -peer_host = "bitcoind.testnet.stacks.co" -username = "blockstack" -password = "blockstacksystem" -rpc_port = 18332 -peer_port = 18333 +peer_host = "bitcoin.regtest.hiro.so" +username = "hirosystems" +password = "hirosystems" +rpc_port = 18443 +peer_port = 18444 # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml index ca52b33a23..7e1ce1bf5e 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -14,8 +14,8 @@ mode = "xenon" peer_host = "127.0.0.1" username = "" password = "" -rpc_port = 18332 -peer_port = 18333 +rpc_port = 18443 +peer_port = 18444 [[ustx_balance]] address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" From 09ddd9ef73f685a21c675fd3429b9e56e2e04ffb Mon Sep 17 00:00:00 2001 From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com> Date: Fri, 19 Jul 2024 19:16:34 +0300 Subject: [PATCH 0533/1400] move changelog and release-process docs from `libsigner` to `stacks-signer` --- {libsigner => stacks-signer}/CHANGELOG.md | 0 {libsigner => stacks-signer}/release-process.md | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename {libsigner => stacks-signer}/CHANGELOG.md (100%) rename {libsigner => stacks-signer}/release-process.md (100%) diff --git a/libsigner/CHANGELOG.md b/stacks-signer/CHANGELOG.md similarity index 100% rename from libsigner/CHANGELOG.md rename to stacks-signer/CHANGELOG.md diff --git a/libsigner/release-process.md b/stacks-signer/release-process.md similarity index 100% rename from libsigner/release-process.md rename to stacks-signer/release-process.md From 592a31b4df5d73d6a5647bfc481bd47b45a3e0bb Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Fri, 19 Jul 2024 13:52:31 -0700 Subject: [PATCH 0534/1400] Updating docs removing several old docs files that are no longer relevant correcting other docs that were out of date --- docs/SIPS.md | 2 +- docs/ci-release.md | 355 ------------------ docs/ci-workflow.md | 281 ++++++++++++++ docs/community.md | 23 -- docs/init.md | 5 +- docs/mining.md | 16 +- docs/release-process.md | 146 ++++--- .../stacks-node/conf/local-follower-conf.toml | 47 --- .../stacks-node/conf/local-leader-conf.toml | 44 --- .../conf/mainnet-follower-conf.toml | 4 +- .../stacks-node/conf/mainnet-miner-conf.toml | 19 +- .../conf/mainnet-mockminer-conf.toml | 3 +- .../conf/mocknet-follower-conf.toml | 33 -- .../stacks-node/conf/mocknet-miner-conf.toml | 32 -- testnet/stacks-node/conf/prometheus.yml | 13 - .../conf/regtest-follower-conf.toml | 37 -- .../conf/testnet-follower-conf.toml | 50 ++- .../stacks-node/conf/testnet-miner-conf.toml | 34 -- 18 files changed, 418 insertions(+), 726 deletions(-) delete mode 100644 docs/ci-release.md create mode 100644 docs/ci-workflow.md delete mode 100644 docs/community.md delete mode 100644 testnet/stacks-node/conf/local-follower-conf.toml delete mode 100644 testnet/stacks-node/conf/local-leader-conf.toml delete mode 100644 testnet/stacks-node/conf/mocknet-follower-conf.toml delete mode 100644 testnet/stacks-node/conf/mocknet-miner-conf.toml delete mode 100644 testnet/stacks-node/conf/prometheus.yml delete mode 100644 testnet/stacks-node/conf/regtest-follower-conf.toml delete mode 100644 testnet/stacks-node/conf/testnet-miner-conf.toml diff --git a/docs/SIPS.md b/docs/SIPS.md index abce8c220c..0930f5d51e 100644 --- a/docs/SIPS.md +++ b/docs/SIPS.md @@ -4,4 +4,4 @@ Stacks improvement proposals (SIPs) are aimed at describing the implementation o See [SIP 000](https://github.com/stacksgov/sips/blob/main/sips/sip-000/sip-000-stacks-improvement-proposal-process.md) for more details. -The SIPs now located in the [stacksgov/sips](https://github.com/stacksgov/sips) repository as part of the [Stacks Community Governance organization](https://github.com/stacksgov). +The SIPs are located in the [stacksgov/sips](https://github.com/stacksgov/sips) repository as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/docs/ci-release.md b/docs/ci-release.md deleted file mode 100644 index f7881ba675..0000000000 --- a/docs/ci-release.md +++ /dev/null @@ -1,355 +0,0 @@ -# Releases - -All releases are built via a Github Actions workflow named `CI` ([ci.yml](../.github/workflows/ci.yml)), and is responsible for: - -- Verifying code is formatted correctly -- Building binary archives and checksums -- Docker images -- Triggering tests conditionally (different tests run for a release vs a PR) - -1. Releases are only created if a tag is **manually** provided when the [CI workflow](../.github/workflows/ci.yml) is triggered. -2. [Caching](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows) is used to speed up testing - a cache is created based on the type of data (i.e. cargo) and the commit sha. tests can be retried quickly since the cache will persist until the cleanup job is run. -3. [nextest](https://nexte.st/) is used to run the tests from an archived file that is cached (using commit sha as a key)) - - Two [archives](https://nexte.st/book/reusing-builds.html) are created, one for genesis tests and one for generic tests (it is done this way to reduce the time spent building) - - Unit-tests are [partitioned](https://nexte.st/book/partitioning.html) and multi-threaded to speed up execution time - -## TL;DR - -- Pushing a feature branch will not trigger a workflow -- An open/re-opened/synchronized PR will produce a single image built from source on Debian with glibc with 2 tags: - - `stacks-core:` - - `stacks-core:` -- A merged PR into `default-branch` from `develop` will produce a single image built from source on Debian with glibc: - - `stacks-core:` -- An untagged build of any branch will produce a single image built from source on Debian with glibc: - - `stacks-core:` -- A tagged release on a non-default branch will produce: - - Docker Alpine image for several architectures tagged with: - - `stacks-core:` - - Docker Debian image for several architectures tagged with: - - `stacks-core:` -- A tagged release on the default branch will produce: - - Github Release of the specified tag with: - - Binary archives for several architectures - - Docker Alpine image for several architectures tagged with: - - `stacks-core:` - - `stacks-core:` - - Docker Debian image for several architectures tagged with: - - `stacks-core:` - - `stacks-core:` - -## Release workflow - -1. Create a feature branch: `feat/fix-something` -2. PR `feat/fix-something` to the `develop` branch where the PR is numbered `112` - 1. Docker image tagged with the **branch name** and **PR number** - - ex: - - `stacks-core:feat-fix-something` - - `stacks-core:pr-112` - 2. CI tests are run -3. PR `develop` to the default branch where the PR is numbered `112` - 1. Docker image tagged with the **branch name** and **PR number** - - ex: - - `stacks-core:feat-fix-something` - - `stacks-core:pr-112` - 2. CI tests are run -4. Merge `develop` branch to the default branch - 1. Docker image is tagged with the **default branch** `master` - - ex: - - `stacks-core:master` - 2. CI tests are run -5. CI workflow is manually triggered on **non-default branch** with a version, i.e. `2.1.0.0.0-rc0` - 1. No Docker images/binaries are created - 2. All release tests are run -6. CI workflow is manually triggered on **default branch** with a version, i.e. `2.1.0.0.0` - 1. Github release for the manually input version is created with binaries - 2. All release tests are run - 3. Docker image pushed with tags of the **input version** and **latest** - - ex: - - `stacks-core:2.1.0.0.0-debian` - - `stacks-core:latest-debian` - - `stacks-core:2.1.0.0.0` - - `stacks-core:latest` - -## Tests - -Tests are separated into several different workflows, with the intention that they can be _conditionally_ run depending upon the triggering operation. For example, on a PR synchronize we don't want to run some identified "slow" tests, but we do want to run the [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) and [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml). - -There are also 2 different methods in use with regard to running tests: - -1. [Github Actions matrix](https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs) -2. [nextest partitioning](https://nexte.st/book/partitioning.html) - -A matrix is used when there are several known tests that need to be run. Partitions (shards) are used when there is a large and unknown number of tests to run (ex: `cargo test` to run all tests). - -There is also a workflow designed to run tests that are manually triggered: [Standalone Tests](../.github/workflows/standalone-tests.yml). -This workflow requires you to select which test(s) you want to run, which then triggers a reusable workflow via conditional. For example, selecting "Epoch Tests" will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). Likewise, selecting `Release Tests` will run the same tests as a release workflow. - -Files: - -- [Standalone Tests](../.github/workflows/standalone-tests.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Atlas Tests](../.github/workflows/atlas-tests.yml) -- [Epoch Tests](../.github/workflows/epoch-tests.yml) -- [Slow Tests](../.github/workflows/slow-tests.yml) - -### Adding/changing tests - -With the exception of `unit-tests` in [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml), adding/removing a test requires a change to the workflow matrix. Example from [Atlas Tests](../.github/workflows/atlas-tests.yml): - -```yaml -atlas-tests: - name: Atlas Test - runs-on: ubuntu-latest - strategy: - ## Continue with the test matrix even if we've had a failure - fail-fast: false - ## Run a maximum of 2 concurrent tests from the test matrix - max-parallel: 2 - matrix: - test-name: - - tests::neon_integrations::atlas_integration_test - - tests::neon_integrations::atlas_stress_integration_test -``` - -Example of adding a new test `tests::neon_integrations::atlas_new_test`: - -```yaml - ... - matrix: - test-name: - - tests::neon_integrations::atlas_integration_test - - tests::neon_integrations::atlas_stress_integration_test - - tests::neon_integrations::atlas_new_test -``` - -The separation of tests (outside of [Slow Tests](../.github/workflows/slow-tests.yml)) is performed by creating a separate workflow for each _type_ of test that is being run. Using the example above, to add/remove any tests from being run - the `matrix` will need to be adjusted. - -ex: - -- `Atlas Tests`: Tests related to Atlas -- `Bitcoin Tests`: Tests relating to burnchain operations -- `Epoch Tests`: Tests related to epoch changes -- `Slow Tests`: These tests have been identified as taking longer than others. The threshold used is if a test takes longer than `10 minutes` to complete successfully (or times out semi-regularly), it should be added here. -- `Stacks Blockchain Tests`: - - `full-genesis`: Tests related to full genesis - -### Checking the result of multiple tests at once - -You can use the [check-jobs-status](https://github.com/stacks-network/actions/tree/main/check-jobs-status) composite action in order to check that multiple tests are successful in 1 job. -If any of the tests given to the action (JSON string of `needs` field) fails, the step that calls the action will also fail. - -If you have to mark more than 1 job from the same workflow required in a ruleset, you can use this action in a separate job and only add that job as required. - -In the following example, `unit-tests` is a matrix job with 8 partitions (i.e. 8 jobs are running), while the others are normal jobs. -If any of the 11 jobs are failing, the `check-tests` job will also fail. - -```yaml -check-tests: - name: Check Tests - runs-on: ubuntu-latest - if: always() - needs: - - full-genesis - - unit-tests - - open-api-validation - - core-contracts-clarinet-test - steps: - - name: Check Tests Status - id: check_tests_status - uses: stacks-network/actions/check-jobs-status@main - with: - jobs: ${{ toJson(needs) }} - summary_print: "true" -``` - -## Triggering a workflow - -### PR a branch to develop - -ex: Branch is named `feat/fix-something` and the PR is numbered `112` - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Docker image](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name and PR number as tags -- ex: - - `stacks-core:feat-fix-something` - - `stacks-core:pr-112` - ---- - -### Merging a branch to develop - -Nothing is triggered automatically - ---- - -### PR develop to master branches - -ex: Branch is named `develop` and the PR is numbered `113` - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Docker image](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name and PR number as tags -- ex: - - `stacks-core:develop` - - `stacks-core:pr-113` - ---- - -### Merging a PR from develop to master - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Docker image](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name as a tag -- ex: - - `stacks-core:master` - ---- - -### Manually triggering workflow without tag (any branch) - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Docker image](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name as a tag -- ex: - - `stacks-core:` - ---- - -### Manually triggering workflow with tag on a non-default branch (i.e. tag of `2.1.0.0.0-rc0`) - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Atlas Tests](../.github/workflows/atlas-tests.yml) -- [Epoch Tests](../.github/workflows/epoch-tests.yml) -- [Slow Tests](../.github/workflows/slow-tests.yml) - ---- - -### Manually triggering workflow with tag on default branch (i.e. tag of `2.1.0.0.0`) - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Blockchain Tests](../.github/workflows/stacks-blockchain-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Atlas Tests](../.github/workflows/atlas-tests.yml) -- [Epoch Tests](../.github/workflows/epoch-tests.yml) -- [Slow Tests](../.github/workflows/slow-tests.yml) -- [Binaries built for specified architectures](../.github/workflows/create-source-binary.yml) - - Archive and checksum files added to github release -- [Github release](../.github/workflows/github-release.yml) (with artifacts/checksum) is created using the manually input tag -- [Docker image](../.github/workflows/image-build-binary.yml) built from binaries on debian/alpine distributions and pushed with the provided input tag and `latest` -- ex: - - `stacks-core:2.1.0.0.0-debian` - - `stacks-core:latest-debian` - - `stacks-core:2.1.0.0.0` - - `stacks-core:latest` - -## Mutation Testing - -When a new Pull Request (PR) is submitted, this feature evaluates the quality of the tests added or modified in the PR. -It checks the new and altered functions through mutation testing. -Mutation testing involves making small changes (mutations) to the code to check if the tests can detect these changes. - -The mutations are run with or without a [Github Actions matrix](https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs). -The matrix is used when there is a large number of mutations to run ([check doc specific cases](https://github.com/stacks-network/actions/blob/main/stacks-core/mutation-testing/check-packages-and-shards/README.md#outputs)). -We utilize a matrix strategy with shards to enable parallel execution in GitHub Actions. -This approach allows for the concurrent execution of multiple jobs across various runners. -The total workload is divided across all shards, effectively reducing the overall duration of a workflow because the time taken is approximately the total time divided by the number of shards (+ initial build & test time). -This is particularly advantageous for large packages that have significant build and test times, as it enhances efficiency and speeds up the process. - -Since mutation testing is directly correlated to the written tests, there are slower packages (due to the quantity or time it takes to run the tests) like `stackslib` or `stacks-node`. -These mutations are run separately from the others, with one or more parallel jobs, depending on the amount of mutations found. - -Once all the jobs have finished testing mutants, the last job collects all the tested mutations from the previous jobs, combines them and outputs them to the `Summary` section of the workflow, at the bottom of the page. -There, you can find all mutants on categories, with links to the function they tested, and a short description on how to fix the issue. -The PR should only be approved/merged after all the mutants tested are in the `Caught` category. - -### Time required to run the workflow based on mutants outcome and packages' size - -- Small packages typically completed in under 30 minutes, aided by the use of shards. -- Large packages like stackslib and stacks-node initially required about 20-25 minutes for build and test processes. - - Each "missed" and "caught" mutant took approximately 15 minutes. Using shards, this meant about 50-55 minutes for processing around 32 mutants (10-16 functions modified). Every additional 8 mutants added another 15 minutes to the runtime. - - "Unviable" mutants, which are functions lacking a Default implementation for their returned struct type, took less than a minute each. - - "Timeout" mutants typically required more time. However, these should be marked to be skipped (by adding a skip flag to their header) since they indicate functions unable to proceed in their test workflow with mutated values, as opposed to the original implementations. - -File: - -- [PR Differences Mutants](../.github/workflows/pr-differences-mutants.yml) - -### Mutant Outcomes - -- caught — A test failed with this mutant applied. -This is a good sign about test coverage. - -- missed — No test failed with this mutation applied, which seems to indicate a gap in test coverage. -Or, it may be that the mutant is undistinguishable from the correct code. -In any case, you may wish to add a better test. - -- unviable — The attempted mutation doesn't compile. -This is inconclusive about test coverage, since the function's return structure may not implement `Default::default()` (one of the mutations applied), hence causing the compile to fail. -It is recommended to add `Default` implementation for the return structures of these functions, only mark that the function should be skipped as a last resort. - -- timeout — The mutation caused the test suite to run for a long time, until it was eventually killed. -You might want to investigate the cause and only mark the function to be skipped if necessary. - -### Skipping Mutations - -Some functions may be inherently hard to cover with tests, for example if: - -- Generated mutants cause tests to hang. -- You've chosen to test the functionality by human inspection or some higher-level integration tests. -- The function has side effects or performance characteristics that are hard to test. -- You've decided that the function is not important to test. - -To mark functions as skipped, so they are not mutated: - -- Add a Cargo dependency of the [mutants](https://crates.io/crates/mutants) crate, version `0.0.3` or later (this must be a regular `dependency`, not a `dev-dependency`, because the annotation will be on non-test code) and mark functions with `#[mutants::skip]`, or - -- You can avoid adding the dependency by using the slightly longer `#[cfg_attr(test, mutants::skip)]`. - -### Example - -```rust -use std::time::{Duration, Instant}; - -/// Returns true if the program should stop -#[cfg_attr(test, mutants::skip)] // Returning false would cause a hang -fn should_stop() -> bool { - true -} - -pub fn controlled_loop() { - let start = Instant::now(); - for i in 0.. { - println!("{}", i); - if should_stop() { - break; - } - if start.elapsed() > Duration::from_secs(60 * 5) { - panic!("timed out"); - } - } -} - -mod test { - #[test] - fn controlled_loop_terminates() { - super::controlled_loop() - } -} -``` - ---- diff --git a/docs/ci-workflow.md b/docs/ci-workflow.md new file mode 100644 index 0000000000..df63ee8fa0 --- /dev/null +++ b/docs/ci-workflow.md @@ -0,0 +1,281 @@ +# CI Workflows + +All releases are built via a Github Actions workflow named [`CI`](../.github/workflows/ci.yml), and is responsible for: + +- Verifying code is formatted correctly +- Integration tests +- [Mutation tests](https://en.wikipedia.org/wiki/Mutation_testing) +- Creating releases + - Building binary archives and calculating checksums + - Publishing Docker images + +1. Releases are only created when the [CI workflow](../.github/workflows/ci.yml) is triggered against a release branch (ex: `release/X.Y.Z.A.n`). +2. [Caching](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows) is used to speed up testing - a cache is created based on the type of data (i.e. cargo) and the commit sha. + Tests can be retried quickly since the cache will persist until the cleanup job is run. +3. [Nextest](https://nexte.st/) is used to run the tests from a cached build archive file (using commit sha as the cache key). + - Two [test archives](https://nexte.st/docs/ci-features/archiving/) are created, one for genesis tests and one for generic tests. + - Unit-tests are [partitioned](https://nexte.st/docs/ci-features/partitioning/) and parallelized to speed up execution time. +4. Most workflow steps are called from a separate actions repo to reduce duplication. + +## TL;DR + +- Pushing a new branch will not trigger a workflow +- An open/re-opened/synchronized PR will produce a docker image built from source on Debian with glibc with the following tags: + - `stacks-core:` + - `stacks-core:` +- An untagged build of any branch will produce a single image built from source on Debian with glibc: + - `stacks-core:` +- Running the [CI workflow](../.github/workflows/ci.yml) on a `release/X.Y.Z.A.n` branch will produce: + - Github Release of the branch with: + - Binary archives for several architectures + - Checksum file containing hashes for each archive + - Tag of the `release/X.Y.Z.A.n` version, in the format of: `X.Y.Z.A.n` + - Docker Debian images for several architectures tagged with: + - `stacks-core:latest` + - `stacks-core:X.Y.Z.A.n` + - `stacks-core:X.Y.Z.A.n-debian` + - Docker Alpine images for several architectures tagged with: + - `stacks-core:X.Y.Z.A.n-alpine` + +## Release workflow + +The process to build and tag a release is defined [here](./release-process.md) + +## Tests + +Tests are separated into several different workflows, with the intention that they can be _conditionally_ run depending upon the triggering operation. For example, when a PR is opened we don't want to run some identified "slow" tests, but we do want to run the [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) and [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml). + +There are also 2 different methods in use with regard to running tests: + +1. [Github Actions matrix](https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs) +2. [nextest partitioning](https://nexte.st/book/partitioning.html) + +A matrix is used when there are several known tests that need to be run in parallel. +Partitions (shards) are used when there is a large and unknown number of tests to run (ex: `cargo test` to run all tests). + +There is also a workflow designed to run tests that is manually triggered: [Standalone Tests](../.github/workflows/standalone-tests.yml). +This workflow requires you to select which test(s) you want to run, which then triggers a reusable workflow via conditional. +For example, selecting "Epoch Tests" will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). +Likewise, selecting `Release Tests` will run the same tests as a release workflow. + +### Adding/changing tests + +With the exception of `unit-tests` in [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml), adding/removing a test requires a change to the workflow matrix. Example from [Atlas Tests](../.github/workflows/atlas-tests.yml): + +```yaml +atlas-tests: + name: Atlas Test + ... + matrix: + test-name: + - tests::neon_integrations::atlas_integration_test + - tests::neon_integrations::atlas_stress_integration_test +``` + +Example of adding a new test `tests::neon_integrations::atlas_new_test`: + +```yaml +atlas-tests: + name: Atlas Test + ... + matrix: + test-name: + - tests::neon_integrations::atlas_integration_test + - tests::neon_integrations::atlas_stress_integration_test + - tests::neon_integrations::atlas_new_test +``` + +The separation of tests (outside of [Slow Tests](../.github/workflows/slow-tests.yml)) is performed by creating a separate workflow for each _type_ of test that is being run. +Using the example above, to add/remove any tests from being run - the workflow `matrix` will need to be adjusted. + +ex: + +- `Atlas Tests`: Tests related to Atlas +- `Bitcoin Tests`: Tests relating to burnchain operations +- `Epoch Tests`: Tests related to epoch changes +- `Slow Tests`: These tests have been identified as taking longer than others. The threshold used is if a test takes longer than `10 minutes` to complete successfully (or even times out intermittently), it should be added here. +- `Stacks Core Tests`: + - `full-genesis`: Tests related to full genesis + - `core-contracts`: Tests related to boot contracts + +### Checking the result of multiple tests at once + +You can use the [check-jobs-status](https://github.com/stacks-network/actions/tree/main/check-jobs-status) composite action in order to check that multiple tests are successful in a workflow job. +If any of the tests given to the action (JSON string of `needs` field) fails, the step that calls the action will also fail. + +If you have to mark more than 1 job from the same workflow required in a ruleset, you can use this action in a separate job and only add that job as required. + +In the following example, `unit-tests` is a matrix job with 8 partitions (i.e. 8 jobs are running), while the others are normal jobs. +If any of the jobs are failing, the `check-tests` job will also fail. + +```yaml +check-tests: + name: Check Tests + runs-on: ubuntu-latest + if: always() + needs: + - full-genesis + - unit-tests + - open-api-validation + - core-contracts-clarinet-test + steps: + - name: Check Tests Status + id: check_tests_status + uses: stacks-network/actions/check-jobs-status@main + with: + jobs: ${{ toJson(needs) }} + summary_print: "true" +``` + +## Triggering a workflow + +### Opening/Updating a PR + +- [Rust format](../.github/workflows/ci.yml) +- [Create Test Cache](../.github/workflows/create-cache.yml) +- [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) +- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) +- [Docker Image (Source)](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name and PR number as tags + - Creates the following images (where branch is named `feat/fix-something` and the PR is numbered `5446`): + - `stacks-core:feat-fix-something` + - `stacks-core:pr-5446` + +--- + +### Merging a branch to develop + +Once a PR is added to the merge queue, the target branch is merged into the source branch. +Then, the same workflows are triggered as in the [previous step](#opening-a-pr-against-develop). + +--- + +### Manually triggering CI workflow (any branch not named `release/X.Y.Z.A.n`) + +- [Rust format](../.github/workflows/ci.yml) +- [Create Test Cache](../.github/workflows/create-cache.yml) +- [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) +- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) +- [Docker Image (Source)](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name as a tag + - Creates the following images: + - `stacks-core:` + +--- + +### Manually triggering CI workflow with tag on a release branch + +ex: running the [`CI`](../.github/workflows/ci.yml) on a branch named `release/X.Y.Z.A.n` + +- [Rust format](../.github/workflows/ci.yml) +- [Create Test Cache](../.github/workflows/create-cache.yml) +- [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) +- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) +- [Atlas Tests](../.github/workflows/atlas-tests.yml) +- [Epoch Tests](../.github/workflows/epoch-tests.yml) +- [Slow Tests](../.github/workflows/slow-tests.yml) +- [Github release](../.github/workflows/github-release.yml) (with artifacts/checksum) is created using the manually input tag +- [Binaries built for specified architectures](../.github/workflows/create-source-binary.yml) + - Archive and checksum files will be uploaded to the versioned github release. +- [Docker Image (Binary)](../.github/workflows/image-build-binary.yml) + - Built from binaries on debian/alpine distributions and pushed with a verrsion and `latest` tags. + - Creates the following images: + - `stacks-core:X.Y.Z.A.n` + - `stacks-core:X.Y.Z.A.n-alpine` + - `stacks-core:latest` + - `stacks-core:latest-alpine` + +## Mutation Testing + +When a new Pull Request (PR) is submitted, this feature evaluates the quality of the tests added or modified in the PR. +It checks the new and altered functions through mutation testing. +Mutation testing involves making small changes (mutations) to the code to check if the tests can detect these changes. + +The mutations are run with or without a [Github Actions matrix](https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs). +The matrix is used when there is a large number of mutations to run ([check doc specific cases](https://github.com/stacks-network/actions/blob/main/stacks-core/mutation-testing/check-packages-and-shards/README.md#outputs)). +We utilize a matrix strategy with shards to enable parallel execution in GitHub Actions. +This approach allows for the concurrent execution of multiple jobs across various runners. +The total workload is divided across all shards, effectively reducing the overall duration of a workflow because the time taken is approximately the total time divided by the number of shards (+ initial build & test time). +This is particularly advantageous for large packages that have significant build and test times, as it enhances efficiency and speeds up the process. + +Since mutation testing is directly correlated to the written tests, there are slower packages (due to the quantity or time it takes to run the tests) like `stackslib` or `stacks-node`. +These mutations are run separately from the others, with one or more parallel jobs, depending on the amount of mutations found. + +Once all the jobs have finished testing mutants, the last job collects all the tested mutations from the previous jobs, combines them and outputs them to the `Summary` section of the workflow, at the bottom of the page. +There, you can find all mutants on categories, with links to the function they tested, and a short description on how to fix the issue. +The PR should only be approved/merged after all the mutants tested are in the `Caught` category. + +### Time required to run the workflow based on mutants outcome and packages' size + +- Small packages typically completed in under 30 minutes, aided by the use of shards. +- Large packages like stackslib and stacks-node initially required about 20-25 minutes for build and test processes. + - Each "missed" and "caught" mutant took approximately 15 minutes. Using shards, this meant about 50-55 minutes for processing around 32 mutants (10-16 functions modified). Every additional 8 mutants added another 15 minutes to the runtime. + - "Unviable" mutants, which are functions lacking a Default implementation for their returned struct type, took less than a minute each. + - "Timeout" mutants typically required more time. However, these should be marked to be skipped (by adding a skip flag to their header) since they indicate functions unable to proceed in their test workflow with mutated values, as opposed to the original implementations. + +File: + +- [PR Differences Mutants](../.github/workflows/pr-differences-mutants.yml) + +### Mutant Outcomes + +- caught — A test failed with this mutant applied. + This is a good sign about test coverage. + +- missed — No test failed with this mutation applied, which seems to indicate a gap in test coverage. + Or, it may be that the mutant is undistinguishable from the correct code. + In any case, you may wish to add a better test. + +- unviable — The attempted mutation doesn't compile. + This is inconclusive about test coverage, since the function's return structure may not implement `Default::default()` (one of the mutations applied), hence causing the compile to fail. + It is recommended to add `Default` implementation for the return structures of these functions, only mark that the function should be skipped as a last resort. + +- timeout — The mutation caused the test suite to run for a long time, until it was eventually killed. + You might want to investigate the cause and only mark the function to be skipped if necessary. + +### Skipping Mutations + +Some functions may be inherently hard to cover with tests, for example if: + +- Generated mutants cause tests to hang. +- You've chosen to test the functionality by human inspection or some higher-level integration tests. +- The function has side effects or performance characteristics that are hard to test. +- You've decided that the function is not important to test. + +To mark functions as skipped, so they are not mutated: + +- Add a Cargo dependency of the [mutants](https://crates.io/crates/mutants) crate, version `0.0.3` or later (this must be a regular `dependency`, not a `dev-dependency`, because the annotation will be on non-test code) and mark functions with `#[mutants::skip]`, or + +- You can avoid adding the dependency by using the slightly longer `#[cfg_attr(test, mutants::skip)]`. + +### Example + +```rust +use std::time::{Duration, Instant}; + +/// Returns true if the program should stop +#[cfg_attr(test, mutants::skip)] // Returning false would cause a hang +fn should_stop() -> bool { + true +} + +pub fn controlled_loop() { + let start = Instant::now(); + for i in 0.. { + println!("{}", i); + if should_stop() { + break; + } + if start.elapsed() > Duration::from_secs(60 * 5) { + panic!("timed out"); + } + } +} + +mod test { + #[test] + fn controlled_loop_terminates() { + super::controlled_loop() + } +} +``` + +--- diff --git a/docs/community.md b/docs/community.md deleted file mode 100644 index ca842151f2..0000000000 --- a/docs/community.md +++ /dev/null @@ -1,23 +0,0 @@ -# Community - -Beyond this Github project, -Stacks maintains a public [forum](https://forum.stacks.org) and an -open [Discord](https://discord.com/invite/XYdRyhf) channel. In addition, the project -maintains a [mailing list](https://newsletter.stacks.org/) which sends out -community announcements. - -- [Forum](https://forum.stacks.org) -- [Discord](https://discord.com/invite/XYdRyhf) -- [Telegram](https://t.me/StacksChat) -- [Newsletter](https://newsletter.stacks.org/) - -The greater Stacks community regularly hosts in-person -[meetups](https://www.meetup.com/topics/blockstack/) as well as a [calendar of Stacks ecosystem events](https://community.stacks.org/events#calendar). The project's -[YouTube channel](https://www.youtube.com/channel/UC3J2iHnyt2JtOvtGVf_jpHQ) includes -videos from some of these meetups, as well as video tutorials to help new -users get started and help developers wrap their heads around the system's -design. - -- [Meetups](https://www.meetup.com/topics/blockstack/) -- [Events Calender](https://community.stacks.org/events#calendar) -- [YouTube channel](https://www.youtube.com/channel/UC3J2iHnyt2JtOvtGVf_jpHQ) diff --git a/docs/init.md b/docs/init.md index f3b98076c6..5bf157e721 100644 --- a/docs/init.md +++ b/docs/init.md @@ -14,9 +14,8 @@ The MacOS configuration assumes stacks-blockchain will be set up for the current ## Configuration -For an example configuration file that describes the configuration settings, -see [mainnet-follower-conf.toml](../testnet/stacks-node/conf/mainnet-follower-conf.toml). -Available configuration options are documented here: https://docs.stacks.co/references/stacks-node-configuration +For an example configuration file that describes the configuration settings, see [mainnet-follower-conf.toml](../testnet/stacks-node/conf/mainnet-follower-conf.toml). +Available configuration options are [documented here](https://docs.stacks.co/stacks-in-depth/nodes-and-miners/stacks-node-configuration). ## Paths diff --git a/docs/mining.md b/docs/mining.md index e113f12d93..2a59f051a9 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -9,8 +9,8 @@ you should make sure to add the following config fields to your config file: miner = True # Bitcoin private key to spend seed = "YOUR PRIVATE KEY" -# How long to wait for microblocks to arrive before mining a block to confirm them (in milliseconds) -wait_time_for_microblocks = 10000 +# Disable microblocks (ref: https://github.com/stacks-network/stacks-core/pull/4561 ) +mine_microblocks = false # Run as a mock-miner, to test mining without spending BTC. Needs miner=True. #mock_mining = True @@ -23,10 +23,18 @@ first_attempt_time_ms = 1000 # Time to spend on subsequent attempts to make a block, in milliseconds. # This can be bigger -- new block-commits will be RBF'ed. subsequent_attempt_time_ms = 60000 -# Time to spend mining a microblock, in milliseconds. -microblock_attempt_time_ms = 30000 # Time to spend mining a Nakamoto block, in milliseconds. nakamoto_attempt_time_ms = 20000 + +[burnchain] +# Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election +burn_fee_cap = 20000 +# Amount (in sats) per byte - Used to calculate the transaction fees +satoshis_per_byte = 25 +# Amount of sats to add when RBF'ing bitcoin tx (default: 5) +rbf_fee_increment = 5 +# Maximum percentage to RBF bitcoin tx (default: 150% of satsv/B) +max_rbf = 150 ``` You can verify that your node is operating as a miner by checking its log output diff --git a/docs/release-process.md b/docs/release-process.md index 5e2be08b5d..d7dfb1ea52 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -11,18 +11,16 @@ | Linux ARMv7 | _builds are provided but not tested_ | | Linux ARM64 | _builds are provided but not tested_ | - ## Release Schedule and Hotfixes -Normal releases in this repository that add features such as improved RPC endpoints, improved boot-up time, new event -observer fields or event types, etc., are released on a monthly schedule. The currently staged changes for such releases -are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). It is generally safe to run -a `stacks-node` from that branch, though it has received less rigorous testing than release tags. If bugs are found in -the `develop` branch, please do report them as issues on this repository. +Normal releases in this repository that add new features are released on a monthly schedule. +The currently staged changes for such releases are in the [develop branch](https://github.com/stacks-network/stacks-blockchain/tree/develop). +It is generally safe to run a `stacks-node` from that branch, though it has received less rigorous testing than release tags. +If bugs are found in the `develop` branch, please do [report them as issues](https://github.com/stacks-network/stacks-core/issues) on this repository. -For fixes that impact the correct functioning or liveness of the network, _hotfixes_ may be issued. These are patches -to the main branch which are backported to the develop branch after merging. These hotfixes are categorized by priority -according to the following rubric: +For fixes that impact the correct functioning or liveness of the network, _hotfixes_ may be issued. +These are patches to the main branch which are backported to the develop branch after merging. +These hotfixes are categorized by priority according to the following rubric: - **High Priority**. Any fix for an issue that could deny service to the network as a whole, e.g., an issue where a particular kind of invalid transaction would cause nodes to stop processing requests or shut down unintentionally. Any fix for an issue that could cause honest miners to produce invalid blocks. - **Medium Priority**. Any fix for an issue that could cause miners to waste funds. @@ -30,90 +28,72 @@ according to the following rubric: ## Versioning -This repository uses a 5 part version number. +This repository uses a 5 part version number: ``` X.Y.Z.A.n -X = 2 and does not change in practice unless there’s another Stacks 2.0 type event +X major version - does not change in practice unless there’s another Stacks 2.0 type event Y increments on consensus-breaking changes Z increments on non-consensus-breaking changes that require a fresh chainstate (akin to semantic MAJOR) A increments on non-consensus-breaking changes that do not require a fresh chainstate, but introduce new features (akin to semantic MINOR) n increments on patches and hot-fixes (akin to semantic PATCH) ``` -For example, a node operator running version `2.0.10.0.0` would not need to wipe and refresh their chainstate -to upgrade to `2.0.10.1.0` or `2.0.10.0.1`. However, upgrading to `2.0.11.0.0` would require a new chainstate. +Optionally, an extra pre-release field may be appended to the version to specify a release candidate in the format `-rc[0-9]` ## Non-Consensus Breaking Release Process -For non-consensus breaking releases, this project uses the following release process: - -1. The release must be timed so that it does not interfere with a _prepare - phase_. The timing of the next Stacking cycle can be found - [here](https://stx.eco/dao/tools?tool=2). A release should happen - at least 24 hours before the start of a new cycle, to avoid interfering - with the prepare phase. So, start by being aware of when the release can - happen. - -1. Before creating the release, the release manager must determine the _version - number_ for this release, and create a release branch in the format: `release/X.Y.Z.A.n`. - The factors that determine the version number are - discussed in [Versioning](#versioning). We assume, in this section, - that the change is not consensus-breaking. So, the release manager must first - determine whether there are any "non-consensus-breaking changes that require a - fresh chainstate". This means, in other words, that the database schema has - changed, but an automatic migration was not implemented. Then, the release manager - should determine whether this is a feature release, as opposed to a hotfix or a - patch. Given the answers to these questions, the version number can be computed. - -1. The release manager enumerates the PRs or issues that would _block_ - the release. A label should be applied to each such issue/PR as - `X.Y.Z.A.n-blocker`. The release manager should ping these - issue/PR owners for updates on whether or not those issues/PRs have - any blockers or are waiting on feedback. - -1. The release manager must update the `CHANGELOG.md` file with summaries what - was `Added`, `Changed`, and `Fixed`. The pull requests merged into `develop` - can be found - [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). Note, however, that GitHub apparently does not allow sorting by - _merge time_, so, when sorting by some proxy criterion, some care should - be used to understand which PR's were _merged_ after the last release. - -1. Once the blocker PRs have merged, the release manager will create a new tag - by manually triggering the [`CI` Github Actions workflow](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml) - against the `release/X.Y.Z.A.n` branch. - -1. Once the release candidate has been built, and docker images, etc. are available, - the release manager will notify various ecosystem participants to test the release - candidate on various staging infrastructure: - - 1. Stacks Foundation staging environments. - 1. Hiro PBC testnet network. - 1. Hiro PBC mainnet mock miner. - - The release candidate should be announced in the `#stacks-core-devs` channel in the - Stacks Discord. For coordinating rollouts on specific infrastructure, the release - manager should contact the above participants directly either through e-mail or - Discord DM. The release manager should also confirm that the built release on the - [Github releases](https://github.com/stacks-network/stacks-core/releases/) - page is marked as `Pre-Release`. - -1. The release manager will test that the release candidate successfully syncs with - the current chain from genesis both in testnet and mainnet. This requires starting - the release candidate with an empty chainstate and confirming that it synchronizes - with the current chain tip. - -1. If bugs or issues emerge from the rollout on staging infrastructure, the release - will be delayed until those regressions are resolved. As regressions are resolved, - additional release candidates should be tagged. The release manager is responsible - for updating the `develop -> master` PR with information about the discovered issues, - even if other community members and developers may be addressing the discovered - issues. - -1. Once the final release candidate has rolled out successfully without issue on staging - infrastructure, the tagged release shall no longer marked as Pre-Release on the [Github releases](https://github.com/stacks-network/stacks-core/releases/) page. - Announcements will then be shared in the `#stacks-core-devs` channel in the - Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce). - -1. Finally, the release branch `release/X.Y.Z.A.n` will be PR'ed into the `master` branch, and once merged, a PR for `master->develop` will be opened. +The release must be timed so that it does not interfere with a _prepare phase_. +The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2). +A release should happen at least 24 hours before the start of a new cycle, to avoid interfering with the prepare phase. + +1. Before creating the release, the _version number_ must be determined, where the factors that determine the version number are discussed in [Versioning](#versioning). + + 1. First determine whether there are any "non-consensus-breaking changes that require a fresh chainstate". + - In other words, the database schema has changed, but an automatic migration was not implemented. + - Determine whether this a feature release, as opposed to a hotfix or a patch. + 2. A new branch in format `release/X.Y.Z.A.n` is created from the base branch `develop`. + +2. Enumerate PRs and/or issues that would _block_ the release. + + 1. A label should be applied to each such issue/PR as `X.Y.Z.A.n-blocker`. + +3. Since development is continuing in the `develop` branch, it may be necessary to cherry-pick commits into the release branch. + + 1. Create a feature branch from `release/X.Y.Z.A.n`, ex: `feat/X.Y.Z.A.n-pr_number`. + 2. Add cherry-picked commits to the `feat/X.Y.Z.A.n-pr_number` branch + 3. Merge `feat/X.Y.Z.A.n-pr_number` back into `release/X.Y.Z.A.n`. + +4. Open a PR to update the `CHANGELOG.md` file in the `release/X.Y.Z.A.n` branch. + + 1. Create a chore branch from `release/X.Y.Z.A.n`, ex: `chore/X.Y.Z.A.n-changelog`. + 2. Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections. + - Pull requests merged into `develop` can be found [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). + - **Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release. + +5. Once `chore/X.Y.Z.A.n-changelog` has merged, a build may be started by manually triggering the [`CI` Github Actions workflow](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml) against the `release/X.Y.Z.A.n` branch. + +6. Once the release has been built and binaries are available, ecosystem participants shall be notified to test the tagged release on various staging infrastructure. + +7. The release candidate will tested that it successfully syncs with the current chain from genesis both in testnet and mainnet. + +8. If bugs or issues emerge from the rollout on staging infrastructure, the release will be delayed until those regressions are resolved. + + - As regressions are resolved, additional release candidates should be tagged. + - Repeat steps 3-7 as necessary. + +9. Once the final release candidate has rolled out successfully without issue on staging infrastructure, the tagged release shall no longer marked as Pre-Release on the [Github releases](https://github.com/stacks-network/stacks-core/releases/) page. + Announcements will then be shared in the `#stacks-core-devs` channel in the Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce). + +10. Finally, the following merges will happen to complete the release process: + 1. Release branch `release/X.Y.Z.A.n` will be merged into the `master` branch. + 2. Then, `master` will be merged back into `develop`. + +## Consensus Breaking Release Process + +Consensus breaking releases shall follow the same overall process as a non-consensus release, with the following considerations: + +- The release must be timed so that sufficient time is given to perform a genesis sync. +- The release must take into account the activation height at which the new consensus rules will take effect. + Generically, a few weeks lead time is required for consensus breaking changes. diff --git a/testnet/stacks-node/conf/local-follower-conf.toml b/testnet/stacks-node/conf/local-follower-conf.toml deleted file mode 100644 index c828c18373..0000000000 --- a/testnet/stacks-node/conf/local-follower-conf.toml +++ /dev/null @@ -1,47 +0,0 @@ -[node] -rpc_bind = "127.0.0.1:30443" -p2p_bind = "127.0.0.1:30444" -bootstrap_node = "04ee0b1602eb18fef7986887a7e8769a30c9df981d33c8380d255edef003abdcd243a0eb74afdf6740e6c423e62aec631519a24cf5b1d62bf8a3e06ddc695dcb77@127.0.0.1:20444" -pox_sync_sample_secs = 10 -wait_time_for_microblocks = 0 - -[burnchain] -chain = "bitcoin" -mode = "krypton" -peer_host = "127.0.0.1" -rpc_port = 18443 -peer_port = 18444 - -# Used for sending events to a local stacks-blockchain-api service -# [[events_observer]] -# endpoint = "localhost:3700" -# retry_count = 255 -# events_keys = ["*"] - -[[ustx_balance]] -# "mnemonic": "point approve language letter cargo rough similar wrap focus edge polar task olympic tobacco cinnamon drop lawn boring sort trade senior screen tiger climb", -# "privateKey": "539e35c740079b79f931036651ad01f76d8fe1496dbd840ba9e62c7e7b355db001", -# "btcAddress": "n1htkoYKuLXzPbkn9avC2DJxt7X85qVNCK", -address = "ST3EQ88S02BXXD0T5ZVT3KW947CRMQ1C6DMQY8H19" -amount = 100000000000000 - -[[ustx_balance]] -# "mnemonic": "laugh capital express view pull vehicle cluster embark service clerk roast glance lumber glove purity project layer lyrics limb junior reduce apple method pear", -# "privateKey": "075754fb099a55e351fe87c68a73951836343865cd52c78ae4c0f6f48e234f3601", -# "btcAddress": "n2ZGZ7Zau2Ca8CLHGh11YRnLw93b4ufsDR", -address = "ST3KCNDSWZSFZCC6BE4VA9AXWXC9KEB16FBTRK36T" -amount = 100000000000000 - -[[ustx_balance]] -# "mnemonic": "level garlic bean design maximum inhale daring alert case worry gift frequent floor utility crowd twenty burger place time fashion slow produce column prepare", -# "privateKey": "374b6734eaff979818c5f1367331c685459b03b1a2053310906d1408dc928a0001", -# "btcAddress": "mhY4cbHAFoXNYvXdt82yobvVuvR6PHeghf", -address = "STB2BWB0K5XZGS3FXVTG3TKS46CQVV66NAK3YVN8" -amount = 100000000000000 - -[[ustx_balance]] -# "mnemonic": "drop guess similar uphold alarm remove fossil riot leaf badge lobster ability mesh parent lawn today student olympic model assault syrup end scorpion lab", -# "privateKey": "26f235698d02803955b7418842affbee600fc308936a7ca48bf5778d1ceef9df01", -# "btcAddress": "mkEDDqbELrKYGUmUbTAyQnmBAEz4V1MAro", -address = "STSTW15D618BSZQB85R058DS46THH86YQQY6XCB7" -amount = 100000000000000 diff --git a/testnet/stacks-node/conf/local-leader-conf.toml b/testnet/stacks-node/conf/local-leader-conf.toml deleted file mode 100644 index 8e10f179d6..0000000000 --- a/testnet/stacks-node/conf/local-leader-conf.toml +++ /dev/null @@ -1,44 +0,0 @@ -[node] -rpc_bind = "127.0.0.1:20443" -p2p_bind = "127.0.0.1:20444" -seed = "0000000000000000000000000000000000000000000000000000000000000000" -local_peer_seed = "0000000000000000000000000000000000000000000000000000000000000000" -miner = true -prometheus_bind = "127.0.0.1:4000" -pox_sync_sample_secs = 10 -wait_time_for_microblocks = 0 - -[burnchain] -chain = "bitcoin" -mode = "krypton" -peer_host = "127.0.0.1" -rpc_port = 18443 -peer_port = 18444 - -[[ustx_balance]] -# "mnemonic": "point approve language letter cargo rough similar wrap focus edge polar task olympic tobacco cinnamon drop lawn boring sort trade senior screen tiger climb", -# "privateKey": "539e35c740079b79f931036651ad01f76d8fe1496dbd840ba9e62c7e7b355db001", -# "btcAddress": "n1htkoYKuLXzPbkn9avC2DJxt7X85qVNCK", -address = "ST3EQ88S02BXXD0T5ZVT3KW947CRMQ1C6DMQY8H19" -amount = 100000000000000 - -[[ustx_balance]] -# "mnemonic": "laugh capital express view pull vehicle cluster embark service clerk roast glance lumber glove purity project layer lyrics limb junior reduce apple method pear", -# "privateKey": "075754fb099a55e351fe87c68a73951836343865cd52c78ae4c0f6f48e234f3601", -# "btcAddress": "n2ZGZ7Zau2Ca8CLHGh11YRnLw93b4ufsDR", -address = "ST3KCNDSWZSFZCC6BE4VA9AXWXC9KEB16FBTRK36T" -amount = 100000000000000 - -[[ustx_balance]] -# "mnemonic": "level garlic bean design maximum inhale daring alert case worry gift frequent floor utility crowd twenty burger place time fashion slow produce column prepare", -# "privateKey": "374b6734eaff979818c5f1367331c685459b03b1a2053310906d1408dc928a0001", -# "btcAddress": "mhY4cbHAFoXNYvXdt82yobvVuvR6PHeghf", -address = "STB2BWB0K5XZGS3FXVTG3TKS46CQVV66NAK3YVN8" -amount = 100000000000000 - -[[ustx_balance]] -# "mnemonic": "drop guess similar uphold alarm remove fossil riot leaf badge lobster ability mesh parent lawn today student olympic model assault syrup end scorpion lab", -# "privateKey": "26f235698d02803955b7418842affbee600fc308936a7ca48bf5778d1ceef9df01", -# "btcAddress": "mkEDDqbELrKYGUmUbTAyQnmBAEz4V1MAro", -address = "STSTW15D618BSZQB85R058DS46THH86YQQY6XCB7" -amount = 100000000000000 diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 02379c65d9..4377993ed4 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -1,8 +1,9 @@ [node] -# working_dir = "/dir/to/save/chainstate" +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" +prometheus_bind = "0.0.0.0:9153" [burnchain] chain = "bitcoin" @@ -10,7 +11,6 @@ mode = "mainnet" peer_host = "bitcoin.hiro.so" username = "hirosystems" password = "hirosystems" -rpc_port = 8332 peer_port = 8333 # Used for sending events to a local stacks-blockchain-api service diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/testnet/stacks-node/conf/mainnet-miner-conf.toml index 5b836b01c4..ee5e262d46 100644 --- a/testnet/stacks-node/conf/mainnet-miner-conf.toml +++ b/testnet/stacks-node/conf/mainnet-miner-conf.toml @@ -1,11 +1,14 @@ [node] -# working_dir = "/dir/to/save/chainstate" -rpc_bind = "0.0.0.0:20443" -p2p_bind = "0.0.0.0:20444" +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +rpc_bind = "127.0.0.1:20443" +p2p_bind = "127.0.0.1:20444" +bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" +prometheus_bind = "127.0.0.1:9153" seed = "" local_peer_seed = "" miner = true -bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" +# Disable microblocks (ref: https://github.com/stacks-network/stacks-core/pull/4561 ) +mine_microblocks = false [burnchain] chain = "bitcoin" @@ -15,5 +18,11 @@ username = "" password = "" rpc_port = 8332 peer_port = 8333 -satoshis_per_byte = 100 +# Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election burn_fee_cap = 20000 +# Amount (in sats) per byte - Used to calculate the transaction fees +satoshis_per_byte = 25 +# Amount of sats to add when RBF'ing bitcoin tx (default: 5) +rbf_fee_increment = 5 +# Maximum percentage to RBF bitcoin tx (default: 150% of satsv/B) +max_rbf = 150 \ No newline at end of file diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index e3c93bfd2b..2c98499d59 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -1,10 +1,11 @@ [node] -# working_dir = "/dir/to/save/chainstate" +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" miner = true mock_mining = true bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" +prometheus_bind = "0.0.0.0:9153" [burnchain] chain = "bitcoin" diff --git a/testnet/stacks-node/conf/mocknet-follower-conf.toml b/testnet/stacks-node/conf/mocknet-follower-conf.toml deleted file mode 100644 index 3cb9beb5d7..0000000000 --- a/testnet/stacks-node/conf/mocknet-follower-conf.toml +++ /dev/null @@ -1,33 +0,0 @@ -[node] -# working_dir = "/dir/to/save/chainstate" -rpc_bind = "0.0.0.0:20443" -p2p_bind = "0.0.0.0:20444" -bootstrap_node = "04ee0b1602eb18fef7986887a7e8769a30c9df981d33c8380d255edef003abdcd243a0eb74afdf6740e6c423e62aec631519a24cf5b1d62bf8a3e06ddc695dcb77@127.0.0.1:20444" -wait_time_for_microblocks = 10000 -use_test_genesis_chainstate = true - -[burnchain] -chain = "bitcoin" -mode = "mocknet" - -# Used for sending events to a local stacks-blockchain-api service -# [[events_observer]] -# endpoint = "localhost:3700" -# retry_count = 255 -# events_keys = ["*"] - -[[ustx_balance]] -address = "ST3EQ88S02BXXD0T5ZVT3KW947CRMQ1C6DMQY8H19" -amount = 100000000000000 - -[[ustx_balance]] -address = "ST3KCNDSWZSFZCC6BE4VA9AXWXC9KEB16FBTRK36T" -amount = 100000000000000 - -[[ustx_balance]] -address = "STB2BWB0K5XZGS3FXVTG3TKS46CQVV66NAK3YVN8" -amount = 100000000000000 - -[[ustx_balance]] -address = "STSTW15D618BSZQB85R058DS46THH86YQQY6XCB7" -amount = 100000000000000 diff --git a/testnet/stacks-node/conf/mocknet-miner-conf.toml b/testnet/stacks-node/conf/mocknet-miner-conf.toml deleted file mode 100644 index 71add782b1..0000000000 --- a/testnet/stacks-node/conf/mocknet-miner-conf.toml +++ /dev/null @@ -1,32 +0,0 @@ -[node] -# working_dir = "/dir/to/save/chainstate" -rpc_bind = "0.0.0.0:20443" -p2p_bind = "0.0.0.0:20444" -seed = "0000000000000000000000000000000000000000000000000000000000000000" -local_peer_seed = "0000000000000000000000000000000000000000000000000000000000000000" -miner = true -wait_time_for_microblocks = 10000 -use_test_genesis_chainstate = true - -[connection_options] -public_ip_address = "127.0.0.1:20444" - -[burnchain] -chain = "bitcoin" -mode = "mocknet" - -[[ustx_balance]] -address = "ST3EQ88S02BXXD0T5ZVT3KW947CRMQ1C6DMQY8H19" -amount = 100000000000000 - -[[ustx_balance]] -address = "ST3KCNDSWZSFZCC6BE4VA9AXWXC9KEB16FBTRK36T" -amount = 100000000000000 - -[[ustx_balance]] -address = "STB2BWB0K5XZGS3FXVTG3TKS46CQVV66NAK3YVN8" -amount = 100000000000000 - -[[ustx_balance]] -address = "STSTW15D618BSZQB85R058DS46THH86YQQY6XCB7" -amount = 100000000000000 diff --git a/testnet/stacks-node/conf/prometheus.yml b/testnet/stacks-node/conf/prometheus.yml deleted file mode 100644 index ad3a063ba7..0000000000 --- a/testnet/stacks-node/conf/prometheus.yml +++ /dev/null @@ -1,13 +0,0 @@ -global: - scrape_interval: 15s - evaluation_interval: 15s -scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['127.0.0.1:9090'] - - job_name: 'stacks-node-leader' - static_configs: - - targets: ['127.0.0.1:4000'] - - job_name: 'stacks-node-follower' - static_configs: - - targets: ['127.0.0.1:5000'] diff --git a/testnet/stacks-node/conf/regtest-follower-conf.toml b/testnet/stacks-node/conf/regtest-follower-conf.toml deleted file mode 100644 index 5677551264..0000000000 --- a/testnet/stacks-node/conf/regtest-follower-conf.toml +++ /dev/null @@ -1,37 +0,0 @@ -[node] -# working_dir = "/dir/to/save/chainstate" -rpc_bind = "0.0.0.0:20443" -p2p_bind = "0.0.0.0:20444" -bootstrap_node = "048dd4f26101715853533dee005f0915375854fd5be73405f679c1917a5d4d16aaaf3c4c0d7a9c132a36b8c5fe1287f07dad8c910174d789eb24bdfb5ae26f5f27@regtest.stacks.co:20444" -wait_time_for_microblocks = 10000 - -[burnchain] -chain = "bitcoin" -mode = "krypton" -peer_host = "bitcoin.regtest.hiro.so" -username = "hirosystems" -password = "hirosystems" -rpc_port = 18443 -peer_port = 18444 - -# Used for sending events to a local stacks-blockchain-api service -# [[events_observer]] -# endpoint = "localhost:3700" -# retry_count = 255 -# events_keys = ["*"] - -[[ustx_balance]] -address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" -amount = 10000000000000000 - -[[ustx_balance]] -address = "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" -amount = 10000000000000000 - -[[ustx_balance]] -address = "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" -amount = 10000000000000000 - -[[ustx_balance]] -address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" -amount = 10000000000000000 diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index 46c70a0198..d237aafd61 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -1,24 +1,20 @@ [node] -# working_dir = "/dir/to/save/chainstate" +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444" -wait_time_for_microblocks = 10000 +prometheus_bind = "0.0.0.0:9153" [burnchain] chain = "bitcoin" -mode = "xenon" +mode = "krypton" peer_host = "bitcoin.regtest.hiro.so" username = "hirosystems" password = "hirosystems" rpc_port = 18443 peer_port = 18444 - -# Used for sending events to a local stacks-blockchain-api service -# [[events_observer]] -# endpoint = "localhost:3700" -# retry_count = 255 -# events_keys = ["*"] +pox_prepare_length = 100 +pox_reward_length = 900 [[ustx_balance]] address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" @@ -35,3 +31,39 @@ amount = 10000000000000000 [[ustx_balance]] address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" amount = 10000000000000000 + +[[burnchain.epochs]] +epoch_name = "1.0" +start_height = 0 + +[[burnchain.epochs]] +epoch_name = "2.0" +start_height = 0 + +[[burnchain.epochs]] +epoch_name = "2.05" +start_height = 1 + +[[burnchain.epochs]] +epoch_name = "2.1" +start_height = 2 + +[[burnchain.epochs]] +epoch_name = "2.2" +start_height = 3 + +[[burnchain.epochs]] +epoch_name = "2.3" +start_height = 4 + +[[burnchain.epochs]] +epoch_name = "2.4" +start_height = 5 + +[[burnchain.epochs]] +epoch_name = "2.5" +start_height = 6 + +[[burnchain.epochs]] +epoch_name = "3.0" +start_height = 2000701 diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml deleted file mode 100644 index 7e1ce1bf5e..0000000000 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ /dev/null @@ -1,34 +0,0 @@ -[node] -# working_dir = "/dir/to/save/chainstate" -rpc_bind = "0.0.0.0:20443" -p2p_bind = "0.0.0.0:20444" -seed = "" -local_peer_seed = "" -miner = true -bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444" -wait_time_for_microblocks = 10000 - -[burnchain] -chain = "bitcoin" -mode = "xenon" -peer_host = "127.0.0.1" -username = "" -password = "" -rpc_port = 18443 -peer_port = 18444 - -[[ustx_balance]] -address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" -amount = 10000000000000000 - -[[ustx_balance]] -address = "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" -amount = 10000000000000000 - -[[ustx_balance]] -address = "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" -amount = 10000000000000000 - -[[ustx_balance]] -address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" -amount = 10000000000000000 From efed50ef0319feb3e41c66d6302ca3b58c8579de Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 22 Jul 2024 11:38:26 -0700 Subject: [PATCH 0535/1400] Add branching doc/minor fixes --- CONTRIBUTING.md | 269 ++++++++---------- README.md | 1 - SECURITY.md | 2 +- docs/branching.md | 35 +++ docs/ci-workflow.md | 14 +- docs/mining.md | 4 +- docs/profiling.md | 2 +- docs/release-process.md | 36 +-- .../stacks-node/conf/mainnet-miner-conf.toml | 3 +- .../conf/testnet-follower-conf.toml | 16 +- 10 files changed, 199 insertions(+), 183 deletions(-) create mode 100644 docs/branching.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 22507d6f33..53fcf8a168 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,7 +11,7 @@ could not only have catastrophic consequences for users (i.e. they lose all their money), but also be intractable to fix, mitigate, or remove. This is because unlike nearly every other kind of networked software, **the state of the blockchain is what the users' computers -say it is.** If you want to make changes, you _must_ get _user_ +say it is.** If you want to make changes, you _must_ get _user_ buy-in, and this is necessarily time-consuming and not at all guaranteed to succeed. @@ -25,28 +25,7 @@ This project and everyone participating in it is governed by this [Code of Condu ## Development Workflow -- For typical development, branch off of the `develop` branch. -- For consensus breaking changes, branch off of the `next` branch. -- For hotfixes, branch off of `master`. - -If you have commit access, use a branch in this repository. If you do -not, then you must use a github fork of the repository. - -### Branch naming - -Branch names should use a prefix that conveys the overall goal of the branch: - -- `feat/some-fancy-new-thing` for new features -- `fix/some-broken-thing` for hot fixes and bug fixes -- `docs/something-needs-a-comment` for documentation -- `ci/build-changes` for continuous-integration changes -- `test/more-coverage` for branches that only add more tests -- `refactor/formatting-fix` for refactors - -The branch suffix must only include ASCII lowercase and uppercase letters, -digits, underscores, periods and dashes. - -The full branch name must be max 128 characters long. +See the branching document in [branching.md](./docs/branching.md). ### Merging PRs from Forks @@ -67,7 +46,6 @@ For an example of this process, see PRs [#3598](https://github.com/stacks-network/stacks-core/pull/3598) and [#3626](https://github.com/stacks-network/stacks-core/pull/3626). - ### Documentation Updates - Any major changes should be added to the [CHANGELOG](CHANGELOG.md). @@ -79,14 +57,17 @@ For an example of this process, see PRs to our [coding guidelines](#Coding-Guidelines). ## Git Commit Messages + Aim to use descriptive git commit messages. We try to follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/). The general format is as follows: + ``` [optional scope]: [optional body] [optional footer(s)] ``` + Common types include build, ci, docs, fix, feat, test, refactor, etc. When a commit is addressing or related to a particular Github issue, it @@ -97,6 +78,7 @@ fix: incorporate unlocks in mempool admitter, #3623 ``` ## Recommended developer setup + ### Recommended githooks It is helpful to set up the pre-commit git hook set up, so that Rust formatting issues are caught before @@ -104,6 +86,7 @@ you push your code. Follow these instruction to set it up: 1. Rename `.git/hooks/pre-commit.sample` to `.git/hooks/pre-commit` 2. Change the content of `.git/hooks/pre-commit` to be the following + ```sh #!/bin/sh git diff --name-only --staged | grep '\.rs$' | xargs -P 8 -I {} rustfmt {} --edition 2021 --check --config group_imports=StdExternalCrate,imports_granularity=Module || ( @@ -111,52 +94,53 @@ git diff --name-only --staged | grep '\.rs$' | xargs -P 8 -I {} rustfmt {} --edi exit 1 ) ``` + 3. Make it executable by running `chmod +x .git/hooks/pre-commit` That's it! Now your pre-commit hook should be configured on your local machine. # Creating and Reviewing PRs -This section describes some best practices on how to create and review PRs in this context. The target audience is people who have commit access to this repository (reviewers), and people who open PRs (submitters). This is a living document -- developers can and should document their own additional guidelines here. +This section describes some best practices on how to create and review PRs in this context. The target audience is people who have commit access to this repository (reviewers), and people who open PRs (submitters). This is a living document -- developers can and should document their own additional guidelines here. ## Overview -Blockchain software development requires a much higher degree of rigor than most other kinds of software. This is because with blockchains, **there is no roll-back** from a bad deployment. +Blockchain software development requires a much higher degree of rigor than most other kinds of software. This is because with blockchains, **there is no roll-back** from a bad deployment. -Therefore, making changes to the codebase is necessarily a review-intensive process. No one wants bugs, but **no one can afford consensus bugs**. This page describes how to make and review _non-consensus_ changes. The process for consensus changes includes not only the entirety of this document, but also the [SIP process](https://github.com/stacksgov/sips/blob/main/sips/sip-000/sip-000-stacks-improvement-proposal-process.md). +Therefore, making changes to the codebase is necessarily a review-intensive process. No one wants bugs, but **no one can afford consensus bugs**. This page describes how to make and review _non-consensus_ changes. The process for consensus changes includes not only the entirety of this document, but also the [SIP process](https://github.com/stacksgov/sips/blob/main/sips/sip-000/sip-000-stacks-improvement-proposal-process.md). -A good PR review sets both the submitter and reviewers up for success. It minimizes the time required by both parties to get the code into an acceptable state, without sacrificing quality or safety. Unlike most other software development practices, _safety_ is the primary concern. A PR can and will be delayed or closed if there is any concern that it will lead to unintended consensus-breaking changes. +A good PR review sets both the submitter and reviewers up for success. It minimizes the time required by both parties to get the code into an acceptable state, without sacrificing quality or safety. Unlike most other software development practices, _safety_ is the primary concern. A PR can and will be delayed or closed if there is any concern that it will lead to unintended consensus-breaking changes. -This document is formatted like a checklist. Each paragraph is one goal or action item that the reviewer and/or submitter must complete. The **key take-away** from each paragraph is bolded. +This document is formatted like a checklist. Each paragraph is one goal or action item that the reviewer and/or submitter must complete. The **key take-away** from each paragraph is bolded. ## Reviewer Expectations -The overall task of a reviewer is to create an **acceptance plan** for the submitter. This is simply the list of things that the submitter _must_ do in order for the PR to be merged. The acceptance plan should be coherent, cohesive, succinct, and complete enough that the reviewer will understand exactly what they need to do to make the PR worthy of merging, without further reviews. The _lack of ambiguity_ is the most important trait of an acceptance plan. +The overall task of a reviewer is to create an **acceptance plan** for the submitter. This is simply the list of things that the submitter _must_ do in order for the PR to be merged. The acceptance plan should be coherent, cohesive, succinct, and complete enough that the reviewer will understand exactly what they need to do to make the PR worthy of merging, without further reviews. The _lack of ambiguity_ is the most important trait of an acceptance plan. -Reviewers should **complete the review in one round**. The reviewer should provide enough detail to the submitter that the submitter can make all of the requested changes without further supervision. Whenever possible, the reviewer should provide all of these details publicly as comments, so that _other_ reviewers can vet them as well. If a reviewer _cannot_ complete the review in one round due to its size and complexity, then the reviewer may request that the PR be simplified or broken into multiple PRs. +Reviewers should **complete the review in one round**. The reviewer should provide enough detail to the submitter that the submitter can make all of the requested changes without further supervision. Whenever possible, the reviewer should provide all of these details publicly as comments, so that _other_ reviewers can vet them as well. If a reviewer _cannot_ complete the review in one round due to its size and complexity, then the reviewer may request that the PR be simplified or broken into multiple PRs. Reviewers should make use of Github's "pending comments" feature. This ensures that the review is "atomic": when the reviewer submits the review, all the comments are published at once. -Reviewers should aim to **perform a review in one sitting** whenever possible. This enables a reviewer to time-box their review, and ensures that by the time they finish studying the patch, they have a complete understanding of what the PR does in their head. This, in turn, sets them up for success when writing up the acceptance plan. It also enables reviewers to mark time for it on their calendars, which helps everyone else develop reasonable expectations as to when things will be done. +Reviewers should aim to **perform a review in one sitting** whenever possible. This enables a reviewer to time-box their review, and ensures that by the time they finish studying the patch, they have a complete understanding of what the PR does in their head. This, in turn, sets them up for success when writing up the acceptance plan. It also enables reviewers to mark time for it on their calendars, which helps everyone else develop reasonable expectations as to when things will be done. -Code reviews should be timely. Reviewers should start no more than +Code reviews should be timely. Reviewers should start no more than **2 business days** after reviewers are assigned. This applies to each reviewer: i.e., we expect all reviewers to respond within two days. The `develop` and `next` branches in particular often change quickly, so letting a PR languish only creates more merge work for the -submitter. If a review cannot be started within this timeframe, then +submitter. If a review cannot be started within this timeframe, then the reviewers should **tell the submitter when they can begin**. This gives the reviewer the opportunity to keep working on the PR (if needed) or even withdraw and resubmit it. -Reviewers must, above all else, **ensure that submitters follow the PR checklist** below. +Reviewers must, above all else, **ensure that submitters follow the PR checklist** below. **As a reviewer, if you do not understand the PR's code or the potential consequences of the code, it is the submitter's responsibility to simplify the code, provide better documentation, or withdraw the PR.** ## Submitter Expectations -Everyone is busy all the time with a host of different tasks. Consequently, a PR's size and scope should be constrained so that **a review can be written for it no more than 2 hours.** This time block starts when the reviewer opens the patch, and ends when the reviewer hits the "submit review" button. If it takes more than 2 hours, then the PR should be broken into multiple PRs unless the reviewers agree to spend more time on it. A PR can be rejected if the reviewers believe they will need longer than this. +Everyone is busy all the time with a host of different tasks. Consequently, a PR's size and scope should be constrained so that **a review can be written for it no more than 2 hours.** This time block starts when the reviewer opens the patch, and ends when the reviewer hits the "submit review" button. If it takes more than 2 hours, then the PR should be broken into multiple PRs unless the reviewers agree to spend more time on it. A PR can be rejected if the reviewers believe they will need longer than this. -The size and scale of a PR depend on the reviewers' abilities to process the change. Different reviewers and submitters have different levels of familiarity with the codebase. Moreover, everyone has a different schedule -- sometimes, some people are more busy than others. +The size and scale of a PR depend on the reviewers' abilities to process the change. Different reviewers and submitters have different levels of familiarity with the codebase. Moreover, everyone has a different schedule -- sometimes, some people are more busy than others. A successful PR submitter **takes the reviewers' familiarity and availability into account** when crafting the PR, even going so far as to ask in advance if a particular person could be available for review. @@ -172,13 +156,13 @@ Weekly Blockchain Engineering Meeting (information can be found in Discord). A PR submission's text should **answer the following questions** for reviewers: -* What problem is being solved by this PR? -* What does the solution do to address them? -* Why is this the best solution? What alternatives were considered, and why are they worse? -* What do reviewers need to be familiar with in order to provide useful feedback? -* What issue(s) are addressed by this PR? -* What are some hints to understanding some of the more intricate or clever parts of the PR? -* Does this PR change any database schemas? Does a node need to re-sync from genesis when this PR is applied? +- What problem is being solved by this PR? +- What does the solution do to address them? +- Why is this the best solution? What alternatives were considered, and why are they worse? +- What do reviewers need to be familiar with in order to provide useful feedback? +- What issue(s) are addressed by this PR? +- What are some hints to understanding some of the more intricate or clever parts of the PR? +- Does this PR change any database schemas? Does a node need to re-sync from genesis when this PR is applied? In addition, the PR submission should **answer the prompts of the Github template** we use for PRs. @@ -195,7 +179,7 @@ the immediate problem they are meant to solve will be rejected. #### Type simplicity -Simplicity of implementation includes simplicity of types. Type parameters +Simplicity of implementation includes simplicity of types. Type parameters and associated types should only be used if there are at least two possible implementations of those types. @@ -204,17 +188,17 @@ on its own. ### Builds with a stable Rust compiler -We use a recent, stable Rust compiler. Contributions should _not_ +We use a recent, stable Rust compiler. Contributions should _not_ require nightly Rust features to build and run. ### Minimal dependencies -Adding new package dependencies is very much discouraged. Exceptions will be +Adding new package dependencies is very much discouraged. Exceptions will be granted on a case-by-case basis, and only if deemed absolutely necessary. ### Minimal global macros -Adding new global macros is discouraged. Exceptions will only be given if +Adding new global macros is discouraged. Exceptions will only be given if absolutely necessary. ### No compiler warnings @@ -230,162 +214,160 @@ Contributions should not contain `unsafe` blocks if at all possible. ## Documentation -* Each file must have a **copyright statement**. -* Any new non-test modules should have **module-level documentation** explaining what the module does, and how it fits into the blockchain as a whole ([example](https://github.com/stacks-network/stacks-core/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L17)). -* Any new files must have some **top-of-file documentation** that describes what the contained code does, and how it fits into the overall module. +- Each file must have a **copyright statement**. +- Any new non-test modules should have **module-level documentation** explaining what the module does, and how it fits into the blockchain as a whole ([example](https://github.com/stacks-network/stacks-core/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L17)). +- Any new files must have some **top-of-file documentation** that describes what the contained code does, and how it fits into the overall module. Within the source files, the following **code documentation** standards are expected: -* Each public function, struct, enum, and trait should have a Rustdoc comment block describing the API contract it offers. This goes for private structs and traits as well. -* Each _non-trivial_ private function should likewise have a Rustdoc comment block. Trivial ones that are self-explanatory, like getters and setters, do not need documentation. If you are unsure if your function needs a docstring, err on the side of documenting it. -* Each struct and enum member must have a Rustdoc comment string indicating what it does, and how it is used. This can be as little as a one-liner, as long as the relevant information is communicated. +- Each public function, struct, enum, and trait should have a Rustdoc comment block describing the API contract it offers. This goes for private structs and traits as well. +- Each _non-trivial_ private function should likewise have a Rustdoc comment block. Trivial ones that are self-explanatory, like getters and setters, do not need documentation. If you are unsure if your function needs a docstring, err on the side of documenting it. +- Each struct and enum member must have a Rustdoc comment string indicating what it does, and how it is used. This can be as little as a one-liner, as long as the relevant information is communicated. ## Factoring -* **Each non-`mod.rs` file implements at most one subsystem**. It may include multiple struct implementations and trait implementations. The filename should succinctly identify the subsystem, and the file-level documentation must succinctly describe it and how it relates to other subsystems it interacts with. +- **Each non-`mod.rs` file implements at most one subsystem**. It may include multiple struct implementations and trait implementations. The filename should succinctly identify the subsystem, and the file-level documentation must succinctly describe it and how it relates to other subsystems it interacts with. -* Directories represent collections of related but distinct subsystems. +- Directories represent collections of related but distinct subsystems. -* To the greatest extent possible, **business logic and I/O should be - separated**. A common pattern used in the codebase is to place the +- To the greatest extent possible, **business logic and I/O should be + separated**. A common pattern used in the codebase is to place the business logic into an "inner" function that does not do I/O, and - handle I/O reads and writes in an "outer" function. The "outer" + handle I/O reads and writes in an "outer" function. The "outer" function only does the needful I/O and passes the data into the - "inner" function. The "inner" function is often private, whereas + "inner" function. The "inner" function is often private, whereas the "outer" function is often public. For example, [`inner_try_mine_microblock` and `try_mine_microblock`](https://github.com/stacks-network/stacks-core/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L1148-L1216). ## Refactoring -* **Any PR that does a large-scale refactoring must be in its own PR**. This includes PRs that touch multiple subsystems. Refactoring often adds line noise that obscures the new functional changes that the PR proposes. Small-scale refactorings are permitted to ship with functional changes. +- **Any PR that does a large-scale refactoring must be in its own PR**. This includes PRs that touch multiple subsystems. Refactoring often adds line noise that obscures the new functional changes that the PR proposes. Small-scale refactorings are permitted to ship with functional changes. -* Refactoring PRs can generally be bigger, because they are easier to review. However, **large refactorings that could impact the functional behavior of the system should be discussed first** before carried out. This is because it is imperative that they do not stay open for very long (to keep the submitter's maintenance burden low), but nevertheless reviewing them must still take at most 2 hours. Discussing them first front-loads part of the review process. +- Refactoring PRs can generally be bigger, because they are easier to review. However, **large refactorings that could impact the functional behavior of the system should be discussed first** before carried out. This is because it is imperative that they do not stay open for very long (to keep the submitter's maintenance burden low), but nevertheless reviewing them must still take at most 2 hours. Discussing them first front-loads part of the review process. ## Databases -* If at all possible, **the database schema should be preserved**. Exceptions can be made on a case-by-case basis. The reason for this is that it's a big ask for people to re-sync nodes from genesis when they upgrade to a new point release. +- If at all possible, **the database schema should be preserved**. Exceptions can be made on a case-by-case basis. The reason for this is that it's a big ask for people to re-sync nodes from genesis when they upgrade to a new point release. -* Any changes to a database schema must also ship with a **new schema version and new schema migration logic**, as well as _test coverage_ for it. +- Any changes to a database schema must also ship with a **new schema version and new schema migration logic**, as well as _test coverage_ for it. -* The submitter must verify that **any new database columns are indexed**, as relevant to the queries performed on them. Table scans are not permitted if they can be avoided (and they almost always can be). You can find table scans manually by setting the environment variable `BLOCKSTACK_DB_TRACE` when running your tests (this will cause every query executed to be preceded by the output of `EXPLAIN QUERY PLAN` on it). +- The submitter must verify that **any new database columns are indexed**, as relevant to the queries performed on them. Table scans are not permitted if they can be avoided (and they almost always can be). You can find table scans manually by setting the environment variable `BLOCKSTACK_DB_TRACE` when running your tests (this will cause every query executed to be preceded by the output of `EXPLAIN QUERY PLAN` on it). -* Database changes **cannot be consensus-critical** unless part of a hard fork (see below). +- Database changes **cannot be consensus-critical** unless part of a hard fork (see below). -* If the database schema changes and no migration can be feasibly done, then the submitter **must spin up a node from genesis to verify that it works** _before_ submitting the PR. This genesis spin-up will be tested again before the next node release is made. +- If the database schema changes and no migration can be feasibly done, then the submitter **must spin up a node from genesis to verify that it works** _before_ submitting the PR. This genesis spin-up will be tested again before the next node release is made. ## Data Input -* **Data from the network, from Bitcoin, and from the config file is untrusted.** Code that ingests such data _cannot assume anything_ about its structure, and _must_ handle any possible byte sequence that can be submitted to the Stacks node. +- **Data from the network, from Bitcoin, and from the config file is untrusted.** Code that ingests such data _cannot assume anything_ about its structure, and _must_ handle any possible byte sequence that can be submitted to the Stacks node. -* **Data previously written to disk by the node is trusted.** If data loaded from the database that was previously stored by the node is invalid or corrupt, it is appropriate to panic. +- **Data previously written to disk by the node is trusted.** If data loaded from the database that was previously stored by the node is invalid or corrupt, it is appropriate to panic. -* **All input processing is space-bound.** Every piece of code that ingests data must impose a maximum size on its byte representation. Any inputs that exceed this size _must be discarded with as little processing as possible_. +- **All input processing is space-bound.** Every piece of code that ingests data must impose a maximum size on its byte representation. Any inputs that exceed this size _must be discarded with as little processing as possible_. -* **All input deserialization is resource-bound.** Every piece of code +- **All input deserialization is resource-bound.** Every piece of code that ingests data must impose a maximum amount of RAM and CPU - required to decode it into a structured representation. If the data + required to decode it into a structured representation. If the data does not decode with the allotted resources, then no further processing may be done and the data is discarded. For an example, see how the parsing functions in the http module use `BoundReader` and `MAX_PAYLOAD_LEN` in [http.rs](https://github.com/stacks-network/stacks-core/blob/4852d6439b473e24705f14b8af637aded33cb422/src/net/http.rs#L2260-L2285). -* **All network input reception is time-bound.** Every piece of code that ingests data _from the network_ must impose a maximum amount of time that ingestion can take. If the data takes too long to arrive, then it must be discarded without any further processing. There is no time bound for data ingested from disk or passed as an argument; this requirement is meant by the space-bound requirement. +- **All network input reception is time-bound.** Every piece of code that ingests data _from the network_ must impose a maximum amount of time that ingestion can take. If the data takes too long to arrive, then it must be discarded without any further processing. There is no time bound for data ingested from disk or passed as an argument; this requirement is meant by the space-bound requirement. -* **Untrusted data ingestion must not panic.** Every piece of code that ingests untrusted data must gracefully handle errors. Panicking failures are forbidden for such data. Panics are only allowed if the ingested data was previously written by the node (and thus trusted). +- **Untrusted data ingestion must not panic.** Every piece of code that ingests untrusted data must gracefully handle errors. Panicking failures are forbidden for such data. Panics are only allowed if the ingested data was previously written by the node (and thus trusted). ## Non-consensus Changes to Blocks, Microblocks, Transactions, and Clarity -Any changes to code that alters how a block, microblock, or transaction is processed by the node should be **treated as a breaking change until proven otherwise**. This includes changes to the Clarity VM. The reviewer _must_ flag any such changes in the PR, and the submitter _must_ convince _all_ reviewers that they will _not_ break consensus. +Any changes to code that alters how a block, microblock, or transaction is processed by the node should be **treated as a breaking change until proven otherwise**. This includes changes to the Clarity VM. The reviewer _must_ flag any such changes in the PR, and the submitter _must_ convince _all_ reviewers that they will _not_ break consensus. -Changes that touch any of these four code paths must be treated with the utmost care. If _any_ core developer suspects that a given PR would break consensus, then they _must_ act to prevent the PR from merging. +Changes that touch any of these four code paths must be treated with the utmost care. If _any_ core developer suspects that a given PR would break consensus, then they _must_ act to prevent the PR from merging. ## Changes to the Peer Network -Any changes to the peer networking code **must be run on both mainnet and testnet before the PR can be merged.** The submitter should set up a testable node or set of nodes that reviewers can interact with. +Any changes to the peer networking code **must be run on both mainnet and testnet before the PR can be merged.** The submitter should set up a testable node or set of nodes that reviewers can interact with. Changes to the peer network should be deployed incrementally and tested by multiple parties when possible to verify that they function properly in a production setting. ## Performance Improvements -Any PRs that claim to improve performance **must ship with reproducible benchmarks** that accurately measure the improvement. This data must also be reported in the PR submission. +Any PRs that claim to improve performance **must ship with reproducible benchmarks** that accurately measure the improvement. This data must also be reported in the PR submission. For an example, see [PR #3075](https://github.com/stacks-network/stacks-core/pull/3075). ## Error Handling -* **Results must use `Error` types**. Fallible functions in the -codebase must use `Error` types in their `Result`s. If a new module's -errors are sufficiently different from existing `Error` types in the -codebaes, the new module must define a new `Error` type. Errors that -are caused by other `Error` types should be wrapped in a variant of -the new `Error` type. You should provide conversions via a `From` -trait implementation. +- **Results must use `Error` types**. Fallible functions in the + codebase must use `Error` types in their `Result`s. If a new module's + errors are sufficiently different from existing `Error` types in the + codebaes, the new module must define a new `Error` type. Errors that + are caused by other `Error` types should be wrapped in a variant of + the new `Error` type. You should provide conversions via a `From` + trait implementation. -* Functions that act on externally-submitted data **must never panic**. This includes code that acts on incoming network messages, blockchain data, and burnchain (Bitcoin) data. +- Functions that act on externally-submitted data **must never panic**. This includes code that acts on incoming network messages, blockchain data, and burnchain (Bitcoin) data. -* **Runtime panics should be used sparingly**. Generally speaking, a runtime panic is only appropriate if there is no reasonable way to recover from the error condition. For example, this includes (but is not limited to) disk I/O errors, database corruption, and unreachable code. +- **Runtime panics should be used sparingly**. Generally speaking, a runtime panic is only appropriate if there is no reasonable way to recover from the error condition. For example, this includes (but is not limited to) disk I/O errors, database corruption, and unreachable code. -* If a runtime panic is desired, it **must have an appropriate error message**. +- If a runtime panic is desired, it **must have an appropriate error message**. ## Logging -* Log messages should be informative and context-free as possible. They are used mainly to help us identify and diagnose problems. They are _not_ used to help you verify that your code works; that's the job of a unit test. +- Log messages should be informative and context-free as possible. They are used mainly to help us identify and diagnose problems. They are _not_ used to help you verify that your code works; that's the job of a unit test. -* **DO NOT USE println!() OR eprintln!()**. Instead, use the logging macros (`test_debug!()`, `trace!()`, `debug!()`, `info!()`, `warn!()`, `error!()`). +- **DO NOT USE println!() OR eprintln!()**. Instead, use the logging macros (`test_debug!()`, `trace!()`, `debug!()`, `info!()`, `warn!()`, `error!()`). -* Use **structured logging** to include dynamic data in your log entry. For example, `info!("Append block"; "block_id" => %block_id)` as opposed to `info!("Append block with block_id = {}", block_id)`. +- Use **structured logging** to include dynamic data in your log entry. For example, `info!("Append block"; "block_id" => %block_id)` as opposed to `info!("Append block with block_id = {}", block_id)`. -* Use `trace!()` and `test_debug!()` liberally. It only runs in tests. +- Use `trace!()` and `test_debug!()` liberally. It only runs in tests. -* Use `debug!()` for information that is relevant for diagnosing problems at runtime. This is off by default, but can be turned on with the `BLOCKSTACK_DEBUG` environment variable. +- Use `debug!()` for information that is relevant for diagnosing problems at runtime. This is off by default, but can be turned on with the `BLOCKSTACK_DEBUG` environment variable. -* Use `info!()` sparingly. +- Use `info!()` sparingly. -* Use `warn!()` or `error!()` only when there really is a problem. +- Use `warn!()` or `error!()` only when there really is a problem. ## Consensus-Critical Code -A **consensus-critical change** is a change that affects how the Stacks blockchain processes blocks, microblocks, or transactions, such that a node with the patch _could_ produce a different state root hash than a node without the patch. If this is even _possible_, then the PR is automatically treated as a consensus-critical change and must ship as part of a hard fork. It must also be described in a SIP. +A **consensus-critical change** is a change that affects how the Stacks blockchain processes blocks, microblocks, or transactions, such that a node with the patch _could_ produce a different state root hash than a node without the patch. If this is even _possible_, then the PR is automatically treated as a consensus-critical change and must ship as part of a hard fork. It must also be described in a SIP. -* **All changes to consensus-critical code must be opened against `next`**. It is _never acceptable_ to open them against `develop` or `master`. +- **All changes to consensus-critical code must be opened against `next`**. It is _never acceptable_ to open them against `develop` or `master`. -* **All consensus-critical changes must be gated on the Stacks epoch**. They may only take effect once the system enters a specific epoch (and this must be documented). +- **All consensus-critical changes must be gated on the Stacks epoch**. They may only take effect once the system enters a specific epoch (and this must be documented). A non-exhaustive list of examples of consensus-critical changes include: -* Adding or changing block, microblock, or transaction wire formats -* Changing the criteria under which a burnchain operation will be accepted by the node -* Changing the data that gets stored to a MARF key/value pair in the Clarity or Stacks chainstate MARFs -* Changing the order in which data gets stored in the above -* Adding, changing, or removing Clarity functions -* Changing the cost of a Clarity function -* Adding new kinds of transactions, or enabling certain transaction data field values that were previously forbidden. +- Adding or changing block, microblock, or transaction wire formats +- Changing the criteria under which a burnchain operation will be accepted by the node +- Changing the data that gets stored to a MARF key/value pair in the Clarity or Stacks chainstate MARFs +- Changing the order in which data gets stored in the above +- Adding, changing, or removing Clarity functions +- Changing the cost of a Clarity function +- Adding new kinds of transactions, or enabling certain transaction data field values that were previously forbidden. ## Testing -* **Unit tests should focus on the business logic with mocked data**. To the greatest extent possible, each error path should be tested _in addition to_ the success path. A submitter should expect to spend most of their test-writing time focusing on error paths; getting the success path to work is often much easier than the error paths. +- **Unit tests should focus on the business logic with mocked data**. To the greatest extent possible, each error path should be tested _in addition to_ the success path. A submitter should expect to spend most of their test-writing time focusing on error paths; getting the success path to work is often much easier than the error paths. -* **Unit tests should verify that the I/O code paths work**, but do so in a way that does not "clobber" other tests or prevent other tests from running in parallel (if it can be avoided). This means that unit tests should use their own directories for storing transient state (in `/tmp`), and should bind on ports that are not used anywhere else. +- **Unit tests should verify that the I/O code paths work**, but do so in a way that does not "clobber" other tests or prevent other tests from running in parallel (if it can be avoided). This means that unit tests should use their own directories for storing transient state (in `/tmp`), and should bind on ports that are not used anywhere else. -* If randomness is needed, **tests should use a seeded random number generator if possible**. This ensures that they will reliably pass in CI. +- If randomness is needed, **tests should use a seeded random number generator if possible**. This ensures that they will reliably pass in CI. -* When testing a consensus-critical code path, the test coverage should verify that the new behavior is only possible within the epoch(s) in which the behavior is slated to activate. Above all else, **backwards-compatibility is a hard requirement.** +- When testing a consensus-critical code path, the test coverage should verify that the new behavior is only possible within the epoch(s) in which the behavior is slated to activate. Above all else, **backwards-compatibility is a hard requirement.** -* **Integration tests are necessary when the PR has a consumer-visible effect**. For example, changes to the RESTful API, event stream, and mining behavior all require integration tests. +- **Integration tests are necessary when the PR has a consumer-visible effect**. For example, changes to the RESTful API, event stream, and mining behavior all require integration tests. -* Every consensus-critical change needs an integration test to verify that the feature activates only when the hard fork activates. +- Every consensus-critical change needs an integration test to verify that the feature activates only when the hard fork activates. PRs must include test coverage. However, if your PR includes large tests or tests which cannot run in parallel (which is the default operation of the `cargo test` command), these tests should be decorated with `#[ignore]`. A test should be marked `#[ignore]` if: - 1. It does not _always_ pass `cargo test` in a vanilla environment - (i.e., it does not need to run with `--test-threads 1`). - - 2. Or, it runs for over a minute via a normal `cargo test` execution - (the `cargo test` command will warn if this is not the case). - +1. It does not _always_ pass `cargo test` in a vanilla environment + (i.e., it does not need to run with `--test-threads 1`). +2. Or, it runs for over a minute via a normal `cargo test` execution + (the `cargo test` command will warn if this is not the case). ## Formatting @@ -406,17 +388,18 @@ cargo fmt-stacks ``` ## Comments + Comments are very important for the readability and correctness of the codebase. The purpose of comments is: -* Allow readers to understand the roles of components and functions without having to check how they are used. -* Allow readers to check the correctness of the code against the comments. -* Allow readers to follow tests. +- Allow readers to understand the roles of components and functions without having to check how they are used. +- Allow readers to check the correctness of the code against the comments. +- Allow readers to follow tests. In the limit, if there are no comments, the problems that arise are: -* Understanding one part of the code requires understanding *many* parts of the code. This is because the reader is forced to learn the meanings of constructs inductively through their use. Learning how one construct is used requires understanding its neighbors, and then their neighbors, and so on, recursively. Instead, with a good comment, the reader can understand the role of a construct with `O(1)` work by reading the comment. -* The user cannot be certain if there is a bug in the code, because there is no distinction between the contract of a function, and its definition. -* The user cannot be sure if a test is correct, because the logic of the test is not specified, and the functions do not have contracts. +- Understanding one part of the code requires understanding _many_ parts of the code. This is because the reader is forced to learn the meanings of constructs inductively through their use. Learning how one construct is used requires understanding its neighbors, and then their neighbors, and so on, recursively. Instead, with a good comment, the reader can understand the role of a construct with `O(1)` work by reading the comment. +- The user cannot be certain if there is a bug in the code, because there is no distinction between the contract of a function, and its definition. +- The user cannot be sure if a test is correct, because the logic of the test is not specified, and the functions do not have contracts. ### Comment Formatting @@ -430,14 +413,13 @@ Comments are to be formatted in typical `rust` style, specifically: - When documenting panics, errors, or other conceptual sections, introduce a Markdown section with a single `#`, e.g.: - ```rust - # Errors - * ContractTooLargeError: Thrown when `contract` is larger than `MAX_CONTRACT_SIZE`. - ``` + ```rust + # Errors + * ContractTooLargeError: Thrown when `contract` is larger than `MAX_CONTRACT_SIZE`. + ``` ### Content of Comments - #### Component Comments Comments for a component (`struct`, `trait`, or `enum`) should explain what the overall @@ -485,7 +467,7 @@ impl<'a, 'b> ReadOnlyChecker<'a, 'b> { This comment is considered positive because it explains the contract of the function in pseudo-code. Someone who understands the constructs mentioned could, e.g., write a test for this method from this description. -#### Comments on Implementations of Virtual Methods +#### Comments on Implementations of Virtual Methods Note that, if a function implements a virtual function on an interface, the comments should not repeat what was specified on the interface declaration. The comment should only add information specific to that implementation. @@ -507,7 +489,7 @@ pub struct ReadOnlyChecker<'a, 'b> { defined_functions: HashMap, ``` -This comment is considered positive because it clarifies users might have about the content and role of this member. E.g., it explains that the `bool` indicates whether the function is *read-only*, whereas this cannot be gotten from the signature alone. +This comment is considered positive because it clarifies users might have about the content and role of this member. E.g., it explains that the `bool` indicates whether the function is _read-only_, whereas this cannot be gotten from the signature alone. #### Test Comments @@ -543,14 +525,14 @@ This comment is considered positive because it explains the purpose of the test Contributors should strike a balance between commenting "too much" and commenting "too little". Commenting "too much" primarily includes commenting things that are clear from the context. Commenting "too little" primarily includes writing no comments at all, or writing comments that leave important questions unresolved. -Human judgment and creativity must be used to create good comments, which convey important information with small amounts of text. There is no single rule which can determine what a good comment is. Longer comments are *not* always better, since needlessly long comments have a cost: they require the reader to read more, take up whitespace, and take longer to write and review. +Human judgment and creativity must be used to create good comments, which convey important information with small amounts of text. There is no single rule which can determine what a good comment is. Longer comments are _not_ always better, since needlessly long comments have a cost: they require the reader to read more, take up whitespace, and take longer to write and review. ### Don't Restate Names in Comments The contracts of functions should be implemented precisely enough that tests could be written looking only at the declaration and the comments (and without looking at the definition!). However: -* **the author should assume that the reader has already read and understood the function name, variable names, type names, etc.** -* **the author should only state information that is new** +- **the author should assume that the reader has already read and understood the function name, variable names, type names, etc.** +- **the author should only state information that is new** So, if a function and its variables have very descriptive names, then there may be nothing to add in the comments at all! @@ -561,7 +543,7 @@ So, if a function and its variables have very descriptive names, then there may fn append_transaction_to_block(transaction:Transaction, &mut Block) -> Result<()> ``` -This is considered bad because the function name already says "append transaction to block", so it doesn't add anything to restate it in the comments. However, *do* add anything that is not redundant, such as elaborating what it means to "append" (if there is more to say), or what conditions will lead to an error. +This is considered bad because the function name already says "append transaction to block", so it doesn't add anything to restate it in the comments. However, _do_ add anything that is not redundant, such as elaborating what it means to "append" (if there is more to say), or what conditions will lead to an error. **Good Example** @@ -573,39 +555,40 @@ This is considered bad because the function name already says "append transactio fn append_transaction_to_block(transaction:Transaction, block:&mut Block) -> Result<()> ``` -This is considered good because the reader builds on the context created by the function and variable names. Rather than restating them, the function just adds elements of the contract that are not implicit in the declaration. +This is considered good because the reader builds on the context created by the function and variable names. Rather than restating them, the function just adds elements of the contract that are not implicit in the declaration. ### Do's and Dont's of Comments -*Don't* over-comment by documenting things that are clear from the context. E.g.: +_Don't_ over-comment by documenting things that are clear from the context. E.g.: - Don't document the types of inputs or outputs, since these are parts of the type signature in `rust`. - Don't necessarily document standard "getters" and "setters", like `get_clarity_version()`, unless there is unexpected information to add with the comment. - Don't explain that a specific test does type-checking, if it is in a file that is dedicated to type-checking. -*Do* document things that are not clear, e.g.: +_Do_ document things that are not clear, e.g.: - For a function called `process_block`, explain what it means to "process" a block. - For a function called `process_block`, make clear whether we mean anchored blocks, microblocks, or both. - For a function called `run`, explain the steps involved in "running". - For a function that takes arguments `peer1` and `peer2`, explain the difference between the two. -- For a function that takes an argument `height`, either explain in the comment what this is the *height of*. Alternatively, expand the variable name to remove the ambiguity. +- For a function that takes an argument `height`, either explain in the comment what this is the _height of_. Alternatively, expand the variable name to remove the ambiguity. - For a test, document what it is meant to test, and why the expected answers are, in fact, expected. ### Changing Code Instead of Comments Keep in mind that better variable names can reduce the need for comments, e.g.: -* `burnblock_height` instead of `height` may eliminate the need to comment that `height` refers to a burnblock height -* `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to to explain that the inputs are microblocks -* `add_transaction_to_microblock` explains more than `handle_transaction`, and reduces the need to even read the comment +- `burnblock_height` instead of `height` may eliminate the need to comment that `height` refers to a burnblock height +- `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to to explain that the inputs are microblocks +- `add_transaction_to_microblock` explains more than `handle_transaction`, and reduces the need to even read the comment # Licensing and contributor license agreement -`stacks-core` is released under the terms of the GPL version 3. Contributions -that are not licensed under compatible terms will be rejected. Moreover, +`stacks-core` is released under the terms of the GPL version 3. Contributions +that are not licensed under compatible terms will be rejected. Moreover, contributions will not be accepted unless _all_ authors accept the project's contributor license agreement. ## Use of AI-code Generation + The Stacks Foundation has a very strict policy of not accepting AI-generated code PRs due to uncertainly about licensing issues. diff --git a/README.md b/README.md index 6cdb42857f..0279b25116 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,6 @@ Stacks is a layer-2 blockchain that uses Bitcoin as a base layer for security an [![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg?style=flat)](https://www.gnu.org/licenses/gpl-3.0) [![Release](https://img.shields.io/github/v/release/stacks-network/stacks-core?style=flat)](https://github.com/stacks-network/stacks-core/releases/latest) -[![Build Status](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml/badge.svg?branch=master&event=workflow_dispatch&style=flat)](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml?query=event%3Aworkflow_dispatch+branch%3Amaster) [![Discord Chat](https://img.shields.io/discord/621759717756370964.svg)](https://stacks.chat) ## Building diff --git a/SECURITY.md b/SECURITY.md index e59229b3a1..e9001abe0a 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,7 +2,7 @@ ## Supported Versions -Please see [Releases](https://github.com/stacks-network/stacks-blockchain/releases). It is recommended to use the [most recently released version](https://github.com/stacks-network/stacks-blockchain/releases/latest). +Please see [Releases](https://github.com/stacks-network/stacks-core/releases). It is recommended to use the [most recently released version](https://github.com/stacks-network/stacks-core/releases/latest). ## Reporting a vulnerability diff --git a/docs/branching.md b/docs/branching.md new file mode 100644 index 0000000000..38db57e3e5 --- /dev/null +++ b/docs/branching.md @@ -0,0 +1,35 @@ +# Git Branching + +The following is a modified version of the gitflow branching strategy described in + +## Main Branches + +- **master** - `origin/master` is the main branch where the source code of HEAD always reflects a production-ready state. +- **develop** - `origin/develop` is the branch where the source code of HEAD always reflects a state with the latest delivered development changes for the next release. +- **next** - `origin/next` may contain consensus-breaking changes. +- **release/X.Y.Z.A.n** is the release branch. + +When the source code in the develop branch reaches a stable point and is ready to be released, a release branch is created as `release/X.Y.Z.A.n` (see [release-process.md](./release-process.md)). +After release, the following will happen: + +- `release/X.Y.Z.A.n` branch is merged back to `origin/master`. +- `origin/master` is then merged into `origin/develop`, and development continues in the `origin/develop` branch. +- `origin/develop` is then merged into `origin/next`. + +## Supporting Branches + +Branch names should use a prefix that conveys the overall goal of the branch. +All branches should be based off of `origin/develop`, with the exception being a hotfix branch which may be based off of `origin/master`. + +- `feat/some-fancy-new-thing`: For new features. +- `fix/some-broken-thing`: For hot fixes and bug fixes. +- `chore/some-update`: Any non code related change (ex: updating CHANGELOG.md, adding comments to code). +- `docs/something-needs-a-comment`: For documentation. +- `ci/build-changes`: For continuous-integration changes. +- `test/more-coverage`: For branches that only add more tests. +- `refactor/formatting-fix`: For refactors of the codebase. + +The full branch name **must**: + +- Have a maximum of 128 characters. +- Only includes ASCII lowercase and uppercase letters, digits, underscores, periods and dashes. diff --git a/docs/ci-workflow.md b/docs/ci-workflow.md index df63ee8fa0..16d020985d 100644 --- a/docs/ci-workflow.md +++ b/docs/ci-workflow.md @@ -1,4 +1,4 @@ -# CI Workflows +# CI Workflow All releases are built via a Github Actions workflow named [`CI`](../.github/workflows/ci.yml), and is responsible for: @@ -11,11 +11,11 @@ All releases are built via a Github Actions workflow named [`CI`](../.github/wor 1. Releases are only created when the [CI workflow](../.github/workflows/ci.yml) is triggered against a release branch (ex: `release/X.Y.Z.A.n`). 2. [Caching](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows) is used to speed up testing - a cache is created based on the type of data (i.e. cargo) and the commit sha. - Tests can be retried quickly since the cache will persist until the cleanup job is run. + Tests can be retried quickly since the cache will persist until the cleanup job is run or the cache is evicted. 3. [Nextest](https://nexte.st/) is used to run the tests from a cached build archive file (using commit sha as the cache key). - - Two [test archives](https://nexte.st/docs/ci-features/archiving/) are created, one for genesis tests and one for generic tests. + - Two [test archives](https://nexte.st/docs/ci-features/archiving/) are created, one for genesis tests and one for non-genesis tests. - Unit-tests are [partitioned](https://nexte.st/docs/ci-features/partitioning/) and parallelized to speed up execution time. -4. Most workflow steps are called from a separate actions repo to reduce duplication. +4. Most workflow steps are called from a separate actions repo to enforce DRY. ## TL;DR @@ -55,7 +55,7 @@ Partitions (shards) are used when there is a large and unknown number of tests t There is also a workflow designed to run tests that is manually triggered: [Standalone Tests](../.github/workflows/standalone-tests.yml). This workflow requires you to select which test(s) you want to run, which then triggers a reusable workflow via conditional. -For example, selecting "Epoch Tests" will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). +For example, selecting `Epoch Tests` will run the tests defined in [Epoch Tests](../.github/workflows/epoch-tests.yml). Likewise, selecting `Release Tests` will run the same tests as a release workflow. ### Adding/changing tests @@ -105,7 +105,7 @@ If any of the tests given to the action (JSON string of `needs` field) fails, th If you have to mark more than 1 job from the same workflow required in a ruleset, you can use this action in a separate job and only add that job as required. -In the following example, `unit-tests` is a matrix job with 8 partitions (i.e. 8 jobs are running), while the others are normal jobs. +In the following example, `unit-tests` is a matrix job from [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) with 8 partitions (i.e. 8 jobs are running), while the others are normal jobs. If any of the jobs are failing, the `check-tests` job will also fail. ```yaml @@ -145,7 +145,7 @@ check-tests: ### Merging a branch to develop Once a PR is added to the merge queue, the target branch is merged into the source branch. -Then, the same workflows are triggered as in the [previous step](#opening-a-pr-against-develop). +Then, the same workflows are triggered as in the [previous step](#openingupdating-a-pr). --- diff --git a/docs/mining.md b/docs/mining.md index 2a59f051a9..8b40eb8cc8 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -1,7 +1,7 @@ # Stacks Mining Stacks tokens (STX) are mined by transferring BTC via PoX. To run as a miner, -you should make sure to add the following config fields to your config file: +you should make sure to add the following config fields to your [config file](../testnet/stacks-node/conf/mainnet-miner-conf.toml): ```toml [node] @@ -80,4 +80,4 @@ Estimates are then randomly "fuzzed" using uniform random fuzz of size up to ## Further Reading - [stacksfoundation/miner-docs](https://github.com/stacksfoundation/miner-docs) -- [Mining Documentation](https://docs.stacks.co/docs/nodes-and-miners/miner-mainnet) +- [Mining Documentation](https://docs.stacks.co/stacks-in-depth/nodes-and-miners/mine-mainnet-stacks-tokens) diff --git a/docs/profiling.md b/docs/profiling.md index 3e43cf9b63..26d1c119ae 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -9,7 +9,7 @@ This document describes several techniques to profile (i.e. find performance bot - generating flame graphs, and - profiling sqlite queries. -Note that all bash commands in this document are run from the stacks-blockchain repository root directory. +Note that all bash commands in this document are run from the [stacks-core repository](https://github.com/stacks-network/stacks-core) root directory. ## Logging tips diff --git a/docs/release-process.md b/docs/release-process.md index d7dfb1ea52..b9e2be8748 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -14,8 +14,8 @@ ## Release Schedule and Hotfixes Normal releases in this repository that add new features are released on a monthly schedule. -The currently staged changes for such releases are in the [develop branch](https://github.com/stacks-network/stacks-blockchain/tree/develop). -It is generally safe to run a `stacks-node` from that branch, though it has received less rigorous testing than release tags. +The currently staged changes for such releases are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). +It is generally safe to run a `stacks-node` from that branch, though it has received less rigorous testing than release tags or the [master branch](https://github.com/stacks-network/stacks-core/tree/master). If bugs are found in the `develop` branch, please do [report them as issues](https://github.com/stacks-network/stacks-core/issues) on this repository. For fixes that impact the correct functioning or liveness of the network, _hotfixes_ may be issued. @@ -40,7 +40,7 @@ A increments on non-consensus-breaking changes that do not require a fresh chain n increments on patches and hot-fixes (akin to semantic PATCH) ``` -Optionally, an extra pre-release field may be appended to the version to specify a release candidate in the format `-rc[0-9]` +Optionally, an extra pre-release field may be appended to the version to specify a release candidate in the format `-rc[0-9]`. ## Non-Consensus Breaking Release Process @@ -50,31 +50,32 @@ A release should happen at least 24 hours before the start of a new cycle, to av 1. Before creating the release, the _version number_ must be determined, where the factors that determine the version number are discussed in [Versioning](#versioning). - 1. First determine whether there are any "non-consensus-breaking changes that require a fresh chainstate". + - First determine whether there are any "non-consensus-breaking changes that require a fresh chainstate". - In other words, the database schema has changed, but an automatic migration was not implemented. - Determine whether this a feature release, as opposed to a hotfix or a patch. - 2. A new branch in format `release/X.Y.Z.A.n` is created from the base branch `develop`. + - A new branch in the format `release/X.Y.Z.A.n` is created from the base branch `develop`. 2. Enumerate PRs and/or issues that would _block_ the release. - 1. A label should be applied to each such issue/PR as `X.Y.Z.A.n-blocker`. + - A label should be applied to each such issue/PR as `X.Y.Z.A.n-blocker`. -3. Since development is continuing in the `develop` branch, it may be necessary to cherry-pick commits into the release branch. +3. Since development is continuing in the `develop` branch, it may be necessary to cherry-pick some commits into the release branch. - 1. Create a feature branch from `release/X.Y.Z.A.n`, ex: `feat/X.Y.Z.A.n-pr_number`. - 2. Add cherry-picked commits to the `feat/X.Y.Z.A.n-pr_number` branch - 3. Merge `feat/X.Y.Z.A.n-pr_number` back into `release/X.Y.Z.A.n`. + - Create a feature branch from `release/X.Y.Z.A.n`, ex: `feat/X.Y.Z.A.n-pr_number`. + - Add cherry-picked commits to the `feat/X.Y.Z.A.n-pr_number` branch + - Merge `feat/X.Y.Z.A.n-pr_number` into `release/X.Y.Z.A.n`. 4. Open a PR to update the `CHANGELOG.md` file in the `release/X.Y.Z.A.n` branch. - 1. Create a chore branch from `release/X.Y.Z.A.n`, ex: `chore/X.Y.Z.A.n-changelog`. - 2. Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections. + - Create a chore branch from `release/X.Y.Z.A.n`, ex: `chore/X.Y.Z.A.n-changelog`. + - Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections. - Pull requests merged into `develop` can be found [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). - - **Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release. + + **Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release. -5. Once `chore/X.Y.Z.A.n-changelog` has merged, a build may be started by manually triggering the [`CI` Github Actions workflow](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml) against the `release/X.Y.Z.A.n` branch. +5. Once `chore/X.Y.Z.A.n-changelog` has merged, a build may be started by manually triggering the [`CI` workflow](../.github/workflows/ci.yml) against the `release/X.Y.Z.A.n` branch. -6. Once the release has been built and binaries are available, ecosystem participants shall be notified to test the tagged release on various staging infrastructure. +6. Once the release candidate has been built and binaries are available, ecosystem participants shall be notified to test the tagged release on various staging infrastructure. 7. The release candidate will tested that it successfully syncs with the current chain from genesis both in testnet and mainnet. @@ -87,8 +88,8 @@ A release should happen at least 24 hours before the start of a new cycle, to av Announcements will then be shared in the `#stacks-core-devs` channel in the Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce). 10. Finally, the following merges will happen to complete the release process: - 1. Release branch `release/X.Y.Z.A.n` will be merged into the `master` branch. - 2. Then, `master` will be merged back into `develop`. + - Release branch `release/X.Y.Z.A.n` will be merged into the `master` branch. + - Then, `master` will be merged into `develop`. ## Consensus Breaking Release Process @@ -96,4 +97,3 @@ Consensus breaking releases shall follow the same overall process as a non-conse - The release must be timed so that sufficient time is given to perform a genesis sync. - The release must take into account the activation height at which the new consensus rules will take effect. - Generically, a few weeks lead time is required for consensus breaking changes. diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/testnet/stacks-node/conf/mainnet-miner-conf.toml index ee5e262d46..3fdf293a4f 100644 --- a/testnet/stacks-node/conf/mainnet-miner-conf.toml +++ b/testnet/stacks-node/conf/mainnet-miner-conf.toml @@ -7,8 +7,7 @@ prometheus_bind = "127.0.0.1:9153" seed = "" local_peer_seed = "" miner = true -# Disable microblocks (ref: https://github.com/stacks-network/stacks-core/pull/4561 ) -mine_microblocks = false +mine_microblocks = false # Disable microblocks (ref: https://github.com/stacks-network/stacks-core/pull/4561 ) [burnchain] chain = "bitcoin" diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index d237aafd61..376f669893 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -38,32 +38,32 @@ start_height = 0 [[burnchain.epochs]] epoch_name = "2.0" -start_height = 0 +start_height = 230 [[burnchain.epochs]] epoch_name = "2.05" -start_height = 1 +start_height = 240 [[burnchain.epochs]] epoch_name = "2.1" -start_height = 2 +start_height = 240 [[burnchain.epochs]] epoch_name = "2.2" -start_height = 3 +start_height = 241 [[burnchain.epochs]] epoch_name = "2.3" -start_height = 4 +start_height = 242 [[burnchain.epochs]] epoch_name = "2.4" -start_height = 5 +start_height = 243 [[burnchain.epochs]] epoch_name = "2.5" -start_height = 6 +start_height = 244 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2000701 +start_height = 2_000_250 \ No newline at end of file From 890ab288bd435295fee079041ba8034379fa7351 Mon Sep 17 00:00:00 2001 From: Dean Chi <21262275+deantchi@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:43:22 -0700 Subject: [PATCH 0536/1400] testnet-docs: update conf --- .../stacks-node/conf/testnet-follower-conf.toml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index 376f669893..b327fbb001 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -38,32 +38,32 @@ start_height = 0 [[burnchain.epochs]] epoch_name = "2.0" -start_height = 230 +start_height = 0 [[burnchain.epochs]] epoch_name = "2.05" -start_height = 240 +start_height = 1 [[burnchain.epochs]] epoch_name = "2.1" -start_height = 240 +start_height = 2 [[burnchain.epochs]] epoch_name = "2.2" -start_height = 241 +start_height = 3 [[burnchain.epochs]] epoch_name = "2.3" -start_height = 242 +start_height = 4 [[burnchain.epochs]] epoch_name = "2.4" -start_height = 243 +start_height = 5 [[burnchain.epochs]] epoch_name = "2.5" -start_height = 244 +start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2_000_250 \ No newline at end of file +start_height = 2000701 \ No newline at end of file From 172a0fdd402b00354e64291c83168097b237e5ff Mon Sep 17 00:00:00 2001 From: Dean Chi <21262275+deantchi@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:43:22 -0700 Subject: [PATCH 0537/1400] testnet-docs: update conf --- .../stacks-node/conf/testnet-follower-conf.toml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index 376f669893..b327fbb001 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -38,32 +38,32 @@ start_height = 0 [[burnchain.epochs]] epoch_name = "2.0" -start_height = 230 +start_height = 0 [[burnchain.epochs]] epoch_name = "2.05" -start_height = 240 +start_height = 1 [[burnchain.epochs]] epoch_name = "2.1" -start_height = 240 +start_height = 2 [[burnchain.epochs]] epoch_name = "2.2" -start_height = 241 +start_height = 3 [[burnchain.epochs]] epoch_name = "2.3" -start_height = 242 +start_height = 4 [[burnchain.epochs]] epoch_name = "2.4" -start_height = 243 +start_height = 5 [[burnchain.epochs]] epoch_name = "2.5" -start_height = 244 +start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2_000_250 \ No newline at end of file +start_height = 2000701 \ No newline at end of file From 8c2d778381dc7733339371a15dbc81f35b95e244 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 19 Jul 2024 14:47:40 -0500 Subject: [PATCH 0538/1400] test: add nakamoto bitcoin reorg test * add bitcoind reorg test to bitcoind / stacks-node / signers integration test suite * add test to bitcoin-tests.yml --- .github/workflows/bitcoin-tests.yml | 1 + .../stacks-node/src/nakamoto_node/relayer.rs | 21 ++- testnet/stacks-node/src/tests/signer/v0.rs | 153 +++++++++++++++++- 3 files changed, 166 insertions(+), 9 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index c35406fd3b..a5604efd7d 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -89,6 +89,7 @@ jobs: - tests::signer::v0::end_of_tenure - tests::signer::v0::forked_tenure_okay - tests::signer::v0::forked_tenure_invalid + - tests::signer::v0::bitcoind_forking_test - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index a614043472..931a8c5f68 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -110,6 +110,7 @@ pub struct LastCommit { /// the tenure consensus hash for the tip's tenure tenure_consensus_hash: ConsensusHash, /// the start-block hash of the tip's tenure + #[allow(dead_code)] start_block_hash: BlockHeaderHash, /// What is the epoch in which this was sent? epoch_id: StacksEpochId, @@ -836,14 +837,20 @@ impl RelayerThread { })? }; - if last_winner_snapshot.miner_pk_hash != Some(mining_pkh) { - debug!("Relayer: the miner did not win the last sortition. No tenure to continue."; - "current_mining_pkh" => %mining_pkh, - "last_winner_snapshot.miner_pk_hash" => ?last_winner_snapshot.miner_pk_hash, - ); + let won_last_sortition = last_winner_snapshot.miner_pk_hash == Some(mining_pkh); + debug!( + "Relayer: Current burn block had no sortition. Checking for tenure continuation."; + "won_last_sortition" => won_last_sortition, + "current_mining_pkh" => %mining_pkh, + "last_winner_snapshot.miner_pk_hash" => ?last_winner_snapshot.miner_pk_hash, + "canonical_stacks_tip_id" => %canonical_stacks_tip, + "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, + "block_election_ch" => %block_election_snapshot.consensus_hash, + "burn_view_ch" => %new_burn_view, + ); + + if !won_last_sortition { return Ok(()); - } else { - debug!("Relayer: the miner won the last sortition. Continuing tenure."); } match self.start_new_tenure( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2a9da738d3..2ef66eec52 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -46,10 +46,10 @@ use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; use crate::tests::neon_integrations::{ - get_chain_info, next_block_and_wait, submit_tx, test_observer, + get_account, get_chain_info, next_block_and_wait, submit_tx, test_observer, }; use crate::tests::{self, make_stacks_transfer}; -use crate::{nakamoto_node, BurnchainController}; +use crate::{nakamoto_node, BurnchainController, Keychain}; impl SignerTest { /// Run the test until the epoch 3 boundary @@ -787,6 +787,155 @@ fn forked_tenure_testing( } } +#[test] +#[ignore] +fn bitcoind_forking_test() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |_config| {}, + ); + let conf = signer_test.running_nodes.conf.clone(); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + let miner_address = Keychain::default(conf.node.seed.clone()) + .origin_address(conf.is_mainnet()) + .unwrap(); + + signer_test.boot_to_epoch_3(); + info!("------------------------- Reached Epoch 3.0 -------------------------"); + let pre_epoch_3_nonce = get_account(&http_origin, &miner_address).nonce; + let pre_fork_tenures = 10; + + for _i in 0..pre_fork_tenures { + let _mined_block = signer_test.mine_nakamoto_block(Duration::from_secs(30)); + } + + let pre_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; + + assert_eq!(pre_fork_1_nonce, pre_epoch_3_nonce + 2 * pre_fork_tenures); + + info!("------------------------- Triggering Bitcoin Fork -------------------------"); + + let burn_block_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + let burn_header_hash_to_fork = signer_test + .running_nodes + .btc_regtest_controller + .get_block_hash(burn_block_height); + signer_test + .running_nodes + .btc_regtest_controller + .invalidate_block(&burn_header_hash_to_fork); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + info!("Wait for block off of shallow fork"); + thread::sleep(Duration::from_secs(15)); + + // we need to mine some blocks to get back to being considered a frequent miner + for _i in 0..3 { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst) + > commits_count) + }, + ) + .unwrap(); + } + + let post_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; + + assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 1 * 2); + + for _i in 0..5 { + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + } + + let pre_fork_2_nonce = get_account(&http_origin, &miner_address).nonce; + assert_eq!(pre_fork_2_nonce, post_fork_1_nonce + 2 * 5); + + info!( + "New chain info: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("------------------------- Triggering Deeper Bitcoin Fork -------------------------"); + + let burn_block_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + let burn_header_hash_to_fork = signer_test + .running_nodes + .btc_regtest_controller + .get_block_hash(burn_block_height - 3); + signer_test + .running_nodes + .btc_regtest_controller + .invalidate_block(&burn_header_hash_to_fork); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(4); + + info!("Wait for block off of shallow fork"); + thread::sleep(Duration::from_secs(15)); + + // we need to mine some blocks to get back to being considered a frequent miner + for _i in 0..3 { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst) + > commits_count) + }, + ) + .unwrap(); + } + + let post_fork_2_nonce = get_account(&http_origin, &miner_address).nonce; + + assert_eq!(post_fork_2_nonce, pre_fork_2_nonce - 4 * 2); + + for _i in 0..5 { + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + } + + let test_end_nonce = get_account(&http_origin, &miner_address).nonce; + assert_eq!(test_end_nonce, post_fork_2_nonce + 2 * 5); + + info!( + "New chain info: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks the behavior at the end of a tenure. Specifically: From 17e39e0d536fc1395cb4a304699a12a906caccce Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 22 Jul 2024 13:03:36 -0700 Subject: [PATCH 0539/1400] placeholder for testnet miner config --- .../stacks-node/conf/testnet-miner-conf.toml | 77 +++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 testnet/stacks-node/conf/testnet-miner-conf.toml diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml new file mode 100644 index 0000000000..f3a49a33d4 --- /dev/null +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -0,0 +1,77 @@ +[node] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +rpc_bind = "0.0.0.0:20443" +p2p_bind = "0.0.0.0:20444" +bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444" +prometheus_bind = "0.0.0.0:9153" + +[burnchain] +chain = "bitcoin" +mode = "krypton" +peer_host = "127.0.0.1" +username = "" +password = "" +rpc_port = 18443 +peer_port = 18444 +pox_prepare_length = 100 +pox_reward_length = 900 +# Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election +burn_fee_cap = 20000 +# Amount (in sats) per byte - Used to calculate the transaction fees +satoshis_per_byte = 25 +# Amount of sats to add when RBF'ing bitcoin tx (default: 5) +rbf_fee_increment = 5 +# Maximum percentage to RBF bitcoin tx (default: 150% of satsv/B) +max_rbf = 150 + +[[ustx_balance]] +address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" +amount = 10000000000000000 + +[[ustx_balance]] +address = "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" +amount = 10000000000000000 + +[[ustx_balance]] +address = "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" +amount = 10000000000000000 + +[[ustx_balance]] +address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" +amount = 10000000000000000 + +[[burnchain.epochs]] +epoch_name = "1.0" +start_height = 0 + +[[burnchain.epochs]] +epoch_name = "2.0" +start_height = 0 + +[[burnchain.epochs]] +epoch_name = "2.05" +start_height = 1 + +[[burnchain.epochs]] +epoch_name = "2.1" +start_height = 2 + +[[burnchain.epochs]] +epoch_name = "2.2" +start_height = 3 + +[[burnchain.epochs]] +epoch_name = "2.3" +start_height = 4 + +[[burnchain.epochs]] +epoch_name = "2.4" +start_height = 5 + +[[burnchain.epochs]] +epoch_name = "2.5" +start_height = 6 + +[[burnchain.epochs]] +epoch_name = "3.0" +start_height = 2000701 \ No newline at end of file From b7471578ae34e6276ebf4f809c146f0a3d8f53f3 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 22 Jul 2024 13:47:12 -0700 Subject: [PATCH 0540/1400] Updating stacks-signer release process --- docs/release-process.md | 2 +- stacks-signer/release-process.md | 88 +++++++++++++++++--------------- 2 files changed, 48 insertions(+), 42 deletions(-) diff --git a/docs/release-process.md b/docs/release-process.md index b9e2be8748..ac7bd60d56 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -65,7 +65,7 @@ A release should happen at least 24 hours before the start of a new cycle, to av - Add cherry-picked commits to the `feat/X.Y.Z.A.n-pr_number` branch - Merge `feat/X.Y.Z.A.n-pr_number` into `release/X.Y.Z.A.n`. -4. Open a PR to update the `CHANGELOG.md` file in the `release/X.Y.Z.A.n` branch. +4. Open a PR to update the [CHANGELOG](../CHANGELOG.md) file in the `release/X.Y.Z.A.n` branch. - Create a chore branch from `release/X.Y.Z.A.n`, ex: `chore/X.Y.Z.A.n-changelog`. - Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections. diff --git a/stacks-signer/release-process.md b/stacks-signer/release-process.md index 599d8c7af4..bb0f36fadf 100644 --- a/stacks-signer/release-process.md +++ b/stacks-signer/release-process.md @@ -11,27 +11,30 @@ | Linux ARMv7 | _builds are provided but not tested_ | | Linux ARM64 | _builds are provided but not tested_ | - ## Release Schedule and Hotfixes -Normal releases in this repository that add new or updated features shall be released in an ad-hoc manner. The currently staged changes for such releases -are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). It is generally safe to run a `stacks-signer` from that branch, though it has received less rigorous testing than release branches. If bugs are found in the `develop` branch, please do [report them as issues](https://github.com/stacks-network/stacks-core/issues) on this repository. +Normal releases in this repository that add new or updated features shall be released in an ad-hoc manner. +The currently staged changes for such releases are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). +It is generally safe to run a `stacks-signer` from that branch, though it has received less rigorous testing than release branches. +If bugs are found in the `develop` branch, please do [report them as issues](https://github.com/stacks-network/stacks-core/issues) on this repository. For fixes that impact the correct functioning or liveness of the signer, _hotfixes_ may be issued. These hotfixes are categorized by priority according to the following rubric: -- **High Priority**. Any fix for an issue that could deny service to the network as a whole, e.g., an issue where a particular kind of invalid transaction would cause nodes to stop processing requests or shut down unintentionally. -- **Medium Priority**. ny fix for an issue that could deny service to individual nodes. -- **Low Priority**. Any fix for an issue that is not high or medium priority. +- **High Priority**. Any fix for an issue that could deny service to the network as a whole, e.g., an issue where a particular kind of invalid transaction would cause nodes to stop processing requests or shut down unintentionally. +- **Medium Priority**. Any fix for an issue that could deny service to individual nodes. +- **Low Priority**. Any fix for an issue that is not high or medium priority. ## Versioning -This project uses a 6 part version number. When there is a stacks-core release, `stacks-signer` will assume the same version as the tagged `stacks-core` release (5 part version). When there are changes in-between stacks-core releases, the signer binary will assume a 6 part version. +This project uses a 6 part version number. +When there is a stacks-core release, `stacks-signer` will assume the same version as the tagged `stacks-core` release ([5 part version](../docs/release-process.md#versioning)). +When there are changes in-between `stacks-core` releases, the `stacks-signer` binary will assume a 6 part version: ``` X.Y.Z.A.n.x -X = 2 and does not change in practice unless there’s another Stacks 2.0 type event +X major version - does not change in practice unless there’s another Stacks 2.0 type event Y increments on consensus-breaking changes Z increments on non-consensus-breaking changes that require a fresh chainstate (akin to semantic MAJOR) A increments on non-consensus-breaking changes that do not require a fresh chainstate, but introduce new features (akin to semantic MINOR) @@ -39,47 +42,50 @@ n increments on patches and hot-fixes (akin to semantic PATCH) x increments on the current stacks-core release version ``` -For example, if there is a stacks-core release of 2.6.0.0.0, `stacks-signer` will also be versioned as 2.6.0.0.0. If a change is needed in the signer, it may be released apart from the stacks-core as version 2.6.0.0.0.1 and will increment until the next stacks-core release. +## Non-Consensus Breaking Release Process + +The release must be timed so that it does not interfere with a _prepare phase_. +The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2). +A release should happen at least 24 hours before the start of a new cycle, to avoid interfering with the prepare phase. + +1. Before creating the release, the _version number_ must be determined, where the factors that determine the version number are discussed in [Versioning](#versioning). + + - First determine whether there are any "non-consensus-breaking changes that require a fresh chainstate". + - In other words, the database schema has changed, but an automatic migration was not implemented. + - Determine whether this a feature release, as opposed to a hotfix or a patch. + - A new branch in the format `release/signer-X.Y.Z.A.n.x` is created from the base branch `develop`. + +2. Enumerate PRs and/or issues that would _block_ the release. + + - A label should be applied to each such issue/PR as `signer-X.Y.Z.A.n.x-blocker`. + +3. Since development is continuing in the `develop` branch, it may be necessary to cherry-pick some commits into the release branch. -## Release Process + - Create a feature branch from `release/signer-X.Y.Z.A.n.x`, ex: `feat/signer-X.Y.Z.A.n.x-pr_number`. + - Add cherry-picked commits to the `feat/signer-X.Y.Z.A.n.x-pr_number` branch + - Merge `feat/signer-X.Y.Z.A.n.x-pr_number` into `release/signer-X.Y.Z.A.n.x`. +4. Open a PR to update the [CHANGELOG](./CHANGELOG.md) file in the `release/signer-X.Y.Z.A.n.x` branch. -1. The release must be timed so that it does not interfere with a _prepare - phase_. The timing of the next Stacking cycle can be found - [here](https://stx.eco/dao/tools?tool=2). A release should happen - at least 48 hours before the start of a new cycle, to avoid interfering - with the prepare phase. + - Create a chore branch from `release/signer-X.Y.Z.A.n.x`, ex: `chore/signer-X.Y.Z.A.n.x-changelog`. + - Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections. -2. Before creating the release, the release manager must determine the _version - number_ for this release, and create a release branch in the format: `release/signer-X.Y.Z.A.n.x`. - The factors that determine the version number are discussed in [Versioning](#versioning). + - Pull requests merged into `develop` can be found [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). -3. _Blocking_ PRs or issues are enumerated and a label should be applied to each - issue/PR such as `signer-X.Y.Z.A.n.x-blocker`. The Issue/PR owners for each should be pinged - for updates on whether or not those issues/PRs have any blockers or are waiting on feedback. - __Note__: It may be necessary to cherry-pick these PR's into the target branch `release/signer-X.Y.Z.A.n.x` + **Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release. -4. The [CHANGELOG.md](./CHANGELOG.md) file shall be updated with summaries of what - was `Added`, `Changed`, and `Fixed` in the base branch. For example, pull requests - merged into `develop` can be found [here](https://github.com/stacks-network/stacks-blockchain/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). - Note, however, that GitHub apparently does not allow sorting by _merge time_, - so, when sorting by some proxy criterion, some care should be used to understand - which PR's were _merged_ after the last release. +5. Once `chore/signer-X.Y.Z.A.n.x-changelog` has merged, a build may be started by manually triggering the [`CI` workflow](../.github/workflows/ci.yml) against the `release/signer-X.Y.Z.A.n.x` branch. -5. Once any blocker PRs have merged, a new tag will be created - by manually triggering the [`CI` Github Actions workflow](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml) - against the `release/signer-X.Y.Z.A.n.x` branch. +6. Once the release candidate has been built and binaries are available, ecosystem participants shall be notified to test the tagged release on various staging infrastructure. -6. Ecosystem participants will be notified of the release candidate in order - to test the release on various staging infrastructure. +7. If bugs or issues emerge from the rollout on staging infrastructure, the release will be delayed until those regressions are resolved. -7. If bugs or issues emerge from the rollout on staging infrastructure, the release - will be delayed until those regressions are resolved. As regressions are resolved, - additional release candidates shall be tagged. + - As regressions are resolved, additional release candidates should be tagged. + - Repeat steps 3-6 as necessary. -8. Once the final release candidate has rolled out successfully without issue on staging - infrastructure, the tagged release shall no longer marked as Pre-Release on the [Github releases](https://github.com/stacks-network/stacks-blockchain/releases/) - page. Announcements will then be shared in the `#stacks-core-devs` channel in the - Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce). +8. Once the final release candidate has rolled out successfully without issue on staging infrastructure, the tagged release shall no longer marked as Pre-Release on the [Github releases](https://github.com/stacks-network/stacks-core/releases/) page. + Announcements will then be shared in the `#stacks-core-devs` channel in the Stacks Discord, as well as the [mailing list](https://groups.google.com/a/stacks.org/g/announce). -9. Finally, the release branch `release/signer-X.Y.Z.A.n.x` will be PR'ed into the `master` branch, and once merged, a PR for `master->develop` will be opened. +9. Finally, the following merges will happen to complete the release process: + - Release branch `release/signer-X.Y.Z.A.n.x` will be merged into the `master` branch. + - Then, `master` will be merged into `develop`. From 0f6eca4e344d68f3cda50265e38ec653fccefab1 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 22 Jul 2024 22:34:28 -0400 Subject: [PATCH 0541/1400] feat: support resuming from a saved VRF key Fixes: #4994 --- .../stacks-node/src/nakamoto_node/relayer.rs | 53 ++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index a614043472..6f796b7fe8 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -15,6 +15,8 @@ // along with this program. If not, see . use core::fmt; use std::collections::HashSet; +use std::fs; +use std::io::Read; use std::sync::mpsc::{Receiver, RecvTimeoutError}; use std::thread::JoinHandle; use std::time::{Duration, Instant}; @@ -1095,6 +1097,36 @@ impl RelayerThread { debug!("Relayer exit!"); } + /// Try loading up a saved VRF key + pub(crate) fn load_saved_vrf_key(path: &str) -> Option { + let mut f = match fs::File::open(path) { + Ok(f) => f, + Err(e) => { + warn!("Could not open {}: {:?}", &path, &e); + return None; + } + }; + let mut registered_key_bytes = vec![]; + if let Err(e) = f.read_to_end(&mut registered_key_bytes) { + warn!( + "Failed to read registered key bytes from {}: {:?}", + path, &e + ); + return None; + } + + let Ok(registered_key) = serde_json::from_slice(®istered_key_bytes) else { + warn!( + "Did not load registered key from {}: could not decode JSON", + &path + ); + return None; + }; + + info!("Loaded registered key from {}", &path); + Some(registered_key) + } + /// Top-level dispatcher pub fn handle_directive(&mut self, directive: RelayerDirective) -> bool { debug!("Relayer: handling directive"; "directive" => %directive); @@ -1113,7 +1145,26 @@ impl RelayerThread { info!("In initial block download, will not submit VRF registration"); return true; } - self.rotate_vrf_and_register(&last_burn_block); + let mut saved_key_opt = None; + let mut restored = false; + if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() { + saved_key_opt = Self::load_saved_vrf_key(&path); + } + if let Some(saved_key) = saved_key_opt { + let pubkey_hash = self.keychain.get_nakamoto_pkh(); + if pubkey_hash.as_ref() == &saved_key.memo { + debug!("Relayer: resuming VRF key"); + self.globals.resume_leader_key(saved_key); + restored = true; + } else { + warn!("Relayer: directive Saved VRF key does not match current key"); + } + } + if !restored { + debug!("Relayer: directive Register VRF key"); + self.rotate_vrf_and_register(&last_burn_block); + debug!("Relayer: directive Registered VRF key"); + } self.globals.counters.bump_blocks_processed(); true } From 6211ffd1cc55e42203f3589d6c5549f636fab33f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 23 Jul 2024 11:19:09 -0400 Subject: [PATCH 0542/1400] fix: make signer's warning log more accurate This log can be hit in the case where a nakamoto block does not build off of the most recently signed nakamoto block, so the warning message should be less specific about the other case. --- stacks-signer/src/chainstate.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 95c60d3a3c..3473997c11 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -438,7 +438,7 @@ impl SortitionsView { Ok(true) } else { warn!( - "Miner block proposal's tenure change transaction does not confirm as many blocks as we expect in the parent tenure"; + "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, From 269e5c029d2c99bd19966f10d4e96127b42729d6 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 23 Jul 2024 09:09:20 -0700 Subject: [PATCH 0543/1400] Update stacks-signer/release-process.md Co-authored-by: Adriano Di Luzio --- stacks-signer/release-process.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stacks-signer/release-process.md b/stacks-signer/release-process.md index 556aa811c0..8d56332081 100644 --- a/stacks-signer/release-process.md +++ b/stacks-signer/release-process.md @@ -13,8 +13,7 @@ ## Release Schedule and Hotfixes -Normal releases in this repository that add new or updated features shall be released in an ad-hoc manner. -The currently staged changes for such releases are in the [develop branch](https://github.com/stacks-network/stacks-core/tree/develop). +`stack-signer` releases that add new or updated features shall be released in an ad-hoc manner. It is generally safe to run a `stacks-signer` from that branch, though it has received less rigorous testing than release branches. If bugs are found in the `develop` branch, please do [report them as issues](https://github.com/stacks-network/stacks-core/issues) on this repository. From 696e78f4e79bd53482e5a6ebede67d46fad36d55 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 23 Jul 2024 09:16:03 -0700 Subject: [PATCH 0544/1400] Addressing PR comments --- docs/branching.md | 14 +++++++------- docs/release-process.md | 13 ++++++------- stacks-signer/release-process.md | 4 +--- 3 files changed, 14 insertions(+), 17 deletions(-) diff --git a/docs/branching.md b/docs/branching.md index 38db57e3e5..5b9a96b12a 100644 --- a/docs/branching.md +++ b/docs/branching.md @@ -4,22 +4,22 @@ The following is a modified version of the gitflow branching strategy described ## Main Branches -- **master** - `origin/master` is the main branch where the source code of HEAD always reflects a production-ready state. -- **develop** - `origin/develop` is the branch where the source code of HEAD always reflects a state with the latest delivered development changes for the next release. -- **next** - `origin/next` may contain consensus-breaking changes. +- **master** - `master` is the main branch where the source code of HEAD always reflects a production-ready state. +- **develop** - `develop` is the branch where the source code of HEAD always reflects a state with the latest delivered development changes for the next release. +- **next** - `next` may contain consensus-breaking changes. - **release/X.Y.Z.A.n** is the release branch. When the source code in the develop branch reaches a stable point and is ready to be released, a release branch is created as `release/X.Y.Z.A.n` (see [release-process.md](./release-process.md)). After release, the following will happen: -- `release/X.Y.Z.A.n` branch is merged back to `origin/master`. -- `origin/master` is then merged into `origin/develop`, and development continues in the `origin/develop` branch. -- `origin/develop` is then merged into `origin/next`. +- `release/X.Y.Z.A.n` branch is merged back to `master`. +- `master` is then merged into `develop`, and development continues in the `develop` branch. +- `develop` is then merged into `next`. ## Supporting Branches Branch names should use a prefix that conveys the overall goal of the branch. -All branches should be based off of `origin/develop`, with the exception being a hotfix branch which may be based off of `origin/master`. +All branches should be based off of `develop`, with the exception being a hotfix branch which may be based off of `master`. - `feat/some-fancy-new-thing`: For new features. - `fix/some-broken-thing`: For hot fixes and bug fixes. diff --git a/docs/release-process.md b/docs/release-process.md index ac7bd60d56..27e5b0ac4a 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -45,14 +45,13 @@ Optionally, an extra pre-release field may be appended to the version to specify ## Non-Consensus Breaking Release Process The release must be timed so that it does not interfere with a _prepare phase_. -The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2). -A release should happen at least 24 hours before the start of a new cycle, to avoid interfering with the prepare phase. +The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2); to avoid interfering with the prepare phase, releases should happen at least 24 hours before the start of a new cycle. 1. Before creating the release, the _version number_ must be determined, where the factors that determine the version number are discussed in [Versioning](#versioning). - First determine whether there are any "non-consensus-breaking changes that require a fresh chainstate". - - In other words, the database schema has changed, but an automatic migration was not implemented. - - Determine whether this a feature release, as opposed to a hotfix or a patch. + - In other words, the database schema has changed, but an automatic migration was not implemented. + - Determine whether this a feature release, as opposed to a hotfix or a patch. - A new branch in the format `release/X.Y.Z.A.n` is created from the base branch `develop`. 2. Enumerate PRs and/or issues that would _block_ the release. @@ -69,9 +68,9 @@ A release should happen at least 24 hours before the start of a new cycle, to av - Create a chore branch from `release/X.Y.Z.A.n`, ex: `chore/X.Y.Z.A.n-changelog`. - Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections. - - Pull requests merged into `develop` can be found [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). - - **Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release. + - Pull requests merged into `develop` can be found [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). + + **Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release. 5. Once `chore/X.Y.Z.A.n-changelog` has merged, a build may be started by manually triggering the [`CI` workflow](../.github/workflows/ci.yml) against the `release/X.Y.Z.A.n` branch. diff --git a/stacks-signer/release-process.md b/stacks-signer/release-process.md index 8d56332081..9d3f2cb5e1 100644 --- a/stacks-signer/release-process.md +++ b/stacks-signer/release-process.md @@ -44,8 +44,7 @@ x increments on the current stacks-core release version ## Non-Consensus Breaking Release Process The release must be timed so that it does not interfere with a _prepare phase_. -The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2). -A release should happen at least 24 hours before the start of a new cycle, to avoid interfering with the prepare phase. +The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2); to avoid interfering with the prepare phase, releases should happen at least 24 hours before the start of a new cycle. 1. Before creating the release, the _version number_ must be determined, where the factors that determine the version number are discussed in [Versioning](#versioning). @@ -88,4 +87,3 @@ A release should happen at least 24 hours before the start of a new cycle, to av 9. Finally, the following merges will happen to complete the release process: - Release branch `release/signer-X.Y.Z.A.n.x` will be merged into the `master` branch. - Then, `master` will be merged into `develop`. - From f95e131e31fb61b89e9d7b6749df6ab25cff4dbf Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 23 Jul 2024 09:42:01 -0700 Subject: [PATCH 0545/1400] Update stacks-signer changelog for 2.5.0.0.5.1 --- stacks-signer/CHANGELOG.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index fcc7ab17f5..6b28b15e8f 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -7,3 +7,18 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +## [2.5.0.0.5.1] + +### Added + +- Adds signerdb schema versioning (#4965) +- Added voting cli commands `generate-vote` and `verify-vote` (#4934) +- Add soritiion tracking cache (#4905) +- Push blocks to signer set and adds `/v3/blocks/upload` (#4902) + +### Changed + +- Fix an issue of poorly timed tenure and bitcoin blocks (#4956) +- Process pending blocks before ending tenure (#4952) +- Update rusqlite/sqlite versions (#4948) +- return last block sortition in `/v3/sortitions` (#4939) From c388af4fcafc3fe3a5f853e711a4158b6edb34d4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 23 Jul 2024 13:02:01 -0400 Subject: [PATCH 0546/1400] Add block proposal timeout configuration option to signer Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/mod.rs | 3 ++- stacks-signer/src/config.rs | 14 ++++++++++++++ stacks-signer/src/runloop.rs | 1 + 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index e3e3dd1dc5..71720a015b 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -565,7 +565,8 @@ pub(crate) mod tests { tx_fee_ustx: config.tx_fee_ustx, max_tx_fee_ustx: config.max_tx_fee_ustx, db_path: config.db_path.clone(), - first_proposal_burn_block_timing: Duration::from_secs(30), + first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, + block_proposal_timeout: config.block_proposal_timeout, } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 4c7bc565d3..66cf5a5f7d 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -36,6 +36,7 @@ use wsts::curve::scalar::Scalar; use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; +const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 45_000; // Default transaction fee to use in microstacks (if unspecificed in the config file) const TX_FEE_USTX: u64 = 10_000; @@ -154,6 +155,8 @@ pub struct SignerConfig { /// How much time must pass between the first block proposal in a tenure and the next bitcoin block /// before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing: Duration, + /// How much time to wait for a miner to propose a block following a sortition + pub block_proposal_timeout: Duration, } /// The parsed configuration for the signer @@ -196,6 +199,8 @@ pub struct GlobalConfig { /// How much time between the first block proposal in a tenure and the next bitcoin block /// must pass before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing: Duration, + /// How much time to wait for a miner to propose a block following a sortition + pub block_proposal_timeout: Duration, } /// Internal struct for loading up the config file @@ -236,6 +241,8 @@ struct RawConfigFile { /// How much time must pass between the first block proposal in a tenure and the next bitcoin block /// before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing_secs: Option, + /// How much time to wait for a miner to propose a block following a sortition in milliseconds + pub block_proposal_timeout_ms: Option, } impl RawConfigFile { @@ -324,6 +331,12 @@ impl TryFrom for GlobalConfig { None => None, }; + let block_proposal_timeout = Duration::from_millis( + raw_data + .block_proposal_timeout_ms + .unwrap_or(BLOCK_PROPOSAL_TIMEOUT_MS), + ); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -343,6 +356,7 @@ impl TryFrom for GlobalConfig { db_path, metrics_endpoint, first_proposal_burn_block_timing, + block_proposal_timeout, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 6795f0cfee..2909c9383a 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -270,6 +270,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo tx_fee_ustx: self.config.tx_fee_ustx, max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), + block_proposal_timeout: self.config.block_proposal_timeout, }) } From 4a460979ee7c13131628330c62ba38237df3bffc Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 23 Jul 2024 16:11:41 -0400 Subject: [PATCH 0547/1400] refactor: improve VRF key load code --- .../stacks-node/src/nakamoto_node/relayer.rs | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 6f796b7fe8..f3914609e2 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1098,7 +1098,7 @@ impl RelayerThread { } /// Try loading up a saved VRF key - pub(crate) fn load_saved_vrf_key(path: &str) -> Option { + pub(crate) fn load_saved_vrf_key(path: &str, pubkey_hash: &Hash160) -> Option { let mut f = match fs::File::open(path) { Ok(f) => f, Err(e) => { @@ -1115,7 +1115,8 @@ impl RelayerThread { return None; } - let Ok(registered_key) = serde_json::from_slice(®istered_key_bytes) else { + let Ok(registered_key) = serde_json::from_slice::(®istered_key_bytes) + else { warn!( "Did not load registered key from {}: could not decode JSON", &path @@ -1123,6 +1124,12 @@ impl RelayerThread { return None; }; + // Check that the loaded key's memo matches the current miner's key + if registered_key.memo != pubkey_hash.as_ref() { + warn!("Loaded VRF key does not match mining key"); + return None; + } + info!("Loaded registered key from {}", &path); Some(registered_key) } @@ -1146,21 +1153,14 @@ impl RelayerThread { return true; } let mut saved_key_opt = None; - let mut restored = false; if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() { - saved_key_opt = Self::load_saved_vrf_key(&path); + saved_key_opt = + Self::load_saved_vrf_key(&path, &self.keychain.get_nakamoto_pkh()); } if let Some(saved_key) = saved_key_opt { - let pubkey_hash = self.keychain.get_nakamoto_pkh(); - if pubkey_hash.as_ref() == &saved_key.memo { - debug!("Relayer: resuming VRF key"); - self.globals.resume_leader_key(saved_key); - restored = true; - } else { - warn!("Relayer: directive Saved VRF key does not match current key"); - } - } - if !restored { + debug!("Relayer: resuming VRF key"); + self.globals.resume_leader_key(saved_key); + } else { debug!("Relayer: directive Register VRF key"); self.rotate_vrf_and_register(&last_burn_block); debug!("Relayer: directive Registered VRF key"); From 4777310ee12966cfa10fecc61f4c32049e8f61cb Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 23 Jul 2024 17:57:06 -0400 Subject: [PATCH 0548/1400] feat: write activated VRF key to path from config file This allows the key to be reused on miner restart. This functionality is already available in the neon node, and is now added to the nakamoto node. --- testnet/stacks-node/src/nakamoto_node.rs | 39 +++++++++++++++++++- testnet/stacks-node/src/run_loop/nakamoto.rs | 9 +++-- 2 files changed, 43 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 22ba5f2d7e..c57d630a58 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -14,9 +14,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::collections::HashSet; +use std::io::Write; use std::sync::mpsc::Receiver; -use std::thread; use std::thread::JoinHandle; +use std::{fs, thread}; use stacks::burnchains::{BurnchainSigner, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -277,6 +278,7 @@ impl StacksNode { /// Called from the main thread. pub fn process_burnchain_state( &mut self, + config: &Config, sortdb: &SortitionDB, sort_id: &SortitionId, ibd: bool, @@ -316,9 +318,18 @@ impl StacksNode { let num_key_registers = key_registers.len(); - self.globals + let activated_key_opt = self + .globals .try_activate_leader_key_registration(block_height, key_registers); + // save the registered VRF key + if let (Some(activated_key), Some(path)) = ( + activated_key_opt, + config.miner.activated_vrf_key_path.as_ref(), + ) { + save_activated_vrf_key(path, &activated_key); + } + debug!( "Processed burnchain state"; "burn_height" => block_height, @@ -339,3 +350,27 @@ impl StacksNode { self.p2p_thread_handle.join().unwrap(); } } + +pub(crate) fn save_activated_vrf_key(path: &str, activated_key: &RegisteredKey) { + info!("Activated VRF key; saving to {}", path); + + let Ok(key_json) = serde_json::to_string(&activated_key) else { + warn!("Failed to serialize VRF key"); + return; + }; + + let mut f = match fs::File::create(&path) { + Ok(f) => f, + Err(e) => { + warn!("Failed to create {}: {:?}", &path, &e); + return; + } + }; + + if let Err(e) = f.write_all(key_json.as_str().as_bytes()) { + warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); + return; + } + + info!("Saved activated VRF key to {}", &path); +} diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 65c36e926c..511b6c84b2 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -635,9 +635,12 @@ impl RunLoop { let sortition_id = &block.sortition_id; // Have the node process the new block, that can include, or not, a sortition. - if let Err(e) = - node.process_burnchain_state(burnchain.sortdb_mut(), sortition_id, ibd) - { + if let Err(e) = node.process_burnchain_state( + self.config(), + burnchain.sortdb_mut(), + sortition_id, + ibd, + ) { // relayer errored, exit. error!("Runloop: Block relayer and miner errored, exiting."; "err" => ?e); return; From ad11ffe3184a62dfc4bcc203053b7160ee869e57 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 23 Jul 2024 17:58:08 -0400 Subject: [PATCH 0549/1400] test: add tests for storing and loading VRF keys --- .../stacks-node/src/nakamoto_node/relayer.rs | 118 ++++++++++++++++++ 1 file changed, 118 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index f3914609e2..fcaebb10b0 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1205,3 +1205,121 @@ impl RelayerThread { continue_running } } + +#[cfg(test)] +pub mod test { + use std::fs::File; + use std::io::Write; + use std::path::Path; + + use stacks::util::hash::Hash160; + use stacks::util::secp256k1::Secp256k1PublicKey; + use stacks::util::vrf::VRFPublicKey; + + use super::RelayerThread; + use crate::nakamoto_node::save_activated_vrf_key; + use crate::run_loop::RegisteredKey; + use crate::Keychain; + + #[test] + fn load_nonexistent_vrf_key() { + let keychain = Keychain::default(vec![0u8; 32]); + let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); + let pubkey_hash = Hash160::from_node_public_key(&pk); + + let path = "/tmp/does_not_exist.json"; + _ = std::fs::remove_file(&path); + + let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + assert!(res.is_none()); + } + + #[test] + fn load_empty_vrf_key() { + let keychain = Keychain::default(vec![0u8; 32]); + let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); + let pubkey_hash = Hash160::from_node_public_key(&pk); + + let path = "/tmp/empty.json"; + File::create(&path).expect("Failed to create test file"); + assert!(Path::new(&path).exists()); + + let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + assert!(res.is_none()); + + std::fs::remove_file(&path).expect("Failed to delete test file"); + } + + #[test] + fn load_bad_vrf_key() { + let keychain = Keychain::default(vec![0u8; 32]); + let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); + let pubkey_hash = Hash160::from_node_public_key(&pk); + + let path = "/tmp/invalid_saved_key.json"; + let json_content = r#"{ "hello": "world" }"#; + + // Write the JSON content to the file + let mut file = File::create(&path).expect("Failed to create test file"); + file.write_all(json_content.as_bytes()) + .expect("Failed to write to test file"); + assert!(Path::new(&path).exists()); + + let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + assert!(res.is_none()); + + std::fs::remove_file(&path).expect("Failed to delete test file"); + } + + #[test] + fn save_load_vrf_key() { + let keychain = Keychain::default(vec![0u8; 32]); + let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); + let pubkey_hash = Hash160::from_node_public_key(&pk); + let key = RegisteredKey { + target_block_height: 101, + block_height: 102, + op_vtxindex: 1, + vrf_public_key: VRFPublicKey::from_hex( + "1da75863a7e1ef86f0f550d92b1f77dc60af23694b884b2816b703137ff94e71", + ) + .unwrap(), + memo: pubkey_hash.as_ref().to_vec(), + }; + let path = "/tmp/vrf_key.json"; + save_activated_vrf_key(path, &key); + + let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + assert!(res.is_some()); + + std::fs::remove_file(&path).expect("Failed to delete test file"); + } + + #[test] + fn invalid_saved_memo() { + let keychain = Keychain::default(vec![0u8; 32]); + let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); + let pubkey_hash = Hash160::from_node_public_key(&pk); + let key = RegisteredKey { + target_block_height: 101, + block_height: 102, + op_vtxindex: 1, + vrf_public_key: VRFPublicKey::from_hex( + "1da75863a7e1ef86f0f550d92b1f77dc60af23694b884b2816b703137ff94e71", + ) + .unwrap(), + memo: pubkey_hash.as_ref().to_vec(), + }; + let path = "/tmp/vrf_key.json"; + save_activated_vrf_key(path, &key); + + let keychain = Keychain::default(vec![1u8; 32]); + let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); + let pubkey_hash = Hash160::from_node_public_key(&pk); + + let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + assert!(res.is_none()); + + std::fs::remove_file(&path).expect("Failed to delete test file"); + } +} From 11b17be60352a3d52a4a4c1c8768b451521b84df Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 24 Jul 2024 11:53:19 -0400 Subject: [PATCH 0550/1400] chore: remove excess logging --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 1 - testnet/stacks-node/src/neon_node.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index fcaebb10b0..7b61c7aad3 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1161,7 +1161,6 @@ impl RelayerThread { debug!("Relayer: resuming VRF key"); self.globals.resume_leader_key(saved_key); } else { - debug!("Relayer: directive Register VRF key"); self.rotate_vrf_and_register(&last_burn_block); debug!("Relayer: directive Registered VRF key"); } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index a5ef405f94..ed31540f20 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -3971,7 +3971,6 @@ impl RelayerThread { if let Some(saved_key) = saved_key_opt { self.globals.resume_leader_key(saved_key); } else { - debug!("Relayer: directive Register VRF key"); self.rotate_vrf_and_register(&last_burn_block); debug!("Relayer: directive Registered VRF key"); } From e2828ace804988589980a124925d2eaf0b65fcbb Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 24 Jul 2024 11:53:45 -0400 Subject: [PATCH 0551/1400] chore: improve logging of signer signature hash --- testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index e44327d3fc..30f73e75be 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -653,7 +653,7 @@ impl SignCoordinator { let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); debug!("Sending block proposal message to signers"; - "signer_signature_hash" => ?&block.header.signer_signature_hash().0, + "signer_signature_hash" => %block.header.signer_signature_hash(), ); Self::send_miners_message_scalar::( &self.message_key, From 9844895154da1473c66dfcac82d61d98f121b1d5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 24 Jul 2024 11:57:52 -0400 Subject: [PATCH 0552/1400] Add block proposal timeout check to handle_block_proposal Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 3 ++ stacks-signer/src/tests/chainstate.rs | 1 + stacks-signer/src/v0/signer.rs | 30 ++++++++++++++++++- .../stacks-node/src/nakamoto_node/relayer.rs | 6 ++-- .../src/tests/nakamoto_integrations.rs | 3 ++ testnet/stacks-node/src/tests/signer/v0.rs | 1 + 6 files changed, 40 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 95c60d3a3c..c4bed351b4 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -84,12 +84,15 @@ pub struct ProposalEvalConfig { /// How much time must pass between the first block proposal in a tenure and the next bitcoin block /// before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing: Duration, + /// Time between processing a sortition and proposing a block before the block is considered invalid + pub block_proposal_timeout: Duration, } impl From<&SignerConfig> for ProposalEvalConfig { fn from(value: &SignerConfig) -> Self { Self { first_proposal_burn_block_timing: value.first_proposal_burn_block_timing.clone(), + block_proposal_timeout: value.block_proposal_timeout.clone(), } } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index c2c65f265c..62ce751f94 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -86,6 +86,7 @@ fn setup_test_environment( last_sortition, config: ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(30), + block_proposal_timeout: Duration::from_secs(30), }, }; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 93927b03fd..a6554ee27d 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -14,6 +14,7 @@ // along with this program. If not, see . use std::fmt::Debug; use std::sync::mpsc::Sender; +use std::time::{Duration, UNIX_EPOCH}; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; use clarity::types::chainstate::StacksPrivateKey; @@ -26,7 +27,7 @@ use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::{debug, error, info, warn}; -use crate::chainstate::{ProposalEvalConfig, SortitionsView}; +use crate::chainstate::{ProposalEvalConfig, SortitionMinerStatus, SortitionsView}; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; use crate::runloop::{RunLoopCommand, SignerResult}; @@ -311,6 +312,33 @@ impl Signer { // Check if proposal can be rejected now if not valid against sortition view let block_response = if let Some(sortition_state) = sortition_state { + // If this is the first block in the tenure, check if it was proposed after the timeout + if let Ok(None) = self + .signer_db + .get_last_signed_block_in_tenure(&block_proposal.block.header.consensus_hash) + { + if let Ok(Some(received_ts)) = self + .signer_db + .get_burn_block_receive_time(&sortition_state.cur_sortition.burn_block_hash) + { + let received_time = UNIX_EPOCH + Duration::from_secs(received_ts); + let elapsed = std::time::SystemTime::now() + .duration_since(received_time) + .unwrap_or_else(|_| { + panic!("{self}: Failed to calculate time since burn block received") + }); + if elapsed >= self.proposal_config.block_proposal_timeout { + warn!( + "{self}: miner proposed block after timeout."; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + ); + sortition_state.cur_sortition.miner_status = + SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } + } + } + match sortition_state.check_proposal( stacks_client, &self.signer_db, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index a614043472..36e93f3d7e 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -110,7 +110,7 @@ pub struct LastCommit { /// the tenure consensus hash for the tip's tenure tenure_consensus_hash: ConsensusHash, /// the start-block hash of the tip's tenure - start_block_hash: BlockHeaderHash, + _start_block_hash: BlockHeaderHash, /// What is the epoch in which this was sent? epoch_id: StacksEpochId, /// commit txid (to be filled in on submission) @@ -123,7 +123,7 @@ impl LastCommit { burn_tip: BlockSnapshot, stacks_tip: StacksBlockId, tenure_consensus_hash: ConsensusHash, - start_block_hash: BlockHeaderHash, + _start_block_hash: BlockHeaderHash, epoch_id: StacksEpochId, ) -> Self { Self { @@ -131,7 +131,7 @@ impl LastCommit { burn_tip, stacks_tip, tenure_consensus_hash, - start_block_hash, + _start_block_hash, epoch_id, txid: None, } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 816e130a59..c13fdf5bf2 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4653,6 +4653,7 @@ fn signer_chainstate() { // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), }; let sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); @@ -4766,6 +4767,7 @@ fn signer_chainstate() { // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), }; let sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let valid = sortitions_view @@ -4832,6 +4834,7 @@ fn signer_chainstate() { // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2a9da738d3..3543fb1ce8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -268,6 +268,7 @@ fn block_proposal_rejection() { let reward_cycle = signer_test.get_current_reward_cycle(); let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), }; let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); let mut block = NakamotoBlock { From cf2566560c38c97ebf4f99acc90f50e458f2d3a2 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 24 Jul 2024 13:06:22 -0400 Subject: [PATCH 0553/1400] Move block proposal timeout check to check_proposal and add unit tests Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 36 +++++++++++++-- stacks-signer/src/signerdb.rs | 4 +- stacks-signer/src/tests/chainstate.rs | 44 +++++++++++++++++-- stacks-signer/src/v0/signer.rs | 30 +------------ .../src/tests/nakamoto_integrations.rs | 6 ++- 5 files changed, 79 insertions(+), 41 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index c4bed351b4..4c04d798d8 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::time::Duration; +use std::time::{Duration, UNIX_EPOCH}; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::TenureChangePayload; @@ -91,8 +91,8 @@ pub struct ProposalEvalConfig { impl From<&SignerConfig> for ProposalEvalConfig { fn from(value: &SignerConfig) -> Self { Self { - first_proposal_burn_block_timing: value.first_proposal_burn_block_timing.clone(), - block_proposal_timeout: value.block_proposal_timeout.clone(), + first_proposal_burn_block_timing: value.first_proposal_burn_block_timing, + block_proposal_timeout: value.block_proposal_timeout, } } } @@ -150,12 +150,40 @@ impl<'a> ProposedBy<'a> { impl SortitionsView { /// Apply checks from the SortitionsView on the block proposal. pub fn check_proposal( - &self, + &mut self, client: &StacksClient, signer_db: &SignerDb, block: &NakamotoBlock, block_pk: &StacksPublicKey, ) -> Result { + // If this is the first block in the tenure, check if it was proposed after the timeout + if signer_db + .get_last_signed_block_in_tenure(&block.header.consensus_hash)? + .is_none() + { + if let Some(received_ts) = + signer_db.get_burn_block_receive_time(&self.cur_sortition.burn_block_hash)? + { + let received_time = UNIX_EPOCH + Duration::from_secs(received_ts); + let elapsed = std::time::SystemTime::now() + .duration_since(received_time) + .unwrap_or_else(|_| { + panic!("Failed to calculate time since burn block received") + }); + if elapsed >= self.config.block_proposal_timeout { + warn!( + "Miner proposed first block after block proposal timeout."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, + "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), + "burn_block_received_time" => ?received_time, + ); + self.cur_sortition.miner_status = + SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } + } + } let bitvec_all_1s = block.header.pox_treatment.iter().all(|entry| entry); if !bitvec_all_1s { warn!( diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 5ecef398d4..09c17ed40d 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -189,7 +189,7 @@ impl SignerDb { }) .optional(); match result { - Ok(x) => Ok(x.unwrap_or_else(|| 0)), + Ok(x) => Ok(x.unwrap_or(0)), Err(e) => Err(DBError::from(e)), } } @@ -294,7 +294,7 @@ impl SignerDb { tenure: &ConsensusHash, ) -> Result, DBError> { let query = "SELECT block_info FROM blocks WHERE consensus_hash = ? AND signed_over = 1 ORDER BY stacks_height ASC LIMIT 1"; - let result: Option = query_row(&self.db, query, &[tenure])?; + let result: Option = query_row(&self.db, query, [tenure])?; try_deserialize(result) } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 62ce751f94..e469cbdeb9 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -86,7 +86,7 @@ fn setup_test_environment( last_sortition, config: ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(30), - block_proposal_timeout: Duration::from_secs(30), + block_proposal_timeout: Duration::from_secs(5), }, }; @@ -111,7 +111,7 @@ fn setup_test_environment( parent_block_id: StacksBlockId([0; 32]), tx_merkle_root: Sha512Trunc256Sum([0; 32]), state_index_root: TrieHash([0; 32]), - timestamp: 11, + timestamp: 3, miner_signature: MessageSignature::empty(), signer_signature: vec![], pox_treatment: BitVec::ones(1).unwrap(), @@ -140,7 +140,7 @@ fn check_proposal_units() { #[test] fn check_proposal_miner_pkh_mismatch() { - let (stacks_client, signer_db, _block_pk, view, mut block) = + let (stacks_client, signer_db, _block_pk, mut view, mut block) = setup_test_environment("miner_pkh_mismatch"); block.header.consensus_hash = view.cur_sortition.consensus_hash; let different_block_pk = StacksPublicKey::from_private(&StacksPrivateKey::from_seed(&[2, 3])); @@ -328,7 +328,7 @@ fn make_tenure_change_tx(payload: TenureChangePayload) -> StacksTransaction { #[test] fn check_proposal_tenure_extend_invalid_conditions() { - let (stacks_client, signer_db, block_pk, view, mut block) = + let (stacks_client, signer_db, block_pk, mut view, mut block) = setup_test_environment("tenure_extend"); block.header.consensus_hash = view.cur_sortition.consensus_hash; let mut extend_payload = make_tenure_change_payload(); @@ -351,3 +351,39 @@ fn check_proposal_tenure_extend_invalid_conditions() { .check_proposal(&stacks_client, &signer_db, &block, &block_pk) .unwrap()); } + +#[test] +fn check_block_proposal_timeout() { + let (stacks_client, mut signer_db, block_pk, mut view, mut curr_sortition_block) = + setup_test_environment("block_proposal_timeout"); + curr_sortition_block.header.consensus_hash = view.cur_sortition.consensus_hash; + let mut last_sortition_block = curr_sortition_block.clone(); + last_sortition_block.header.consensus_hash = + view.last_sortition.as_ref().unwrap().consensus_hash; + + // Ensure we have a burn height to compare against + let burn_hash = view.cur_sortition.burn_block_hash; + let burn_height = 1; + let received_time = SystemTime::now(); + signer_db + .insert_burn_block(&burn_hash, burn_height, &received_time) + .unwrap(); + + assert!(view + .check_proposal(&stacks_client, &signer_db, &curr_sortition_block, &block_pk) + .unwrap()); + + assert!(!view + .check_proposal(&stacks_client, &signer_db, &last_sortition_block, &block_pk) + .unwrap()); + + // Sleep a bit to time out the block proposal + std::thread::sleep(Duration::from_secs(5)); + assert!(!view + .check_proposal(&stacks_client, &signer_db, &curr_sortition_block, &block_pk) + .unwrap()); + + assert!(view + .check_proposal(&stacks_client, &signer_db, &last_sortition_block, &block_pk) + .unwrap()); +} diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index a6554ee27d..93927b03fd 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -14,7 +14,6 @@ // along with this program. If not, see . use std::fmt::Debug; use std::sync::mpsc::Sender; -use std::time::{Duration, UNIX_EPOCH}; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; use clarity::types::chainstate::StacksPrivateKey; @@ -27,7 +26,7 @@ use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::{debug, error, info, warn}; -use crate::chainstate::{ProposalEvalConfig, SortitionMinerStatus, SortitionsView}; +use crate::chainstate::{ProposalEvalConfig, SortitionsView}; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; use crate::runloop::{RunLoopCommand, SignerResult}; @@ -312,33 +311,6 @@ impl Signer { // Check if proposal can be rejected now if not valid against sortition view let block_response = if let Some(sortition_state) = sortition_state { - // If this is the first block in the tenure, check if it was proposed after the timeout - if let Ok(None) = self - .signer_db - .get_last_signed_block_in_tenure(&block_proposal.block.header.consensus_hash) - { - if let Ok(Some(received_ts)) = self - .signer_db - .get_burn_block_receive_time(&sortition_state.cur_sortition.burn_block_hash) - { - let received_time = UNIX_EPOCH + Duration::from_secs(received_ts); - let elapsed = std::time::SystemTime::now() - .duration_since(received_time) - .unwrap_or_else(|_| { - panic!("{self}: Failed to calculate time since burn block received") - }); - if elapsed >= self.proposal_config.block_proposal_timeout { - warn!( - "{self}: miner proposed block after timeout."; - "signer_sighash" => %signer_signature_hash, - "block_id" => %block_proposal.block.block_id(), - ); - sortition_state.cur_sortition.miner_status = - SortitionMinerStatus::InvalidatedBeforeFirstBlock; - } - } - } - match sortition_state.check_proposal( stacks_client, &self.signer_db, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c13fdf5bf2..9173dd8645 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4655,7 +4655,8 @@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), }; - let sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); + let mut sortitions_view = + SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); // check the prior tenure's proposals again, confirming that the sortitions_view // will reject them. @@ -4769,7 +4770,8 @@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), }; - let sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); + let mut sortitions_view = + SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let valid = sortitions_view .check_proposal( &signer_client, From dc56f84636c09f2fff278c7aa0a7fdc4353f22f9 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 24 Jul 2024 13:59:07 -0500 Subject: [PATCH 0554/1400] chore: more info in signer logging events --- libsigner/src/events.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 9a59221b14..6dbc10110a 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -393,7 +393,6 @@ fn process_stackerdb_event( local_addr: Option, mut request: HttpRequest, ) -> Result, EventError> { - debug!("Got stackerdb_chunks event"); let mut body = String::new(); if let Err(e) = request.as_reader().read_to_string(&mut body) { error!("Failed to read body: {:?}", &e); @@ -404,6 +403,7 @@ fn process_stackerdb_event( ))); } + debug!("Got stackerdb_chunks event"; "chunks_event_body" => %body); let event: StackerDBChunksEvent = serde_json::from_slice(body.as_bytes()) .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; From f6314e6b719096e07925e78d7658902e3648fb3c Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 19 Jul 2024 15:13:14 -0400 Subject: [PATCH 0555/1400] refactor(signer): Add enum for version-specific data to `BlockInfo` --- stacks-signer/src/signerdb.rs | 95 +++++++++++++++---- stacks-signer/src/v1/signer.rs | 19 ++-- .../src/tests/nakamoto_integrations.rs | 14 ++- 3 files changed, 101 insertions(+), 27 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 5ecef398d4..8aec0cc048 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -34,6 +34,73 @@ use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::{debug, error}; use wsts::net::NonceRequest; +#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] +/// Information specific to Signer V1 +pub struct BlockInfoV1 { + /// The associated packet nonce request if we have one + pub nonce_request: Option, + /// Whether this block is already being signed over + pub signed_over: bool, +} + +impl From for BlockInfoV1 { + fn from(value: NonceRequest) -> Self { + Self { + nonce_request: Some(value), + signed_over: true, + } + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] +/// Store extra version-specific info in `BlockInfo` +pub enum ExtraBlockInfo { + #[default] + /// Don't know what version + None, + /// Extra data for Signer V0 + V0, + /// Extra data for Signer V1 + V1(BlockInfoV1), +} + +impl ExtraBlockInfo { + /// Get `signed_over` if it exists + pub fn get_signed_over(&self) -> Option { + match self { + ExtraBlockInfo::None | ExtraBlockInfo::V0 => None, + ExtraBlockInfo::V1(v1) => Some(v1.signed_over), + } + } + /// Set `signed_over` if it exists + pub fn set_signed_over(&mut self, value: bool) -> Result<(), &str> { + match self { + ExtraBlockInfo::None | ExtraBlockInfo::V0 => Err("Field doesn't exist"), + ExtraBlockInfo::V1(v1) => { + v1.signed_over = value; + Ok(()) + } + } + } + /// Take `nonce_request` if it exists + pub fn take_nonce_request(&mut self) -> Option { + match self { + ExtraBlockInfo::None | ExtraBlockInfo::V0 => None, + ExtraBlockInfo::V1(v1) => v1.nonce_request.take(), + } + } + /// Set `nonce_request` if it exists + pub fn set_nonce_request(&mut self, value: NonceRequest) -> Result<(), &str> { + match self { + ExtraBlockInfo::None | ExtraBlockInfo::V0 => Err("Field doesn't exist"), + ExtraBlockInfo::V1(v1) => { + v1.nonce_request = Some(value); + Ok(()) + } + } + } +} + /// Additional Info about a proposed block #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct BlockInfo { @@ -47,16 +114,14 @@ pub struct BlockInfo { pub vote: Option, /// Whether the block contents are valid pub valid: Option, - /// The associated packet nonce request if we have one - pub nonce_request: Option, - /// Whether this block is already being signed over - pub signed_over: bool, /// Time at which the proposal was received by this signer (epoch time in seconds) pub proposed_time: u64, /// Time at which the proposal was signed by this signer (epoch time in seconds) pub signed_self: Option, /// Time at which the proposal was signed by a threshold in the signer set (epoch time in seconds) pub signed_group: Option, + /// Extra data specific to v0, v1, etc. + pub ext: ExtraBlockInfo, } impl From for BlockInfo { @@ -67,20 +132,18 @@ impl From for BlockInfo { reward_cycle: value.reward_cycle, vote: None, valid: None, - nonce_request: None, - signed_over: false, proposed_time: get_epoch_time_secs(), signed_self: None, signed_group: None, + ext: ExtraBlockInfo::default(), } } } impl BlockInfo { /// Create a new BlockInfo with an associated nonce request packet - pub fn new_with_request(block_proposal: BlockProposal, nonce_request: NonceRequest) -> Self { + pub fn new_v1_with_request(block_proposal: BlockProposal, nonce_request: NonceRequest) -> Self { let mut block_info = BlockInfo::from(block_proposal); - block_info.nonce_request = Some(nonce_request); - block_info.signed_over = true; + block_info.ext = ExtraBlockInfo::V1(BlockInfoV1::from(nonce_request)); block_info } @@ -88,10 +151,8 @@ impl BlockInfo { /// already set. pub fn mark_signed_and_valid(&mut self) { self.valid = Some(true); - self.signed_over = true; - if self.signed_self.is_none() { - self.signed_self = Some(get_epoch_time_secs()); - } + self.signed_self.get_or_insert(get_epoch_time_secs()); + _ = self.ext.set_signed_over(true); } /// Return the block's signer signature hash @@ -115,7 +176,7 @@ CREATE TABLE IF NOT EXISTS blocks ( block_info TEXT NOT NULL, consensus_hash TEXT NOT NULL, signed_over INTEGER NOT NULL, - stacks_height INTEGER NOT NULL, + stacks_height INTEGER NOT NULL, burn_block_height INTEGER NOT NULL, PRIMARY KEY (reward_cycle, signer_signature_hash) ) STRICT"; @@ -189,7 +250,7 @@ impl SignerDb { }) .optional(); match result { - Ok(x) => Ok(x.unwrap_or_else(|| 0)), + Ok(x) => Ok(x.unwrap_or(0)), Err(e) => Err(DBError::from(e)), } } @@ -346,7 +407,7 @@ impl SignerDb { serde_json::to_string(&block_info).expect("Unable to serialize block info"); let hash = &block_info.signer_signature_hash(); let block_id = &block_info.block.block_id(); - let signed_over = &block_info.signed_over; + let signed_over = &block_info.ext.get_signed_over().unwrap_or(false); let vote = block_info .vote .as_ref() @@ -543,6 +604,8 @@ mod tests { let db_path = tmp_db_path(); let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); let (mut block_info, block_proposal) = create_block(); + // We'll need the V1 data fields for this + block_info.ext = ExtraBlockInfo::V1(BlockInfoV1::default()); db.insert_block(&block_info).unwrap(); assert!(db diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 83dae5254b..190c740688 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -586,7 +586,7 @@ impl Signer { .block_lookup(self.reward_cycle, &signer_signature_hash) .unwrap_or_else(|_| Some(BlockInfo::from(block_proposal.clone()))) .unwrap_or_else(|| BlockInfo::from(block_proposal.clone())); - if block_info.signed_over { + if block_info.ext.get_signed_over().unwrap_or(false) { debug!("{self}: Received a sign command for a block we are already signing over. Ignore it."); return; } @@ -603,7 +603,9 @@ impl Signer { Ok(msg) => { let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); debug!("{self}: ACK: {ack:?}",); - block_info.signed_over = true; + block_info.ext.set_signed_over(true).unwrap_or_else(|e| { + error!("{self}: `set_signed_over()` failed: {e:?}"); + }); self.signer_db .insert_block(&block_info) .unwrap_or_else(|e| { @@ -692,7 +694,7 @@ impl Signer { block_info } }; - if let Some(mut nonce_request) = block_info.nonce_request.take() { + if let Some(mut nonce_request) = block_info.ext.take_nonce_request() { debug!("{self}: Received a block validate response from the stacks node for a block we already received a nonce request for. Responding to the nonce request..."); // We have received validation from the stacks node. Determine our vote and update the request message self.determine_vote(&mut block_info, &mut nonce_request); @@ -707,7 +709,7 @@ impl Signer { "{self}: Received a block validate response"; "block_hash" => block_info.block.header.block_hash(), "valid" => block_info.valid, - "signed_over" => block_info.signed_over, + "signed_over" => block_info.ext.get_signed_over(), ); self.signer_db .insert_block(&block_info) @@ -916,7 +918,7 @@ impl Signer { "{self}: received a nonce request for a new block. Submit block for validation. "; "signer_sighash" => %signer_signature_hash, ); - let block_info = BlockInfo::new_with_request(block_proposal, nonce_request.clone()); + let block_info = BlockInfo::new_v1_with_request(block_proposal, nonce_request.clone()); stacks_client .submit_block_for_validation(block_info.block.clone()) .unwrap_or_else(|e| { @@ -928,7 +930,12 @@ impl Signer { if block_info.valid.is_none() { // We have not yet received validation from the stacks node. Cache the request and wait for validation debug!("{self}: We have yet to receive validation from the stacks node for a nonce request. Cache the nonce request and wait for block validation..."); - block_info.nonce_request = Some(nonce_request.clone()); + block_info + .ext + .set_nonce_request(nonce_request.clone()) + .unwrap_or_else(|e| { + warn!("{self}: Failed to set nonce_request: {e:?}",); + }); return Some(block_info); } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 816e130a59..7b77cb9a84 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -89,7 +89,7 @@ use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; -use stacks_signer::signerdb::{BlockInfo, SignerDb}; +use stacks_signer::signerdb::{BlockInfo, BlockInfoV1, ExtraBlockInfo, SignerDb}; use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; @@ -4714,11 +4714,13 @@ fn signer_chainstate() { reward_cycle, vote: None, valid: Some(true), - nonce_request: None, - signed_over: true, proposed_time: get_epoch_time_secs(), signed_self: None, signed_group: None, + ext: ExtraBlockInfo::V1(BlockInfoV1 { + nonce_request: None, + signed_over: true, + }), }) .unwrap(); @@ -4789,11 +4791,13 @@ fn signer_chainstate() { reward_cycle, vote: None, valid: Some(true), - nonce_request: None, - signed_over: true, proposed_time: get_epoch_time_secs(), signed_self: None, signed_group: None, + ext: ExtraBlockInfo::V1(BlockInfoV1 { + nonce_request: None, + signed_over: true, + }), }) .unwrap(); From ef45a383f88d98a05a457bd550ab7bc3934ebef3 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 24 Jul 2024 14:32:51 -0400 Subject: [PATCH 0556/1400] chore: Address Aaron's comments --- stacks-signer/src/signerdb.rs | 30 ++++--------------- stacks-signer/src/v1/signer.rs | 8 ++--- .../src/tests/nakamoto_integrations.rs | 14 ++++----- 3 files changed, 14 insertions(+), 38 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 8aec0cc048..74cefbc44b 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -39,15 +39,12 @@ use wsts::net::NonceRequest; pub struct BlockInfoV1 { /// The associated packet nonce request if we have one pub nonce_request: Option, - /// Whether this block is already being signed over - pub signed_over: bool, } impl From for BlockInfoV1 { fn from(value: NonceRequest) -> Self { Self { nonce_request: Some(value), - signed_over: true, } } } @@ -65,23 +62,6 @@ pub enum ExtraBlockInfo { } impl ExtraBlockInfo { - /// Get `signed_over` if it exists - pub fn get_signed_over(&self) -> Option { - match self { - ExtraBlockInfo::None | ExtraBlockInfo::V0 => None, - ExtraBlockInfo::V1(v1) => Some(v1.signed_over), - } - } - /// Set `signed_over` if it exists - pub fn set_signed_over(&mut self, value: bool) -> Result<(), &str> { - match self { - ExtraBlockInfo::None | ExtraBlockInfo::V0 => Err("Field doesn't exist"), - ExtraBlockInfo::V1(v1) => { - v1.signed_over = value; - Ok(()) - } - } - } /// Take `nonce_request` if it exists pub fn take_nonce_request(&mut self) -> Option { match self { @@ -114,6 +94,8 @@ pub struct BlockInfo { pub vote: Option, /// Whether the block contents are valid pub valid: Option, + /// Whether this block is already being signed over + pub signed_over: bool, /// Time at which the proposal was received by this signer (epoch time in seconds) pub proposed_time: u64, /// Time at which the proposal was signed by this signer (epoch time in seconds) @@ -132,6 +114,7 @@ impl From for BlockInfo { reward_cycle: value.reward_cycle, vote: None, valid: None, + signed_over: false, proposed_time: get_epoch_time_secs(), signed_self: None, signed_group: None, @@ -144,6 +127,7 @@ impl BlockInfo { pub fn new_v1_with_request(block_proposal: BlockProposal, nonce_request: NonceRequest) -> Self { let mut block_info = BlockInfo::from(block_proposal); block_info.ext = ExtraBlockInfo::V1(BlockInfoV1::from(nonce_request)); + block_info.signed_over = true; block_info } @@ -151,8 +135,8 @@ impl BlockInfo { /// already set. pub fn mark_signed_and_valid(&mut self) { self.valid = Some(true); + self.signed_over = true; self.signed_self.get_or_insert(get_epoch_time_secs()); - _ = self.ext.set_signed_over(true); } /// Return the block's signer signature hash @@ -407,7 +391,7 @@ impl SignerDb { serde_json::to_string(&block_info).expect("Unable to serialize block info"); let hash = &block_info.signer_signature_hash(); let block_id = &block_info.block.block_id(); - let signed_over = &block_info.ext.get_signed_over().unwrap_or(false); + let signed_over = &block_info.signed_over; let vote = block_info .vote .as_ref() @@ -604,8 +588,6 @@ mod tests { let db_path = tmp_db_path(); let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); let (mut block_info, block_proposal) = create_block(); - // We'll need the V1 data fields for this - block_info.ext = ExtraBlockInfo::V1(BlockInfoV1::default()); db.insert_block(&block_info).unwrap(); assert!(db diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 190c740688..f78f3b9e29 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -586,7 +586,7 @@ impl Signer { .block_lookup(self.reward_cycle, &signer_signature_hash) .unwrap_or_else(|_| Some(BlockInfo::from(block_proposal.clone()))) .unwrap_or_else(|| BlockInfo::from(block_proposal.clone())); - if block_info.ext.get_signed_over().unwrap_or(false) { + if block_info.signed_over { debug!("{self}: Received a sign command for a block we are already signing over. Ignore it."); return; } @@ -603,9 +603,7 @@ impl Signer { Ok(msg) => { let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); debug!("{self}: ACK: {ack:?}",); - block_info.ext.set_signed_over(true).unwrap_or_else(|e| { - error!("{self}: `set_signed_over()` failed: {e:?}"); - }); + block_info.signed_over = true; self.signer_db .insert_block(&block_info) .unwrap_or_else(|e| { @@ -709,7 +707,7 @@ impl Signer { "{self}: Received a block validate response"; "block_hash" => block_info.block.header.block_hash(), "valid" => block_info.valid, - "signed_over" => block_info.ext.get_signed_over(), + "signed_over" => block_info.signed_over, ); self.signer_db .insert_block(&block_info) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 7b77cb9a84..492646a84d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -89,7 +89,7 @@ use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; -use stacks_signer::signerdb::{BlockInfo, BlockInfoV1, ExtraBlockInfo, SignerDb}; +use stacks_signer::signerdb::{BlockInfo, ExtraBlockInfo, SignerDb}; use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; @@ -4714,13 +4714,11 @@ fn signer_chainstate() { reward_cycle, vote: None, valid: Some(true), + signed_over: true, proposed_time: get_epoch_time_secs(), signed_self: None, signed_group: None, - ext: ExtraBlockInfo::V1(BlockInfoV1 { - nonce_request: None, - signed_over: true, - }), + ext: ExtraBlockInfo::None, }) .unwrap(); @@ -4791,13 +4789,11 @@ fn signer_chainstate() { reward_cycle, vote: None, valid: Some(true), + signed_over: true, proposed_time: get_epoch_time_secs(), signed_self: None, signed_group: None, - ext: ExtraBlockInfo::V1(BlockInfoV1 { - nonce_request: None, - signed_over: true, - }), + ext: ExtraBlockInfo::None, }) .unwrap(); From 99fd60878990aa04c155efce8d1c5fde7e601248 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 25 Jul 2024 08:26:03 -0400 Subject: [PATCH 0557/1400] chore: add debug log with event dispatcher payload --- testnet/stacks-node/src/event_dispatcher.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 9badfda567..5a72e4ca0a 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -296,6 +296,9 @@ impl RewardSetEventPayload { impl EventObserver { pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { + debug!( + "Event dispatcher: Sending payload"; "url" => %path, "payload" => ?payload + ); let body = match serde_json::to_vec(&payload) { Ok(body) => body, Err(err) => { From 5b6eae44d7baa9a1f9dd54c4f2ca6d5955bff97d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 25 Jul 2024 12:42:06 -0400 Subject: [PATCH 0558/1400] CRC: move sortition time out check into its own function Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 66 ++++++++++++++--------- stacks-signer/src/tests/chainstate.rs | 77 +++++++++++++++++++++++++++ 2 files changed, 118 insertions(+), 25 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4c04d798d8..ee747407f0 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -78,6 +78,39 @@ pub struct SortitionState { pub burn_block_hash: BurnchainHeaderHash, } +impl SortitionState { + /// Check if the sortition is timed out (i.e., the miner did not propose a block in time) + pub fn is_timed_out( + &self, + timeout: Duration, + signer_db: &SignerDb, + ) -> Result { + // if the miner has already been invalidated, we don't need to check if they've timed out. + if self.miner_status != SortitionMinerStatus::Valid { + return Ok(false); + } + // if we've already signed a block in this tenure, the miner can't have timed out. + let has_blocks = signer_db + .get_last_signed_block_in_tenure(&self.consensus_hash)? + .is_some(); + if has_blocks { + return Ok(false); + } + let Some(received_ts) = signer_db.get_burn_block_receive_time(&self.burn_block_hash)? + else { + return Ok(false); + }; + let received_time = UNIX_EPOCH + Duration::from_secs(received_ts); + let Ok(elapsed) = std::time::SystemTime::now().duration_since(received_time) else { + return Ok(false); + }; + if elapsed > timeout { + return Ok(true); + } + Ok(false) + } +} + /// Captures the configuration settings used by the signer when evaluating block proposals. #[derive(Debug, Clone)] pub struct ProposalEvalConfig { @@ -156,32 +189,15 @@ impl SortitionsView { block: &NakamotoBlock, block_pk: &StacksPublicKey, ) -> Result { - // If this is the first block in the tenure, check if it was proposed after the timeout - if signer_db - .get_last_signed_block_in_tenure(&block.header.consensus_hash)? - .is_none() + if self + .cur_sortition + .is_timed_out(self.config.block_proposal_timeout, signer_db)? { - if let Some(received_ts) = - signer_db.get_burn_block_receive_time(&self.cur_sortition.burn_block_hash)? - { - let received_time = UNIX_EPOCH + Duration::from_secs(received_ts); - let elapsed = std::time::SystemTime::now() - .duration_since(received_time) - .unwrap_or_else(|_| { - panic!("Failed to calculate time since burn block received") - }); - if elapsed >= self.config.block_proposal_timeout { - warn!( - "Miner proposed first block after block proposal timeout."; - "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, - "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), - "burn_block_received_time" => ?received_time, - ); - self.cur_sortition.miner_status = - SortitionMinerStatus::InvalidatedBeforeFirstBlock; - } + self.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } + if let Some(last_sortition) = self.last_sortition.as_mut() { + if last_sortition.is_timed_out(self.config.block_proposal_timeout, signer_db)? { + last_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; } } let bitvec_all_1s = block.header.pox_treatment.iter().all(|entry| entry); diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index e469cbdeb9..d0c7f1d9f3 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -387,3 +387,80 @@ fn check_block_proposal_timeout() { .check_proposal(&stacks_client, &signer_db, &last_sortition_block, &block_pk) .unwrap()); } + +#[test] +fn check_sortition_timeout() { + let signer_db_dir = "/tmp/stacks-node-tests/signer-units/"; + let signer_db_path = format!( + "{signer_db_dir}/sortition_timeout.{}.sqlite", + get_epoch_time_secs() + ); + fs::create_dir_all(signer_db_dir).unwrap(); + let mut signer_db = SignerDb::new(signer_db_path).unwrap(); + + let mut sortition = SortitionState { + miner_pkh: Hash160([0; 20]), + miner_pubkey: None, + prior_sortition: ConsensusHash([0; 20]), + parent_tenure_id: ConsensusHash([0; 20]), + consensus_hash: ConsensusHash([1; 20]), + miner_status: SortitionMinerStatus::Valid, + burn_header_timestamp: 2, + burn_block_hash: BurnchainHeaderHash([1; 32]), + }; + // Ensure we have a burn height to compare against + let burn_hash = sortition.burn_block_hash; + let burn_height = 1; + let received_time = SystemTime::now(); + signer_db + .insert_burn_block(&burn_hash, burn_height, &received_time) + .unwrap(); + + std::thread::sleep(Duration::from_secs(1)); + // We have not yet timed out + assert!(!sortition + .is_timed_out(Duration::from_secs(10), &signer_db) + .unwrap()); + // We are a valid sortition, have an empty tenure, and have now timed out + assert!(sortition + .is_timed_out(Duration::from_secs(1), &signer_db) + .unwrap()); + // This will not be marked as timed out as the status is no longer valid + sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; + assert!(!sortition + .is_timed_out(Duration::from_secs(1), &signer_db) + .unwrap()); + + // Revert the status to continue other checks + sortition.miner_status = SortitionMinerStatus::Valid; + // Insert a signed over block so its no longer an empty tenure + let block_proposal = BlockProposal { + block: NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 10, + burn_spent: 10, + consensus_hash: sortition.consensus_hash, + parent_block_id: StacksBlockId([0; 32]), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: TrieHash([0; 32]), + timestamp: 11, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs: vec![], + }, + burn_height: 2, + reward_cycle: 1, + }; + + let mut block_info = BlockInfo::from(block_proposal); + block_info.signed_over = true; + signer_db.insert_block(&block_info).unwrap(); + + // This will no longer be timed out as we have a non-empty tenure + assert!(!sortition + .is_timed_out(Duration::from_secs(1), &signer_db) + .unwrap()); +} From 65d401dcaae04389792858d326bf20c5f68ac48b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 25 Jul 2024 14:19:37 -0400 Subject: [PATCH 0559/1400] fix: remove bad feature gating We do not have a `std` feature, so this causes an error. Before Rust 1.80, this was not an error. --- stacks-common/src/deps_common/httparse/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stacks-common/src/deps_common/httparse/mod.rs b/stacks-common/src/deps_common/httparse/mod.rs index 5d572585b8..90a08bf2f1 100644 --- a/stacks-common/src/deps_common/httparse/mod.rs +++ b/stacks-common/src/deps_common/httparse/mod.rs @@ -1280,7 +1280,6 @@ mod tests { ); } - #[cfg(feature = "std")] #[test] fn test_std_error() { use std::error::Error as StdError; From b78afdca36fa64427b9dd0d1871ad2ecc9d4335a Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 25 Jul 2024 09:29:38 -0400 Subject: [PATCH 0560/1400] test: Eliminate extra block between Epochs 2.5 and 3.0 --- .../src/tests/nakamoto_integrations.rs | 135 +++++------------- .../src/tests/neon_integrations.rs | 34 +++++ 2 files changed, 67 insertions(+), 102 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 492646a84d..9b15b83afb 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1024,6 +1024,18 @@ pub fn boot_to_epoch_3_reward_set( info!("Bootstrapped to Epoch 3.0 reward set calculation height: {epoch_3_reward_set_calculation}."); } +/// Wait for a block commit, without producing a block +fn wait_for_first_naka_block_commit(timeout_secs: u64, naka_commits_submitted: &Arc) { + let start = Instant::now(); + while naka_commits_submitted.load(Ordering::SeqCst) < 1 { + if start.elapsed() > Duration::from_secs(timeout_secs) { + error!("Timed out waiting for block commit"); + panic!(); + } + thread::sleep(Duration::from_millis(100)); + } +} + #[test] #[ignore] /// This test spins up a nakamoto-neon node. @@ -1134,12 +1146,7 @@ fn simple_neon_integration() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 15 nakamoto tenures for _i in 0..15 { @@ -1232,6 +1239,10 @@ fn simple_neon_integration() { assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); + // Check that we aren't missing burn blocks + let bhh = u64::from(tip.burn_header_height); + test_observer::contains_burn_block_range(220..=bhh).unwrap(); + // make sure prometheus returns an updated height #[cfg(feature = "monitoring_prom")] { @@ -1355,12 +1366,7 @@ fn mine_multiple_per_tenure_integration() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { @@ -1672,12 +1678,7 @@ fn correct_burn_outs() { ); assert_eq!(stacker_response.stacker_set.rewarded_addresses.len(), 1); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); info!("Bootstrapped to Epoch-3.0 boundary, mining nakamoto blocks"); @@ -1874,12 +1875,7 @@ fn block_proposal_api_endpoint() { info!("Nakamoto miner started..."); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 3 nakamoto tenures for _ in 0..3 { @@ -2218,12 +2214,7 @@ fn miner_writes_proposed_block_to_stackerdb() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 1 nakamoto tenure next_block_and_mine_commit( @@ -2353,12 +2344,7 @@ fn vote_for_aggregate_key_burn_op() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // submit a pre-stx op let mut miner_signer = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); @@ -2612,12 +2598,7 @@ fn follower_bootup() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); let mut follower_conf = naka_conf.clone(); follower_conf.events_observers.clear(); @@ -2916,12 +2897,7 @@ fn stack_stx_burn_op_integration_test() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); let block_height = btc_regtest_controller.get_headers_height(); @@ -3360,12 +3336,7 @@ fn forked_tenure_is_ignored() { blind_signer(&naka_conf, &signers, proposals_submitted); info!("Starting tenure A."); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // In the next block, the miner should win the tenure and submit a stacks block let commits_before = commits_submitted.load(Ordering::SeqCst); @@ -3662,12 +3633,7 @@ fn check_block_heights() { let preheights = heights0_value.expect_tuple().unwrap(); info!("Heights from pre-epoch 3.0: {}", preheights); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); @@ -4051,12 +4017,7 @@ fn nakamoto_attempt_time() { info!("Nakamoto miner started..."); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 3 nakamoto tenures for _ in 0..3 { @@ -4340,12 +4301,7 @@ fn clarity_burn_state() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); let mut sender_nonce = 0; @@ -4624,12 +4580,7 @@ fn signer_chainstate() { false, ); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); let mut signer_db = SignerDb::new(format!("{}/signer_db_path", naka_conf.node.working_dir)).unwrap(); @@ -5155,12 +5106,7 @@ fn continue_tenure_extend() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine a regular nakamoto tenure next_block_and_mine_commit( @@ -5443,12 +5389,7 @@ fn check_block_times() { .unwrap(); info!("Time from pre-epoch 3.0: {}", time0); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // This version uses the Clarity 1 / 2 function let contract1_name = "test-contract-1"; @@ -5928,12 +5869,7 @@ fn check_block_info() { let tuple0 = result0.expect_tuple().unwrap().data_map; info!("Info from pre-epoch 3.0: {:?}", tuple0); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // This version uses the Clarity 1 / 2 function let contract1_name = "test-contract-1"; @@ -6436,12 +6372,7 @@ fn check_block_info_rewards() { let tuple0 = result0.expect_tuple().unwrap().data_map; info!("Info from pre-epoch 3.0: {:?}", tuple0); - // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // This version uses the Clarity 1 / 2 function let contract1_name = "test-contract-1"; diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index d173d02e3e..e1f72ba2e3 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -186,7 +186,9 @@ pub fn neon_integration_test_conf_with_seed(seed: Vec) -> (Config, StacksAdd } pub mod test_observer { + use std::collections::HashSet; use std::convert::Infallible; + use std::ops::{Bound, RangeBounds}; use std::sync::Mutex; use std::thread; @@ -566,6 +568,38 @@ pub mod test_observer { ATTACHMENTS.lock().unwrap().clear(); PROPOSAL_RESPONSES.lock().unwrap().clear(); } + + pub fn contains_burn_block_range(range: impl RangeBounds) -> Result<(), String> { + // Get set of all burn block heights + let burn_block_heights = get_blocks() + .iter() + .map(|x| x.get("burn_block_height").unwrap().as_u64().unwrap()) + .collect::>(); + + let start = match range.start_bound() { + Bound::Unbounded => return Err("Unbounded ranges not supported".into()), + Bound::Included(&x) => x, + Bound::Excluded(&x) => x.saturating_add(1), + }; + + let end = match range.end_bound() { + Bound::Unbounded => return Err("Unbounded ranges not supported".into()), + Bound::Included(&x) => x, + Bound::Excluded(&x) => x.saturating_sub(1), + }; + + // Find indexes in range for which we don't have burn block in set + let missing = (start..=end) + .into_iter() + .filter(|i| !burn_block_heights.contains(&i)) + .collect::>(); + + if missing.is_empty() { + Ok(()) + } else { + Err(format!("Missing the following burn blocks: {missing:?}")) + } + } } const PANIC_TIMEOUT_SECS: u64 = 600; From 77c6a0e2d99363b1043aec9a74ef04ff722e1cc8 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 26 Jul 2024 14:17:10 +0300 Subject: [PATCH 0561/1400] move main signer messages from debug to info --- stacks-signer/src/runloop.rs | 2 +- stacks-signer/src/v0/signer.rs | 6 +++--- stacks-signer/src/v1/signer.rs | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 6795f0cfee..3145cd6f54 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -365,7 +365,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo self.refresh_signer_config(next_reward_cycle); } } else { - debug!("Received a new burnchain block height ({current_burn_block_height}) but not in prepare phase."; + info!("Received a new burnchain block height ({current_burn_block_height}) but not in prepare phase."; "reward_cycle" => reward_cycle_info.reward_cycle, "reward_cycle_length" => reward_cycle_info.reward_cycle_length, "prepare_phase_block_length" => reward_cycle_info.prepare_phase_block_length, diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 93927b03fd..37ba216aa2 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -153,7 +153,7 @@ impl SignerTrait for Signer { burn_header_hash, received_time, } => { - debug!("{self}: Receved a new burn block event for block height {burn_height}"); + info!("{self}: Receved a new burn block event for block height {burn_height}"); if let Err(e) = self.signer_db .insert_burn_block(burn_header_hash, *burn_height, received_time) @@ -251,7 +251,7 @@ impl Signer { block_proposal: &BlockProposal, miner_pubkey: &Secp256k1PublicKey, ) { - debug!("{self}: Received a block proposal: {block_proposal:?}"); + info!("{self}: Received a block proposal: {block_proposal:?}"); if block_proposal.reward_cycle != self.reward_cycle { // We are not signing for this reward cycle. Ignore the block. debug!( @@ -388,7 +388,7 @@ impl Signer { /// Handle the block validate response returned from our prior calls to submit a block for validation fn handle_block_validate_response(&mut self, block_validate_response: &BlockValidateResponse) { - debug!("{self}: Received a block validate response: {block_validate_response:?}"); + info!("{self}: Received a block validate response: {block_validate_response:?}"); let (response, block_info) = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { crate::monitoring::increment_block_validation_responses(true); diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index f78f3b9e29..83a0c9a39b 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -201,7 +201,7 @@ impl SignerTrait for Signer { }; match event { SignerEvent::BlockValidationResponse(block_validate_response) => { - debug!("{self}: Received a block proposal result from the stacks node..."); + info!("{self}: Received a block proposal result from the stacks node..."); self.handle_block_validate_response( stacks_client, block_validate_response, @@ -244,7 +244,7 @@ impl SignerTrait for Signer { burn_header_hash, received_time, } => { - debug!("{self}: Receved a new burn block event for block height {burn_height}"); + info!("{self}: Received a new burn block event for block height {burn_height}"); if let Err(e) = self.signer_db .insert_burn_block(burn_header_hash, *burn_height, received_time) @@ -703,7 +703,7 @@ impl Signer { }; self.handle_packets(stacks_client, res, &[packet], current_reward_cycle); } - debug!( + info!( "{self}: Received a block validate response"; "block_hash" => block_info.block.header.block_hash(), "valid" => block_info.valid, From 861bff076b051e0338b37dacbe0a17cd1b90b3b3 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 26 Jul 2024 14:22:00 +0300 Subject: [PATCH 0562/1400] moved from info to debug stacks block related logs --- stacks-signer/src/v0/signer.rs | 6 +++--- stacks-signer/src/v1/signer.rs | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 37ba216aa2..72c8cc4dfe 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -153,7 +153,7 @@ impl SignerTrait for Signer { burn_header_hash, received_time, } => { - info!("{self}: Receved a new burn block event for block height {burn_height}"); + info!("{self}: Received a new burn block event for block height {burn_height}"); if let Err(e) = self.signer_db .insert_burn_block(burn_header_hash, *burn_height, received_time) @@ -251,7 +251,7 @@ impl Signer { block_proposal: &BlockProposal, miner_pubkey: &Secp256k1PublicKey, ) { - info!("{self}: Received a block proposal: {block_proposal:?}"); + debug!("{self}: Received a block proposal: {block_proposal:?}"); if block_proposal.reward_cycle != self.reward_cycle { // We are not signing for this reward cycle. Ignore the block. debug!( @@ -388,7 +388,7 @@ impl Signer { /// Handle the block validate response returned from our prior calls to submit a block for validation fn handle_block_validate_response(&mut self, block_validate_response: &BlockValidateResponse) { - info!("{self}: Received a block validate response: {block_validate_response:?}"); + debug!("{self}: Received a block validate response: {block_validate_response:?}"); let (response, block_info) = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { crate::monitoring::increment_block_validation_responses(true); diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 83a0c9a39b..bd4b36f489 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -201,7 +201,7 @@ impl SignerTrait for Signer { }; match event { SignerEvent::BlockValidationResponse(block_validate_response) => { - info!("{self}: Received a block proposal result from the stacks node..."); + debug!("{self}: Received a block proposal result from the stacks node..."); self.handle_block_validate_response( stacks_client, block_validate_response, @@ -703,7 +703,7 @@ impl Signer { }; self.handle_packets(stacks_client, res, &[packet], current_reward_cycle); } - info!( + debug!( "{self}: Received a block validate response"; "block_hash" => block_info.block.header.block_hash(), "valid" => block_info.valid, From 030fa6434b55b49b1e4bfa31470647ce6ab3a095 Mon Sep 17 00:00:00 2001 From: janniks Date: Fri, 26 Jul 2024 17:09:04 +0200 Subject: [PATCH 0563/1400] fix: ensure minimum non-dust amount as change output on regtest --- .../burnchains/bitcoin_regtest_controller.rs | 23 +++++++++++++------ 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 30f088a96f..0ef63b6dc0 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1715,6 +1715,7 @@ impl BitcoinRegtestController { spent_in_outputs + min_tx_size * fee_rate + estimated_rbf, &mut utxos_cloned, signer, + true, ); let serialized_tx = SerializedTx::new(tx_cloned); cmp::max(min_tx_size, serialized_tx.bytes.len() as u64) @@ -1731,6 +1732,7 @@ impl BitcoinRegtestController { spent_in_outputs + tx_size * fee_rate + rbf_fee, utxos_set, signer, + true, ); signer.dispose(); Some(()) @@ -1744,38 +1746,45 @@ impl BitcoinRegtestController { &mut self, epoch_id: StacksEpochId, tx: &mut Transaction, - total_to_spend: u64, + tx_cost: u64, utxos_set: &mut UTXOSet, signer: &mut BurnchainOpSigner, + force_change_output: bool, ) -> bool { let mut public_key = signer.get_public_key(); - let mut total_consumed = 0; + + let total_target = if force_change_output { + tx_cost + DUST_UTXO_LIMIT + } else { + tx_cost + }; // select UTXOs until we have enough to cover the cost + let mut total_consumed = 0; let mut available_utxos = vec![]; available_utxos.append(&mut utxos_set.utxos); for utxo in available_utxos.into_iter() { total_consumed += utxo.amount; utxos_set.utxos.push(utxo); - if total_consumed >= total_to_spend { + if total_consumed >= total_target { break; } } - if total_consumed < total_to_spend { + if total_consumed < total_target { warn!( "Consumed total {} is less than intended spend: {}", - total_consumed, total_to_spend + total_consumed, total_target ); return false; } // Append the change output - let value = total_consumed - total_to_spend; + let value = total_consumed - tx_cost; debug!( "Payments value: {:?}, total_consumed: {:?}, total_spent: {:?}", - value, total_consumed, total_to_spend + value, total_consumed, total_target ); if value >= DUST_UTXO_LIMIT { let change_output = if self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { From afc9c32648e81e3ab3a9073a070aead2ae221d70 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Fri, 26 Jul 2024 08:40:34 -0700 Subject: [PATCH 0564/1400] Removing unnecessary config fields for sample configs --- testnet/stacks-node/conf/mainnet-follower-conf.toml | 5 +---- testnet/stacks-node/conf/mainnet-miner-conf.toml | 12 ++++++------ testnet/stacks-node/conf/mainnet-mockminer-conf.toml | 7 +------ testnet/stacks-node/conf/testnet-follower-conf.toml | 7 ++----- testnet/stacks-node/conf/testnet-miner-conf.toml | 12 ++++++------ 5 files changed, 16 insertions(+), 27 deletions(-) diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 4377993ed4..ba42fb6657 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -1,5 +1,5 @@ [node] -# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" @@ -9,9 +9,6 @@ prometheus_bind = "0.0.0.0:9153" chain = "bitcoin" mode = "mainnet" peer_host = "bitcoin.hiro.so" -username = "hirosystems" -password = "hirosystems" -peer_port = 8333 # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/testnet/stacks-node/conf/mainnet-miner-conf.toml index 3fdf293a4f..fc526f0878 100644 --- a/testnet/stacks-node/conf/mainnet-miner-conf.toml +++ b/testnet/stacks-node/conf/mainnet-miner-conf.toml @@ -1,5 +1,5 @@ [node] -# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "127.0.0.1:20443" p2p_bind = "127.0.0.1:20444" bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" @@ -13,10 +13,10 @@ mine_microblocks = false # Disable microblocks (ref: https://github.com/stacks-n chain = "bitcoin" mode = "mainnet" peer_host = "127.0.0.1" -username = "" -password = "" -rpc_port = 8332 -peer_port = 8333 +username = "" +password = "" +rpc_port = +peer_port = # Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election burn_fee_cap = 20000 # Amount (in sats) per byte - Used to calculate the transaction fees @@ -24,4 +24,4 @@ satoshis_per_byte = 25 # Amount of sats to add when RBF'ing bitcoin tx (default: 5) rbf_fee_increment = 5 # Maximum percentage to RBF bitcoin tx (default: 150% of satsv/B) -max_rbf = 150 \ No newline at end of file +max_rbf = 150 diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index 2c98499d59..75785454dc 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -1,5 +1,5 @@ [node] -# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" miner = true @@ -11,8 +11,3 @@ prometheus_bind = "0.0.0.0:9153" chain = "bitcoin" mode = "mainnet" peer_host = "bitcoin.hiro.so" -username = "hirosystems" -password = "hirosystems" -rpc_port = 8332 -peer_port = 8333 -burn_fee_cap = 1 diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index b327fbb001..f5fb2c04b0 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -1,5 +1,5 @@ [node] -# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444" @@ -9,9 +9,6 @@ prometheus_bind = "0.0.0.0:9153" chain = "bitcoin" mode = "krypton" peer_host = "bitcoin.regtest.hiro.so" -username = "hirosystems" -password = "hirosystems" -rpc_port = 18443 peer_port = 18444 pox_prepare_length = 100 pox_reward_length = 900 @@ -66,4 +63,4 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2000701 \ No newline at end of file +start_height = 2000701 diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml index f3a49a33d4..e565fd0ee2 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -1,5 +1,5 @@ [node] -# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444" @@ -9,10 +9,10 @@ prometheus_bind = "0.0.0.0:9153" chain = "bitcoin" mode = "krypton" peer_host = "127.0.0.1" -username = "" -password = "" -rpc_port = 18443 -peer_port = 18444 +username = "" +password = "" +rpc_port = +peer_port = pox_prepare_length = 100 pox_reward_length = 900 # Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election @@ -74,4 +74,4 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2000701 \ No newline at end of file +start_height = 2000701 From ba24d00285dcc8294957a4838271b87946669446 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 26 Jul 2024 12:03:36 -0400 Subject: [PATCH 0565/1400] Add an integration test to ensure an empty sortition can time out Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 156 +++++++++++++++++++++ 2 files changed, 157 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index c35406fd3b..d12ae61f38 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -89,6 +89,7 @@ jobs: - tests::signer::v0::end_of_tenure - tests::signer::v0::forked_tenure_okay - tests::signer::v0::forked_tenure_invalid + - tests::signer::v0::empty_sortition - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3543fb1ce8..90e32fb6b8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -13,6 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::ops::Add; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use std::{env, thread}; @@ -1068,3 +1069,158 @@ fn retry_on_timeout() { signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test checks the behaviour of signers when a sortition is empty. Specifically: +/// - An empty sortition will cause the signers to mark a miner as misbehaving once a timeout is exceeded. +/// - The empty sortition will trigger the miner to attempt a tenure extend. +/// - Signers will accept the tenure extend and sign subsequent blocks built off the old sortition +fn empty_sortition() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let block_proposal_timeout = Duration::from_secs(5); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(5)), + |config| { + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; + }, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = Duration::from_secs(20); + + signer_test.boot_to_epoch_3(); + + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + + info!("------------------------- Test Mine Regular Tenure A -------------------------"); + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + // Mine a regular tenure + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!("------------------------- Test Mine Empty Tenure B -------------------------"); + info!("Pausing stacks block mining to trigger an empty sortition."); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + // Start new Tenure B + // In the next block, the miner should win the tenure + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!("Pausing stacks block proposal to force an empty tenure"); + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + + info!("Pausing commit op to prevent tenure C from starting..."); + TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + + let blocks_after = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + assert_eq!(blocks_after, blocks_before); + + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); + + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + info!("------------------------- Test Delayed Block is Rejected -------------------------"); + let reward_cycle = signer_test.get_current_reward_cycle(); + let mut stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index. + ); + + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + + // The miner's proposed block should get rejected by the signers + let start_polling = Instant::now(); + let mut found_rejection = false; + while !found_rejection { + std::thread::sleep(Duration::from_secs(1)); + let messages: Vec = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &signer_slot_ids, + ) + .expect("Failed to get message from stackerdb"); + for message in messages { + if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason_code, + .. + })) = message + { + assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); + found_rejection = true; + } else { + panic!("Unexpected message type"); + } + } + assert!( + start_polling.elapsed() <= short_timeout, + "Timed out after waiting for response from signer" + ); + } + signer_test.shutdown(); +} From 3b38148dff35e931cb25fe36855a1db1fab91777 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 26 Jul 2024 13:24:48 -0400 Subject: [PATCH 0566/1400] Add mock signature message type to SignerMessages Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 119 +++++++++++++++++- .../src/nakamoto_node/sign_coordinator.rs | 4 + 2 files changed, 121 insertions(+), 2 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index d15f566e16..aeedf76d68 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -38,12 +38,15 @@ use blockstack_lib::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, ValidateRejectCode, }; use blockstack_lib::util_lib::boot::boot_code_id; +use clarity::types::chainstate::{ConsensusHash, StacksPrivateKey, StacksPublicKey}; +use clarity::types::PrivateKey; use clarity::util::retry::BoundReader; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::QualifiedContractIdentifier; use hashbrown::{HashMap, HashSet}; use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha512_256}; use stacks_common::codec::{ read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError, StacksMessageCodec, @@ -55,6 +58,7 @@ use tiny_http::{ }; use crate::http::{decode_http_body, decode_http_request}; +use crate::stacks_common::types::PublicKey; use crate::{ BlockProposal, EventError, MessageSlotID as MessageSlotIDTrait, SignerMessage as SignerMessageTrait, @@ -65,7 +69,9 @@ define_u8_enum!( /// the contract index in the signers contracts (i.e., X in signers-0-X) MessageSlotID { /// Block Response message from signers - BlockResponse = 1 + BlockResponse = 1, + /// Mock Signature message from Epoch 2.5 signers + MockSignature = 2 }); define_u8_enum!( @@ -100,7 +106,9 @@ SignerMessageTypePrefix { /// Block Response message from signers BlockResponse = 1, /// Block Pushed message from miners - BlockPushed = 2 + BlockPushed = 2, + /// Mock Signature message from Epoch 2.5 signers + MockSignature = 3 }); #[cfg_attr(test, mutants::skip)] @@ -143,6 +151,7 @@ impl From<&SignerMessage> for SignerMessageTypePrefix { SignerMessage::BlockProposal(_) => SignerMessageTypePrefix::BlockProposal, SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, SignerMessage::BlockPushed(_) => SignerMessageTypePrefix::BlockPushed, + SignerMessage::MockSignature(_) => SignerMessageTypePrefix::MockSignature, } } } @@ -156,6 +165,8 @@ pub enum SignerMessage { BlockResponse(BlockResponse), /// A block pushed from miners to the signers set BlockPushed(NakamotoBlock), + /// A mock signature from the epoch 2.5 signers + MockSignature(MockSignature), } impl SignerMessage { @@ -167,6 +178,7 @@ impl SignerMessage { match self { Self::BlockProposal(_) | Self::BlockPushed(_) => None, Self::BlockResponse(_) => Some(MessageSlotID::BlockResponse), + Self::MockSignature(_) => Some(MessageSlotID::MockSignature), } } } @@ -180,6 +192,7 @@ impl StacksMessageCodec for SignerMessage { SignerMessage::BlockProposal(block_proposal) => block_proposal.consensus_serialize(fd), SignerMessage::BlockResponse(block_response) => block_response.consensus_serialize(fd), SignerMessage::BlockPushed(block) => block.consensus_serialize(fd), + SignerMessage::MockSignature(signature) => signature.consensus_serialize(fd), }?; Ok(()) } @@ -201,6 +214,10 @@ impl StacksMessageCodec for SignerMessage { let block = StacksMessageCodec::consensus_deserialize(fd)?; SignerMessage::BlockPushed(block) } + SignerMessageTypePrefix::MockSignature => { + let signature = StacksMessageCodec::consensus_deserialize(fd)?; + SignerMessage::MockSignature(signature) + } }; Ok(message) } @@ -214,6 +231,59 @@ pub trait StacksMessageCodecExtensions: Sized { fn inner_consensus_deserialize(fd: &mut R) -> Result; } +/// A signer's mock signature across its last seen Stacks Consensus Hash. This is only used +/// by Epoch 2.5 signers to simulate the signing of a block for every sortition. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MockSignature { + /// The signature across the stacks consensus hash + pub signature: MessageSignature, + /// The block hash that the signature is across + pub stacks_consensus_hash: ConsensusHash, +} + +impl MockSignature { + /// The signature hash for the mock signature + pub fn signature_hash(&self) -> Result { + let mut hasher = Sha512_256::new(); + let fd = &mut hasher; + write_next(fd, &self.stacks_consensus_hash)?; + Ok(Sha512Trunc256Sum::from_hasher(hasher)) + } + /// Sign the mock signature and set the internal signature field + pub fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { + let signature_hash = self.signature_hash().map_err(|e| e.to_string())?; + self.signature = private_key.sign(&signature_hash.0)?; + Ok(()) + } + /// Verify the mock signature against the provided public key + pub fn verify(&self, public_key: &StacksPublicKey) -> Result { + if self.signature == MessageSignature::empty() { + return Ok(false); + } + let signature_hash = self.signature_hash().map_err(|e| e.to_string())?; + public_key + .verify(&signature_hash.0, &self.signature) + .map_err(|e| e.to_string()) + } +} + +impl StacksMessageCodec for MockSignature { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.signature)?; + write_next(fd, &self.stacks_consensus_hash)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let signature = read_next::(fd)?; + let stacks_consensus_hash = read_next::(fd)?; + Ok(Self { + signature, + stacks_consensus_hash, + }) + } +} + define_u8_enum!( /// Enum representing the reject code type prefix RejectCodeTypePrefix { @@ -508,6 +578,7 @@ mod test { }; use blockstack_lib::util_lib::strings::StacksString; use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; + use clarity::types::PrivateKey; use clarity::util::hash::MerkleTree; use clarity::util::secp256k1::MessageSignature; use rand::{thread_rng, Rng, RngCore}; @@ -622,4 +693,48 @@ mod test { .expect("Failed to deserialize SignerMessage"); assert_eq!(signer_message, deserialized_signer_message); } + + #[test] + fn verify_sign_mock_signature() { + let private_key = StacksPrivateKey::new(); + let public_key = StacksPublicKey::from_private(&private_key); + + let bad_private_key = StacksPrivateKey::new(); + let bad_public_key = StacksPublicKey::from_private(&bad_private_key); + + let byte: u8 = thread_rng().gen(); + let stacks_consensus_hash = ConsensusHash([byte; 20]); + let mut mock_signature = MockSignature { + signature: MessageSignature::empty(), + stacks_consensus_hash, + }; + assert!(!mock_signature + .verify(&public_key) + .expect("Failed to verify MockSignature")); + + mock_signature + .sign(&private_key) + .expect("Failed to sign MockSignature"); + + assert!(mock_signature + .verify(&public_key) + .expect("Failed to verify MockSignature")); + assert!(!mock_signature + .verify(&bad_public_key) + .expect("Failed to verify MockSignature")); + } + + #[test] + fn serde_mock_signature() { + let byte: u8 = thread_rng().gen(); + let stacks_consensus_hash = ConsensusHash([byte; 20]); + let mock_signature = MockSignature { + signature: MessageSignature::empty(), + stacks_consensus_hash, + }; + let serialized_signature = mock_signature.serialize_to_vec(); + let deserialized_signature = read_next::(&mut &serialized_signature[..]) + .expect("Failed to deserialize MockSignature"); + assert_eq!(mock_signature, deserialized_signature); + } } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 30f73e75be..1990a382e9 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -758,6 +758,10 @@ impl SignCoordinator { debug!("Received block pushed message. Ignoring."); continue; } + SignerMessageV0::MockSignature(_) => { + debug!("Received mock signature message. Ignoring."); + continue; + } }; let block_sighash = block.header.signer_signature_hash(); if block_sighash != response_hash { From a407c94373430e3b78cab7bc2945cc8a4220a97b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 26 Jul 2024 13:43:07 -0400 Subject: [PATCH 0567/1400] Send a mock signature message per sortition Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 20 ++++++++++++--- stacks-signer/src/v0/signer.rs | 27 +++++++++++++++++-- testnet/stacks-node/src/tests/signer/v0.rs | 30 ++++++++++++++++++++++ 3 files changed, 72 insertions(+), 5 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index aeedf76d68..76dee99ded 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -236,12 +236,26 @@ pub trait StacksMessageCodecExtensions: Sized { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct MockSignature { /// The signature across the stacks consensus hash - pub signature: MessageSignature, + signature: MessageSignature, /// The block hash that the signature is across - pub stacks_consensus_hash: ConsensusHash, + stacks_consensus_hash: ConsensusHash, } impl MockSignature { + /// Create a new mock signature with the provided stacks consensus hash and private key + pub fn new( + stacks_consensus_hash: ConsensusHash, + stacks_private_key: &StacksPrivateKey, + ) -> Self { + let mut sig = Self { + signature: MessageSignature::empty(), + stacks_consensus_hash, + }; + sig.sign(stacks_private_key) + .expect("Failed to sign MockSignature"); + sig + } + /// The signature hash for the mock signature pub fn signature_hash(&self) -> Result { let mut hasher = Sha512_256::new(); @@ -250,7 +264,7 @@ impl MockSignature { Ok(Sha512Trunc256Sum::from_hasher(hasher)) } /// Sign the mock signature and set the internal signature field - pub fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { + fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { let signature_hash = self.signature_hash().map_err(|e| e.to_string())?; self.signature = private_key.sign(&signature_hash.0)?; Ok(()) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 93927b03fd..0ab444b78f 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -17,10 +17,12 @@ use std::sync::mpsc::Sender; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; use clarity::types::chainstate::StacksPrivateKey; -use clarity::types::PrivateKey; +use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; use clarity::util::secp256k1::Secp256k1PublicKey; -use libsigner::v0::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMessage}; +use libsigner::v0::messages::{ + BlockResponse, MessageSlotID, MockSignature, RejectCode, SignerMessage, +}; use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; @@ -166,6 +168,10 @@ impl SignerTrait for Signer { ); } *sortition_state = None; + if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { + // We are in epoch 25, so we should mock mine to prove we are still alive. + self.mock_mine(stacks_client); + }; } } } @@ -462,4 +468,21 @@ impl Signer { .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); } + + /// Send a mock signature to stackerdb to prove we are still alive + fn mock_mine(&mut self, stacks_client: &StacksClient) { + let Ok(peer_info) = stacks_client.get_peer_info() else { + warn!("{self}: Failed to get peer info. Cannot mock mine."); + return; + }; + let mock_signature = + MockSignature::new(peer_info.stacks_tip_consensus_hash, &self.private_key); + let message = SignerMessage::MockSignature(mock_signature); + if let Err(e) = self + .stackerdb + .send_message_with_retry::(message) + { + warn!("{self}: Failed to send mock signature to stacker-db: {e:?}",); + } + } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2a9da738d3..75856593bb 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1067,3 +1067,33 @@ fn retry_on_timeout() { signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test checks that the miner will retry when signature collection times out. +fn mock_mine_epoch_25() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(5)), + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + todo!("BOOT TO EPOCH 2.5 AND VERIFY WE RECEIVE A MOCK SIGNATURE PER SORTITION"); +} From 9d69e0ea4af28bb40f256e7c07fdb178ea6981b0 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Fri, 26 Jul 2024 11:07:47 -0700 Subject: [PATCH 0568/1400] Update docs/release-process.md Co-authored-by: Adriano Di Luzio --- docs/release-process.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/release-process.md b/docs/release-process.md index 27e5b0ac4a..46b4bae621 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -76,7 +76,7 @@ The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/to 6. Once the release candidate has been built and binaries are available, ecosystem participants shall be notified to test the tagged release on various staging infrastructure. -7. The release candidate will tested that it successfully syncs with the current chain from genesis both in testnet and mainnet. +7. The release candidate will test that it successfully syncs with the current chain from genesis both in testnet and mainnet. 8. If bugs or issues emerge from the rollout on staging infrastructure, the release will be delayed until those regressions are resolved. From cd2cb5d2113a54bfcd7d5823616f032c4dedffe9 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Sun, 28 Jul 2024 09:26:55 -0700 Subject: [PATCH 0569/1400] Oneliner to set signer version at compile time --- stacks-signer/src/cli.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 3b63ebdd59..ecb1c24741 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -42,6 +42,8 @@ extern crate alloc; #[derive(Parser, Debug)] #[command(author, version, about)] +#[command(long_version = option_env!("SIGNER_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")))] + /// The CLI arguments for the stacks signer pub struct Cli { /// Subcommand action to take From e5cc717b5aaf9ec75621b7dec3f165b2ae6daa0e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 26 Jul 2024 14:39:04 -0400 Subject: [PATCH 0570/1400] fix: set miner stackerdb sync frequency to 1 sec; stackerdb sync pushchunks should not retry indefinitely; check for the absence of neighbors to sync to as a stopping condition for pushchunks --- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- stackslib/src/net/stackerdb/mod.rs | 4 +++ stackslib/src/net/stackerdb/sync.rs | 44 +++++++++++++++++++----- 3 files changed, 40 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index a72a357b63..e0dd0c0afd 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -4104,7 +4104,7 @@ impl NakamotoChainState { Ok(StackerDBConfig { chunk_size: MAX_PAYLOAD_LEN.into(), signers, - write_freq: 5, + write_freq: 0, max_writes: u32::MAX, // no limit on number of writes max_neighbors: 200, // TODO: const -- just has to be equal to or greater than the number of signers hint_replicas: vec![], // TODO: is there a way to get the IP addresses of stackers' preferred nodes? diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 754df3fba1..3e91c9b542 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -401,6 +401,10 @@ pub struct StackerDBSync { num_attempted_connections: u64, /// How many connections have been made in the last pass (gets reset) num_connections: u64, + /// Number of state machine passes + rounds: u128, + /// Round when we last pushed + push_round: u128, } impl StackerDBSyncResult { diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 02390211bc..7309ad40b2 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -74,6 +74,8 @@ impl StackerDBSync { stale_neighbors: HashSet::new(), num_connections: 0, num_attempted_connections: 0, + rounds: 0, + push_round: 0, }; dbsync.reset(None, config); dbsync @@ -215,6 +217,7 @@ impl StackerDBSync { self.state = StackerDBSyncState::ConnectBegin; self.num_connections = 0; self.num_attempted_connections = 0; + self.rounds += 1; result } @@ -407,6 +410,16 @@ impl StackerDBSync { thread_rng().gen::() % chunk_inv.num_outbound_replicas == 0 }; + debug!( + "{:?}: Can push chunk StackerDBChunk(db={},id={},ver={}) to {}. Replicate? {}", + &network.get_local_peer(), + &self.smart_contract_id, + our_chunk.chunk_data.slot_id, + our_chunk.chunk_data.slot_version, + &naddr, + do_replicate + ); + if !do_replicate { continue; } @@ -1000,9 +1013,11 @@ impl StackerDBSync { /// Returns true if there are no more chunks to push. /// Returns false if there are pub fn pushchunks_begin(&mut self, network: &mut PeerNetwork) -> Result { - if self.chunk_push_priorities.len() == 0 { + if self.chunk_push_priorities.len() == 0 && self.push_round != self.rounds { + // only do this once per round let priorities = self.make_chunk_push_schedule(&network)?; self.chunk_push_priorities = priorities; + self.push_round = self.rounds; } if self.chunk_push_priorities.len() == 0 { // done @@ -1017,8 +1032,6 @@ impl StackerDBSync { self.chunk_push_priorities.len() ); - let mut pushed = 0; - // fill up our comms with $capacity requests for _i in 0..self.request_capacity { if self.comms.count_inflight() >= self.request_capacity { @@ -1030,7 +1043,8 @@ impl StackerDBSync { .1 .iter() .enumerate() - .find(|(_i, naddr)| !self.comms.has_inflight(naddr)); + // .find(|(_i, naddr)| !self.comms.has_inflight(naddr)); + .find(|(_i, _naddr)| true); let (idx, selected_neighbor) = if let Some(x) = selected_neighbor_opt { x @@ -1072,8 +1086,6 @@ impl StackerDBSync { continue; } - pushed += 1; - // record what we just sent self.chunk_push_receipts .insert(selected_neighbor.clone(), (slot_id, slot_version)); @@ -1088,7 +1100,13 @@ impl StackerDBSync { return Err(net_error::PeerNotConnected); } self.next_chunk_push_priority = cur_priority; - Ok(self.chunk_push_priorities.len() == 0) + Ok(self + .chunk_push_priorities + .iter() + .fold(0usize, |acc, (_chunk, num_naddrs)| { + acc.saturating_add(num_naddrs.len()) + }) + == 0) } /// Collect push-chunk replies from neighbors. @@ -1138,7 +1156,14 @@ impl StackerDBSync { } } - self.comms.count_inflight() == 0 + let inflight = self.comms.count_inflight(); + debug!( + "{:?}: inflight messages for {}: {:?}", + network.get_local_peer(), + &self.smart_contract_id, + inflight + ); + inflight == 0 } /// Recalculate the download schedule based on chunkinvs received on push @@ -1189,8 +1214,9 @@ impl StackerDBSync { loop { debug!( - "{:?}: stacker DB sync state is {:?}", + "{:?}: stacker DB sync state for {} is {:?}", network.get_local_peer(), + &self.smart_contract_id, &self.state ); let mut blocked = true; From 7c7f9b3238b47059ec3fe489cec58fb964967873 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 26 Jul 2024 14:52:41 -0500 Subject: [PATCH 0571/1400] feat: support multi-miner integration test * test: add nakamoto_integration test with multiple miners (this test uses the blind signer test signing channel) * fix: nakamoto miner communicates over the correct miner slot for their block election (rather than searching by pubkey) * fix: neon miner does not submit block commits if the next burn block is in nakamoto * feat: update `/v2/neighbors` to use qualified contract identifier's ToString and parse() for JSON serialization * perf: nakamoto miner caches the reward set for their tenure --- stackslib/src/chainstate/nakamoto/mod.rs | 93 ++-- .../src/chainstate/nakamoto/tests/mod.rs | 10 +- stackslib/src/net/api/getneighbors.rs | 34 ++ stackslib/src/net/rpc.rs | 12 +- stackslib/src/net/stackerdb/config.rs | 15 +- stackslib/src/net/stackerdb/mod.rs | 54 +- stackslib/src/net/stackerdb/sync.rs | 3 - .../burnchains/bitcoin_regtest_controller.rs | 101 ++-- .../stacks-node/src/nakamoto_node/miner.rs | 127 +++-- .../src/nakamoto_node/sign_coordinator.rs | 16 +- testnet/stacks-node/src/neon_node.rs | 11 + testnet/stacks-node/src/tests/mod.rs | 35 ++ .../src/tests/nakamoto_integrations.rs | 468 ++++++++++++++++-- .../src/tests/neon_integrations.rs | 7 + 14 files changed, 776 insertions(+), 210 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e0dd0c0afd..b65e9ff086 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -455,6 +455,28 @@ impl MaturedMinerPaymentSchedules { } } +pub struct MinersDBInformation { + signer_0_sortition: ConsensusHash, + signer_1_sortition: ConsensusHash, + latest_winner: u16, +} + +impl MinersDBInformation { + pub fn get_signer_index(&self, sortition: &ConsensusHash) -> Option { + if sortition == &self.signer_0_sortition { + Some(0) + } else if sortition == &self.signer_1_sortition { + Some(1) + } else { + None + } + } + + pub fn get_latest_winner_index(&self) -> u16 { + self.latest_winner + } +} + /// Calculated matured miner rewards, from scheduled rewards #[derive(Debug, Clone)] pub struct MaturedMinerRewards { @@ -4039,7 +4061,7 @@ impl NakamotoChainState { pub fn make_miners_stackerdb_config( sortdb: &SortitionDB, tip: &BlockSnapshot, - ) -> Result { + ) -> Result<(StackerDBConfig, MinersDBInformation), ChainstateError> { let ih = sortdb.index_handle(&tip.sortition_id); let last_winner_snapshot = ih.get_last_snapshot_with_sortition(tip.block_height)?; let parent_winner_snapshot = ih.get_last_snapshot_with_sortition( @@ -4051,13 +4073,13 @@ impl NakamotoChainState { // go get their corresponding leader keys, but preserve the miner's relative position in // the stackerdb signer list -- if a miner was in slot 0, then it should stay in slot 0 // after a sortition (and vice versa for 1) - let sns = if last_winner_snapshot.num_sortitions % 2 == 0 { - [last_winner_snapshot, parent_winner_snapshot] + let (latest_winner_idx, sns) = if last_winner_snapshot.num_sortitions % 2 == 0 { + (0, [last_winner_snapshot, parent_winner_snapshot]) } else { - [parent_winner_snapshot, last_winner_snapshot] + (1, [parent_winner_snapshot, last_winner_snapshot]) }; - for sn in sns { + for sn in sns.iter() { // find the commit let Some(block_commit) = ih.get_block_commit_by_txid(&sn.sortition_id, &sn.winning_block_txid)? @@ -4088,6 +4110,12 @@ impl NakamotoChainState { ); } + let miners_db_info = MinersDBInformation { + signer_0_sortition: sns[0].consensus_hash, + signer_1_sortition: sns[1].consensus_hash, + latest_winner: latest_winner_idx, + }; + let signers = miner_key_hash160s .into_iter() .map(|hash160| @@ -4101,14 +4129,17 @@ impl NakamotoChainState { )) .collect(); - Ok(StackerDBConfig { - chunk_size: MAX_PAYLOAD_LEN.into(), - signers, - write_freq: 0, - max_writes: u32::MAX, // no limit on number of writes - max_neighbors: 200, // TODO: const -- just has to be equal to or greater than the number of signers - hint_replicas: vec![], // TODO: is there a way to get the IP addresses of stackers' preferred nodes? - }) + Ok(( + StackerDBConfig { + chunk_size: MAX_PAYLOAD_LEN.into(), + signers, + write_freq: 0, + max_writes: u32::MAX, // no limit on number of writes + max_neighbors: 200, // TODO: const -- just has to be equal to or greater than the number of signers + hint_replicas: vec![], // TODO: is there a way to get the IP addresses of stackers' preferred nodes? + }, + miners_db_info, + )) } /// Get the slot range for the given miner's public key. @@ -4119,33 +4150,29 @@ impl NakamotoChainState { pub fn get_miner_slot( sortdb: &SortitionDB, tip: &BlockSnapshot, - miner_pubkey: &StacksPublicKey, + election_sortition: &ConsensusHash, ) -> Result>, ChainstateError> { - let miner_hash160 = Hash160::from_node_public_key(&miner_pubkey); - let stackerdb_config = Self::make_miners_stackerdb_config(sortdb, &tip)?; + let (stackerdb_config, miners_info) = Self::make_miners_stackerdb_config(sortdb, &tip)?; // find out which slot we're in - let mut slot_index = 0; - let mut slot_id_result = None; - for (addr, slot_count) in stackerdb_config.signers.iter() { - if addr.bytes == miner_hash160 { - slot_id_result = Some(Range { - start: slot_index, - end: slot_index + slot_count, - }); - break; - } - slot_index += slot_count; - } - - let Some(slot_id_range) = slot_id_result else { - // miner key does not match any slot + let Some(signer_ix) = miners_info + .get_signer_index(election_sortition) + .map(usize::from) + else { warn!("Miner is not in the miners StackerDB config"; - "miner" => %miner_hash160, "stackerdb_slots" => format!("{:?}", &stackerdb_config.signers)); - return Ok(None); }; + let mut signer_ranges = stackerdb_config.signer_ranges(); + if signer_ix >= signer_ranges.len() { + // should be unreachable, but always good to be careful + warn!("Miner is not in the miners StackerDB config"; + "stackerdb_slots" => format!("{:?}", &stackerdb_config.signers)); + + return Ok(None); + } + let slot_id_range = signer_ranges.swap_remove(signer_ix); + Ok(Some(slot_id_range)) } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index b96ed86f03..ef38ec76c6 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -2049,8 +2049,9 @@ fn test_make_miners_stackerdb_config() { let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); // check the stackerdb config as of this chain tip - let stackerdb_config = - NakamotoChainState::make_miners_stackerdb_config(sort_db, &tip).unwrap(); + let stackerdb_config = NakamotoChainState::make_miners_stackerdb_config(sort_db, &tip) + .unwrap() + .0; eprintln!( "stackerdb_config at i = {} (sorition? {}): {:?}", &i, sortition, &stackerdb_config @@ -2079,8 +2080,9 @@ fn test_make_miners_stackerdb_config() { let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let miner_privkey = &miner_keys[i]; let miner_pubkey = StacksPublicKey::from_private(miner_privkey); - let slot_id = NakamotoChainState::get_miner_slot(&sort_db, &tip, &miner_pubkey) - .expect("Failed to get miner slot"); + let slot_id = + NakamotoChainState::get_miner_slot(&sort_db, &tip, &block.header.consensus_hash) + .expect("Failed to get miner slot"); if sortition { let slot_id = slot_id.expect("No miner slot exists for this miner").start; let slot_version = stackerdbs diff --git a/stackslib/src/net/api/getneighbors.rs b/stackslib/src/net/api/getneighbors.rs index 51454352a1..6707ed3ba1 100644 --- a/stackslib/src/net/api/getneighbors.rs +++ b/stackslib/src/net/api/getneighbors.rs @@ -51,9 +51,43 @@ pub struct RPCNeighbor { pub public_key_hash: Hash160, pub authenticated: bool, #[serde(skip_serializing_if = "Option::is_none")] + #[serde(with = "serde_opt_vec_qci")] pub stackerdbs: Option>, } +/// Serialize and deserialize `Option>` +/// using the `to_string()` and `parse()` implementations of `QualifiedContractIdentifier`. +mod serde_opt_vec_qci { + use clarity::vm::types::QualifiedContractIdentifier; + use serde::{Deserialize, Serialize}; + + pub fn serialize( + opt: &Option>, + serializer: S, + ) -> Result { + let serialize_as: Option> = opt + .as_ref() + .map(|vec_qci| vec_qci.iter().map(ToString::to_string).collect()); + serialize_as.serialize(serializer) + } + + pub fn deserialize<'de, D>(de: D) -> Result>, D::Error> + where + D: serde::Deserializer<'de>, + { + let from_str: Option> = Deserialize::deserialize(de)?; + let Some(vec_str) = from_str else { + return Ok(None); + }; + let parse_opt: Result, _> = vec_str + .into_iter() + .map(|x| QualifiedContractIdentifier::parse(&x).map_err(serde::de::Error::custom)) + .collect(); + let out_vec = parse_opt?; + Ok(Some(out_vec)) + } +} + impl RPCNeighbor { pub fn from_neighbor_key_and_pubkh( nk: NeighborKey, diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index efa0484d4b..78b1ff096b 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -554,12 +554,12 @@ impl ConversationHttp { )?; info!("Handled StacksHTTPRequest"; - "verb" => %verb, - "path" => %request_path, - "processing_time_ms" => start_time.elapsed().as_millis(), - "latency_ms" => latency, - "conn_id" => self.conn_id, - "peer_addr" => &self.peer_addr); + "verb" => %verb, + "path" => %request_path, + "processing_time_ms" => start_time.elapsed().as_millis(), + "latency_ms" => latency, + "conn_id" => self.conn_id, + "peer_addr" => &self.peer_addr); if let Some(msg) = msg_opt { ret.push(msg); diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index 2d4c39e349..97f8214913 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -555,10 +555,17 @@ impl StackerDBConfig { reason, )); } else if let Some(Err(e)) = res { - warn!( - "Could not use contract {} for StackerDB: {:?}", - contract_id, &e - ); + if contract_id.is_boot() { + debug!( + "Could not use contract {} for StackerDB: {:?}", + contract_id, &e + ); + } else { + warn!( + "Could not use contract {} for StackerDB: {:?}", + contract_id, &e + ); + } return Err(e); } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 3e91c9b542..847363b2e3 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -119,6 +119,7 @@ pub mod db; pub mod sync; use std::collections::{HashMap, HashSet}; +use std::ops::Range; use clarity::vm::types::QualifiedContractIdentifier; use libstackerdb::{SlotMetadata, STACKERDB_MAX_CHUNK_SIZE}; @@ -205,6 +206,22 @@ impl StackerDBConfig { pub fn num_slots(&self) -> u32 { self.signers.iter().fold(0, |acc, s| acc + s.1) } + + /// What are the slot index ranges for each signer? + /// Returns the ranges in the same ordering as `self.signers` + pub fn signer_ranges(&self) -> Vec> { + let mut slot_index = 0; + let mut result = Vec::with_capacity(self.signers.len()); + for (_, slot_count) in self.signers.iter() { + let end = slot_index + *slot_count; + result.push(Range { + start: slot_index, + end, + }); + slot_index = end; + } + result + } } /// This is the set of replicated chunks in all stacker DBs that this node subscribes to. @@ -280,14 +297,16 @@ impl StackerDBs { == boot_code_id(MINERS_NAME, chainstate.mainnet) { // .miners contract -- directly generate the config - NakamotoChainState::make_miners_stackerdb_config(sortdb, &tip).unwrap_or_else(|e| { - warn!( - "Failed to generate .miners StackerDB config"; - "contract" => %stackerdb_contract_id, - "err" => ?e, - ); - StackerDBConfig::noop() - }) + NakamotoChainState::make_miners_stackerdb_config(sortdb, &tip) + .map(|(config, _)| config) + .unwrap_or_else(|e| { + warn!( + "Failed to generate .miners StackerDB config"; + "contract" => %stackerdb_contract_id, + "err" => ?e, + ); + StackerDBConfig::noop() + }) } else { // attempt to load the config from the contract itself StackerDBConfig::from_smart_contract( @@ -297,11 +316,20 @@ impl StackerDBs { num_neighbors, ) .unwrap_or_else(|e| { - warn!( - "Failed to load StackerDB config"; - "contract" => %stackerdb_contract_id, - "err" => ?e, - ); + if matches!(e, net_error::NoSuchStackerDB(_)) && stackerdb_contract_id.is_boot() + { + debug!( + "Failed to load StackerDB config"; + "contract" => %stackerdb_contract_id, + "err" => ?e, + ); + } else { + warn!( + "Failed to load StackerDB config"; + "contract" => %stackerdb_contract_id, + "err" => ?e, + ); + } StackerDBConfig::noop() }) }; diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 7309ad40b2..f574efd5fb 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -1096,9 +1096,6 @@ impl StackerDBSync { // next-prioritized chunk cur_priority = (cur_priority + 1) % self.chunk_push_priorities.len(); } - if pushed == 0 { - return Err(net_error::PeerNotConnected); - } self.next_chunk_push_priority = cur_priority; Ok(self .chunk_push_priorities diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 30f088a96f..8b5a6c470c 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2037,6 +2037,61 @@ impl BitcoinRegtestController { let tx: Transaction = btc_deserialize(&hex_bytes(&txstr).unwrap()).unwrap(); tx } + + /// Produce `num_blocks` regtest bitcoin blocks, sending the bitcoin coinbase rewards + /// to the bitcoin single sig addresses corresponding to `pks` in a round robin fashion. + #[cfg(test)] + pub fn bootstrap_chain_to_pks(&mut self, num_blocks: usize, pks: &[Secp256k1PublicKey]) { + info!("Creating wallet if it does not exist"); + if let Err(e) = self.create_wallet_if_dne() { + error!("Error when creating wallet: {e:?}"); + } + + for pk in pks { + debug!("Import public key '{}'", &pk.to_hex()); + if let Err(e) = BitcoinRPCRequest::import_public_key(&self.config, &pk) { + warn!("Error when importing pubkey: {e:?}"); + } + } + + if pks.len() == 1 { + // if we only have one pubkey, just generate all the blocks at once + let address = self.get_miner_address(StacksEpochId::Epoch21, &pks[0]); + debug!( + "Generate to address '{}' for public key '{}'", + &addr2str(&address), + &pks[0].to_hex() + ); + if let Err(e) = BitcoinRPCRequest::generate_to_address( + &self.config, + num_blocks.try_into().unwrap(), + addr2str(&address), + ) { + error!("Bitcoin RPC failure: error generating block {:?}", e); + panic!(); + } + return; + } + + // otherwise, round robin generate blocks + for i in 0..num_blocks { + let pk = &pks[usize::try_from(i % pks.len()).unwrap()]; + let address = self.get_miner_address(StacksEpochId::Epoch21, pk); + if i < pks.len() { + debug!( + "Generate to address '{}' for public key '{}'", + &addr2str(&address), + &pk.to_hex(), + ); + } + if let Err(e) = + BitcoinRPCRequest::generate_to_address(&self.config, 1, addr2str(&address)) + { + error!("Bitcoin RPC failure: error generating block {:?}", e); + panic!(); + } + } + } } impl BurnchainController for BitcoinRegtestController { @@ -2152,45 +2207,19 @@ impl BurnchainController for BitcoinRegtestController { #[cfg(test)] fn bootstrap_chain(&mut self, num_blocks: u64) { - if let Some(ref local_mining_pubkey) = &self.config.burnchain.local_mining_public_key { - // NOTE: miner address is whatever the miner's segwit setting says it is here - let mut local_mining_pubkey = - Secp256k1PublicKey::from_hex(local_mining_pubkey).unwrap(); - let address = self.get_miner_address(StacksEpochId::Epoch21, &local_mining_pubkey); - - if self.config.miner.segwit { - local_mining_pubkey.set_compressed(true); - } - - info!("Creating wallet if it does not exist"); - match self.create_wallet_if_dne() { - Err(e) => warn!("Error when creating wallet: {:?}", e), - _ => {} - } - - test_debug!("Import public key '{}'", &local_mining_pubkey.to_hex()); - - let _result = BitcoinRPCRequest::import_public_key(&self.config, &local_mining_pubkey); + let Some(ref local_mining_pubkey) = &self.config.burnchain.local_mining_public_key else { + warn!("No local mining pubkey while bootstrapping bitcoin regtest, will not generate bitcoin blocks"); + return; + }; - test_debug!( - "Generate to address '{}' for public key '{}'", - &addr2str(&address), - &local_mining_pubkey.to_hex() - ); - let result = BitcoinRPCRequest::generate_to_address( - &self.config, - num_blocks, - addr2str(&address), - ); + // NOTE: miner address is whatever the miner's segwit setting says it is here + let mut local_mining_pubkey = Secp256k1PublicKey::from_hex(local_mining_pubkey).unwrap(); - match result { - Ok(_) => {} - Err(e) => { - error!("Bitcoin RPC failure: error generating block {:?}", e); - panic!(); - } - } + if self.config.miner.segwit { + local_mining_pubkey.set_compressed(true); } + + self.bootstrap_chain_to_pks(num_blocks.try_into().unwrap(), &[local_mining_pubkey]) } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index aaeb931a1a..527117fb4d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -150,6 +150,7 @@ pub struct BlockMinerThread { reason: MinerReason, /// Handle to the p2p thread for block broadcast p2p_handle: NetworkHandle, + signer_set_cache: Option, } impl BlockMinerThread { @@ -175,6 +176,7 @@ impl BlockMinerThread { parent_tenure_id, reason, p2p_handle: rt.get_p2p_handle(), + signer_set_cache: None, } } @@ -324,19 +326,14 @@ impl BlockMinerThread { } } - /// Gather a list of signatures from the signers for the block - fn gather_signatures( - &mut self, - new_block: &mut NakamotoBlock, - burn_block_height: u64, - stackerdbs: &mut StackerDBs, - attempts: &mut u64, - ) -> Result<(RewardSet, Vec), NakamotoNodeError> { - let Some(miner_privkey) = self.config.miner.mining_key else { - return Err(NakamotoNodeError::MinerConfigurationFailed( - "No mining key configured, cannot mine", - )); - }; + /// Load the signer set active for this miner's blocks. This is the + /// active reward set during `self.burn_election_block`. The miner + /// thread caches this information, and this method will consult + /// that cache (or populate it if necessary). + fn load_signer_set(&mut self) -> Result { + if let Some(set) = self.signer_set_cache.as_ref() { + return Ok(set.clone()); + } let sort_db = SortitionDB::open( &self.config.get_burn_db_file_path(), true, @@ -348,22 +345,6 @@ impl BlockMinerThread { )) })?; - let tip = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - &new_block.header.consensus_hash, - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to retrieve chain tip: {:?}", - e - )) - }) - .and_then(|result| { - result.ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure("Failed to retrieve chain tip".into()) - }) - })?; - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { NakamotoNodeError::SigningCoordinatorFailure(format!( @@ -371,14 +352,16 @@ impl BlockMinerThread { )) })?; + let burn_election_height = self.burn_election_block.block_height; + let reward_info = match load_nakamoto_reward_set( self.burnchain - .pox_reward_cycle(tip.block_height.saturating_add(1)) + .pox_reward_cycle(burn_election_height) .expect("FATAL: no reward cycle for sortition"), - &tip.sortition_id, + &self.burn_election_block.sortition_id, &self.burnchain, &mut chain_state, - &new_block.header.parent_block_id, + &self.parent_tenure_id, &sort_db, &OnChainRewardSetProvider::new(), ) { @@ -401,7 +384,52 @@ impl BlockMinerThread { )); }; + self.signer_set_cache = Some(reward_set.clone()); + Ok(reward_set) + } + + /// Gather a list of signatures from the signers for the block + fn gather_signatures( + &mut self, + new_block: &mut NakamotoBlock, + burn_block_height: u64, + stackerdbs: &mut StackerDBs, + attempts: &mut u64, + ) -> Result<(RewardSet, Vec), NakamotoNodeError> { + let Some(miner_privkey) = self.config.miner.mining_key else { + return Err(NakamotoNodeError::MinerConfigurationFailed( + "No mining key configured, cannot mine", + )); + }; + let sort_db = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open sortition DB. Cannot mine! {e:?}" + )) + })?; + + let tip = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + &new_block.header.consensus_hash, + ) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to retrieve chain tip: {:?}", + e + )) + }) + .and_then(|result| { + result.ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure("Failed to retrieve chain tip".into()) + }) + })?; + let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); + let reward_set = self.load_signer_set()?; let mut coordinator = SignCoordinator::new(&reward_set, miner_privkey_as_scalar, &self.config).map_err( |e| { @@ -421,6 +449,7 @@ impl BlockMinerThread { &sort_db, &stackerdbs, &self.globals.counters, + &self.burn_election_block.consensus_hash, )?; return Ok((reward_set, signature)); @@ -644,6 +673,7 @@ impl BlockMinerThread { MinerSlotID::BlockPushed, chain_state.mainnet, &mut miners_session, + &self.burn_election_block.consensus_hash, ) .map_err(NakamotoNodeError::SigningCoordinatorFailure) } @@ -886,6 +916,7 @@ impl BlockMinerThread { debug!("block miner thread ID is {:?}", thread::current().id()); let burn_db_path = self.config.get_burn_db_file_path(); + let reward_set = self.load_signer_set()?; // NOTE: read-write access is needed in order to be able to query the recipient set. // This is an artifact of the way the MARF is built (see #1449) @@ -932,38 +963,6 @@ impl BlockMinerThread { let signer_transactions = self.get_signer_transactions(&mut chain_state, &burn_db, &stackerdbs)?; - let tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) - .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::DBError(e)))?; - - let reward_info = match load_nakamoto_reward_set( - self.burnchain - .pox_reward_cycle(tip.block_height.saturating_add(1)) - .expect("FATAL: no reward cycle defined for sortition tip"), - &tip.sortition_id, - &self.burnchain, - &mut chain_state, - &parent_block_info.stacks_parent_header.index_block_hash(), - &burn_db, - &OnChainRewardSetProvider::new(), - ) { - Ok(Some((reward_info, _))) => reward_info, - Ok(None) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "No reward set stored yet. Cannot mine!".into(), - )); - } - Err(e) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" - ))); - } - }; - - let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Current reward cycle did not select a reward set. Cannot mine!".into(), - )); - }; let signer_bitvec_len = reward_set.rewarded_addresses.len().try_into().ok(); // build the block itself diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 30f73e75be..b6e42b87ee 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -23,7 +23,7 @@ use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; use stacks::chainstate::stacks::events::StackerDBChunksEvent; @@ -341,6 +341,7 @@ impl SignCoordinator { miner_slot_id: MinerSlotID, is_mainnet: bool, miners_session: &mut StackerDBSession, + election_sortition: &ConsensusHash, ) -> Result<(), String> { let mut miner_sk = StacksPrivateKey::from_slice(&message_key.to_bytes()).unwrap(); miner_sk.set_compress_public(true); @@ -353,6 +354,7 @@ impl SignCoordinator { miner_slot_id, is_mainnet, miners_session, + election_sortition, ) } @@ -366,9 +368,9 @@ impl SignCoordinator { miner_slot_id: MinerSlotID, is_mainnet: bool, miners_session: &mut StackerDBSession, + election_sortition: &ConsensusHash, ) -> Result<(), String> { - let miner_pubkey = StacksPublicKey::from_private(&miner_sk); - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &miner_pubkey) + let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &election_sortition) .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? else { return Err("No slot for miner".into()); @@ -417,6 +419,7 @@ impl SignCoordinator { sortdb: &SortitionDB, stackerdbs: &StackerDBs, counters: &Counters, + election_sortiton: &ConsensusHash, ) -> Result { let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); let sign_iter_id = block_attempt; @@ -450,6 +453,7 @@ impl SignCoordinator { MinerSlotID::BlockProposal, self.is_mainnet, &mut self.miners_session, + election_sortiton, ) .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; counters.bump_naka_proposed_blocks(); @@ -604,6 +608,7 @@ impl SignCoordinator { MinerSlotID::BlockProposal, self.is_mainnet, &mut self.miners_session, + election_sortiton, ) { Ok(()) => { debug!("Miner/Coordinator: sent outbound message."); @@ -636,6 +641,7 @@ impl SignCoordinator { sortdb: &SortitionDB, stackerdbs: &StackerDBs, counters: &Counters, + election_sortition: &ConsensusHash, ) -> Result, NakamotoNodeError> { let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); let sign_iter_id = block_attempt; @@ -664,11 +670,15 @@ impl SignCoordinator { MinerSlotID::BlockProposal, self.is_mainnet, &mut self.miners_session, + election_sortition, ) .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; counters.bump_naka_proposed_blocks(); #[cfg(test)] { + info!( + "SignCoordinator: sent block proposal to .miners, waiting for test signing channel" + ); // In test mode, short-circuit waiting for the signers if the TEST_SIGNING // channel has been created. This allows integration tests for the stacks-node // independent of the stacks-signer. diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index ed31540f20..8c3c4ed179 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -3985,6 +3985,17 @@ impl RelayerThread { } RelayerDirective::RunTenure(registered_key, last_burn_block, issue_timestamp_ms) => { debug!("Relayer: directive Run tenure"); + let Ok(Some(next_block_epoch)) = SortitionDB::get_stacks_epoch( + self.sortdb_ref().conn(), + last_burn_block.block_height.saturating_add(1), + ) else { + warn!("Failed to load Stacks Epoch for next burn block, skipping RunTenure directive"); + return true; + }; + if next_block_epoch.epoch_id.uses_nakamoto_blocks() { + info!("Next burn block is in Nakamoto epoch, skipping RunTenure directive for 2.x node"); + return true; + } self.block_miner_thread_try_start( registered_key, last_burn_block, diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 0b8c379f7c..a7892b9a2d 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -324,6 +324,41 @@ pub fn new_test_conf() -> Config { conf } +/// Randomly change the config's network ports to new ports. +pub fn set_random_binds(config: &mut Config) { + let prior_rpc_port: u16 = config + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = config + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let (rpc_port, p2p_port) = loop { + let mut rng = rand::thread_rng(); + let mut buf = [0u8; 8]; + rng.fill_bytes(&mut buf); + let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + if rpc_port != prior_rpc_port && p2p_port != prior_p2p_port { + break (rpc_port, p2p_port); + } + }; + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, rpc_port); + config.node.p2p_bind = format!("{}:{}", localhost, p2p_port); + config.node.data_url = format!("http://{}:{}", localhost, rpc_port); + config.node.p2p_address = format!("{}:{}", localhost, p2p_port); +} + pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 9b15b83afb..ba80f64c6c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -100,12 +100,13 @@ use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ - call_read_only, get_account, get_account_result, get_chain_info_result, get_pox_info, - next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, + call_read_only, get_account, get_account_result, get_chain_info_result, get_neighbors, + get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, + wait_for_runloop, }; use crate::tests::{ get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, - to_addr, + set_random_binds, to_addr, }; use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; @@ -293,31 +294,79 @@ pub fn blind_signer( signers: &TestSigners, proposals_count: RunLoopCounter, ) -> JoinHandle<()> { + blind_signer_multinode(signers, &[conf], vec![proposals_count]) +} + +/// Spawn a blind signing thread listening to potentially multiple stacks nodes. +/// `signer` is the private key of the individual signer who broadcasts the response to the StackerDB. +/// The thread will check each node's proposal counter in order to wake up, but will only read from the first +/// node's StackerDB (it will read all of the StackerDBs to provide logging information, though). +pub fn blind_signer_multinode( + signers: &TestSigners, + configs: &[&Config], + proposals_count: Vec, +) -> JoinHandle<()> { + assert_eq!( + configs.len(), + proposals_count.len(), + "Expect the same number of node configs as proposals counters" + ); let sender = TestSigningChannel::instantiate(); let mut signed_blocks = HashSet::new(); - let conf = conf.clone(); + let configs: Vec<_> = configs.iter().map(|x| Clone::clone(*x)).collect(); let signers = signers.clone(); - let mut last_count = proposals_count.load(Ordering::SeqCst); - thread::spawn(move || loop { - thread::sleep(Duration::from_millis(100)); - let cur_count = proposals_count.load(Ordering::SeqCst); - if cur_count <= last_count { - continue; - } - last_count = cur_count; - match read_and_sign_block_proposal(&conf, &signers, &signed_blocks, &sender) { - Ok(signed_block) => { - if signed_blocks.contains(&signed_block) { - continue; - } - info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); - signed_blocks.insert(signed_block); + let mut last_count: Vec<_> = proposals_count + .iter() + .map(|x| x.load(Ordering::SeqCst)) + .collect(); + thread::Builder::new() + .name("blind-signer".into()) + .spawn(move || loop { + thread::sleep(Duration::from_millis(100)); + let cur_count: Vec<_> = proposals_count + .iter() + .map(|x| x.load(Ordering::SeqCst)) + .collect(); + if cur_count + .iter() + .zip(last_count.iter()) + .all(|(cur_count, last_count)| cur_count <= last_count) + { + continue; } - Err(e) => { - warn!("Error reading and signing block proposal: {e}"); + thread::sleep(Duration::from_secs(2)); + info!("Checking for a block proposal to sign..."); + last_count = cur_count; + let configs: Vec<&Config> = configs.iter().map(|x| x).collect(); + match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { + Ok(signed_block) => { + if signed_blocks.contains(&signed_block) { + info!("Already signed block, will sleep and try again"; "signer_sig_hash" => signed_block.to_hex()); + thread::sleep(Duration::from_secs(5)); + match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { + Ok(signed_block) => { + if signed_blocks.contains(&signed_block) { + info!("Already signed block, ignoring"; "signer_sig_hash" => signed_block.to_hex()); + continue; + } + info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); + signed_blocks.insert(signed_block); + } + Err(e) => { + warn!("Error reading and signing block proposal: {e}"); + } + }; + continue; + } + info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); + signed_blocks.insert(signed_block); + } + Err(e) => { + warn!("Error reading and signing block proposal: {e}"); + } } - } - }) + }) + .unwrap() } pub fn get_latest_block_proposal( @@ -325,26 +374,68 @@ pub fn get_latest_block_proposal( sortdb: &SortitionDB, ) -> Result<(NakamotoBlock, StacksPublicKey), String> { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let miner_pubkey = StacksPublicKey::from_private(&conf.get_miner_config().mining_key.unwrap()); - let miner_slot_id = NakamotoChainState::get_miner_slot(&sortdb, &tip, &miner_pubkey) - .map_err(|_| "Unable to get miner slot")? - .ok_or("No miner slot exists")?; - - let proposed_block = { - let miner_contract_id = boot_code_id(MINERS_NAME, false); - let mut miners_stackerdb = StackerDBSession::new(&conf.node.rpc_bind, miner_contract_id); - let message: SignerMessageV0 = miners_stackerdb - .get_latest(miner_slot_id.start) - .expect("Failed to get latest chunk from the miner slot ID") - .expect("No chunk found"); - let SignerMessageV0::BlockProposal(block_proposal) = message else { - panic!("Expected a signer message block proposal. Got {message:?}"); - }; - // TODO: use v1 message types behind epoch gate - // get_block_proposal_msg_v1(&mut miners_stackerdb, miner_slot_id.start); - block_proposal.block - }; - Ok((proposed_block, miner_pubkey)) + let (stackerdb_conf, miner_info) = + NakamotoChainState::make_miners_stackerdb_config(sortdb, &tip) + .map_err(|e| e.to_string())?; + let miner_ranges = stackerdb_conf.signer_ranges(); + let latest_miner = usize::from(miner_info.get_latest_winner_index()); + let miner_contract_id = boot_code_id(MINERS_NAME, false); + let mut miners_stackerdb = StackerDBSession::new(&conf.node.rpc_bind, miner_contract_id); + + let mut proposed_blocks: Vec<_> = stackerdb_conf + .signers + .iter() + .enumerate() + .zip(miner_ranges) + .filter_map(|((miner_ix, (miner_addr, _)), miner_slot_id)| { + let proposed_block = { + let message: SignerMessageV0 = + miners_stackerdb.get_latest(miner_slot_id.start).ok()??; + let SignerMessageV0::BlockProposal(block_proposal) = message else { + panic!("Expected a signer message block proposal. Got {message:?}"); + }; + block_proposal.block + }; + Some((proposed_block, miner_addr, miner_ix == latest_miner)) + }) + .collect(); + + proposed_blocks.sort_by(|(block_a, _, is_latest_a), (block_b, _, is_latest_b)| { + if block_a.header.chain_length > block_b.header.chain_length { + return std::cmp::Ordering::Greater; + } else if block_a.header.chain_length < block_b.header.chain_length { + return std::cmp::Ordering::Less; + } + // the heights are tied, tie break with the latest miner + if *is_latest_a { + return std::cmp::Ordering::Greater; + } + if *is_latest_b { + return std::cmp::Ordering::Less; + } + return std::cmp::Ordering::Equal; + }); + + for (b, _, is_latest) in proposed_blocks.iter() { + info!("Consider block"; "signer_sighash" => %b.header.signer_signature_hash(), "is_latest_sortition" => is_latest, "chain_height" => b.header.chain_length); + } + + let (proposed_block, miner_addr, _) = proposed_blocks.pop().unwrap(); + + let pubkey = StacksPublicKey::recover_to_pubkey( + proposed_block.header.miner_signature_hash().as_bytes(), + &proposed_block.header.miner_signature, + ) + .map_err(|e| e.to_string())?; + let miner_signed_addr = StacksAddress::p2pkh(false, &pubkey); + if miner_signed_addr.bytes != miner_addr.bytes { + return Err(format!( + "Invalid miner signature on proposal. Found {}, expected {}", + miner_signed_addr.bytes, miner_addr.bytes + )); + } + + Ok((proposed_block, pubkey)) } #[allow(dead_code)] @@ -369,11 +460,12 @@ fn get_block_proposal_msg_v1( } pub fn read_and_sign_block_proposal( - conf: &Config, + configs: &[&Config], signers: &TestSigners, signed_blocks: &HashSet, channel: &Sender>, ) -> Result { + let conf = configs.first().unwrap(); let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); let (mut chainstate, _) = StacksChainState::open( @@ -387,8 +479,30 @@ pub fn read_and_sign_block_proposal( let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let mut proposed_block = get_latest_block_proposal(conf, &sortdb)?.0; + let other_views_result: Result, _> = configs + .get(1..) + .unwrap() + .iter() + .map(|other_conf| { + get_latest_block_proposal(other_conf, &sortdb).map(|proposal| { + ( + proposal.0.header.signer_signature_hash(), + proposal.0.header.chain_length, + ) + }) + }) + .collect(); let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let signer_sig_hash = proposed_block.header.signer_signature_hash(); + let other_views = other_views_result?; + if !other_views.is_empty() { + info!( + "Fetched block proposals"; + "primary_latest_signer_sighash" => %signer_sig_hash, + "primary_latest_block_height" => proposed_block.header.chain_length, + "other_views" => ?other_views, + ); + } if signed_blocks.contains(&signer_sig_hash) { // already signed off on this block, don't sign again. @@ -632,6 +746,17 @@ pub fn boot_to_epoch_3( // first mined stacks block next_block_and_wait(btc_regtest_controller, &blocks_processed); + let start_time = Instant::now(); + loop { + if start_time.elapsed() > Duration::from_secs(20) { + panic!("Timed out waiting for the stacks height to increment") + } + let stacks_height = get_chain_info(&naka_conf).stacks_tip_height; + if stacks_height >= 1 { + break; + } + thread::sleep(Duration::from_millis(100)); + } // stack enough to activate pox-4 let block_height = btc_regtest_controller.get_headers_height(); @@ -1444,6 +1569,261 @@ fn mine_multiple_per_tenure_integration() { run_loop_thread.join().unwrap(); } +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// This test makes three assertions: +/// * 5 tenures are mined after 3.0 starts +/// * Each tenure has 10 blocks (the coinbase block and 9 interim blocks) +fn multiple_nodes() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.node.local_peer_seed = vec![1, 1, 1, 1]; + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let tenure_count = 15; + let inter_blocks_per_tenure = 6; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + let mut conf_node_2 = naka_conf.clone(); + set_random_binds(&mut conf_node_2); + conf_node_2.node.seed = vec![2, 2, 2, 2]; + conf_node_2.burnchain.local_mining_public_key = Some( + Keychain::default(conf_node_2.node.seed.clone()) + .get_pub_key() + .to_hex(), + ); + conf_node_2.node.local_peer_seed = vec![2, 2, 2, 2]; + conf_node_2.node.miner = true; + + let node_1_sk = Secp256k1PrivateKey::from_seed(&naka_conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), naka_conf.node.p2p_bind), + naka_conf.burnchain.chain_id, + naka_conf.burnchain.peer_version, + ); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain_to_pks( + 201, + &[ + Secp256k1PublicKey::from_hex( + naka_conf + .burnchain + .local_mining_public_key + .as_ref() + .unwrap(), + ) + .unwrap(), + Secp256k1PublicKey::from_hex( + conf_node_2 + .burnchain + .local_mining_public_key + .as_ref() + .unwrap(), + ) + .unwrap(), + ], + ); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let run_loop_2_stopper = run_loop.get_termination_switch(); + let Counters { + naka_proposed_blocks: proposals_submitted_2, + .. + } = run_loop_2.counters(); + + let coord_channel = run_loop.coordinator_channels(); + let coord_channel_2 = run_loop_2.coordinator_channels(); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer_multinode( + &signers, + &[&naka_conf, &conf_node_2], + vec![proposals_submitted, proposals_submitted_2], + ); + + info!("Neighbors 1"; "neighbors" => ?get_neighbors(&naka_conf)); + info!("Neighbors 2"; "neighbors" => ?get_neighbors(&conf_node_2)); + + // Wait one block to confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + info!("Mining tenure {}", tenure_ix); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let mut last_tip = BlockHeaderHash([0x00; 32]); + let mut last_tip_height = 0; + + // mine the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra block + let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, last_tip); + assert_ne!(info.stacks_tip_height, last_tip_height); + + last_tip = info.stacks_tip; + last_tip_height = info.stacks_tip_height; + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + info!("Peer 1 information"; "chain_info" => ?get_chain_info(&naka_conf).stacks_tip_height); + info!("Peer 2 information"; "chain_info" => ?get_chain_info(&conf_node_2).stacks_tip_height); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert_eq!( + tip.stacks_block_height, + block_height_pre_3_0 + ((inter_blocks_per_tenure + 1) * tenure_count), + "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" + ); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + coord_channel_2 + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + run_loop_2_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + // run_loop_2_thread.join().unwrap(); +} + #[test] #[ignore] fn correct_burn_outs() { diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index e1f72ba2e3..ac6a3ea978 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1318,6 +1318,13 @@ pub fn get_account(http_origin: &str, account: &F) -> Acco get_account_result(http_origin, account).unwrap() } +pub fn get_neighbors(conf: &Config) -> Option { + let client = reqwest::blocking::Client::new(); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + let path = format!("{}/v2/neighbors", http_origin); + client.get(&path).send().ok()?.json().ok() +} + pub fn get_pox_info(http_origin: &str) -> Option { let client = reqwest::blocking::Client::new(); let path = format!("{}/v2/pox", http_origin); From 75baf497a856c2e1c1831c76e71a719111c45277 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 12 Jul 2024 10:12:02 -0400 Subject: [PATCH 0572/1400] feat: support mock mining in epoch 3.0 --- testnet/stacks-node/src/nakamoto_node/miner.rs | 4 ++++ testnet/stacks-node/src/nakamoto_node/relayer.rs | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index aaeb931a1a..70a6f7b3e3 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -411,6 +411,10 @@ impl BlockMinerThread { }, )?; + if self.config.get_node_config(false).mock_mining { + return Ok((reward_set, Vec::new())); + } + *attempts += 1; let signature = coordinator.begin_sign_v0( new_block, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 12f7dbc9e9..0d8567a95d 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -412,7 +412,7 @@ impl RelayerThread { } let directive = if sn.sortition { - if won_sortition { + if won_sortition || self.config.get_node_config(false).mock_mining { MinerDirective::BeginTenure { parent_tenure_start: committed_index_hash, burnchain_tip: sn, From 23876fb3d8d5ab11c4f0eaad1e7f1254edbd2c76 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 29 Jul 2024 18:50:03 +0300 Subject: [PATCH 0573/1400] move stacks block validate response to info logs from debug --- stacks-signer/src/v0/signer.rs | 6 +++--- stacks-signer/src/v1/signer.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 72c8cc4dfe..b38467d454 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -251,7 +251,7 @@ impl Signer { block_proposal: &BlockProposal, miner_pubkey: &Secp256k1PublicKey, ) { - debug!("{self}: Received a block proposal: {block_proposal:?}"); + info!("{self}: Received a block proposal: {block_proposal:?}"); if block_proposal.reward_cycle != self.reward_cycle { // We are not signing for this reward cycle. Ignore the block. debug!( @@ -287,7 +287,7 @@ impl Signer { return; } - debug!( + info!( "{self}: received a block proposal for a new block. Submit block for validation. "; "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), @@ -388,7 +388,7 @@ impl Signer { /// Handle the block validate response returned from our prior calls to submit a block for validation fn handle_block_validate_response(&mut self, block_validate_response: &BlockValidateResponse) { - debug!("{self}: Received a block validate response: {block_validate_response:?}"); + info!("{self}: Received a block validate response: {block_validate_response:?}"); let (response, block_info) = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { crate::monitoring::increment_block_validation_responses(true); diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index bd4b36f489..18e31946c0 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -201,7 +201,7 @@ impl SignerTrait for Signer { }; match event { SignerEvent::BlockValidationResponse(block_validate_response) => { - debug!("{self}: Received a block proposal result from the stacks node..."); + info!("{self}: Received a block proposal result from the stacks node..."); self.handle_block_validate_response( stacks_client, block_validate_response, @@ -703,7 +703,7 @@ impl Signer { }; self.handle_packets(stacks_client, res, &[packet], current_reward_cycle); } - debug!( + info!( "{self}: Received a block validate response"; "block_hash" => block_info.block.header.block_hash(), "valid" => block_info.valid, @@ -1130,7 +1130,7 @@ impl Signer { match operation_result { OperationResult::Sign(signature) => { crate::monitoring::increment_operation_results("sign"); - debug!("{self}: Received signature result"); + info!("{self}: Received signature result"); self.process_signature(signature); } OperationResult::SignTaproot(_) => { From 5ee315224aa3ec1a5b64313eb986be7a99629f69 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 29 Jul 2024 11:29:57 -0500 Subject: [PATCH 0574/1400] chore: fix unit test, add integration test to github workflow --- .github/workflows/bitcoin-tests.yml | 1 + stackslib/src/chainstate/nakamoto/mod.rs | 18 ++++++++++++-- .../src/chainstate/nakamoto/tests/mod.rs | 5 ++-- .../src/tests/nakamoto_integrations.rs | 24 +++++++++---------- 4 files changed, 30 insertions(+), 18 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a5604efd7d..b1e81a7112 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -97,6 +97,7 @@ jobs: - tests::nakamoto_integrations::check_block_info - tests::nakamoto_integrations::check_block_info_rewards - tests::nakamoto_integrations::continue_tenure_extend + - tests::nakamoto_integrations::multiple_miners # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index b65e9ff086..09794c4775 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -455,6 +455,8 @@ impl MaturedMinerPaymentSchedules { } } +/// Struct containing information about the miners assigned in the +/// .miners stackerdb config pub struct MinersDBInformation { signer_0_sortition: ConsensusHash, signer_1_sortition: ConsensusHash, @@ -462,6 +464,8 @@ pub struct MinersDBInformation { } impl MinersDBInformation { + /// What index in the `.miners` stackerdb is the miner who won + /// `sortition`? pub fn get_signer_index(&self, sortition: &ConsensusHash) -> Option { if sortition == &self.signer_0_sortition { Some(0) @@ -472,6 +476,12 @@ impl MinersDBInformation { } } + /// Get all of the sortitions whose winners are included in .miners + pub fn get_sortitions(&self) -> [&ConsensusHash; 2] { + [&self.signer_0_sortition, &self.signer_1_sortition] + } + + /// Get the index of the latest sortition winner in .miners pub fn get_latest_winner_index(&self) -> u16 { self.latest_winner } @@ -4160,14 +4170,18 @@ impl NakamotoChainState { .map(usize::from) else { warn!("Miner is not in the miners StackerDB config"; - "stackerdb_slots" => format!("{:?}", &stackerdb_config.signers)); + "stackerdb_slots" => ?stackerdb_config.signers, + "queried_sortition" => %election_sortition, + "sortition_hashes" => ?miners_info.get_sortitions()); return Ok(None); }; let mut signer_ranges = stackerdb_config.signer_ranges(); if signer_ix >= signer_ranges.len() { // should be unreachable, but always good to be careful warn!("Miner is not in the miners StackerDB config"; - "stackerdb_slots" => format!("{:?}", &stackerdb_config.signers)); + "stackerdb_slots" => ?stackerdb_config.signers, + "queried_sortition" => %election_sortition, + "sortition_hashes" => ?miners_info.get_sortitions()); return Ok(None); } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index ef38ec76c6..722cfa541a 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -2080,9 +2080,8 @@ fn test_make_miners_stackerdb_config() { let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let miner_privkey = &miner_keys[i]; let miner_pubkey = StacksPublicKey::from_private(miner_privkey); - let slot_id = - NakamotoChainState::get_miner_slot(&sort_db, &tip, &block.header.consensus_hash) - .expect("Failed to get miner slot"); + let slot_id = NakamotoChainState::get_miner_slot(&sort_db, &tip, &tip.consensus_hash) + .expect("Failed to get miner slot"); if sortition { let slot_id = slot_id.expect("No miner slot exists for this miner").start; let slot_version = stackerdbs diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index ba80f64c6c..9f38e23cfc 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1571,14 +1571,15 @@ fn mine_multiple_per_tenure_integration() { #[test] #[ignore] -/// This test spins up a nakamoto-neon node. +/// This test spins up two nakamoto nodes, both configured to mine. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches /// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop /// struct handles the epoch-2/3 tear-down and spin-up. /// This test makes three assertions: -/// * 5 tenures are mined after 3.0 starts -/// * Each tenure has 10 blocks (the coinbase block and 9 interim blocks) -fn multiple_nodes() { +/// * 15 tenures are mined after 3.0 starts +/// * Each tenure has 6 blocks (the coinbase block and 5 interim blocks) +/// * Both nodes see the same chainstate at the end of the test +fn multiple_miners() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -1683,7 +1684,7 @@ fn multiple_nodes() { let coord_channel = run_loop.coordinator_channels(); let coord_channel_2 = run_loop_2.coordinator_channels(); - let run_loop_2_thread = thread::Builder::new() + let _run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); @@ -1733,11 +1734,7 @@ fn multiple_nodes() { info!("Neighbors 2"; "neighbors" => ?get_neighbors(&conf_node_2)); // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { @@ -1799,8 +1796,10 @@ fn multiple_nodes() { "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), ); - info!("Peer 1 information"; "chain_info" => ?get_chain_info(&naka_conf).stacks_tip_height); - info!("Peer 2 information"; "chain_info" => ?get_chain_info(&conf_node_2).stacks_tip_height); + let peer_1_height = get_chain_info(&naka_conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height); + assert_eq!(peer_1_height, peer_2_height); assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); assert_eq!( @@ -1821,7 +1820,6 @@ fn multiple_nodes() { run_loop_2_stopper.store(false, Ordering::SeqCst); run_loop_thread.join().unwrap(); - // run_loop_2_thread.join().unwrap(); } #[test] From 53855fecdd7f91c28199a47858571dd32fc18259 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 29 Jul 2024 12:52:51 -0500 Subject: [PATCH 0575/1400] chore: cleanup commented out code in stackerdb::sync --- stackslib/src/net/stackerdb/sync.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index f574efd5fb..53a1f67c46 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -1039,16 +1039,13 @@ impl StackerDBSync { } let chunk_push = self.chunk_push_priorities[cur_priority].0.clone(); + // try the first neighbor in the chunk_push_priorities list let selected_neighbor_opt = self.chunk_push_priorities[cur_priority] .1 - .iter() - .enumerate() - // .find(|(_i, naddr)| !self.comms.has_inflight(naddr)); - .find(|(_i, _naddr)| true); + .first() + .map(|neighbor| (0, neighbor)); - let (idx, selected_neighbor) = if let Some(x) = selected_neighbor_opt { - x - } else { + let Some((idx, selected_neighbor)) = selected_neighbor_opt else { debug!("{:?}: pushchunks_begin: no available neighbor to send StackerDBChunk(db={},id={},ver={}) to", &network.get_local_peer(), &self.smart_contract_id, From ebb23943e6436a47432da2a44564d66a8b1ac2ab Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 29 Jul 2024 11:08:25 -0700 Subject: [PATCH 0576/1400] Update to print full version string --- stacks-signer/Cargo.toml | 1 + stacks-signer/src/cli.rs | 26 +++++++++++++++++++++++++- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 1d1af6da78..b4f77abca1 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -25,6 +25,7 @@ clarity = { path = "../clarity" } clap = { version = "4.1.1", features = ["derive", "env"] } hashbrown = { workspace = true } lazy_static = "1.4.0" +once_cell = "1.8.0" libsigner = { path = "../libsigner" } libstackerdb = { path = "../libstackerdb" } prometheus = { version = "0.9", optional = true } diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index ecb1c24741..921cb8cc32 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -37,12 +37,36 @@ use stacks_common::address::{ }; use stacks_common::define_u8_enum; use stacks_common::types::chainstate::StacksPrivateKey; +use once_cell::sync::Lazy; extern crate alloc; +const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH"); +const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT"); +#[cfg(debug_assertions)] +const BUILD_TYPE: &'static str = "debug"; +#[cfg(not(debug_assertions))] +const BUILD_TYPE: &'static str = "release"; + + +static VERSION_STRING: Lazy = Lazy::new(|| { + let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); + let git_branch = GIT_BRANCH.unwrap_or(""); + let git_commit = GIT_COMMIT.unwrap_or(""); + format!( + "{} ({}:{}, {} build, {} [{}])", + pkg_version, + git_branch, + git_commit, + BUILD_TYPE, + std::env::consts::OS, + std::env::consts::ARCH + ) +}); + #[derive(Parser, Debug)] #[command(author, version, about)] -#[command(long_version = option_env!("SIGNER_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")))] +#[command(long_version = VERSION_STRING.as_str())] /// The CLI arguments for the stacks signer pub struct Cli { From 2cc9ff3cb98a04aa4af72c0a0bc8ecfa89eac0e8 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 29 Jul 2024 11:14:04 -0700 Subject: [PATCH 0577/1400] fix formatting --- stacks-signer/src/cli.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 921cb8cc32..971e5ba8d7 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -29,6 +29,7 @@ use clarity::util::hash::Sha256Sum; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; use clarity::vm::Value; +use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; use stacks_common::address::{ b58, AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, @@ -37,7 +38,6 @@ use stacks_common::address::{ }; use stacks_common::define_u8_enum; use stacks_common::types::chainstate::StacksPrivateKey; -use once_cell::sync::Lazy; extern crate alloc; @@ -48,11 +48,10 @@ const BUILD_TYPE: &'static str = "debug"; #[cfg(not(debug_assertions))] const BUILD_TYPE: &'static str = "release"; - static VERSION_STRING: Lazy = Lazy::new(|| { let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); let git_branch = GIT_BRANCH.unwrap_or(""); - let git_commit = GIT_COMMIT.unwrap_or(""); + let git_commit = GIT_COMMIT.unwrap_or(""); format!( "{} ({}:{}, {} build, {} [{}])", pkg_version, From 8b3ee439e12ea3acdc1027efb85577e297344e71 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 30 Jul 2024 01:06:50 +0300 Subject: [PATCH 0578/1400] move static from `once_cell` to `lazy_static` --- stacks-signer/Cargo.toml | 1 - stacks-signer/src/cli.rs | 32 +++++++++++++++++--------------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index b4f77abca1..1d1af6da78 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -25,7 +25,6 @@ clarity = { path = "../clarity" } clap = { version = "4.1.1", features = ["derive", "env"] } hashbrown = { workspace = true } lazy_static = "1.4.0" -once_cell = "1.8.0" libsigner = { path = "../libsigner" } libstackerdb = { path = "../libstackerdb" } prometheus = { version = "0.9", optional = true } diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 971e5ba8d7..d3e998e15c 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -29,7 +29,7 @@ use clarity::util::hash::Sha256Sum; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; use clarity::vm::Value; -use once_cell::sync::Lazy; +use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use stacks_common::address::{ b58, AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, @@ -48,20 +48,22 @@ const BUILD_TYPE: &'static str = "debug"; #[cfg(not(debug_assertions))] const BUILD_TYPE: &'static str = "release"; -static VERSION_STRING: Lazy = Lazy::new(|| { - let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); - let git_branch = GIT_BRANCH.unwrap_or(""); - let git_commit = GIT_COMMIT.unwrap_or(""); - format!( - "{} ({}:{}, {} build, {} [{}])", - pkg_version, - git_branch, - git_commit, - BUILD_TYPE, - std::env::consts::OS, - std::env::consts::ARCH - ) -}); +lazy_static! { + static ref VERSION_STRING: String = { + let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); + let git_branch = GIT_BRANCH.unwrap_or(""); + let git_commit = GIT_COMMIT.unwrap_or(""); + format!( + "{} ({}:{}, {} build, {} [{}])", + pkg_version, + git_branch, + git_commit, + BUILD_TYPE, + std::env::consts::OS, + std::env::consts::ARCH + ) + }; +} #[derive(Parser, Debug)] #[command(author, version, about)] From c36af7316d16204265ba157ae0ed201a755f8ac6 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 29 Jul 2024 19:22:26 -0400 Subject: [PATCH 0579/1400] WIP: integration test for mock signing Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 2 +- stacks-signer/src/v0/signer.rs | 19 +- .../src/tests/nakamoto_integrations.rs | 55 ++- testnet/stacks-node/src/tests/signer/mod.rs | 3 +- testnet/stacks-node/src/tests/signer/v0.rs | 370 +++++++++++++++++- 5 files changed, 427 insertions(+), 22 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 76dee99ded..d4311f8aa0 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -238,7 +238,7 @@ pub struct MockSignature { /// The signature across the stacks consensus hash signature: MessageSignature, /// The block hash that the signature is across - stacks_consensus_hash: ConsensusHash, + pub stacks_consensus_hash: ConsensusHash, } impl MockSignature { diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 0ab444b78f..cb3100674c 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -86,7 +86,7 @@ impl SignerTrait for Signer { sortition_state: &mut Option, event: Option<&SignerEvent>, _res: Sender>, - _current_reward_cycle: u64, + current_reward_cycle: u64, ) { let event_parity = match event { // Block proposal events do have reward cycles, but each proposal has its own cycle, @@ -155,7 +155,7 @@ impl SignerTrait for Signer { burn_header_hash, received_time, } => { - debug!("{self}: Receved a new burn block event for block height {burn_height}"); + debug!("{self}: Received a new burn block event for block height {burn_height}"); if let Err(e) = self.signer_db .insert_burn_block(burn_header_hash, *burn_height, received_time) @@ -168,9 +168,12 @@ impl SignerTrait for Signer { ); } *sortition_state = None; - if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { - // We are in epoch 25, so we should mock mine to prove we are still alive. - self.mock_mine(stacks_client); + if self.reward_cycle == current_reward_cycle { + if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { + // We are in epoch 2.5, so we should mock mine to prove we are still alive. + debug!("Mock signing for burn block {burn_height:?}"); + self.mock_sign(stacks_client); + } }; } } @@ -470,13 +473,15 @@ impl Signer { } /// Send a mock signature to stackerdb to prove we are still alive - fn mock_mine(&mut self, stacks_client: &StacksClient) { + fn mock_sign(&mut self, stacks_client: &StacksClient) { let Ok(peer_info) = stacks_client.get_peer_info() else { warn!("{self}: Failed to get peer info. Cannot mock mine."); return; }; + let consensus_hash = peer_info.stacks_tip_consensus_hash; + debug!("Mock signing using stacks tip {consensus_hash:?}"); let mock_signature = - MockSignature::new(peer_info.stacks_tip_consensus_hash, &self.private_key); + MockSignature::new(consensus_hash, &self.private_key); let message = SignerMessage::MockSignature(mock_signature); if let Err(e) = self .stackerdb diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 492646a84d..92ee54ab61 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -110,7 +110,7 @@ use crate::tests::{ use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; -static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; +pub static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; lazy_static! { pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ @@ -166,13 +166,13 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch25, start_height: 201, - end_height: 231, + end_height: 251, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_2_5 }, StacksEpoch { epoch_id: StacksEpochId::Epoch30, - start_height: 231, + start_height: 251, end_height: STACKS_EPOCH_MAX, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 @@ -621,9 +621,9 @@ pub fn boot_to_epoch_3( let epochs = naka_conf.burnchain.epochs.clone().unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - + let current_height = btc_regtest_controller.get_headers_height(); info!( - "Chain bootstrapped to bitcoin block 201, starting Epoch 2x miner"; + "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); @@ -995,6 +995,47 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( info!("Bootstrapped to Epoch 3.0 reward set calculation boundary height: {epoch_3_reward_set_calculation_boundary}."); } +/// +/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order +/// for pox-4 to activate +/// * `signer_pks` - must be the same size as `stacker_sks` +pub fn boot_to_epoch_25( + naka_conf: &Config, + blocks_processed: &Arc, + btc_regtest_controller: &mut BitcoinRegtestController, +) { + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_25 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap()]; + let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; + let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; + + let epoch_25_start_height = epoch_25.start_height; + assert!( + epoch_25_start_height > 0, + "Epoch 2.5 start height must be greater than 0" + ); + // stack enough to activate pox-4 + let block_height = btc_regtest_controller.get_headers_height(); + let reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + debug!("Test Cycle Info"; + "prepare_phase_len" => {prepare_phase_len}, + "reward_cycle_len" => {reward_cycle_len}, + "block_height" => {block_height}, + "reward_cycle" => {reward_cycle}, + "epoch_25_start_height" => {epoch_25_start_height}, + ); + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + epoch_25_start_height, + &naka_conf, + ); + info!("Bootstrapped to Epoch 2.5: {epoch_25_start_height}."); +} + /// /// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order /// for pox-4 to activate @@ -1517,9 +1558,9 @@ fn correct_burn_outs() { let epochs = naka_conf.burnchain.epochs.clone().unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; let epoch_25 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap()]; - + let current_height = btc_regtest_controller.get_headers_height(); info!( - "Chain bootstrapped to bitcoin block 201, starting Epoch 2x miner"; + "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 12584ab89a..bad0b499ea 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -604,7 +604,8 @@ fn setup_stx_btc_node( let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); info!("Bootstraping..."); - btc_regtest_controller.bootstrap_chain(201); + // Bootstrap the chain to BEFORE epoch 2.5 to enable mock mining of blocks in Epoch 2.5 tests + btc_regtest_controller.bootstrap_chain(195); info!("Chain bootstrapped..."); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 75856593bb..cb0c73265f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -18,20 +18,27 @@ use std::time::{Duration, Instant}; use std::{env, thread}; use clarity::vm::types::PrincipalData; +use clarity::vm::StacksEpoch; use libsigner::v0::messages::{ BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use stacks::address::AddressHashMode; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::codec::StacksMessageCodec; +use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; -use stacks::types::chainstate::{StacksAddress, StacksPrivateKey}; +use stacks::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; +use stacks::util_lib::signed_structured_data::pox4::{ + make_pox_4_signer_key_signature, Pox4SignatureTopic, +}; use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; @@ -44,14 +51,185 @@ use super::SignerTest; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; -use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; +use crate::tests::nakamoto_integrations::{ + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, POX_4_DEFAULT_STACKER_STX_AMT, +}; use crate::tests::neon_integrations::{ - get_chain_info, next_block_and_wait, submit_tx, test_observer, + get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, }; use crate::tests::{self, make_stacks_transfer}; use crate::{nakamoto_node, BurnchainController}; impl SignerTest { + /// Run the test until the first epoch 2.5 reward cycle. + /// Will activate pox-4 and register signers for the first full Epoch 2.5 reward cycle. + fn boot_to_epoch_25_reward_cycle(&mut self) { + boot_to_epoch_25( + &self.running_nodes.conf, + &self.running_nodes.blocks_processed, + &mut self.running_nodes.btc_regtest_controller, + ); + + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + + let http_origin = format!("http://{}", &self.running_nodes.conf.node.rpc_bind); + let lock_period = 12; + + let epochs = self.running_nodes.conf.burnchain.epochs.clone().unwrap(); + let epoch_25 = + &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap()]; + let epoch_25_start_height = epoch_25.start_height; + // stack enough to activate pox-4 + let block_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let reward_cycle = self + .running_nodes + .btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + for stacker_sk in self.signer_stacks_private_keys.iter() { + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(&stacker_sk).bytes, + ); + let pox_addr_tuple: clarity::vm::Value = + pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &stacker_sk, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + lock_period, + u128::MAX, + 1, + ) + .unwrap() + .to_rsv(); + + let signer_pk = StacksPublicKey::from_private(stacker_sk); + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(block_height as u128), + clarity::vm::Value::UInt(lock_period), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) + .unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + } + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + + let reward_cycle_len = self + .running_nodes + .conf + .get_burnchain() + .pox_constants + .reward_cycle_length as u64; + let prepare_phase_len = self + .running_nodes + .conf + .get_burnchain() + .pox_constants + .prepare_length as u64; + + let epoch_25_reward_cycle_boundary = + epoch_25_start_height.saturating_sub(epoch_25_start_height % reward_cycle_len); + let epoch_25_reward_set_calculation_boundary = epoch_25_reward_cycle_boundary + .saturating_sub(prepare_phase_len) + .wrapping_add(reward_cycle_len) + .wrapping_add(1); + + let next_reward_cycle_boundary = + epoch_25_reward_cycle_boundary.wrapping_add(reward_cycle_len); + run_until_burnchain_height( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + epoch_25_reward_set_calculation_boundary, + &self.running_nodes.conf, + ); + debug!("Waiting for signer set calculation."); + let mut reward_set_calculated = false; + let short_timeout = Duration::from_secs(30); + let now = std::time::Instant::now(); + // Make sure the signer set is calculated before continuing or signers may not + // recognize that they are registered signers in the subsequent burn block event + let reward_cycle = self.get_current_reward_cycle().wrapping_add(1); + while !reward_set_calculated { + let reward_set = self + .stacks_client + .get_reward_set_signers(reward_cycle) + .expect("Failed to check if reward set is calculated"); + reward_set_calculated = reward_set.is_some(); + if reward_set_calculated { + debug!("Signer set: {:?}", reward_set.unwrap()); + } + std::thread::sleep(Duration::from_secs(1)); + assert!( + now.elapsed() < short_timeout, + "Timed out waiting for reward set calculation" + ); + } + debug!("Signer set calculated"); + // Manually consume one more block to ensure signers refresh their state + debug!("Waiting for signers to initialize."); + next_block_and_wait( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + ); + let now = std::time::Instant::now(); + loop { + self.send_status_request(); + let states = self.wait_for_states(short_timeout); + if states + .iter() + .all(|state_info| state_info.runloop_state == State::RegisteredSigners) + { + break; + } + assert!( + now.elapsed() < short_timeout, + "Timed out waiting for signers to be registered" + ); + std::thread::sleep(Duration::from_secs(1)); + } + debug!("Signers initialized"); + + info!("Advancing to the first full Epoch 2.5 reward cycle boundary..."); + run_until_burnchain_height( + &mut self.running_nodes.btc_regtest_controller, + &self.running_nodes.blocks_processed, + next_reward_cycle_boundary, + &self.running_nodes.conf, + ); + + info!("Ready to mine the first Epoch 2.5 reward cycle!"); + } + /// Run the test until the epoch 3 boundary fn boot_to_epoch_3(&mut self) { boot_to_epoch_3_reward_set( @@ -1087,13 +1265,193 @@ fn mock_mine_epoch_25() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], Some(Duration::from_secs(5)), ); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - todo!("BOOT TO EPOCH 2.5 AND VERIFY WE RECEIVE A MOCK SIGNATURE PER SORTITION"); + let epochs = signer_test + .running_nodes + .conf + .burnchain + .epochs + .clone() + .unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_start_height = epoch_3.start_height; + + signer_test.boot_to_epoch_25_reward_cycle(); + + info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); + + // Mine until epoch 3.0 and ensure that no more mock signatures are received + + let mut reward_cycle = signer_test.get_current_reward_cycle(); + let mut stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index. + ); + let mut signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + + // Give the peer info some time to update + let poll_time = Instant::now(); + while signer_test + .stacks_client + .get_peer_info() + .unwrap() + .burn_block_height + + 1 + < signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height() + { + std::thread::sleep(Duration::from_secs(1)); + assert!( + poll_time.elapsed() <= Duration::from_secs(15), + "Timed out waiting for peer info to update" + ); + } + // Mine until epoch 3.0 and ensure we get a new mock signature per epoch 2.5 sortition + let main_poll_time = Instant::now(); + while signer_test + .stacks_client + .get_peer_info() + .unwrap() + .burn_block_height + + 1 + < epoch_3_start_height + { + let old_consensus_hash = signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_consensus_hash; + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + let peer_poll_time = Instant::now(); + while signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_consensus_hash + == old_consensus_hash + { + std::thread::sleep(Duration::from_millis(100)); + assert!( + peer_poll_time.elapsed() < Duration::from_secs(5), + "Timed out waiting for peer info to update" + ); + } + let expected_consensus_hash = signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_consensus_hash; + let mut mock_signatures = vec![]; + let mock_poll_time = Instant::now(); + while mock_signatures.len() != num_signers { + std::thread::sleep(Duration::from_millis(100)); + let messages: Vec = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::MockSignature) + .expect("Failed to get BlockResponse stackerdb session"), + &signer_slot_ids, + ) + .expect("Failed to get message from stackerdb"); + for message in messages { + if let SignerMessage::MockSignature(mock_signature) = message { + debug!("MOCK SIGNATURE: {:?}", mock_signature); + if mock_signature.stacks_consensus_hash == expected_consensus_hash { + mock_signatures.push(mock_signature); + } + } + } + assert!( + mock_poll_time.elapsed() <= Duration::from_secs(15), + "Failed to find mock signatures within timeout" + ); + } + let current_reward_cycle = signer_test.get_current_reward_cycle(); + if current_reward_cycle != reward_cycle { + debug!("Rolling over reward cycle to {:?}", current_reward_cycle); + reward_cycle = current_reward_cycle; + stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index. + ); + signer_slot_ids = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + } + assert!( + main_poll_time.elapsed() <= Duration::from_secs(45), + "Timed out waiting to advance epoch 3.0" + ); + } + + info!("------------------------- Test Processing Epoch 3.0 Tenure -------------------------"); + let old_messages: Vec = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::MockSignature) + .expect("Failed to get BlockResponse stackerdb session"), + &signer_slot_ids, + ) + .expect("Failed to get message from stackerdb"); + let old_signatures = old_messages + .iter() + .filter_map(|message| { + if let SignerMessage::MockSignature(mock_signature) = message { + Some(mock_signature) + } else { + None + } + }) + .collect::>(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + // Wait a bit to ensure no new mock signatures show up + std::thread::sleep(Duration::from_secs(5)); + let new_messages: Vec = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::MockSignature) + .expect("Failed to get BlockResponse stackerdb session"), + &signer_slot_ids, + ) + .expect("Failed to get message from stackerdb"); + let new_signatures = new_messages + .iter() + .filter_map(|message| { + if let SignerMessage::MockSignature(mock_signature) = message { + Some(mock_signature) + } else { + None + } + }) + .collect::>(); + assert_eq!(old_signatures, new_signatures); } From 397318c6a832a33e2631c2c3d132509a103d3a25 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 29 Jul 2024 21:34:14 -0400 Subject: [PATCH 0580/1400] WIP: fix if check order in mock sign scenario Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index cb3100674c..71caf8bf51 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -168,8 +168,8 @@ impl SignerTrait for Signer { ); } *sortition_state = None; - if self.reward_cycle == current_reward_cycle { - if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { + if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { + if self.reward_cycle == current_reward_cycle { // We are in epoch 2.5, so we should mock mine to prove we are still alive. debug!("Mock signing for burn block {burn_height:?}"); self.mock_sign(stacks_client); @@ -480,8 +480,7 @@ impl Signer { }; let consensus_hash = peer_info.stacks_tip_consensus_hash; debug!("Mock signing using stacks tip {consensus_hash:?}"); - let mock_signature = - MockSignature::new(consensus_hash, &self.private_key); + let mock_signature = MockSignature::new(consensus_hash, &self.private_key); let message = SignerMessage::MockSignature(mock_signature); if let Err(e) = self .stackerdb From df7e9e165718e103766d0c0d3515375356bc04cd Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 30 Jul 2024 17:35:11 +0200 Subject: [PATCH 0581/1400] fix: update max_tx_sizes for regtest ops --- .../burnchains/bitcoin_regtest_controller.rs | 20 ++++++++++++------- testnet/stacks-node/src/config.rs | 14 +++++++++---- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index d4aa528fbe..8e1fafcdd3 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -57,7 +57,11 @@ use stacks_common::util::sleep_ms; use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; -use crate::config::BurnchainConfig; +use crate::config::{ + BurnchainConfig, OP_TX_BLOCK_COMMIT_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, + OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, + OP_TX_VOTE_AGG_ESTIM_SIZE, +}; /// The number of bitcoin blocks that can have /// passed since the UTXO cache was last refreshed before @@ -950,7 +954,7 @@ impl BitcoinRegtestController { utxo_to_use: Option, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 230; + let max_tx_size = OP_TX_TRANSFER_STACKS_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( Transaction { @@ -1032,7 +1036,7 @@ impl BitcoinRegtestController { utxo_to_use: Option, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 230; + let max_tx_size = OP_TX_DELEGATE_STACKS_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( @@ -1110,7 +1114,7 @@ impl BitcoinRegtestController { utxo_to_use: Option, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 230; + let max_tx_size = OP_TX_VOTE_AGG_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( @@ -1204,9 +1208,11 @@ impl BitcoinRegtestController { signer: &mut BurnchainOpSigner, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 280; + let max_tx_size = OP_TX_PRE_STACKS_ESTIM_SIZE; + + let max_tx_size_any_op = 380; + let output_amt = DUST_UTXO_LIMIT + max_tx_size_any_op * get_satoshis_per_byte(&self.config); - let output_amt = DUST_UTXO_LIMIT + max_tx_size * get_satoshis_per_byte(&self.config); let (mut tx, mut utxos) = self.prepare_tx(epoch_id, &public_key, output_amt, None, None, 0)?; @@ -1271,7 +1277,7 @@ impl BitcoinRegtestController { utxo_to_use: Option, ) -> Option { let public_key = signer.get_public_key(); - let max_tx_size = 250; + let max_tx_size = OP_TX_STACK_STX_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { ( diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f52c5d6ba9..2bfef69e90 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -49,10 +49,16 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use crate::chain_data::MinerStats; pub const DEFAULT_SATS_PER_VB: u64 = 50; +pub const OP_TX_LEADER_KEY_ESTIM_SIZE: u64 = 290; +pub const OP_TX_BLOCK_COMMIT_ESTIM_SIZE: u64 = 350; +pub const OP_TX_TRANSFER_STACKS_ESTIM_SIZE: u64 = 230; +pub const OP_TX_DELEGATE_STACKS_ESTIM_SIZE: u64 = 230; +pub const OP_TX_VOTE_AGG_ESTIM_SIZE: u64 = 230; +pub const OP_TX_PRE_STACKS_ESTIM_SIZE: u64 = 280; +pub const OP_TX_STACK_STX_ESTIM_SIZE: u64 = 250; + const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; -const LEADER_KEY_TX_ESTIM_SIZE: u64 = 290; -const BLOCK_COMMIT_TX_ESTIM_SIZE: u64 = 350; const INV_REWARD_CYCLES_TESTNET: u64 = 6; #[derive(Clone, Deserialize, Default, Debug)] @@ -1427,8 +1433,8 @@ impl BurnchainConfig { poll_time_secs: 10, // TODO: this is a testnet specific value. satoshis_per_byte: DEFAULT_SATS_PER_VB, max_rbf: DEFAULT_MAX_RBF_RATE, - leader_key_tx_estimated_size: LEADER_KEY_TX_ESTIM_SIZE, - block_commit_tx_estimated_size: BLOCK_COMMIT_TX_ESTIM_SIZE, + leader_key_tx_estimated_size: OP_TX_LEADER_KEY_ESTIM_SIZE, + block_commit_tx_estimated_size: OP_TX_BLOCK_COMMIT_ESTIM_SIZE, rbf_fee_increment: DEFAULT_RBF_FEE_RATE_INCREMENT, first_burn_block_height: None, first_burn_block_timestamp: None, From eb80166aa8e62af6f0f24f8395f24e0e8730723c Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 30 Jul 2024 17:47:24 +0200 Subject: [PATCH 0582/1400] refactor: update estimated tx size consts --- .../burnchains/bitcoin_regtest_controller.rs | 8 ++++---- testnet/stacks-node/src/config.rs | 18 ++++++++++++++---- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 8e1fafcdd3..7de1d09dce 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -58,9 +58,9 @@ use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; use crate::config::{ - BurnchainConfig, OP_TX_BLOCK_COMMIT_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, - OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, - OP_TX_VOTE_AGG_ESTIM_SIZE, + BurnchainConfig, OP_TX_ANY_ESTIM_SIZE, OP_TX_BLOCK_COMMIT_ESTIM_SIZE, + OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, + OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, }; /// The number of bitcoin blocks that can have @@ -1210,7 +1210,7 @@ impl BitcoinRegtestController { let public_key = signer.get_public_key(); let max_tx_size = OP_TX_PRE_STACKS_ESTIM_SIZE; - let max_tx_size_any_op = 380; + let max_tx_size_any_op = OP_TX_ANY_ESTIM_SIZE; let output_amt = DUST_UTXO_LIMIT + max_tx_size_any_op * get_satoshis_per_byte(&self.config); let (mut tx, mut utxos) = diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 2bfef69e90..4eef0bbdd0 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -49,13 +49,23 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use crate::chain_data::MinerStats; pub const DEFAULT_SATS_PER_VB: u64 = 50; -pub const OP_TX_LEADER_KEY_ESTIM_SIZE: u64 = 290; -pub const OP_TX_BLOCK_COMMIT_ESTIM_SIZE: u64 = 350; -pub const OP_TX_TRANSFER_STACKS_ESTIM_SIZE: u64 = 230; +pub const OP_TX_BLOCK_COMMIT_ESTIM_SIZE: u64 = 380; pub const OP_TX_DELEGATE_STACKS_ESTIM_SIZE: u64 = 230; -pub const OP_TX_VOTE_AGG_ESTIM_SIZE: u64 = 230; +pub const OP_TX_LEADER_KEY_ESTIM_SIZE: u64 = 290; pub const OP_TX_PRE_STACKS_ESTIM_SIZE: u64 = 280; pub const OP_TX_STACK_STX_ESTIM_SIZE: u64 = 250; +pub const OP_TX_TRANSFER_STACKS_ESTIM_SIZE: u64 = 230; +pub const OP_TX_VOTE_AGG_ESTIM_SIZE: u64 = 230; + +pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( + OP_TX_BLOCK_COMMIT_ESTIM_SIZE, + OP_TX_DELEGATE_STACKS_ESTIM_SIZE, + OP_TX_LEADER_KEY_ESTIM_SIZE, + OP_TX_PRE_STACKS_ESTIM_SIZE, + OP_TX_STACK_STX_ESTIM_SIZE, + OP_TX_TRANSFER_STACKS_ESTIM_SIZE, + OP_TX_VOTE_AGG_ESTIM_SIZE +); const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; From 0468efd7d63ac7e72af95e4ff88196bf6c4b2692 Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 30 Jul 2024 17:51:43 +0200 Subject: [PATCH 0583/1400] chore: remove unused import --- .../src/burnchains/bitcoin_regtest_controller.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 7de1d09dce..b09a71e5cf 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -58,9 +58,9 @@ use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; use crate::config::{ - BurnchainConfig, OP_TX_ANY_ESTIM_SIZE, OP_TX_BLOCK_COMMIT_ESTIM_SIZE, - OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, - OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, + BurnchainConfig, OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, + OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, + OP_TX_VOTE_AGG_ESTIM_SIZE, }; /// The number of bitcoin blocks that can have From fda2e863af93ec3bbb65ecae7b067d41875bda1d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 29 Jul 2024 15:56:20 -0500 Subject: [PATCH 0584/1400] test: multi-miner & multi-signer scenario * assert that both stacks-nodes have same chain height, and that they produced blocks in each bitcoin block of nakamoto * signers are distributed as event observers across 2 stacks-nodes --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/mod.rs | 116 +++++++++++++---- testnet/stacks-node/src/tests/signer/v0.rs | 137 +++++++++++++++++++- 3 files changed, 224 insertions(+), 30 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index b1e81a7112..2df168ee16 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -90,6 +90,7 @@ jobs: - tests::signer::v0::forked_tenure_okay - tests::signer::v0::forked_tenure_invalid - tests::signer::v0::bitcoind_forking_test + - tests::signer::v0::multiple_miners - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 12584ab89a..a481613864 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -44,7 +44,7 @@ use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::BlockValidateResponse; use stacks::types::chainstate::StacksAddress; -use stacks::util::secp256k1::MessageSignature; +use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; @@ -105,14 +105,26 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest, wait_on_signers: Option, ) -> Self { - Self::new_with_config_modifications(num_signers, initial_balances, wait_on_signers, |_| {}) + Self::new_with_config_modifications( + num_signers, + initial_balances, + wait_on_signers, + |_| {}, + |_| {}, + &[], + ) } - fn new_with_config_modifications ()>( + fn new_with_config_modifications< + F: FnMut(&mut SignerConfig) -> (), + G: FnMut(&mut NeonConfig) -> (), + >( num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>, wait_on_signers: Option, - modifier: F, + mut signer_config_modifier: F, + node_config_modifier: G, + btc_miner_pubkeys: &[Secp256k1PublicKey], ) -> Self { // Generate Signer Data let signer_stacks_private_keys = (0..num_signers) @@ -136,11 +148,10 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = build_signer_config_tomls( &signer_stacks_private_keys, &naka_conf.node.rpc_bind, Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. @@ -151,23 +162,45 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = (0..num_signers) - .into_iter() - .map(|i| { - info!("spawning signer"); - let mut signer_config = - SignerConfig::load_from_str(&signer_configs[i as usize]).unwrap(); - modifier(&mut signer_config); - SpawnedSigner::new(signer_config) - }) + ) + .into_iter() + .map(|toml| { + let mut signer_config = SignerConfig::load_from_str(&toml).unwrap(); + signer_config_modifier(&mut signer_config); + signer_config + }) + .collect(); + assert_eq!(signer_configs.len(), num_signers); + + let spawned_signers = signer_configs + .iter() + .cloned() + .map(SpawnedSigner::new) .collect(); // Setup the nodes and deploy the contract to it - let node = setup_stx_btc_node(naka_conf, &signer_stacks_private_keys, &signer_configs); - let config = SignerConfig::load_from_str(&signer_configs[0]).unwrap(); - let stacks_client = StacksClient::from(&config); + let btc_miner_pubkeys = if btc_miner_pubkeys.is_empty() { + let pk = Secp256k1PublicKey::from_hex( + naka_conf + .burnchain + .local_mining_public_key + .as_ref() + .unwrap(), + ) + .unwrap(); + &[pk] + } else { + btc_miner_pubkeys + }; + let node = setup_stx_btc_node( + naka_conf, + &signer_stacks_private_keys, + &signer_configs, + btc_miner_pubkeys, + node_config_modifier, + ); + let config = signer_configs.first().unwrap(); + let stacks_client = StacksClient::from(config); Self { running_nodes: node, @@ -294,6 +327,33 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest ()>( mut naka_conf: NeonConfig, signer_stacks_private_keys: &[StacksPrivateKey], - signer_config_tomls: &[String], + signer_configs: &[SignerConfig], + btc_miner_pubkeys: &[Secp256k1PublicKey], + mut node_config_modifier: G, ) -> RunningNodes { // Spawn the endpoints for observing signers - for toml in signer_config_tomls { - let signer_config = SignerConfig::load_from_str(toml).unwrap(); - + for signer_config in signer_configs { naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("{}", signer_config.endpoint), + endpoint: signer_config.endpoint.to_string(), events_keys: vec![ EventKeyType::StackerDBChunks, EventKeyType::BlockProposal, @@ -593,6 +653,8 @@ fn setup_stx_btc_node( } } } + node_config_modifier(&mut naka_conf); + info!("Make new BitcoinCoreController"); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -604,7 +666,7 @@ fn setup_stx_btc_node( let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); info!("Bootstraping..."); - btc_regtest_controller.bootstrap_chain(201); + btc_regtest_controller.bootstrap_chain_to_pks(201, btc_miner_pubkeys); info!("Chain bootstrapped..."); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2ef66eec52..9457a3d6b6 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -13,6 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::str::FromStr; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use std::{env, thread}; @@ -28,7 +29,7 @@ use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::codec::StacksMessageCodec; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; -use stacks::types::chainstate::{StacksAddress, StacksPrivateKey}; +use stacks::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; @@ -44,6 +45,7 @@ use super::SignerTest; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; +use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, submit_tx, test_observer, @@ -611,6 +613,8 @@ fn forked_tenure_testing( // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; }, + |_| {}, + &[], ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); @@ -799,11 +803,10 @@ fn bitcoind_forking_test() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], Some(Duration::from_secs(15)), - |_config| {}, ); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -936,6 +939,134 @@ fn bitcoind_forking_test() { signer_test.shutdown(); } +#[test] +#[ignore] +fn multiple_miners() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let mut node_2_listeners = Vec::new(); + + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let nakamoto_tenures = 20; + for _i in 0..nakamoto_tenures { + let _mined_block = signer_test.mine_block_wait_on_processing(Duration::from_secs(30)); + } + + info!( + "New chain info: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("New chain info: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_1_height, peer_2_height); + assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + nakamoto_tenures); + + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks the behavior at the end of a tenure. Specifically: From 568ada6ea870b31ea45f7f5913a763a1b18cfa80 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 30 Jul 2024 14:15:48 -0400 Subject: [PATCH 0585/1400] Use SIP-18 structured data for mock signature, add peer info to mock signature struct, and update integration test to use burn block height Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + libsigner/src/v0/messages.rs | 194 ++++++++++++++++++--- stacks-signer/src/v0/signer.rs | 28 ++- testnet/stacks-node/src/tests/signer/v0.rs | 120 +++++-------- 4 files changed, 230 insertions(+), 113 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index b1e81a7112..d301e227ce 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -90,6 +90,7 @@ jobs: - tests::signer::v0::forked_tenure_okay - tests::signer::v0::forked_tenure_invalid - tests::signer::v0::bitcoind_forking_test + - tests::signer::v0::mock_sign_epoch_25 - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index d4311f8aa0..f16dd6d4ed 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -34,16 +34,24 @@ use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; use blockstack_lib::chainstate::stacks::StacksTransaction; +use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, ValidateRejectCode, }; use blockstack_lib::util_lib::boot::boot_code_id; -use clarity::types::chainstate::{ConsensusHash, StacksPrivateKey, StacksPublicKey}; +use blockstack_lib::util_lib::signed_structured_data::{ + make_structured_data_domain, structured_data_message_hash, +}; +use clarity::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksPrivateKey, StacksPublicKey, +}; use clarity::types::PrivateKey; +use clarity::util::hash::Sha256Sum; use clarity::util::retry::BoundReader; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::serialization::SerializationError; -use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; +use clarity::vm::Value; use hashbrown::{HashMap, HashSet}; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha512_256}; @@ -231,25 +239,103 @@ pub trait StacksMessageCodecExtensions: Sized { fn inner_consensus_deserialize(fd: &mut R) -> Result; } -/// A signer's mock signature across its last seen Stacks Consensus Hash. This is only used -/// by Epoch 2.5 signers to simulate the signing of a block for every sortition. +/// A snapshot of the signer view of the stacks node to be used for mock signing. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MockSignData { + /// The stacks tip consensus hash at the time of the mock signature + pub stacks_tip_consensus_hash: ConsensusHash, + /// The stacks tip header hash at the time of the mock signature + pub stacks_tip: BlockHeaderHash, + /// The server version + pub server_version: String, + /// The burn block height that triggered the mock signature + pub burn_block_height: u64, + /// The burn block height of the peer view at the time of the mock signature. Note + /// that this may be different from the burn_block_height if the peer view is stale. + pub peer_burn_block_height: u64, + /// The POX consensus hash at the time of the mock signature + pub pox_consensus: ConsensusHash, + /// The chain id for the mock signature + pub chain_id: u32, +} + +impl MockSignData { + fn new(peer_view: RPCPeerInfoData, burn_block_height: u64, chain_id: u32) -> Self { + Self { + stacks_tip_consensus_hash: peer_view.stacks_tip_consensus_hash, + stacks_tip: peer_view.stacks_tip, + server_version: peer_view.server_version, + burn_block_height, + peer_burn_block_height: peer_view.burn_block_height, + pox_consensus: peer_view.pox_consensus, + chain_id, + } + } +} + +impl StacksMessageCodec for MockSignData { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, self.stacks_tip_consensus_hash.as_bytes())?; + write_next(fd, &self.stacks_tip)?; + write_next(fd, &(self.server_version.as_bytes().len() as u8))?; + fd.write_all(self.server_version.as_bytes()) + .map_err(CodecError::WriteError)?; + write_next(fd, &self.burn_block_height)?; + write_next(fd, &self.peer_burn_block_height)?; + write_next(fd, &self.pox_consensus)?; + write_next(fd, &self.chain_id)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let stacks_tip_consensus_hash = read_next::(fd)?; + let stacks_tip = read_next::(fd)?; + let len_byte: u8 = read_next(fd)?; + let mut bytes = vec![0u8; len_byte as usize]; + fd.read_exact(&mut bytes).map_err(CodecError::ReadError)?; + // must encode a valid string + let server_version = String::from_utf8(bytes).map_err(|_e| { + CodecError::DeserializeError( + "Failed to parse server version name: could not contruct from utf8".to_string(), + ) + })?; + let burn_block_height = read_next::(fd)?; + let peer_burn_block_height = read_next::(fd)?; + let pox_consensus = read_next::(fd)?; + let chain_id = read_next::(fd)?; + Ok(Self { + stacks_tip_consensus_hash, + stacks_tip, + server_version, + burn_block_height, + peer_burn_block_height, + pox_consensus, + chain_id, + }) + } +} + +/// A mock signature for the stacks node to be used for mock signing. +/// This is only used by Epoch 2.5 signers to simulate the signing of a block for every sortition. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct MockSignature { - /// The signature across the stacks consensus hash + /// The signature of the mock signature signature: MessageSignature, - /// The block hash that the signature is across - pub stacks_consensus_hash: ConsensusHash, + /// The data that was signed across + pub sign_data: MockSignData, } impl MockSignature { - /// Create a new mock signature with the provided stacks consensus hash and private key + /// Create a new mock sign data struct from the provided peer info, burn block height, chain id, and private key. pub fn new( - stacks_consensus_hash: ConsensusHash, + peer_view: RPCPeerInfoData, + burn_block_height: u64, + chain_id: u32, stacks_private_key: &StacksPrivateKey, ) -> Self { let mut sig = Self { signature: MessageSignature::empty(), - stacks_consensus_hash, + sign_data: MockSignData::new(peer_view, burn_block_height, chain_id), }; sig.sign(stacks_private_key) .expect("Failed to sign MockSignature"); @@ -257,16 +343,43 @@ impl MockSignature { } /// The signature hash for the mock signature - pub fn signature_hash(&self) -> Result { - let mut hasher = Sha512_256::new(); - let fd = &mut hasher; - write_next(fd, &self.stacks_consensus_hash)?; - Ok(Sha512Trunc256Sum::from_hasher(hasher)) + pub fn signature_hash(&self) -> Sha256Sum { + let domain_tuple = + make_structured_data_domain("mock-signer", "1.0.0", self.sign_data.chain_id); + let data_tuple = Value::Tuple( + TupleData::from_data(vec![ + ( + "stacks-tip-consensus-hash".into(), + Value::buff_from(self.sign_data.stacks_tip_consensus_hash.as_bytes().into()) + .unwrap(), + ), + ( + "stacks-tip".into(), + Value::buff_from(self.sign_data.stacks_tip.as_bytes().into()).unwrap(), + ), + ( + "server-version".into(), + Value::string_ascii_from_bytes(self.sign_data.server_version.clone().into()) + .unwrap(), + ), + ( + "burn-block-height".into(), + Value::UInt(self.sign_data.burn_block_height.into()), + ), + ( + "pox-consensus".into(), + Value::buff_from(self.sign_data.pox_consensus.as_bytes().into()).unwrap(), + ), + ]) + .expect("Error creating signature hash"), + ); + structured_data_message_hash(data_tuple, domain_tuple) } + /// Sign the mock signature and set the internal signature field fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { - let signature_hash = self.signature_hash().map_err(|e| e.to_string())?; - self.signature = private_key.sign(&signature_hash.0)?; + let signature_hash = self.signature_hash(); + self.signature = private_key.sign(signature_hash.as_bytes())?; Ok(()) } /// Verify the mock signature against the provided public key @@ -274,7 +387,7 @@ impl MockSignature { if self.signature == MessageSignature::empty() { return Ok(false); } - let signature_hash = self.signature_hash().map_err(|e| e.to_string())?; + let signature_hash = self.signature_hash(); public_key .verify(&signature_hash.0, &self.signature) .map_err(|e| e.to_string()) @@ -284,16 +397,16 @@ impl MockSignature { impl StacksMessageCodec for MockSignature { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &self.signature)?; - write_next(fd, &self.stacks_consensus_hash)?; + self.sign_data.consensus_serialize(fd)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { let signature = read_next::(fd)?; - let stacks_consensus_hash = read_next::(fd)?; + let sign_data = read_next::(fd)?; Ok(Self { signature, - stacks_consensus_hash, + sign_data, }) } } @@ -591,6 +704,7 @@ mod test { TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, }; use blockstack_lib::util_lib::strings::StacksString; + use clarity::consts::CHAIN_ID_MAINNET; use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; use clarity::types::PrivateKey; use clarity::util::hash::MerkleTree; @@ -708,6 +822,27 @@ mod test { assert_eq!(signer_message, deserialized_signer_message); } + fn random_mock_sign_data() -> MockSignData { + let stacks_tip_consensus_byte: u8 = thread_rng().gen(); + let stacks_tip_byte: u8 = thread_rng().gen(); + let pox_consensus_byte: u8 = thread_rng().gen(); + let chain_byte: u8 = thread_rng().gen_range(0..=1); + let chain_id = if chain_byte == 1 { + CHAIN_ID_TESTNET + } else { + CHAIN_ID_MAINNET + }; + MockSignData { + stacks_tip_consensus_hash: ConsensusHash([stacks_tip_consensus_byte; 20]), + stacks_tip: BlockHeaderHash([stacks_tip_byte; 32]), + server_version: "0.0.0".to_string(), + burn_block_height: thread_rng().next_u64(), + peer_burn_block_height: thread_rng().next_u64(), + pox_consensus: ConsensusHash([pox_consensus_byte; 20]), + chain_id, + } + } + #[test] fn verify_sign_mock_signature() { let private_key = StacksPrivateKey::new(); @@ -716,11 +851,9 @@ mod test { let bad_private_key = StacksPrivateKey::new(); let bad_public_key = StacksPublicKey::from_private(&bad_private_key); - let byte: u8 = thread_rng().gen(); - let stacks_consensus_hash = ConsensusHash([byte; 20]); let mut mock_signature = MockSignature { signature: MessageSignature::empty(), - stacks_consensus_hash, + sign_data: random_mock_sign_data(), }; assert!(!mock_signature .verify(&public_key) @@ -740,15 +873,22 @@ mod test { #[test] fn serde_mock_signature() { - let byte: u8 = thread_rng().gen(); - let stacks_consensus_hash = ConsensusHash([byte; 20]); let mock_signature = MockSignature { signature: MessageSignature::empty(), - stacks_consensus_hash, + sign_data: random_mock_sign_data(), }; let serialized_signature = mock_signature.serialize_to_vec(); let deserialized_signature = read_next::(&mut &serialized_signature[..]) .expect("Failed to deserialize MockSignature"); assert_eq!(mock_signature, deserialized_signature); } + + #[test] + fn serde_sign_data() { + let sign_data = random_mock_sign_data(); + let serialized_data = sign_data.serialize_to_vec(); + let deserialized_data = read_next::(&mut &serialized_data[..]) + .expect("Failed to deserialize MockSignData"); + assert_eq!(sign_data, deserialized_data); + } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 71caf8bf51..8a0d4772e2 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -16,6 +16,7 @@ use std::fmt::Debug; use std::sync::mpsc::Sender; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; +use clarity::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; @@ -171,8 +172,7 @@ impl SignerTrait for Signer { if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { if self.reward_cycle == current_reward_cycle { // We are in epoch 2.5, so we should mock mine to prove we are still alive. - debug!("Mock signing for burn block {burn_height:?}"); - self.mock_sign(stacks_client); + self.mock_sign(*burn_height, stacks_client); } }; } @@ -473,14 +473,26 @@ impl Signer { } /// Send a mock signature to stackerdb to prove we are still alive - fn mock_sign(&mut self, stacks_client: &StacksClient) { - let Ok(peer_info) = stacks_client.get_peer_info() else { - warn!("{self}: Failed to get peer info. Cannot mock mine."); + fn mock_sign(&mut self, burn_block_height: u64, stacks_client: &StacksClient) { + let Ok(peer_view) = stacks_client.get_peer_info() else { + warn!("{self}: Failed to get peer info. Cannot mock sign."); return; }; - let consensus_hash = peer_info.stacks_tip_consensus_hash; - debug!("Mock signing using stacks tip {consensus_hash:?}"); - let mock_signature = MockSignature::new(consensus_hash, &self.private_key); + let chain_id = if self.mainnet { + CHAIN_ID_MAINNET + } else { + CHAIN_ID_TESTNET + }; + debug!("Mock signing for burn block {burn_block_height:?}"; + "stacks_tip_consensus_hash" => ?peer_view.stacks_tip_consensus_hash.clone(), + "stacks_tip" => ?peer_view.stacks_tip.clone(), + "peer_burn_block_height" => peer_view.burn_block_height, + "pox_consensus" => ?peer_view.pox_consensus.clone(), + "server_version" => peer_view.server_version.clone(), + "chain_id" => chain_id + ); + let mock_signature = + MockSignature::new(peer_view, burn_block_height, chain_id, &self.private_key); let message = SignerMessage::MockSignature(mock_signature); if let Err(e) = self .stackerdb diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a8491c4638..410942f033 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -165,8 +165,9 @@ impl SignerTest { .wrapping_add(reward_cycle_len) .wrapping_add(1); - let next_reward_cycle_boundary = - epoch_25_reward_cycle_boundary.wrapping_add(reward_cycle_len); + let next_reward_cycle_boundary = epoch_25_reward_cycle_boundary + .wrapping_add(reward_cycle_len) + .saturating_sub(1); run_until_burnchain_height( &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.blocks_processed, @@ -228,7 +229,11 @@ impl SignerTest { &self.running_nodes.conf, ); - info!("Ready to mine the first Epoch 2.5 reward cycle!"); + let current_burn_block_height = self + .running_nodes + .btc_regtest_controller + .get_headers_height(); + info!("At burn block height {current_burn_block_height}. Ready to mine the first Epoch 2.5 reward cycle!"); } /// Run the test until the epoch 3 boundary @@ -1398,8 +1403,8 @@ fn retry_on_timeout() { #[test] #[ignore] -/// This test checks that the miner will retry when signature collection times out. -fn mock_mine_epoch_25() { +/// This test checks that Epoch 2.5 signers will issue a mock signature per burn block they receive. +fn mock_sign_epoch_25() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -1452,68 +1457,44 @@ fn mock_mine_epoch_25() { .map(|id| id.0) .collect(); assert_eq!(signer_slot_ids.len(), num_signers); - - // Give the peer info some time to update - let poll_time = Instant::now(); - while signer_test - .stacks_client - .get_peer_info() - .unwrap() - .burn_block_height - + 1 - < signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height() - { - std::thread::sleep(Duration::from_secs(1)); - assert!( - poll_time.elapsed() <= Duration::from_secs(15), - "Timed out waiting for peer info to update" - ); - } // Mine until epoch 3.0 and ensure we get a new mock signature per epoch 2.5 sortition let main_poll_time = Instant::now(); - while signer_test - .stacks_client - .get_peer_info() - .unwrap() - .burn_block_height - + 1 - < epoch_3_start_height - { - let old_consensus_hash = signer_test - .stacks_client - .get_peer_info() - .unwrap() - .stacks_tip_consensus_hash; + let mut current_burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + while current_burn_block_height + 1 < epoch_3_start_height { + current_burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let current_reward_cycle = signer_test.get_current_reward_cycle(); + if current_reward_cycle != reward_cycle { + debug!("Rolling over reward cycle to {:?}", current_reward_cycle); + reward_cycle = current_reward_cycle; + stackerdb = StackerDB::new( + &signer_test.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + reward_cycle, + SignerSlotID(0), // We are just reading so again, don't care about index. + ); + signer_slot_ids = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + } next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, || Ok(true), ) .unwrap(); - let peer_poll_time = Instant::now(); - while signer_test - .stacks_client - .get_peer_info() - .unwrap() - .stacks_tip_consensus_hash - == old_consensus_hash - { - std::thread::sleep(Duration::from_millis(100)); - assert!( - peer_poll_time.elapsed() < Duration::from_secs(5), - "Timed out waiting for peer info to update" - ); - } - let expected_consensus_hash = signer_test - .stacks_client - .get_peer_info() - .unwrap() - .stacks_tip_consensus_hash; let mut mock_signatures = vec![]; let mock_poll_time = Instant::now(); + debug!("Waiting for mock signatures for burn block height {current_burn_block_height}"); while mock_signatures.len() != num_signers { std::thread::sleep(Duration::from_millis(100)); let messages: Vec = StackerDB::get_messages( @@ -1525,9 +1506,10 @@ fn mock_mine_epoch_25() { .expect("Failed to get message from stackerdb"); for message in messages { if let SignerMessage::MockSignature(mock_signature) = message { - debug!("MOCK SIGNATURE: {:?}", mock_signature); - if mock_signature.stacks_consensus_hash == expected_consensus_hash { - mock_signatures.push(mock_signature); + if mock_signature.sign_data.burn_block_height == current_burn_block_height { + if !mock_signatures.contains(&mock_signature) { + mock_signatures.push(mock_signature); + } } } } @@ -1536,24 +1518,6 @@ fn mock_mine_epoch_25() { "Failed to find mock signatures within timeout" ); } - let current_reward_cycle = signer_test.get_current_reward_cycle(); - if current_reward_cycle != reward_cycle { - debug!("Rolling over reward cycle to {:?}", current_reward_cycle); - reward_cycle = current_reward_cycle; - stackerdb = StackerDB::new( - &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is - false, - reward_cycle, - SignerSlotID(0), // We are just reading so again, don't care about index. - ); - signer_slot_ids = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - assert_eq!(signer_slot_ids.len(), num_signers); - } assert!( main_poll_time.elapsed() <= Duration::from_secs(45), "Timed out waiting to advance epoch 3.0" From 6acd51086a1f27585fdfa775d9ba832eb490da3f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:19:37 -0400 Subject: [PATCH 0586/1400] fix: test_debug --> debug --- stackslib/src/net/chat.rs | 151 ++++++++++++++++++-------------------- 1 file changed, 73 insertions(+), 78 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 1e1fa79f5c..8d8dc7ca5c 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -735,10 +735,9 @@ impl ConversationP2P { } }; if bhh != their_burn_header_hash { - test_debug!( + debug!( "Burn header hash mismatch in preamble: {} != {}", - bhh, - their_burn_header_hash + bhh, their_burn_header_hash ); return true; } @@ -764,18 +763,16 @@ impl ConversationP2P { if my_epoch <= remote_epoch { // remote node supports same epochs we do - test_debug!( + debug!( "Remote peer has epoch {}, which is newer than our epoch {}", - remote_epoch, - my_epoch + remote_epoch, my_epoch ); return true; } - test_debug!( + debug!( "Remote peer has old network version {} (epoch {})", - remote_peer_version, - remote_epoch + remote_peer_version, remote_epoch ); // what epoch are we in? @@ -786,10 +783,9 @@ impl ConversationP2P { if cur_epoch <= remote_epoch { // epoch shift hasn't happened yet, and this peer supports the current epoch - test_debug!( + debug!( "Remote peer has epoch {} and current epoch is {}, so still valid", - remote_epoch, - cur_epoch + remote_epoch, cur_epoch ); return true; } @@ -828,11 +824,9 @@ impl ConversationP2P { } if (msg.preamble.peer_version & 0xff000000) != (self.version & 0xff000000) { // major version mismatch - test_debug!( + debug!( "{:?}: Preamble invalid: wrong peer version: {:x} != {:x}", - &self, - msg.preamble.peer_version, - self.version + &self, msg.preamble.peer_version, self.version ); return Err(net_error::InvalidMessage); } @@ -1366,11 +1360,6 @@ impl ConversationP2P { }; if let Some(stackerdb_accept) = stackerdb_accept { - test_debug!( - "{} =?= {}", - &stackerdb_accept.rc_consensus_hash, - &burnchain_view.rc_consensus_hash - ); if stackerdb_accept.rc_consensus_hash == burnchain_view.rc_consensus_hash { // remote peer is in the same reward cycle as us. self.update_from_stacker_db_handshake_data(stackerdb_accept); @@ -1457,7 +1446,7 @@ impl ConversationP2P { if cfg!(test) && self.connection.options.disable_chat_neighbors { // never report neighbors if this is disabled by a test - test_debug!( + debug!( "{:?}: Neighbor crawl is disabled; reporting 0 neighbors", &local_peer ); @@ -1694,7 +1683,7 @@ impl ConversationP2P { if self.connection.options.disable_inv_chat { // never reply that we have blocks - test_debug!( + debug!( "{:?}: Disable inv chat -- pretend like we have nothing", network.get_local_peer() ); @@ -1759,11 +1748,9 @@ impl ConversationP2P { e })?; - test_debug!( + debug!( "Reply NakamotoInv for {} (rc {}): {:?}", - &get_nakamoto_inv.consensus_hash, - reward_cycle, - &nakamoto_inv + &get_nakamoto_inv.consensus_hash, reward_cycle, &nakamoto_inv ); Ok(StacksMessageType::NakamotoInv(nakamoto_inv)) @@ -1798,7 +1785,7 @@ impl ConversationP2P { if self.connection.options.disable_inv_chat { // never reply that we have blocks - test_debug!( + debug!( "{:?}: Disable inv chat -- pretend like we have nothing", network.get_local_peer() ); @@ -1837,10 +1824,9 @@ impl ConversationP2P { Ok(Some(sn)) => { if !sn.pox_valid { // invalid consensus hash - test_debug!( + debug!( "{:?}: Snapshot {:?} is not on a valid PoX fork", - local_peer, - sn.burn_header_hash + local_peer, sn.burn_header_hash ); return Ok(StacksMessageType::Nack(NackData::new( NackErrorCodes::InvalidPoxFork, @@ -1852,7 +1838,7 @@ impl ConversationP2P { % (burnchain.pox_constants.reward_cycle_length as u64) != 1 { - test_debug!( + debug!( "{:?}: block height ({} - {}) % {} != 1", local_peer, sn.block_height, @@ -1896,10 +1882,9 @@ impl ConversationP2P { } } Ok(None) | Err(db_error::NotFoundError) => { - test_debug!( + debug!( "{:?}: snapshot for consensus hash {} not found", - local_peer, - getpoxinv.consensus_hash + local_peer, getpoxinv.consensus_hash ); Ok(StacksMessageType::Nack(NackData::new( NackErrorCodes::InvalidPoxFork, @@ -1999,9 +1984,29 @@ impl ConversationP2P { ) { Ok(Some(chunk)) => chunk, Ok(None) => { - // request for a stale chunk + // TODO: this is racey + if let Ok(Some(actual_version)) = + stacker_dbs.get_slot_version(&getchunk.contract_id, getchunk.slot_id) + { + // request for a stale chunk + debug!("{:?}: NACK StackerDBGetChunk; version mismatch for requested slot {}.{} for {}. Expected {}", local_peer, getchunk.slot_id, getchunk.slot_version, &getchunk.contract_id, actual_version); + if actual_version > getchunk.slot_version { + return Ok(StacksMessageType::Nack(NackData::new( + NackErrorCodes::StaleVersion, + ))); + } else { + return Ok(StacksMessageType::Nack(NackData::new( + NackErrorCodes::FutureVersion, + ))); + } + } + // if we hit a DB error, just treat it as if the DB doesn't exist + debug!( + "{:?}: NACK StackerDBGetChunk; unloadable slot {}.{} for {}", + local_peer, getchunk.slot_id, getchunk.slot_version, &getchunk.contract_id + ); return Ok(StacksMessageType::Nack(NackData::new( - NackErrorCodes::StaleVersion, + NackErrorCodes::NoSuchDB, ))); } Err(e) => { @@ -2435,7 +2440,7 @@ impl ConversationP2P { } } } - test_debug!("{:?}: received {} bytes", self, total_recved); + debug!("{:?}: received {} bytes", self, total_recved); Ok(total_recved) } @@ -2463,7 +2468,7 @@ impl ConversationP2P { } } } - test_debug!("{:?}: sent {} bytes", self, total_sent); + debug!("{:?}: sent {} bytes", self, total_sent); Ok(total_sent) } @@ -2554,12 +2559,12 @@ impl ConversationP2P { Ok(handshake_opt) } StacksMessageType::HandshakeAccept(ref data) => { - test_debug!("{:?}: Got HandshakeAccept", &self); + debug!("{:?}: Got HandshakeAccept", &self); self.handle_handshake_accept(network.get_chain_view(), &msg.preamble, data, None) .and_then(|_| Ok(None)) } StacksMessageType::StackerDBHandshakeAccept(ref data, ref db_data) => { - test_debug!("{:?}: Got StackerDBHandshakeAccept", &self); + debug!("{:?}: Got StackerDBHandshakeAccept", &self); self.handle_handshake_accept( network.get_chain_view(), &msg.preamble, @@ -2569,21 +2574,21 @@ impl ConversationP2P { .and_then(|_| Ok(None)) } StacksMessageType::Ping(_) => { - test_debug!("{:?}: Got Ping", &self); + debug!("{:?}: Got Ping", &self); // consume here if unsolicited consume = true; self.handle_ping(network.get_chain_view(), msg) } StacksMessageType::Pong(_) => { - test_debug!("{:?}: Got Pong", &self); + debug!("{:?}: Got Pong", &self); Ok(None) } StacksMessageType::NatPunchRequest(ref nonce) => { if cfg!(test) && self.connection.options.disable_natpunch { return Err(net_error::InvalidMessage); } - test_debug!("{:?}: Got NatPunchRequest({})", &self, nonce); + debug!("{:?}: Got NatPunchRequest({})", &self, nonce); consume = true; let msg = self.handle_natpunch_request(network.get_chain_view(), *nonce); @@ -2593,11 +2598,11 @@ impl ConversationP2P { if cfg!(test) && self.connection.options.disable_natpunch { return Err(net_error::InvalidMessage); } - test_debug!("{:?}: Got NatPunchReply({})", &self, _m.nonce); + debug!("{:?}: Got NatPunchReply({})", &self, _m.nonce); Ok(None) } _ => { - test_debug!( + debug!( "{:?}: Got a data-plane message (type {})", &self, msg.payload.get_message_name() @@ -2626,14 +2631,14 @@ impl ConversationP2P { let reply_opt = match msg.payload { StacksMessageType::Handshake(_) => { monitoring::increment_msg_counter("p2p_unauthenticated_handshake".to_string()); - test_debug!("{:?}: Got unauthenticated Handshake", &self); + debug!("{:?}: Got unauthenticated Handshake", &self); let (reply_opt, handled) = self.handle_handshake(network, msg, false, ibd)?; consume = handled; Ok(reply_opt) } StacksMessageType::HandshakeAccept(ref data) => { if solicited { - test_debug!("{:?}: Got unauthenticated HandshakeAccept", &self); + debug!("{:?}: Got unauthenticated HandshakeAccept", &self); self.handle_handshake_accept( network.get_chain_view(), &msg.preamble, @@ -2642,7 +2647,7 @@ impl ConversationP2P { ) .and_then(|_| Ok(None)) } else { - test_debug!("{:?}: Unsolicited unauthenticated HandshakeAccept", &self); + debug!("{:?}: Unsolicited unauthenticated HandshakeAccept", &self); // don't update stats or state, and don't pass back consume = true; @@ -2651,7 +2656,7 @@ impl ConversationP2P { } StacksMessageType::StackerDBHandshakeAccept(ref data, ref db_data) => { if solicited { - test_debug!("{:?}: Got unauthenticated StackerDBHandshakeAccept", &self); + debug!("{:?}: Got unauthenticated StackerDBHandshakeAccept", &self); self.handle_handshake_accept( network.get_chain_view(), &msg.preamble, @@ -2660,7 +2665,7 @@ impl ConversationP2P { ) .and_then(|_| Ok(None)) } else { - test_debug!( + debug!( "{:?}: Unsolicited unauthenticated StackerDBHandshakeAccept", &self ); @@ -2671,14 +2676,14 @@ impl ConversationP2P { } } StacksMessageType::HandshakeReject => { - test_debug!("{:?}: Got unauthenticated HandshakeReject", &self); + debug!("{:?}: Got unauthenticated HandshakeReject", &self); // don't NACK this back just because we were rejected. // But, it's okay to forward this back (i.e. don't consume). Ok(None) } StacksMessageType::Nack(_) => { - test_debug!("{:?}: Got unauthenticated Nack", &self); + debug!("{:?}: Got unauthenticated Nack", &self); // don't NACK back. // But, it's okay to forward this back (i.e. don't consume). @@ -2688,10 +2693,9 @@ impl ConversationP2P { if cfg!(test) && self.connection.options.disable_natpunch { return Err(net_error::InvalidMessage); } - test_debug!( + debug!( "{:?}: Got unauthenticated NatPunchRequest({})", - &self, - *nonce + &self, *nonce ); consume = true; let msg = self.handle_natpunch_request(network.get_chain_view(), *nonce); @@ -2701,10 +2705,9 @@ impl ConversationP2P { if cfg!(test) && self.connection.options.disable_natpunch { return Err(net_error::InvalidMessage); } - test_debug!( + debug!( "{:?}: Got unauthenticated NatPunchReply({})", - &self, - _m.nonce + &self, _m.nonce ); // it's okay to forward this back (i.e. don't consume) @@ -2939,7 +2942,7 @@ impl ConversationP2P { ibd: bool, ) -> Result, net_error> { let num_inbound = self.connection.inbox_len(); - test_debug!("{:?}: {} messages pending", &self, num_inbound); + debug!("{:?}: {} messages pending", &self, num_inbound); let mut unsolicited = vec![]; for _ in 0..num_inbound { @@ -2972,7 +2975,7 @@ impl ConversationP2P { if let Some(mut reply) = reply_opt.take() { // handler generated a reply. // send back this message to the remote peer. - test_debug!( + debug!( "{:?}: Send control-plane reply type {}", &self, reply.payload.get_message_name() @@ -2988,11 +2991,9 @@ impl ConversationP2P { let _relayers = format!("{:?}", &msg.relayers); let _seq = msg.request_id(); - test_debug!( + debug!( "{:?}: Received message {}, relayed by {}", - &self, - &_msgtype, - &_relayers + &self, &_msgtype, &_relayers ); // Is there someone else waiting for this message? If so, pass it along. @@ -3004,33 +3005,27 @@ impl ConversationP2P { &self, _msgtype, _seq ); } else { - test_debug!( + debug!( "{:?}: Try handling message (type {} seq {})", - &self, - _msgtype, - _seq + &self, _msgtype, _seq ); if let Some(msg) = self.handle_data_message(network, sortdb, chainstate, msg)? { // this message was unsolicited - test_debug!( + debug!( "{:?}: Did not handle message (type {} seq {}); passing upstream", - &self, - _msgtype, - _seq + &self, _msgtype, _seq ); unsolicited.push(msg); } else { // expected and handled the message - test_debug!("{:?}: Handled message {} seq {}", &self, _msgtype, _seq); + debug!("{:?}: Handled message {} seq {}", &self, _msgtype, _seq); } } } else { // no one was waiting for this reply, so just drop it - test_debug!( + debug!( "{:?}: Fulfilled pending message request (type {} seq {})", - &self, - _msgtype, - _seq + &self, _msgtype, _seq ); } } From 5df7845da13436664df757168868f8efe7d4ce10 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:19:51 -0400 Subject: [PATCH 0587/1400] fix: dead/broken peers were mixed up --- stackslib/src/net/inv/epoch2x.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 62a5d02470..b3092d8f12 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -2629,7 +2629,7 @@ impl PeerNetwork { } // synchronize peer block inventories - let (done, throttled, dead_neighbors, broken_neighbors) = + let (done, throttled, broken_neighbors, dead_neighbors) = self.sync_inventories_epoch2x(sortdb, ibd); // disconnect and ban broken peers From afcfa00bfc9a5d8d7bc291add035d5f50bd000b8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:20:11 -0400 Subject: [PATCH 0588/1400] chore: new nack code for requesting a chunk from the future --- stackslib/src/net/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index e836bdfec2..da323be3e7 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1044,6 +1044,7 @@ pub mod NackErrorCodes { pub const NoSuchDB: u32 = 6; pub const StaleVersion: u32 = 7; pub const StaleView: u32 = 8; + pub const FutureVersion: u32 = 9; } #[derive(Debug, Clone, PartialEq)] From d74772ad0387c9c59c557ac899db4c591cd8b54b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:20:32 -0400 Subject: [PATCH 0589/1400] chore: log stackerdb contract and inventory --- stackslib/src/net/stackerdb/mod.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 847363b2e3..d310998a19 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -423,6 +423,8 @@ pub struct StackerDBSync { /// whether or not we should immediately re-fetch chunks because we learned about new chunks /// from our peers when they replied to our chunk-pushes with new inventory state need_resync: bool, + /// whether or not the fetched inventory was determined to be stale + stale_inv: bool, /// Track stale neighbors pub(crate) stale_neighbors: HashSet, /// How many attempted connections have been made in the last pass (gets reset) @@ -505,7 +507,9 @@ impl PeerNetwork { Err(e) => { debug!( "{:?}: failed to get chunk versions for {}: {:?}", - self.local_peer, contract_id, &e + self.get_local_peer(), + contract_id, + &e ); // most likely indicates that this DB doesn't exist @@ -514,6 +518,14 @@ impl PeerNetwork { }; let num_outbound_replicas = self.count_outbound_stackerdb_replicas(contract_id) as u32; + + debug!( + "{:?}: inventory for {} has {} outbound replicas; versions are {:?}", + self.get_local_peer(), + contract_id, + num_outbound_replicas, + &slot_versions + ); StacksMessageType::StackerDBChunkInv(StackerDBChunkInvData { slot_versions, num_outbound_replicas, From bc8311611f0c17a35d77fb3c0a3329675e19ae55 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:20:51 -0400 Subject: [PATCH 0590/1400] fix: don't disconnect a replica if it nacked us for asking for a stale version; instead, immediately re-sync --- stackslib/src/net/stackerdb/sync.rs | 45 ++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 53a1f67c46..8444ed5e55 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -71,6 +71,7 @@ impl StackerDBSync { total_pushed: 0, last_run_ts: 0, need_resync: false, + stale_inv: false, stale_neighbors: HashSet::new(), num_connections: 0, num_attempted_connections: 0, @@ -212,6 +213,7 @@ impl StackerDBSync { self.write_freq = config.write_freq; self.need_resync = false; + self.stale_inv = false; self.last_run_ts = get_epoch_time_secs(); self.state = StackerDBSyncState::ConnectBegin; @@ -256,7 +258,7 @@ impl StackerDBSync { .get_slot_write_timestamps(&self.smart_contract_id)?; if local_slot_versions.len() != local_write_timestamps.len() { - let msg = format!("Local slot versions ({}) out of sync with DB slot versions ({}); abandoning sync and trying again", local_slot_versions.len(), local_write_timestamps.len()); + let msg = format!("Local slot versions ({}) out of sync with DB slot versions ({}) for {}; abandoning sync and trying again", local_slot_versions.len(), local_write_timestamps.len(), &self.smart_contract_id); warn!("{}", &msg); return Err(net_error::Transient(msg)); } @@ -270,12 +272,13 @@ impl StackerDBSync { let write_ts = local_write_timestamps[i]; if write_ts + self.write_freq > now { debug!( - "{:?}: Chunk {} was written too frequently ({} + {} >= {}), so will not fetch chunk", + "{:?}: Chunk {} was written too frequently ({} + {} >= {}) in {}, so will not fetch chunk", network.get_local_peer(), i, write_ts, self.write_freq, - now + now, + &self.smart_contract_id, ); continue; } @@ -343,10 +346,11 @@ impl StackerDBSync { schedule.reverse(); debug!( - "{:?}: Will request up to {} chunks for {}", + "{:?}: Will request up to {} chunks for {}. Schedule: {:?}", network.get_local_peer(), &schedule.len(), &self.smart_contract_id, + &schedule ); Ok(schedule) } @@ -520,12 +524,13 @@ impl StackerDBSync { if *old_version < new_inv.slot_versions[old_slot_id] { // remote peer indicated that it has a newer version of this chunk. debug!( - "{:?}: peer {:?} has a newer version of slot {} ({} < {})", + "{:?}: peer {:?} has a newer version of slot {} ({} < {}) in {}", _network.get_local_peer(), &naddr, old_slot_id, old_version, - new_inv.slot_versions[old_slot_id] + new_inv.slot_versions[old_slot_id], + &self.smart_contract_id, ); resync = true; break; @@ -833,9 +838,10 @@ impl StackerDBSync { } StacksMessageType::Nack(data) => { debug!( - "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunksInv us with code {}", + "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunksInv us (on {}) with code {}", &network.get_local_peer(), &naddr, + &self.smart_contract_id, data.error_code ); self.connected_replicas.remove(&naddr); @@ -851,9 +857,10 @@ impl StackerDBSync { } }; debug!( - "{:?}: getchunksinv_try_finish: Received StackerDBChunkInv from {:?}", + "{:?}: getchunksinv_try_finish: Received StackerDBChunkInv from {:?}: {:?}", network.get_local_peer(), - &naddr + &naddr, + &chunk_inv_opt ); if let Some(chunk_inv) = chunk_inv_opt { @@ -969,14 +976,17 @@ impl StackerDBSync { StacksMessageType::StackerDBChunk(data) => data, StacksMessageType::Nack(data) => { debug!( - "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunk with code {}", + "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunk (on {}) with code {}", network.get_local_peer(), &naddr, + &self.smart_contract_id, data.error_code ); - self.connected_replicas.remove(&naddr); if data.error_code == NackErrorCodes::StaleView { self.stale_neighbors.insert(naddr); + } else if data.error_code == NackErrorCodes::StaleVersion { + // try again immediately, without throttling + self.stale_inv = true; } continue; } @@ -1079,7 +1089,6 @@ impl StackerDBSync { &selected_neighbor, &e ); - self.connected_replicas.remove(&selected_neighbor); continue; } @@ -1119,7 +1128,6 @@ impl StackerDBSync { &naddr, data.error_code ); - self.connected_replicas.remove(&naddr); if data.error_code == NackErrorCodes::StaleView { self.stale_neighbors.insert(naddr); } @@ -1279,8 +1287,19 @@ impl StackerDBSync { } } StackerDBSyncState::Finished => { + let stale_inv = self.stale_inv; + let result = self.reset(Some(network), config); self.state = StackerDBSyncState::ConnectBegin; + + if stale_inv { + debug!( + "{:?}: immediately retry StackerDB sync on {} due to stale inventory", + network.get_local_peer(), + &self.smart_contract_id + ); + self.wakeup(); + } return Ok(Some(result)); } }; From fb16102413695cc124be2a0102da81cd9fcb5888 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:21:22 -0400 Subject: [PATCH 0591/1400] fix: remove very noisy debug message --- testnet/stacks-node/src/config.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f52c5d6ba9..bdf3bd4c3d 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2125,7 +2125,6 @@ impl NodeConfig { let contract_name = NakamotoSigners::make_signers_db_name(signer_set, message_id); let contract_id = boot_code_id(contract_name.as_str(), is_mainnet); if !self.stacker_dbs.contains(&contract_id) { - debug!("A miner/stacker must subscribe to the {contract_id} stacker db contract. Forcibly subscribing..."); self.stacker_dbs.push(contract_id); } } @@ -2135,7 +2134,6 @@ impl NodeConfig { pub fn add_miner_stackerdb(&mut self, is_mainnet: bool) { let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); if !self.stacker_dbs.contains(&miners_contract_id) { - debug!("A miner/stacker must subscribe to the {miners_contract_id} stacker db contract. Forcibly subscribing..."); self.stacker_dbs.push(miners_contract_id); } } From 3835a184772a76477ed64fa61a561171a35267ca Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:21:35 -0400 Subject: [PATCH 0592/1400] chore: log stackerdb events --- testnet/stacks-node/src/event_dispatcher.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 5a72e4ca0a..06bde17d40 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -1277,6 +1277,11 @@ impl EventDispatcher { contract_id: QualifiedContractIdentifier, modified_slots: Vec, ) { + debug!( + "event_dispatcher: New StackerDB chunk events for {}: {:?}", + contract_id, modified_slots + ); + let interested_observers = self.filter_observers(&self.stackerdb_observers_lookup, false); let interested_receiver = STACKER_DB_CHANNEL.is_active(&contract_id); @@ -1294,7 +1299,7 @@ impl EventDispatcher { if let Some(channel) = interested_receiver { if let Err(send_err) = channel.send(event) { warn!( - "Failed to send StackerDB event to WSTS coordinator channel. Miner thread may have exited."; + "Failed to send StackerDB event to signer coordinator channel. Miner thread may have exited."; "err" => ?send_err ); } From 99d55364ae0281dfc509f9abb4548c743dd24878 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:21:44 -0400 Subject: [PATCH 0593/1400] feat: allow other threads to raise the relayer's initiative to commit --- testnet/stacks-node/src/globals.rs | 33 ++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index b0f338032a..675a747480 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -69,6 +69,9 @@ pub struct Globals { /// previously-selected best tips /// maps stacks height to tip candidate previous_best_tips: Arc>>, + /// Initiative flag. + /// Raised when the main loop should wake up and do something. + initiative: Arc>, } // Need to manually implement Clone, because [derive(Clone)] requires @@ -90,6 +93,7 @@ impl Clone for Globals { start_mining_height: self.start_mining_height.clone(), estimated_winning_probs: self.estimated_winning_probs.clone(), previous_best_tips: self.previous_best_tips.clone(), + initiative: self.initiative.clone(), } } } @@ -119,6 +123,7 @@ impl Globals { start_mining_height: Arc::new(Mutex::new(start_mining_height)), estimated_winning_probs: Arc::new(Mutex::new(HashMap::new())), previous_best_tips: Arc::new(Mutex::new(BTreeMap::new())), + initiative: Arc::new(Mutex::new(false)), } } @@ -428,4 +433,32 @@ impl Globals { } } } + + /// Raise the initiative flag + pub fn raise_initiative(&self) { + match self.initiative.lock() { + Ok(mut initiative) => { + *initiative = true; + } + Err(_e) => { + error!("FATAL: failed to lock initiative"); + panic!(); + } + } + } + + /// Clear the initiative flag and return its value + pub fn take_initiative(&self) -> bool { + match self.initiative.lock() { + Ok(mut initiative) => { + let ret = *initiative; + *initiative = false; + ret + } + Err(_e) => { + error!("FATAL: failed to lock initiative"); + panic!(); + } + } + } } From ef38358f50395ffcf22b6f2bec04a98c1ddf8f19 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:22:20 -0400 Subject: [PATCH 0594/1400] fix: new burnchain block means new initiative to commit --- testnet/stacks-node/src/nakamoto_node.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index c57d630a58..d9f44cc67b 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -269,7 +269,10 @@ impl StacksNode { snapshot.parent_burn_header_hash, snapshot.winning_stacks_block_hash, )) - .map_err(|_| Error::ChannelClosed) + .map_err(|_| Error::ChannelClosed)?; + + self.globals.raise_initiative(); + Ok(()) } /// Process a state coming from the burnchain, by extracting the validated KeyRegisterOp From d1374d9a698d2d41e92be1d1e95e5692461b1221 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:22:35 -0400 Subject: [PATCH 0595/1400] fix: poll for 1s (since that's the stackerdb minimum sync time, and thus its maximum time between steps), and raise relayer initiative when there's a new network result --- testnet/stacks-node/src/nakamoto_node/peer.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index dc060e06b6..1fd5325623 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -190,7 +190,7 @@ impl PeerThread { info!("`PeerNetwork::bind()` skipped, already bound"); } - let poll_timeout = cmp::min(5000, config.miner.first_attempt_time_ms / 2); + let poll_timeout = cmp::min(1000, config.miner.first_attempt_time_ms / 2); PeerThread { config, @@ -347,7 +347,11 @@ impl PeerThread { } } } else { - debug!("P2P: Dispatched result to Relayer!"); + debug!( + "P2P: Dispatched result to Relayer! {} results remaining", + self.results_with_data.len() + ); + self.globals.raise_initiative(); } } From d37f2bc28bd7696fb8c69ba9c63db65b896ed605 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 30 Jul 2024 21:22:40 +0300 Subject: [PATCH 0596/1400] move to debug block proposal log --- stacks-signer/src/v0/signer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b38467d454..a4716e4c80 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -251,7 +251,7 @@ impl Signer { block_proposal: &BlockProposal, miner_pubkey: &Secp256k1PublicKey, ) { - info!("{self}: Received a block proposal: {block_proposal:?}"); + debug!("{self}: Received a block proposal: {block_proposal:?}"); if block_proposal.reward_cycle != self.reward_cycle { // We are not signing for this reward cycle. Ignore the block. debug!( From a071614565324a3c1e928658b0d0584f785ab236 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:23:13 -0400 Subject: [PATCH 0597/1400] chore: process a new initiative based on either a timeout, or on another thread's prompting --- .../stacks-node/src/nakamoto_node/relayer.rs | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 12f7dbc9e9..f9edd3db1a 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -48,9 +48,9 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksPublicKey, VRFSeed, }; use stacks_common::types::StacksEpochId; -use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::Hash160; use stacks_common::util::vrf::VRFPublicKey; +use stacks_common::util::{get_epoch_time_ms, sleep_ms}; use super::miner::MinerReason; use super::{ @@ -1065,8 +1065,11 @@ impl RelayerThread { self.next_initiative = Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); + while self.globals.keep_running() { - let directive = if Instant::now() >= self.next_initiative { + let raised_initiative = self.globals.take_initiative(); + let timed_out = Instant::now() >= self.next_initiative; + let directive = if raised_initiative || timed_out { self.next_initiative = Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); self.initiative() @@ -1074,19 +1077,19 @@ impl RelayerThread { None }; - let Some(timeout) = self.next_initiative.checked_duration_since(Instant::now()) else { - // next_initiative timeout occurred, so go to next loop iteration. - continue; - }; - let directive = if let Some(directive) = directive { directive } else { - match relay_rcv.recv_timeout(timeout) { + match relay_rcv.recv_timeout(Duration::from_millis( + self.config.node.next_initiative_delay, + )) { Ok(directive) => directive, - // timed out, so go to next loop iteration - Err(RecvTimeoutError::Timeout) => continue, - Err(RecvTimeoutError::Disconnected) => break, + Err(RecvTimeoutError::Timeout) => { + continue; + } + Err(RecvTimeoutError::Disconnected) => { + break; + } } }; From 1a02c613195584d2893539d41b70b3d7179d14ee Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 30 Jul 2024 14:23:34 -0400 Subject: [PATCH 0598/1400] chore: new burnchain block means new initiative --- testnet/stacks-node/src/run_loop/nakamoto.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 511b6c84b2..3ecd4f1e7d 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -711,6 +711,7 @@ impl RunLoop { sortition_db_height ); last_tenure_sortition_height = sortition_db_height; + globals.raise_initiative(); } } } From 9e9aa77752703acd330479b83a2daf662778541d Mon Sep 17 00:00:00 2001 From: janniks Date: Wed, 31 Jul 2024 00:13:29 +0200 Subject: [PATCH 0599/1400] fix: only force change output for block commits --- .../src/burnchains/bitcoin_regtest_controller.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index b09a71e5cf..39ef40490b 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -872,6 +872,7 @@ impl BitcoinRegtestController { fee_rate, &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1009,6 +1010,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1092,6 +1094,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1166,6 +1169,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1244,6 +1248,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1331,6 +1336,7 @@ impl BitcoinRegtestController { get_satoshis_per_byte(&self.config), &mut utxos, signer, + false, )?; increment_btc_ops_sent_counter(); @@ -1421,6 +1427,7 @@ impl BitcoinRegtestController { fee_rate, &mut utxos, signer, + true, // only block commit op requires change output to exist )?; let serialized_tx = SerializedTx::new(tx.clone()); @@ -1691,6 +1698,7 @@ impl BitcoinRegtestController { fee_rate: u64, utxos_set: &mut UTXOSet, signer: &mut BurnchainOpSigner, + force_change_output: bool, ) -> Option<()> { // spend UTXOs in order by confirmations. Spend the least-confirmed UTXO first, and in the // event of a tie, spend the smallest-value UTXO first. @@ -1721,7 +1729,7 @@ impl BitcoinRegtestController { spent_in_outputs + min_tx_size * fee_rate + estimated_rbf, &mut utxos_cloned, signer, - true, + force_change_output, ); let serialized_tx = SerializedTx::new(tx_cloned); cmp::max(min_tx_size, serialized_tx.bytes.len() as u64) @@ -1738,7 +1746,7 @@ impl BitcoinRegtestController { spent_in_outputs + tx_size * fee_rate + rbf_fee, utxos_set, signer, - true, + force_change_output, ); signer.dispose(); Some(()) From 0a0c5a462e0eb2b448743e2d5570ffb925a488f4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 31 Jul 2024 00:43:34 -0400 Subject: [PATCH 0600/1400] chore: improve signer logging --- stacks-signer/src/v0/signer.rs | 5 ++++- stackslib/src/net/api/postblock_proposal.rs | 9 +++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index a4716e4c80..77f49bf9f9 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -445,7 +445,10 @@ impl Signer { } }; // Submit a proposal response to the .signers contract for miners - debug!("{self}: Broadcasting a block response to stacks node: {response:?}"); + info!( + "{self}: Broadcasting a block response to stacks node: {response:?}"; + "signer_sighash" => %block_info.signer_signature_hash(), + ); match self .stackerdb .send_message_with_retry::(response.clone().into()) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 901720ee81..6c1d5526b5 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -512,6 +512,15 @@ impl RPCRequestHandler for RPCBlockProposalRequestHandler { .take() .ok_or(NetError::SendError("`block_proposal` not set".into()))?; + info!( + "Received block proposal request"; + "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), + "block_header_hash" => %block_proposal.block.header.block_hash(), + "height" => block_proposal.block.header.chain_length, + "tx_count" => block_proposal.block.txs.len(), + "parent_stacks_block_id" => %block_proposal.block.header.parent_block_id, + ); + let res = node.with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { if network.is_proposal_thread_running() { return Err(( From f505399d1c3df52471bf65bd627f7ada501fad8b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 31 Jul 2024 10:57:27 -0400 Subject: [PATCH 0601/1400] WIP: add initial test outline with a bunch of TODOs Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 115 +++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5ebb893717..fe6c4ff619 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -36,6 +36,7 @@ use stacks::util_lib::boot::boot_code_id; use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; +use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::State; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -1373,3 +1374,117 @@ fn empty_sortition() { } signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test asserts that signer set rollover works as expected. +/// Specifically, if a new set of signers are registered for an upcoming reward cycle, +/// old signers shut down operation and the new signers take over with the commencement of +/// the next reward cycle. +fn signer_set_rollover() { + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let new_num_signers = 5; + + let new_signer_private_keys: Vec<_> = (0..new_num_signers) + .into_iter() + .map(|_| StacksPrivateKey::new()) + .collect(); + let new_signer_addresses: Vec<_> = new_signer_private_keys + .iter() + .map(|sk| tests::to_addr(sk)) + .collect(); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + // Boot with some initial signer set + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = Duration::from_secs(20); + + let run_stamp = rand::random(); + + // Setup the new signers that will take over + let new_signer_configs = build_signer_config_tomls( + &new_signer_private_keys, + &signer_test.running_nodes.conf.node.rpc_bind, + Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. + &Network::Testnet, + "12345", + run_stamp, + 3000 + num_signers, + Some(100_000), + None, + Some(9000), + ); + + let new_spawned_signers: Vec<_> = (0..num_signers) + .into_iter() + .map(|i| { + info!("spawning signer"); + let mut signer_config = + SignerConfig::load_from_str(&new_signer_configs[i as usize]).unwrap(); + SpawnedSigner::new(signer_config) + }) + .collect(); + + // TODO: may need to modify signer_test to not auto stack and delegate the way it does right now. I think it delegates for 12 reward cycles. and we should delegate only for one before transferring to the new signer set + + // TODO: Advance to the first reward cycle, stacking and delegating to the old signers beforehand + signer_test.boot_to_epoch_3(); + + // TODO: verify that the first reward cycle has the old signers in the reward set + let reward_cycle = signer_test.get_current_reward_cycle(); + let old_signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + // TODO: manually trigger a stacks transaction and verify that only OLD signer signatures are found in the signed block + + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + // submit a tx so that the miner will mine an extra block + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let start_time = Instant::now(); + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + let mined_block = signer_test.mine_nakamoto_block(short_timeout); + // TODO: verify the mined_block signatures against the OLD signer set (might need to update event to take vector of message signatures?) + + //TODO: advance to the next reward cycle, stacking and delegating to the new signers beforehand + let reward_cycle = signer_test.get_current_reward_cycle(); + let new_signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + + // submit a tx so that the miner will mine an extra block + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let start_time = Instant::now(); + // submit a tx so that the miner will mine an extra block + let sender_nonce = 1; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + let mined_block = signer_test.mine_nakamoto_block(short_timeout); + // TODO: verify the mined_block signatures against the NEW signer set + + signer_test.shutdown(); + // TODO: shutdown the new signers as well +} From 71cb039f850b832cde54851e0a879eb9902a965b Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 29 Jul 2024 10:58:59 -0400 Subject: [PATCH 0602/1400] test: Add integration test for mock mining --- .github/workflows/bitcoin-tests.yml | 1 + .../stacks-node/src/nakamoto_node/miner.rs | 34 ++- .../stacks-node/src/nakamoto_node/relayer.rs | 22 +- .../src/tests/nakamoto_integrations.rs | 280 ++++++++++++++++++ 4 files changed, 321 insertions(+), 16 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a5604efd7d..b90a06c209 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -97,6 +97,7 @@ jobs: - tests::nakamoto_integrations::check_block_info - tests::nakamoto_integrations::check_block_info_rewards - tests::nakamoto_integrations::continue_tenure_extend + - tests::nakamoto_integrations::mock_mining # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 70a6f7b3e3..06a6e37006 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -222,6 +222,32 @@ impl BlockMinerThread { // now, actually run this tenure loop { let new_block = loop { + if self.config.get_node_config(false).mock_mining { + let burn_db_path = self.config.get_burn_db_file_path(); + let mut burn_db = SortitionDB::open( + &burn_db_path, + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + let burn_tip_changed = self.check_burn_tip_changed(&burn_db); + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + match burn_tip_changed + .and_then(|_| self.load_block_parent_info(&mut burn_db, &mut chain_state)) + { + Ok(..) => {} + Err(NakamotoNodeError::ParentNotFound) => { + info!("Mock miner has not processed parent block yet, sleeping and trying again"); + thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); + continue; + } + Err(e) => { + warn!("Mock miner failed to load parent info: {e:?}"); + return Err(e); + } + } + } match self.mine_block(&stackerdbs) { Ok(x) => break Some(x), Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { @@ -401,6 +427,10 @@ impl BlockMinerThread { )); }; + if self.config.get_node_config(false).mock_mining { + return Ok((reward_set, Vec::new())); + } + let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); let mut coordinator = SignCoordinator::new(&reward_set, miner_privkey_as_scalar, &self.config).map_err( @@ -411,10 +441,6 @@ impl BlockMinerThread { }, )?; - if self.config.get_node_config(false).mock_mining { - return Ok((reward_set, Vec::new())); - } - *attempts += 1; let signature = coordinator.begin_sign_v0( new_block, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 0d8567a95d..be1ff54698 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -794,7 +794,7 @@ impl RelayerThread { fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> { if let Err(e) = self.stop_tenure() { - error!("Relayer: Failed to stop tenure: {:?}", e); + error!("Relayer: Failed to stop tenure: {e:?}"); return Ok(()); } debug!("Relayer: successfully stopped tenure."); @@ -867,7 +867,7 @@ impl RelayerThread { debug!("Relayer: successfully started new tenure."); } Err(e) => { - error!("Relayer: Failed to start new tenure: {:?}", e); + error!("Relayer: Failed to start new tenure: {e:?}"); } } Ok(()) @@ -879,13 +879,11 @@ impl RelayerThread { burn_hash: BurnchainHeaderHash, committed_index_hash: StacksBlockId, ) -> bool { - let miner_instruction = - match self.process_sortition(consensus_hash, burn_hash, committed_index_hash) { - Ok(mi) => mi, - Err(_) => { - return false; - } - }; + let Ok(miner_instruction) = + self.process_sortition(consensus_hash, burn_hash, committed_index_hash) + else { + return false; + }; match miner_instruction { MinerDirective::BeginTenure { @@ -901,7 +899,7 @@ impl RelayerThread { debug!("Relayer: successfully started new tenure."); } Err(e) => { - error!("Relayer: Failed to start new tenure: {:?}", e); + error!("Relayer: Failed to start new tenure: {e:?}"); } }, MinerDirective::ContinueTenure { new_burn_view } => { @@ -910,7 +908,7 @@ impl RelayerThread { debug!("Relayer: successfully handled continue tenure."); } Err(e) => { - error!("Relayer: Failed to continue tenure: {:?}", e); + error!("Relayer: Failed to continue tenure: {e:?}"); return false; } } @@ -920,7 +918,7 @@ impl RelayerThread { debug!("Relayer: successfully stopped tenure."); } Err(e) => { - error!("Relayer: Failed to stop tenure: {:?}", e); + error!("Relayer: Failed to stop tenure: {e:?}"); } }, } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 9b15b83afb..49000abc2c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6589,3 +6589,283 @@ fn check_block_info_rewards() { run_loop_thread.join().unwrap(); } + +/// Test Nakamoto mock miner by booting a follower node +#[test] +#[ignore] +fn mock_mining() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + // Wait one block to confirm the VRF register, wait until a block commit is submitted + next_block_and(&mut btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }) + .unwrap(); + + let mut follower_conf = naka_conf.clone(); + follower_conf.node.mock_mining = true; + follower_conf.events_observers.clear(); + follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir); + follower_conf.node.seed = vec![0x01; 32]; + follower_conf.node.local_peer_seed = vec![0x02; 32]; + + let mut rng = rand::thread_rng(); + let mut buf = [0u8; 8]; + rng.fill_bytes(&mut buf); + + let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + + let localhost = "127.0.0.1"; + follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); + follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); + follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + + let node_info = get_chain_info(&naka_conf); + follower_conf.node.add_bootstrap_node( + &format!( + "{}@{}", + &node_info.node_public_key.unwrap(), + naka_conf.node.p2p_bind + ), + CHAIN_ID_TESTNET, + PEER_VERSION_TESTNET, + ); + + let mut follower_run_loop = boot_nakamoto::BootRunLoop::new(follower_conf.clone()).unwrap(); + let follower_run_loop_stopper = follower_run_loop.get_termination_switch(); + let follower_coord_channel = follower_run_loop.coordinator_channels(); + + let Counters { + naka_mined_blocks: follower_naka_mined_blocks, + .. + } = follower_run_loop.counters(); + + let mock_mining_blocks_start = follower_naka_mined_blocks.load(Ordering::SeqCst); + + debug!( + "Booting follower-thread ({},{})", + &follower_conf.node.p2p_bind, &follower_conf.node.rpc_bind + ); + debug!( + "Booting follower-thread: neighbors = {:?}", + &follower_conf.node.bootstrap_node + ); + + // spawn a follower thread + let follower_thread = thread::Builder::new() + .name("follower-thread".into()) + .spawn(move || follower_run_loop.start(None, 0)) + .unwrap(); + + debug!("Booted follower-thread"); + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + let follower_naka_mined_blocks_before = follower_naka_mined_blocks.load(Ordering::SeqCst); + + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let mut last_tip = BlockHeaderHash([0x00; 32]); + let mut last_tip_height = 0; + + // mine the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra block + let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_ne!(info.stacks_tip, last_tip); + assert_ne!(info.stacks_tip_height, last_tip_height); + + last_tip = info.stacks_tip; + last_tip_height = info.stacks_tip_height; + } + + let mock_miner_timeout = Instant::now(); + while follower_naka_mined_blocks.load(Ordering::SeqCst) <= follower_naka_mined_blocks_before + { + if mock_miner_timeout.elapsed() >= Duration::from_secs(30) { + panic!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + ); + } + thread::sleep(Duration::from_millis(100)); + } + + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + let expected_blocks_mined = (inter_blocks_per_tenure + 1) * tenure_count; + let expected_tip_height = block_height_pre_3_0 + expected_blocks_mined; + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert_eq!( + tip.stacks_block_height, expected_tip_height, + "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" + ); + + // Check follower's mock miner + let mock_mining_blocks_end = follower_naka_mined_blocks.load(Ordering::SeqCst); + let blocks_mock_mined = mock_mining_blocks_end - mock_mining_blocks_start; + assert_eq!( + blocks_mock_mined, tenure_count, + "Should have mock mined `tenure_count` nakamoto blocks" + ); + + // wait for follower to reach the chain tip + loop { + sleep_ms(1000); + let follower_node_info = get_chain_info(&follower_conf); + + info!( + "Follower tip is now {}/{}", + &follower_node_info.stacks_tip_consensus_hash, &follower_node_info.stacks_tip + ); + if follower_node_info.stacks_tip_consensus_hash == tip.consensus_hash + && follower_node_info.stacks_tip == tip.anchored_header.block_hash() + { + break; + } + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + follower_coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + follower_run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + follower_thread.join().unwrap(); +} From c50dff82555df388279fc1accf87d4eef9269b02 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 31 Jul 2024 11:51:44 -0500 Subject: [PATCH 0603/1400] chore: use 231 for default epoch25/epoch30 transition in int tests, customize for mock_signing --- .../src/tests/nakamoto_integrations.rs | 4 ++-- testnet/stacks-node/src/tests/signer/v0.rs | 19 +++++++++++++++++-- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3ab7248db6..c2fd6245f2 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -167,13 +167,13 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch25, start_height: 201, - end_height: 251, + end_height: 231, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_2_5 }, StacksEpoch { epoch_id: StacksEpochId::Epoch30, - start_height: 251, + start_height: 231, end_height: STACKS_EPOCH_MAX, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 17c3fdf814..98e2d64b55 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -13,8 +13,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::str::FromStr; use std::ops::Add; +use std::str::FromStr; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use std::{env, thread}; @@ -1566,6 +1566,8 @@ fn empty_sortition() { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; }, + |_| {}, + &[], ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let short_timeout = Duration::from_secs(20); @@ -1709,10 +1711,23 @@ fn mock_sign_epoch_25() { let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new( + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], Some(Duration::from_secs(5)), + |_| {}, + |node_config| { + let epochs = node_config.burnchain.epochs.as_mut().unwrap(); + for epoch in epochs.iter_mut() { + if epoch.epoch_id == StacksEpochId::Epoch25 { + epoch.end_height = 251; + } + if epoch.epoch_id == StacksEpochId::Epoch30 { + epoch.start_height = 251; + } + } + }, + &[], ); let epochs = signer_test From 7f6e541cb9dce67ba0111c03dcdb950781ef9188 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 31 Jul 2024 14:24:39 -0400 Subject: [PATCH 0604/1400] Add some logs to mock sign checks Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index ad2a459b96..b645b46a73 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -169,12 +169,19 @@ impl SignerTrait for Signer { ); } *sortition_state = None; - if let Ok(StacksEpochId::Epoch25) = stacks_client.get_node_epoch() { - if self.reward_cycle == current_reward_cycle { - // We are in epoch 2.5, so we should mock mine to prove we are still alive. - self.mock_sign(*burn_height, stacks_client); - } + let Ok(epoch) = stacks_client.get_node_epoch() else { + warn!("{self}: Failed to determine node epoch. Cannot mock sign."); + return; }; + debug!("{self}: Epoch 2.5 signer received a new burn block event."; + "burn_height" => burn_height, + "current_reward_cycle" => current_reward_cycle, + "epoch" => ?epoch + ); + if epoch == StacksEpochId::Epoch25 && self.reward_cycle == current_reward_cycle { + // We are in epoch 2.5, so we should mock mine to prove we are still alive. + self.mock_sign(*burn_height, stacks_client); + } } } } From 5085bdad6d8b5611783093f426e8f41f159f7c09 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 31 Jul 2024 15:19:59 -0400 Subject: [PATCH 0605/1400] Add error log when failing to determine node epoch in mock sign Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b645b46a73..574c4d8df9 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -169,9 +169,12 @@ impl SignerTrait for Signer { ); } *sortition_state = None; - let Ok(epoch) = stacks_client.get_node_epoch() else { - warn!("{self}: Failed to determine node epoch. Cannot mock sign."); - return; + let epoch = match stacks_client.get_node_epoch() { + Ok(epoch) => epoch, + Err(e) => { + warn!("{self}: Failed to determine node epoch. Cannot mock sign: {e}"); + return; + } }; debug!("{self}: Epoch 2.5 signer received a new burn block event."; "burn_height" => burn_height, From bf30fa62e8257d8c702788761cd7c4ceefef02a9 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 1 Aug 2024 08:24:59 -0400 Subject: [PATCH 0606/1400] Deserialize only the necessary info from peer info to be more resilient to peer info updates in the signer Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 161 ++++++++++++++------- stacks-signer/src/cli.rs | 4 +- stacks-signer/src/client/stacks_client.rs | 19 ++- stacks-signer/src/v0/signer.rs | 14 +- testnet/stacks-node/src/tests/signer/v0.rs | 3 +- 5 files changed, 131 insertions(+), 70 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index f16dd6d4ed..7d411f89b5 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -239,57 +239,41 @@ pub trait StacksMessageCodecExtensions: Sized { fn inner_consensus_deserialize(fd: &mut R) -> Result; } -/// A snapshot of the signer view of the stacks node to be used for mock signing. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct MockSignData { - /// The stacks tip consensus hash at the time of the mock signature +/// The signer relevant peer information from the stacks node +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct PeerInfo { + /// The burn block height + pub burn_block_height: u64, + /// The consensus hash of the stacks tip pub stacks_tip_consensus_hash: ConsensusHash, - /// The stacks tip header hash at the time of the mock signature + /// The stacks tip pub stacks_tip: BlockHeaderHash, + /// The stacks tip height + pub stacks_tip_height: u64, + /// The pox consensus + pub pox_consensus: ConsensusHash, /// The server version pub server_version: String, - /// The burn block height that triggered the mock signature - pub burn_block_height: u64, - /// The burn block height of the peer view at the time of the mock signature. Note - /// that this may be different from the burn_block_height if the peer view is stale. - pub peer_burn_block_height: u64, - /// The POX consensus hash at the time of the mock signature - pub pox_consensus: ConsensusHash, - /// The chain id for the mock signature - pub chain_id: u32, -} - -impl MockSignData { - fn new(peer_view: RPCPeerInfoData, burn_block_height: u64, chain_id: u32) -> Self { - Self { - stacks_tip_consensus_hash: peer_view.stacks_tip_consensus_hash, - stacks_tip: peer_view.stacks_tip, - server_version: peer_view.server_version, - burn_block_height, - peer_burn_block_height: peer_view.burn_block_height, - pox_consensus: peer_view.pox_consensus, - chain_id, - } - } } -impl StacksMessageCodec for MockSignData { +impl StacksMessageCodec for PeerInfo { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.burn_block_height)?; write_next(fd, self.stacks_tip_consensus_hash.as_bytes())?; write_next(fd, &self.stacks_tip)?; + write_next(fd, &self.stacks_tip_height)?; write_next(fd, &(self.server_version.as_bytes().len() as u8))?; fd.write_all(self.server_version.as_bytes()) .map_err(CodecError::WriteError)?; - write_next(fd, &self.burn_block_height)?; - write_next(fd, &self.peer_burn_block_height)?; write_next(fd, &self.pox_consensus)?; - write_next(fd, &self.chain_id)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { + let burn_block_height = read_next::(fd)?; let stacks_tip_consensus_hash = read_next::(fd)?; let stacks_tip = read_next::(fd)?; + let stacks_tip_height = read_next::(fd)?; let len_byte: u8 = read_next(fd)?; let mut bytes = vec![0u8; len_byte as usize]; fd.read_exact(&mut bytes).map_err(CodecError::ReadError)?; @@ -299,17 +283,44 @@ impl StacksMessageCodec for MockSignData { "Failed to parse server version name: could not contruct from utf8".to_string(), ) })?; - let burn_block_height = read_next::(fd)?; - let peer_burn_block_height = read_next::(fd)?; let pox_consensus = read_next::(fd)?; - let chain_id = read_next::(fd)?; Ok(Self { + burn_block_height, stacks_tip_consensus_hash, stacks_tip, + stacks_tip_height, server_version, - burn_block_height, - peer_burn_block_height, pox_consensus, + }) + } +} + +/// A snapshot of the signer view of the stacks node to be used for mock signing. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MockSignData { + /// The view of the stacks node peer information at the time of the mock signature + pub peer_info: PeerInfo, + /// The burn block height of the event that triggered the mock signature + pub event_burn_block_height: u64, + /// The chain id for the mock signature + pub chain_id: u32, +} + +impl StacksMessageCodec for MockSignData { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + self.peer_info.consensus_serialize(fd)?; + write_next(fd, &self.event_burn_block_height)?; + write_next(fd, &self.chain_id)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let peer_info = PeerInfo::consensus_deserialize(fd)?; + let event_burn_block_height = read_next::(fd)?; + let chain_id = read_next::(fd)?; + Ok(Self { + peer_info, + event_burn_block_height, chain_id, }) } @@ -326,16 +337,21 @@ pub struct MockSignature { } impl MockSignature { - /// Create a new mock sign data struct from the provided peer info, burn block height, chain id, and private key. + /// Create a new mock sign data struct from the provided event burn block height, peer info, chain id, and private key. + /// Note that peer burn block height and event burn block height may not be the same if the peer view is stale. pub fn new( - peer_view: RPCPeerInfoData, - burn_block_height: u64, + event_burn_block_height: u64, + peer_info: PeerInfo, chain_id: u32, stacks_private_key: &StacksPrivateKey, ) -> Self { let mut sig = Self { signature: MessageSignature::empty(), - sign_data: MockSignData::new(peer_view, burn_block_height, chain_id), + sign_data: MockSignData { + peer_info, + event_burn_block_height, + chain_id, + }, }; sig.sign(stacks_private_key) .expect("Failed to sign MockSignature"); @@ -350,25 +366,39 @@ impl MockSignature { TupleData::from_data(vec![ ( "stacks-tip-consensus-hash".into(), - Value::buff_from(self.sign_data.stacks_tip_consensus_hash.as_bytes().into()) - .unwrap(), + Value::buff_from( + self.sign_data + .peer_info + .stacks_tip_consensus_hash + .as_bytes() + .into(), + ) + .unwrap(), ), ( "stacks-tip".into(), - Value::buff_from(self.sign_data.stacks_tip.as_bytes().into()).unwrap(), + Value::buff_from(self.sign_data.peer_info.stacks_tip.as_bytes().into()) + .unwrap(), + ), + ( + "stacks-tip-height".into(), + Value::UInt(self.sign_data.peer_info.stacks_tip_height.into()), ), ( "server-version".into(), - Value::string_ascii_from_bytes(self.sign_data.server_version.clone().into()) - .unwrap(), + Value::string_ascii_from_bytes( + self.sign_data.peer_info.server_version.clone().into(), + ) + .unwrap(), ), ( - "burn-block-height".into(), - Value::UInt(self.sign_data.burn_block_height.into()), + "event-burn-block-height".into(), + Value::UInt(self.sign_data.event_burn_block_height.into()), ), ( "pox-consensus".into(), - Value::buff_from(self.sign_data.pox_consensus.as_bytes().into()).unwrap(), + Value::buff_from(self.sign_data.peer_info.pox_consensus.as_bytes().into()) + .unwrap(), ), ]) .expect("Error creating signature hash"), @@ -822,23 +852,33 @@ mod test { assert_eq!(signer_message, deserialized_signer_message); } - fn random_mock_sign_data() -> MockSignData { + fn random_peer_data() -> PeerInfo { + let burn_block_height = thread_rng().next_u64(); let stacks_tip_consensus_byte: u8 = thread_rng().gen(); let stacks_tip_byte: u8 = thread_rng().gen(); + let stacks_tip_height = thread_rng().next_u64(); + let server_version = "0.0.0".to_string(); let pox_consensus_byte: u8 = thread_rng().gen(); + PeerInfo { + burn_block_height, + stacks_tip_consensus_hash: ConsensusHash([stacks_tip_consensus_byte; 20]), + stacks_tip: BlockHeaderHash([stacks_tip_byte; 32]), + stacks_tip_height, + server_version, + pox_consensus: ConsensusHash([pox_consensus_byte; 20]), + } + } + fn random_mock_sign_data() -> MockSignData { let chain_byte: u8 = thread_rng().gen_range(0..=1); let chain_id = if chain_byte == 1 { CHAIN_ID_TESTNET } else { CHAIN_ID_MAINNET }; + let peer_info = random_peer_data(); MockSignData { - stacks_tip_consensus_hash: ConsensusHash([stacks_tip_consensus_byte; 20]), - stacks_tip: BlockHeaderHash([stacks_tip_byte; 32]), - server_version: "0.0.0".to_string(), - burn_block_height: thread_rng().next_u64(), - peer_burn_block_height: thread_rng().next_u64(), - pox_consensus: ConsensusHash([pox_consensus_byte; 20]), + peer_info, + event_burn_block_height: thread_rng().next_u64(), chain_id, } } @@ -871,6 +911,15 @@ mod test { .expect("Failed to verify MockSignature")); } + #[test] + fn serde_peer_data() { + let peer_data = random_peer_data(); + let serialized_data = peer_data.serialize_to_vec(); + let deserialized_data = read_next::(&mut &serialized_data[..]) + .expect("Failed to deserialize PeerInfo"); + assert_eq!(peer_data, deserialized_data); + } + #[test] fn serde_mock_signature() { let mock_signature = MockSignature { diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index d3e998e15c..74e2cd2344 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -44,9 +44,9 @@ extern crate alloc; const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH"); const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT"); #[cfg(debug_assertions)] -const BUILD_TYPE: &'static str = "debug"; +const BUILD_TYPE: &str = "debug"; #[cfg(not(debug_assertions))] -const BUILD_TYPE: &'static str = "release"; +const BUILD_TYPE: &str = "release"; lazy_static! { static ref VERSION_STRING: String = { diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 8c63e181f7..b6337364db 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -31,7 +31,6 @@ use blockstack_lib::net::api::get_tenures_fork_info::{ TenureForkingInfo, RPC_TENURE_FORKING_INFO_PATH, }; use blockstack_lib::net::api::getaccount::AccountEntryResponse; -use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::RPCPoxInfoData; use blockstack_lib::net::api::getsortition::{SortitionInfo, RPC_SORTITION_INFO_PATH}; use blockstack_lib::net::api::getstackers::GetStackersResponse; @@ -43,6 +42,7 @@ use blockstack_lib::util_lib::boot::{boot_code_addr, boot_code_id}; use clarity::util::hash::to_hex; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; +use libsigner::v0::messages::PeerInfo; use reqwest::header::AUTHORIZATION; use serde_json::json; use slog::{slog_debug, slog_warn}; @@ -463,7 +463,7 @@ impl StacksClient { } /// Get the current peer info data from the stacks node - pub fn get_peer_info(&self) -> Result { + pub fn get_peer_info(&self) -> Result { debug!("Getting stacks node info..."); let timer = crate::monitoring::new_rpc_call_timer(&self.core_info_path(), &self.http_origin); @@ -478,7 +478,7 @@ impl StacksClient { if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } - let peer_info_data = response.json::()?; + let peer_info_data = response.json::()?; Ok(peer_info_data) } @@ -1387,7 +1387,18 @@ mod tests { let (response, peer_info) = build_get_peer_info_response(None, None); let h = spawn(move || mock.client.get_peer_info()); write_response(mock.server, response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), peer_info); + let reduced_peer_info = h.join().unwrap().unwrap(); + assert_eq!( + reduced_peer_info.burn_block_height, + peer_info.burn_block_height + ); + assert_eq!(reduced_peer_info.pox_consensus, peer_info.pox_consensus); + assert_eq!( + reduced_peer_info.stacks_tip_consensus_hash, + peer_info.stacks_tip_consensus_hash + ); + assert_eq!(reduced_peer_info.stacks_tip, peer_info.stacks_tip); + assert_eq!(reduced_peer_info.server_version, peer_info.server_version); } #[test] diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 574c4d8df9..94e8fa0499 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -484,7 +484,7 @@ impl Signer { /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, burn_block_height: u64, stacks_client: &StacksClient) { - let Ok(peer_view) = stacks_client.get_peer_info() else { + let Ok(peer_info) = stacks_client.get_peer_info() else { warn!("{self}: Failed to get peer info. Cannot mock sign."); return; }; @@ -494,15 +494,15 @@ impl Signer { CHAIN_ID_TESTNET }; info!("Mock signing for burn block {burn_block_height:?}"; - "stacks_tip_consensus_hash" => ?peer_view.stacks_tip_consensus_hash.clone(), - "stacks_tip" => ?peer_view.stacks_tip.clone(), - "peer_burn_block_height" => peer_view.burn_block_height, - "pox_consensus" => ?peer_view.pox_consensus.clone(), - "server_version" => peer_view.server_version.clone(), + "stacks_tip_consensus_hash" => ?peer_info.stacks_tip_consensus_hash.clone(), + "stacks_tip" => ?peer_info.stacks_tip.clone(), + "peer_burn_block_height" => peer_info.burn_block_height, + "pox_consensus" => ?peer_info.pox_consensus.clone(), + "server_version" => peer_info.server_version.clone(), "chain_id" => chain_id ); let mock_signature = - MockSignature::new(peer_view, burn_block_height, chain_id, &self.private_key); + MockSignature::new(burn_block_height, peer_info, chain_id, &self.private_key); let message = SignerMessage::MockSignature(mock_signature); if let Err(e) = self .stackerdb diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e1f57097e9..edd9300ff1 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1663,7 +1663,8 @@ fn mock_sign_epoch_25() { .expect("Failed to get message from stackerdb"); for message in messages { if let SignerMessage::MockSignature(mock_signature) = message { - if mock_signature.sign_data.burn_block_height == current_burn_block_height { + if mock_signature.sign_data.event_burn_block_height == current_burn_block_height + { if !mock_signatures.contains(&mock_signature) { mock_signatures.push(mock_signature); } From d65182512cc28584d83a8a0fcd60dcaca1e80ba0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 31 Jul 2024 14:13:18 -0500 Subject: [PATCH 0607/1400] test: move BTC commit stall to counters() --- .../stacks-node/src/nakamoto_node/relayer.rs | 15 ++++++----- testnet/stacks-node/src/run_loop/neon.rs | 14 +++++++++++ .../src/tests/nakamoto_integrations.rs | 11 ++++---- testnet/stacks-node/src/tests/signer/mod.rs | 5 +++- testnet/stacks-node/src/tests/signer/v0.rs | 25 ++++++++++++++++--- 5 files changed, 54 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 12f7dbc9e9..1234ad20cc 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -66,11 +66,6 @@ use crate::run_loop::nakamoto::{Globals, RunLoop}; use crate::run_loop::RegisteredKey; use crate::BitcoinRegtestController; -#[cfg(test)] -lazy_static::lazy_static! { - pub static ref TEST_SKIP_COMMIT_OP: std::sync::Mutex> = std::sync::Mutex::new(None); -} - /// Command types for the Nakamoto relayer thread, issued to it by other threads pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and @@ -937,7 +932,15 @@ impl RelayerThread { let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?; #[cfg(test)] { - if TEST_SKIP_COMMIT_OP.lock().unwrap().unwrap_or(false) { + if self + .globals + .counters + .naka_skip_commit_op + .0 + .lock() + .unwrap() + .unwrap_or(false) + { warn!("Relayer: not submitting block-commit to bitcoin network due to test directive."); return Ok(()); } diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index d4aea34f0e..663c14e27b 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -82,6 +82,17 @@ impl std::ops::Deref for RunLoopCounter { } } +#[cfg(test)] +#[derive(Clone)] +pub struct TestFlag(pub Arc>>); + +#[cfg(test)] +impl Default for TestFlag { + fn default() -> Self { + Self(Arc::new(std::sync::Mutex::new(None))) + } +} + #[derive(Clone, Default)] pub struct Counters { pub blocks_processed: RunLoopCounter, @@ -95,6 +106,9 @@ pub struct Counters { pub naka_mined_blocks: RunLoopCounter, pub naka_proposed_blocks: RunLoopCounter, pub naka_mined_tenures: RunLoopCounter, + + #[cfg(test)] + pub naka_skip_commit_op: TestFlag, } impl Counters { diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c2fd6245f2..f6056f1b6f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -95,7 +95,6 @@ use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; -use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; @@ -3723,6 +3722,7 @@ fn forked_tenure_is_ignored() { naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, naka_mined_blocks: mined_blocks, + naka_skip_commit_op: test_skip_commit_op, .. } = run_loop.counters(); @@ -3791,7 +3791,7 @@ fn forked_tenure_is_ignored() { info!("Commit op is submitted; unpause tenure B's block"); // Unpause the broadcast of Tenure B's block, do not submit commits. - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + test_skip_commit_op.0.lock().unwrap().replace(true); TEST_BROADCAST_STALL.lock().unwrap().replace(false); // Wait for a stacks block to be broadcasted @@ -3816,7 +3816,7 @@ fn forked_tenure_is_ignored() { let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); next_block_and(&mut btc_regtest_controller, 60, || { - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(false); + test_skip_commit_op.0.lock().unwrap().replace(false); let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); Ok(commits_count > commits_before && blocks_count > blocks_before) @@ -5478,6 +5478,7 @@ fn continue_tenure_extend() { blocks_processed, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, + naka_skip_commit_op: test_skip_commit_op, .. } = run_loop.counters(); @@ -5549,7 +5550,7 @@ fn continue_tenure_extend() { ); info!("Pausing commit ops to trigger a tenure extend."); - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + test_skip_commit_op.0.lock().unwrap().replace(true); next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); @@ -5604,7 +5605,7 @@ fn continue_tenure_extend() { ); info!("Resuming commit ops to mine regular tenures."); - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(false); + test_skip_commit_op.0.lock().unwrap().replace(false); // Mine 15 more regular nakamoto tenures for _i in 0..15 { diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 08c4004ed0..78ed2e7c7a 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -57,7 +57,7 @@ use wsts::state_machine::PublicKeys; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use crate::event_dispatcher::MinedNakamotoBlockEvent; -use crate::neon::Counters; +use crate::neon::{Counters, TestFlag}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::{ @@ -81,6 +81,7 @@ pub struct RunningNodes { pub blocks_processed: Arc, pub nakamoto_blocks_proposed: Arc, pub nakamoto_blocks_mined: Arc, + pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, } @@ -679,6 +680,7 @@ fn setup_stx_btc_node ()>( naka_submitted_commits: commits_submitted, naka_proposed_blocks: naka_blocks_proposed, naka_mined_blocks: naka_blocks_mined, + naka_skip_commit_op: nakamoto_test_skip_commit_op, .. } = run_loop.counters(); @@ -711,6 +713,7 @@ fn setup_stx_btc_node ()>( blocks_processed: blocks_processed.0, nakamoto_blocks_proposed: naka_blocks_proposed.0, nakamoto_blocks_mined: naka_blocks_mined.0, + nakamoto_test_skip_commit_op, coord_channel, conf: naka_conf, } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 98e2d64b55..13bcd575f5 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -52,7 +52,6 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; -use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, POX_4_DEFAULT_STACKER_STX_AMT, @@ -859,7 +858,13 @@ fn forked_tenure_testing( info!("Commit op is submitted; unpause tenure B's block"); // Unpause the broadcast of Tenure B's block, do not submit commits. - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); TEST_BROADCAST_STALL.lock().unwrap().replace(false); // Wait for a stacks block to be broadcasted @@ -892,7 +897,13 @@ fn forked_tenure_testing( &mut signer_test.running_nodes.btc_regtest_controller, 60, || { - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(false); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(false); let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = if expect_tenure_c { mined_blocks.load(Ordering::SeqCst) @@ -1624,7 +1635,13 @@ fn empty_sortition() { TEST_BROADCAST_STALL.lock().unwrap().replace(true); info!("Pausing commit op to prevent tenure C from starting..."); - TEST_SKIP_COMMIT_OP.lock().unwrap().replace(true); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); let blocks_after = signer_test .running_nodes From ac99ec9d58c8bfec594a729ed6723af70adf4192 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 31 Jul 2024 19:51:38 -0500 Subject: [PATCH 0608/1400] test: add a test with 2 miners, one of whom tries to fork the other and is prevented by the signer set --- .../src/tests/nakamoto_integrations.rs | 15 + testnet/stacks-node/src/tests/signer/mod.rs | 3 + testnet/stacks-node/src/tests/signer/v0.rs | 326 +++++++++++++++++- 3 files changed, 340 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f6056f1b6f..31dfa3e414 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -625,6 +625,21 @@ where Ok(()) } +pub fn wait_for(timeout_secs: u64, mut check: F) -> Result<(), String> +where + F: FnMut() -> Result, +{ + let start = Instant::now(); + while !check()? { + if start.elapsed() > Duration::from_secs(timeout_secs) { + error!("Timed out waiting for check to process"); + return Err("Timed out".into()); + } + thread::sleep(Duration::from_millis(100)); + } + Ok(()) +} + /// Mine a bitcoin block, and wait until: /// (1) a new block has been processed by the coordinator pub fn next_block_and_process_new_stacks_block( diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 78ed2e7c7a..7fe508407b 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -92,6 +92,8 @@ pub struct SignerTest { pub running_nodes: RunningNodes, // The spawned signers and their threads pub spawned_signers: Vec, + // The spawned signers and their threads + pub signer_configs: Vec, // the private keys of the signers pub signer_stacks_private_keys: Vec, // link to the stacks node @@ -209,6 +211,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest. +use std::collections::HashMap; use std::ops::Add; use std::str::FromStr; use std::sync::atomic::Ordering; @@ -26,7 +27,10 @@ use libsigner::v0::messages::{ }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use stacks::address::AddressHashMode; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoBlockVote, NakamotoChainState, +}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; @@ -34,9 +38,11 @@ use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; -use stacks::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; +use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks::util::get_epoch_time_secs; +use stacks::util::hash::Sha512Trunc256Sum; +use stacks::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, @@ -45,6 +51,7 @@ use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::runloop::State; +use stacks_signer::signerdb::{BlockInfo, SignerDb}; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -52,9 +59,11 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; +use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, wait_for, + POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -1264,6 +1273,315 @@ fn multiple_miners() { signer_test.shutdown(); } +#[test] +#[ignore] +fn miner_forking() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let mut node_2_listeners = Vec::new(); + + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + // we're deliberately stalling proposals: don't punish this in this test! + signer_config.block_proposal_timeout = Duration::from_secs(240); + // make sure that we don't allow forking due to burn block timing + signer_config.first_proposal_burn_block_timing = Duration::from_secs(1); + }, + |config| { + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let Counters { + naka_skip_commit_op, + naka_submitted_commits: second_miner_commits_submitted, + .. + } = run_loop_2.counters(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + naka_skip_commit_op.0.lock().unwrap().replace(false); + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let mut sortitions_seen = Vec::new(); + let run_sortition = || { + info!("Pausing stacks block proposal to force an empty tenure commit from RL2"); + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + + let rl2_commits_before = second_miner_commits_submitted.load(Ordering::SeqCst); + + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + naka_skip_commit_op.0.lock().unwrap().replace(false); + + // wait until a commit is submitted by run_loop_2 + wait_for(60, || { + let commits_count = second_miner_commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > rl2_commits_before) + }) + .unwrap(); + + // fetch the current sortition info + let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // block commits from RL2 -- this will block until the start of the next iteration + // in this loop. + naka_skip_commit_op.0.lock().unwrap().replace(true); + // ensure RL1 performs an RBF after unblock block broadcast + let rl1_commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + + // unblock block mining + let blocks_len = test_observer::get_blocks().len(); + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + // wait for a block to be processed (or timeout!) + if let Err(_) = wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)) { + info!("Timeout waiting for a block process: assuming this is because RL2 attempted to fork-- will check at end of test"); + return (sort_tip, false); + } + + info!("Nakamoto block processed, waiting for commit from RL1"); + + // wait for a commit from RL1 + wait_for(60, || { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits_count > rl1_commits_before) + }) + .unwrap(); + + // sleep for 1 second to prevent the block timing from allowing a fork by the signer set + thread::sleep(Duration::from_secs(1)); + (sort_tip, true) + }; + + let mut won_by_miner_2_but_no_tenure = false; + let mut won_by_miner_1_after_tenureless_miner_2 = false; + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + // miner 2 is expected to be valid iff: + // (a) its the first nakamoto tenure + // (b) the prior sortition didn't have a tenure (because by this time RL2 will have up-to-date block processing) + let mut expects_miner_2_to_be_valid = true; + + // due to the random nature of mining sortitions, the way this test is structured + // is that keeps track of two scenarios that we want to cover, and once enough sortitions + // have been produced to cover those scenarios, it stops and checks the results at the end. + while !(won_by_miner_2_but_no_tenure && won_by_miner_1_after_tenureless_miner_2) { + if sortitions_seen.len() >= 20 { + panic!("Produced 20 sortitions, but didn't cover the test scenarios, aborting"); + } + let (sortition_data, had_tenure) = run_sortition(); + sortitions_seen.push((sortition_data.clone(), had_tenure)); + + let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() + .into_iter() + .filter_map(|block_json| { + if block_json + .as_object() + .unwrap() + .get("miner_signature") + .is_none() + { + return None; + } + let block_id = StacksBlockId::from_hex( + &block_json + .as_object() + .unwrap() + .get("index_block_hash") + .unwrap() + .as_str() + .unwrap()[2..], + ) + .unwrap(); + Some(block_id) + }) + .collect(); + + let (chainstate, _) = StacksChainState::open( + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let nakamoto_headers: HashMap<_, _> = nakamoto_block_ids + .into_iter() + .map(|block_id| { + let header_info = NakamotoChainState::get_block_header(chainstate.db(), &block_id) + .unwrap() + .unwrap(); + (header_info.consensus_hash.clone(), header_info) + }) + .collect(); + + if had_tenure { + let header_info = nakamoto_headers + .get(&sortition_data.consensus_hash) + .unwrap(); + let header = header_info + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .clone(); + let mined_by_miner_1 = miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap(); + + info!("Block check"; + "height" => header.chain_length, + "consensus_hash" => %header.consensus_hash, + "block_hash" => %header.block_hash(), + "stacks_block_id" => %header.block_id(), + "mined_by_miner_1?" => mined_by_miner_1, + "expects_miner_2_to_be_valid?" => expects_miner_2_to_be_valid); + if !mined_by_miner_1 { + assert!(expects_miner_2_to_be_valid, "If a block was produced by miner 2, we should have expected miner 2 to be valid"); + } else if won_by_miner_2_but_no_tenure { + // the tenure was won by miner 1, they produced a block, and this follows a tenure that miner 2 won but couldn't + // mine during because they tried to fork. + won_by_miner_1_after_tenureless_miner_2 = true; + } + + // even if it was mined by miner 2, their next block commit should be invalid! + expects_miner_2_to_be_valid = false; + } else { + info!("Sortition without tenure"; "expects_miner_2_to_be_valid?" => expects_miner_2_to_be_valid); + assert!(nakamoto_headers + .get(&sortition_data.consensus_hash) + .is_none()); + assert!(!expects_miner_2_to_be_valid, "If no blocks were produced in the tenure, it should be because miner 2 committed to a fork"); + won_by_miner_2_but_no_tenure = true; + expects_miner_2_to_be_valid = true; + } + } + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_1_height, peer_2_height); + + let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() + .into_iter() + .filter_map(|block_json| { + block_json + .as_object() + .unwrap() + .get("miner_signature") + .map(|x| x.as_str().unwrap().to_string()) + }) + .collect(); + + assert_eq!( + peer_1_height - pre_nakamoto_peer_1_height, + u64::try_from(nakamoto_block_ids.len()).unwrap(), + "There should be no forks in this test" + ); + + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks the behavior at the end of a tenure. Specifically: From a567db23c84adce33205b92e3e3c65dd87f7fa05 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 1 Aug 2024 10:26:21 -0500 Subject: [PATCH 0609/1400] tests: improve forking tests --- .../src/tests/nakamoto_integrations.rs | 33 ++-- testnet/stacks-node/src/tests/signer/v0.rs | 178 +++++++++++------- 2 files changed, 133 insertions(+), 78 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 31dfa3e414..542ff7511c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1641,6 +1641,10 @@ fn multiple_miners() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.node.local_peer_seed = vec![1, 1, 1, 1]; + naka_conf.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + let node_2_rpc = 51026; + let node_2_p2p = 51025; let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); @@ -1665,7 +1669,11 @@ fn multiple_miners() { let stacker_sk = setup_stacker(&mut naka_conf); let mut conf_node_2 = naka_conf.clone(); - set_random_binds(&mut conf_node_2); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); conf_node_2.node.seed = vec![2, 2, 2, 2]; conf_node_2.burnchain.local_mining_public_key = Some( Keychain::default(conf_node_2.node.seed.clone()) @@ -1674,6 +1682,8 @@ fn multiple_miners() { ); conf_node_2.node.local_peer_seed = vec![2, 2, 2, 2]; conf_node_2.node.miner = true; + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.events_observers.clear(); let node_1_sk = Secp256k1PrivateKey::from_seed(&naka_conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); @@ -1813,16 +1823,14 @@ fn multiple_miners() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); submit_tx(&http_origin, &transfer_tx); - loop { + wait_for(20, || { let blocks_processed = coord_channel .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); let info = get_chain_info_result(&naka_conf).unwrap(); assert_ne!(info.stacks_tip, last_tip); @@ -1832,13 +1840,10 @@ fn multiple_miners() { last_tip_height = info.stacks_tip_height; } - let start_time = Instant::now(); - while commits_submitted.load(Ordering::SeqCst) <= commits_before { - if start_time.elapsed() >= Duration::from_secs(20) { - panic!("Timed out waiting for block-commit"); - } - thread::sleep(Duration::from_millis(100)); - } + wait_for(20, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 27b1df7450..c9b5cf4854 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::ops::Add; use std::str::FromStr; use std::sync::atomic::Ordering; @@ -28,9 +28,7 @@ use libsigner::v0::messages::{ use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use stacks::address::AddressHashMode; use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoBlockVote, NakamotoChainState, -}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; @@ -40,9 +38,7 @@ use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::get_epoch_time_secs; -use stacks::util::hash::Sha512Trunc256Sum; -use stacks::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, @@ -51,7 +47,6 @@ use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::runloop::State; -use stacks_signer::signerdb::{BlockInfo, SignerDb}; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -70,7 +65,7 @@ use crate::tests::neon_integrations::{ test_observer, }; use crate::tests::{self, make_stacks_transfer}; -use crate::{nakamoto_node, BurnchainController, Keychain}; +use crate::{nakamoto_node, BurnchainController, Config, Keychain}; impl SignerTest { /// Run the test until the first epoch 2.5 reward cycle. @@ -1197,6 +1192,7 @@ fn multiple_miners() { config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { @@ -1225,6 +1221,7 @@ fn multiple_miners() { conf_node_2.node.seed = btc_miner_2_seed.clone(); conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); conf_node_2.node.miner = true; conf_node_2.events_observers.clear(); conf_node_2.events_observers.extend(node_2_listeners); @@ -1252,9 +1249,59 @@ fn multiple_miners() { info!("------------------------- Reached Epoch 3.0 -------------------------"); - let nakamoto_tenures = 20; - for _i in 0..nakamoto_tenures { - let _mined_block = signer_test.mine_block_wait_on_processing(Duration::from_secs(30)); + let max_nakamoto_tenures = 20; + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); + let mut btc_blocks_mined = 0; + let mut miner_1_tenures = 0; + let mut miner_2_tenures = 0; + while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { + if btc_blocks_mined > max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + signer_test.mine_block_wait_on_processing(Duration::from_secs(30)); + btc_blocks_mined += 1; + let blocks = get_nakamoto_headers(&conf); + // for this test, there should be one block per tenure + let consensus_hash_set: HashSet<_> = blocks + .iter() + .map(|header| header.consensus_hash.clone()) + .collect(); + assert_eq!( + consensus_hash_set.len(), + blocks.len(), + "In this test, there should only be one block per tenure" + ); + miner_1_tenures = blocks + .iter() + .filter(|header| { + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + }) + .count(); + miner_2_tenures = blocks + .iter() + .filter(|header| { + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + miner_2_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + }) + .count(); } info!( @@ -1268,11 +1315,61 @@ fn multiple_miners() { let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); assert_eq!(peer_1_height, peer_2_height); - assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + nakamoto_tenures); + assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + btc_blocks_mined); + assert_eq!( + btc_blocks_mined, + u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() + ); signer_test.shutdown(); } +/// Read processed nakamoto block IDs from the test observer, and use `config` to open +/// a chainstate DB and returns their corresponding StacksHeaderInfos +fn get_nakamoto_headers(config: &Config) -> Vec { + let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() + .into_iter() + .filter_map(|block_json| { + if block_json + .as_object() + .unwrap() + .get("miner_signature") + .is_none() + { + return None; + } + let block_id = StacksBlockId::from_hex( + &block_json + .as_object() + .unwrap() + .get("index_block_hash") + .unwrap() + .as_str() + .unwrap()[2..], + ) + .unwrap(); + Some(block_id) + }) + .collect(); + + let (chainstate, _) = StacksChainState::open( + config.is_mainnet(), + config.burnchain.chain_id, + &config.get_chainstate_path_str(), + None, + ) + .unwrap(); + + nakamoto_block_ids + .into_iter() + .map(|block_id| { + NakamotoChainState::get_block_header(chainstate.db(), &block_id) + .unwrap() + .unwrap() + }) + .collect() +} + #[test] #[ignore] fn miner_forking() { @@ -1470,47 +1567,9 @@ fn miner_forking() { let (sortition_data, had_tenure) = run_sortition(); sortitions_seen.push((sortition_data.clone(), had_tenure)); - let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() - .into_iter() - .filter_map(|block_json| { - if block_json - .as_object() - .unwrap() - .get("miner_signature") - .is_none() - { - return None; - } - let block_id = StacksBlockId::from_hex( - &block_json - .as_object() - .unwrap() - .get("index_block_hash") - .unwrap() - .as_str() - .unwrap()[2..], - ) - .unwrap(); - Some(block_id) - }) - .collect(); - - let (chainstate, _) = StacksChainState::open( - conf.is_mainnet(), - conf.burnchain.chain_id, - &conf.get_chainstate_path_str(), - None, - ) - .unwrap(); - - let nakamoto_headers: HashMap<_, _> = nakamoto_block_ids + let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) .into_iter() - .map(|block_id| { - let header_info = NakamotoChainState::get_block_header(chainstate.db(), &block_id) - .unwrap() - .unwrap(); - (header_info.consensus_hash.clone(), header_info) - }) + .map(|header| (header.consensus_hash.clone(), header)) .collect(); if had_tenure { @@ -1562,20 +1621,11 @@ fn miner_forking() { info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); assert_eq!(peer_1_height, peer_2_height); - let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() - .into_iter() - .filter_map(|block_json| { - block_json - .as_object() - .unwrap() - .get("miner_signature") - .map(|x| x.as_str().unwrap().to_string()) - }) - .collect(); + let nakamoto_blocks_count = get_nakamoto_headers(&conf).len(); assert_eq!( peer_1_height - pre_nakamoto_peer_1_height, - u64::try_from(nakamoto_block_ids.len()).unwrap(), + u64::try_from(nakamoto_blocks_count).unwrap(), "There should be no forks in this test" ); From 13e73fb93d3970f7c3635c2092925aa2b857f150 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 1 Aug 2024 11:02:14 -0500 Subject: [PATCH 0610/1400] ci: add new test to CI, comment for test --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 5c9de28361..15c2e125b0 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -93,6 +93,7 @@ jobs: - tests::signer::v0::bitcoind_forking_test - tests::signer::v0::multiple_miners - tests::signer::v0::mock_sign_epoch_25 + - tests::signer::v0::miner_forking - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c9b5cf4854..fe53ca20cf 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1372,6 +1372,16 @@ fn get_nakamoto_headers(config: &Config) -> Vec { #[test] #[ignore] +// Test two nakamoto miners, with the signer set split between them. +// One of the miners (run-loop-2) is prevented from submitting "good" block commits +// using the "commit stall" test flag in combination with "block broadcast stalls". +// (Because RL2 isn't able to RBF their initial commits after the tip is broadcasted). +// This test works by tracking two different scenarios: +// 1. RL2 must win a sortition that this block commit behavior would lead to a fork in. +// 2. After such a sortition, RL1 must win another block. +// The test asserts that every nakamoto sortition either has a successful tenure, or if +// RL2 wins and they would be expected to fork, no blocks are produced. The test asserts +// that every block produced increments the chain length. fn miner_forking() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; From 8e4031cbbdba4d17c5e3f36a3c49489bc57553a5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 1 Aug 2024 11:36:47 -0500 Subject: [PATCH 0611/1400] chore: add comment to mock miner check for tenure parent --- testnet/stacks-node/src/nakamoto_node/miner.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 06a6e37006..e2511ff388 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -222,6 +222,9 @@ impl BlockMinerThread { // now, actually run this tenure loop { let new_block = loop { + // If we're mock mining, we may not have processed the block that the + // actual tenure winner committed to yet. So, before attempting to + // mock mine, check if the parent is processed. if self.config.get_node_config(false).mock_mining { let burn_db_path = self.config.get_burn_db_file_path(); let mut burn_db = SortitionDB::open( From 8a7768df3ac7da055f0552ac2c436e06481def3e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 1 Aug 2024 15:32:32 -0400 Subject: [PATCH 0612/1400] fix: release the chainstate db before locking the sortition db This ordering needs to be maintained to avoid deadlock. --- stackslib/src/chainstate/nakamoto/mod.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 09794c4775..64c367bd01 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1992,6 +1992,15 @@ impl NakamotoChainState { next_ready_block.header.consensus_hash ); + // this will panic if the Clarity commit fails. + clarity_commit.commit(); + chainstate_tx.commit() + .unwrap_or_else(|e| { + error!("Failed to commit chainstate transaction after committing Clarity block. The chainstate database is now corrupted."; + "error" => ?e); + panic!() + }); + // set stacks block accepted let mut sort_tx = sort_db.tx_handle_begin(canonical_sortition_tip)?; sort_tx.set_stacks_block_accepted( @@ -2000,15 +2009,6 @@ impl NakamotoChainState { next_ready_block.header.chain_length, )?; - // this will panic if the Clarity commit fails. - clarity_commit.commit(); - chainstate_tx.commit() - .unwrap_or_else(|e| { - error!("Failed to commit chainstate transaction after committing Clarity block. The chainstate database is now corrupted."; - "error" => ?e); - panic!() - }); - // as a separate transaction, mark this block as processed. // This is done separately so that the staging blocks DB, which receives writes // from the network to store blocks, will be available for writes while a block is From e65ad15af62a5616716455bb68f8365191993882 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 1 Aug 2024 17:22:03 -0400 Subject: [PATCH 0613/1400] chore: move `set_stacks_block_accepted` later Also, `commit` before announcing the block via the dispatcher. --- stackslib/src/chainstate/nakamoto/mod.rs | 32 ++++++++++++------------ 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 64c367bd01..1fab3ba9b1 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2001,14 +2001,6 @@ impl NakamotoChainState { panic!() }); - // set stacks block accepted - let mut sort_tx = sort_db.tx_handle_begin(canonical_sortition_tip)?; - sort_tx.set_stacks_block_accepted( - &next_ready_block.header.consensus_hash, - &next_ready_block.header.block_hash(), - next_ready_block.header.chain_length, - )?; - // as a separate transaction, mark this block as processed. // This is done separately so that the staging blocks DB, which receives writes // from the network to store blocks, will be available for writes while a block is @@ -2019,6 +2011,22 @@ impl NakamotoChainState { let signer_bitvec = (&next_ready_block).header.pox_treatment.clone(); + // set stacks block accepted + let mut sort_tx = sort_db.tx_handle_begin(canonical_sortition_tip)?; + sort_tx.set_stacks_block_accepted( + &next_ready_block.header.consensus_hash, + &next_ready_block.header.block_hash(), + next_ready_block.header.chain_length, + )?; + + sort_tx + .commit() + .unwrap_or_else(|e| { + error!("Failed to commit sortition db transaction after committing chainstate and clarity block. The chainstate database is now corrupted."; + "error" => ?e); + panic!() + }); + // announce the block, if we're connected to an event dispatcher if let Some(dispatcher) = dispatcher_opt { let block_event = ( @@ -2045,14 +2053,6 @@ impl NakamotoChainState { ); } - sort_tx - .commit() - .unwrap_or_else(|e| { - error!("Failed to commit sortition db transaction after committing chainstate and clarity block. The chainstate database is now corrupted."; - "error" => ?e); - panic!() - }); - Ok(Some(receipt)) } From fd7158708dfe58e8aac8019f50994baea75acd26 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 1 Aug 2024 16:05:39 -0700 Subject: [PATCH 0614/1400] wip: integration test on signer set handoff --- libsigner/src/runloop.rs | 2 +- stacks-signer/src/chainstate.rs | 3 +- stacks-signer/src/lib.rs | 5 +- testnet/stacks-node/src/event_dispatcher.rs | 2 + .../stacks-node/src/nakamoto_node/miner.rs | 49 ++-- .../src/nakamoto_node/sign_coordinator.rs | 14 +- .../src/tests/nakamoto_integrations.rs | 19 +- testnet/stacks-node/src/tests/signer/mod.rs | 34 ++- testnet/stacks-node/src/tests/signer/v0.rs | 273 +++++++++++++++--- testnet/stacks-node/src/tests/signer/v1.rs | 3 + 10 files changed, 328 insertions(+), 76 deletions(-) diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index b0f026f35f..e548db89e3 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -262,7 +262,7 @@ impl< // start receiving events and doing stuff with them let runloop_thread = thread::Builder::new() - .name("signer_runloop".to_string()) + .name(format!("signer_runloop:{}", bind_addr.port())) .stack_size(THREAD_STACK_SIZE) .spawn(move || { signer_loop.main_loop(event_recv, command_receiver, result_sender, stop_signaler) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4fc2de4cb8..c35ceb67e0 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -303,11 +303,12 @@ impl SortitionsView { let last_in_tenure = signer_db .get_last_signed_block_in_tenure(&block.header.consensus_hash) .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; - if last_in_tenure.is_some() { + if let Some(last_in_tenure) = last_in_tenure { warn!( "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "last_in_tenure_signer_sighash" => %last_in_tenure.block.header.signer_signature_hash(), ); return Ok(false); } diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 15c0a25c3d..abc2db331b 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -100,6 +100,8 @@ pub struct SpawnedSigner + Send, T: SignerEventTrait> { pub cmd_send: Sender, /// The result receiver for interacting with the running signer pub res_recv: Receiver>, + /// The spawned signer's config + pub config: GlobalConfig, /// Phantom data for the signer type _phantom: std::marker::PhantomData, } @@ -136,7 +138,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner { crate::monitoring::start_serving_monitoring_metrics(config.clone()).ok(); } - let runloop = RunLoop::new(config); + let runloop = RunLoop::new(config.clone()); let mut signer: RunLoopSigner = libsigner::Signer::new(runloop, ev, cmd_recv, res_send); let running_signer = signer.spawn(endpoint).expect("Failed to spawn signer"); @@ -145,6 +147,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner cmd_send, res_recv, _phantom: std::marker::PhantomData, + config: config.clone(), } } } diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 5a72e4ca0a..be0107e104 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -138,6 +138,7 @@ pub struct MinedNakamotoBlockEvent { pub signer_signature_hash: Sha512Trunc256Sum, pub tx_events: Vec, pub signer_bitvec: String, + pub signer_signature: Vec, } impl InnerStackerDBChannel { @@ -1261,6 +1262,7 @@ impl EventDispatcher { tx_events, miner_signature: block.header.miner_signature.clone(), signer_signature_hash: block.header.signer_signature_hash(), + signer_signature: block.header.signer_signature.clone(), signer_bitvec, }) .unwrap(); diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 527117fb4d..29b2195af9 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -270,18 +270,16 @@ impl BlockMinerThread { } } - let (reward_set, signer_signature) = match self.gather_signatures( - &mut new_block, - self.burn_block.block_height, - &mut stackerdbs, - &mut attempts, - ) { - Ok(x) => x, - Err(e) => { - error!("Error while gathering signatures: {e:?}. Will try mining again."); - continue; - } - }; + let (reward_set, signer_signature) = + match self.gather_signatures(&mut new_block, &mut stackerdbs, &mut attempts) { + Ok(x) => x, + Err(e) => { + error!( + "Error while gathering signatures: {e:?}. Will try mining again." + ); + continue; + } + }; new_block.header.signer_signature = signer_signature; if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { @@ -354,10 +352,21 @@ impl BlockMinerThread { let burn_election_height = self.burn_election_block.block_height; + let reward_cycle = self + .burnchain + .block_height_to_reward_cycle(burn_election_height) + .expect("FATAL: no reward cycle for sortition"); + + #[cfg(test)] + { + info!( + "---- Fetching reward info at height {} for cycle {} ----", + burn_election_height, reward_cycle + ); + } + let reward_info = match load_nakamoto_reward_set( - self.burnchain - .pox_reward_cycle(burn_election_height) - .expect("FATAL: no reward cycle for sortition"), + reward_cycle, &self.burn_election_block.sortition_id, &self.burnchain, &mut chain_state, @@ -384,6 +393,14 @@ impl BlockMinerThread { )); }; + #[cfg(test)] + { + info!( + "---- New reward set has {} signers ----", + reward_set.clone().signers.unwrap_or(vec![]).len(), + ); + } + self.signer_set_cache = Some(reward_set.clone()); Ok(reward_set) } @@ -392,7 +409,6 @@ impl BlockMinerThread { fn gather_signatures( &mut self, new_block: &mut NakamotoBlock, - burn_block_height: u64, stackerdbs: &mut StackerDBs, attempts: &mut u64, ) -> Result<(RewardSet, Vec), NakamotoNodeError> { @@ -442,7 +458,6 @@ impl BlockMinerThread { *attempts += 1; let signature = coordinator.begin_sign_v0( new_block, - burn_block_height, *attempts, &tip, &self.burnchain, diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b6e42b87ee..3afe36fa29 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -634,7 +634,6 @@ impl SignCoordinator { pub fn begin_sign_v0( &mut self, block: &NakamotoBlock, - burn_block_height: u64, block_attempt: u64, burn_tip: &BlockSnapshot, burnchain: &Burnchain, @@ -643,6 +642,13 @@ impl SignCoordinator { counters: &Counters, election_sortition: &ConsensusHash, ) -> Result, NakamotoNodeError> { + #[cfg(test)] + { + info!( + "---- Sign coordinator starting. Burn tip height: {} ----", + burn_tip.block_height + ); + } let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); let sign_iter_id = block_attempt; let reward_cycle_id = burnchain @@ -653,7 +659,7 @@ impl SignCoordinator { let block_proposal = BlockProposal { block: block.clone(), - burn_height: burn_block_height, + burn_height: burn_tip.block_height, reward_cycle: reward_cycle_id, }; @@ -736,7 +742,7 @@ impl SignCoordinator { continue; }; if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { - debug!("Received signer event for other reward cycle. Ignoring."); + info!("Received signer event for other reward cycle. Ignoring."); continue; }; let slot_ids = modified_slots @@ -776,6 +782,8 @@ impl SignCoordinator { "signature" => %signature, "block_signer_signature_hash" => %block_sighash, "slot_id" => slot_id, + "reward_cycle_id" => reward_cycle_id, + "response_hash" => %response_hash ); continue; } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 2441380b2b..bf1b10c41b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -111,7 +111,7 @@ use crate::tests::{ use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; -static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; +pub static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000; lazy_static! { pub static ref NAKAMOTO_INTEGRATION_EPOCHS: [StacksEpoch; 9] = [ @@ -1028,6 +1028,7 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( stacker_sks: &[StacksPrivateKey], signer_sks: &[StacksPrivateKey], btc_regtest_controller: &mut BitcoinRegtestController, + num_stacking_cycles: Option, ) { assert_eq!(stacker_sks.len(), signer_sks.len()); @@ -1058,7 +1059,7 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( .get_burnchain() .block_height_to_reward_cycle(block_height) .unwrap(); - let lock_period = 12; + let lock_period: u128 = num_stacking_cycles.unwrap_or(12_u64).into(); debug!("Test Cycle Info"; "prepare_phase_len" => {prepare_phase_len}, "reward_cycle_len" => {reward_cycle_len}, @@ -1130,6 +1131,7 @@ pub fn boot_to_epoch_3_reward_set( stacker_sks: &[StacksPrivateKey], signer_sks: &[StacksPrivateKey], btc_regtest_controller: &mut BitcoinRegtestController, + num_stacking_cycles: Option, ) { boot_to_epoch_3_reward_set_calculation_boundary( naka_conf, @@ -1137,6 +1139,7 @@ pub fn boot_to_epoch_3_reward_set( stacker_sks, signer_sks, btc_regtest_controller, + num_stacking_cycles, ); let epoch_3_reward_set_calculation = btc_regtest_controller.get_headers_height().wrapping_add(1); @@ -4925,6 +4928,18 @@ fn signer_chainstate() { // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; let prom_http_origin = format!("http://{}", prom_bind); let client = reqwest::blocking::Client::new(); let res = client diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 12584ab89a..eb5b337ada 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -49,7 +49,7 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum}; -use stacks_signer::client::{SignerSlotID, StacksClient}; +use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; @@ -97,6 +97,8 @@ pub struct SignerTest { pub stacks_client: StacksClient, // Unique number used to isolate files created during the test pub run_stamp: u16, + /// The number of cycles to stack for + pub num_stacking_cycles: u64, } impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest> { @@ -105,14 +107,24 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest, wait_on_signers: Option, ) -> Self { - Self::new_with_config_modifications(num_signers, initial_balances, wait_on_signers, |_| {}) + Self::new_with_config_modifications( + num_signers, + initial_balances, + wait_on_signers, + |_| {}, + |_| {}, + ) } - fn new_with_config_modifications ()>( + fn new_with_config_modifications< + SignerModifier: Fn(&mut SignerConfig) -> (), + NakaModifier: Fn(&mut NeonConfig) -> (), + >( num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>, wait_on_signers: Option, - modifier: F, + modifier: SignerModifier, + naka_modifier: NakaModifier, ) -> Self { // Generate Signer Data let signer_stacks_private_keys = (0..num_signers) @@ -121,6 +133,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest Vec { + fn get_signer_slots( + &self, + reward_cycle: u64, + ) -> Result, ClientError> { let valid_signer_set = u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false); self.stacks_client .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, valid_signer_set) + } + + fn get_signer_indices(&self, reward_cycle: u64) -> Vec { + self.get_signer_slots(reward_cycle) .expect("FATAL: failed to get signer slots from stackerdb") .iter() .enumerate() .map(|(pos, _)| { SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) }) - .collect() + .collect::>() } /// Get the wsts public keys for the given reward cycle diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index fe6c4ff619..c8b98510c9 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -23,16 +23,24 @@ use libsigner::v0::messages::{ BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use rand::RngCore; +use stacks::address::AddressHashMode; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::codec::StacksMessageCodec; +use stacks::core::CHAIN_ID_TESTNET; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; use stacks::types::chainstate::{StacksAddress, StacksPrivateKey}; use stacks::types::PublicKey; +use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; +use stacks::util_lib::signed_structured_data::pox4::{ + make_pox_4_signer_key_signature, Pox4SignatureTopic, +}; use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; @@ -43,10 +51,14 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; +use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::nakamoto_node::relayer::TEST_SKIP_COMMIT_OP; -use crate::tests::nakamoto_integrations::{boot_to_epoch_3_reward_set, next_block_and}; +use crate::tests::nakamoto_integrations::{ + boot_to_epoch_3_reward_set, next_block_and, POX_4_DEFAULT_STACKER_BALANCE, + POX_4_DEFAULT_STACKER_STX_AMT, +}; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, submit_tx, test_observer, }; @@ -62,6 +74,7 @@ impl SignerTest { &self.signer_stacks_private_keys, &self.signer_stacks_private_keys, &mut self.running_nodes.btc_regtest_controller, + Some(self.num_stacking_cycles), ); debug!("Waiting for signer set calculation."); let mut reward_set_calculated = false; @@ -128,6 +141,10 @@ impl SignerTest { // Only call after already past the epoch 3.0 boundary fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { info!("------------------------- Try mining one block -------------------------"); + // Get the current signers _before_ the new block + let reward_cycle = self.get_current_reward_cycle(); + let signers = self.get_reward_set_signers(reward_cycle); + self.mine_nakamoto_block(timeout); // Verify that the signers accepted the proposed block, sending back a validate ok response @@ -144,9 +161,6 @@ impl SignerTest { // whenever it has crossed the threshold. assert!(signature.len() >= num_signers * 7 / 10); - let reward_cycle = self.get_current_reward_cycle(); - let signers = self.get_reward_set_signers(reward_cycle); - // Verify that the signers signed the proposed block let mut signer_index = 0; let mut signature_index = 0; @@ -614,6 +628,7 @@ fn forked_tenure_testing( // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; }, + |_| {}, ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); @@ -802,11 +817,10 @@ fn bitcoind_forking_test() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], Some(Duration::from_secs(15)), - |_config| {}, ); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -1252,6 +1266,7 @@ fn empty_sortition() { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; }, + |_| {}, ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let short_timeout = Duration::from_secs(20); @@ -1389,12 +1404,16 @@ fn signer_set_rollover() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let new_num_signers = 5; + let new_num_signers = 4; let new_signer_private_keys: Vec<_> = (0..new_num_signers) .into_iter() .map(|_| StacksPrivateKey::new()) .collect(); + let new_signer_public_keys: Vec<_> = new_signer_private_keys + .iter() + .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) + .collect(); let new_signer_addresses: Vec<_> = new_signer_private_keys .iter() .map(|sk| tests::to_addr(sk)) @@ -1405,21 +1424,25 @@ fn signer_set_rollover() { let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - // Boot with some initial signer set - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - None, - ); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(20); + let mut initial_balances = new_signer_addresses + .iter() + .map(|addr| (addr.clone(), POX_4_DEFAULT_STACKER_BALANCE)) + .collect::>(); + + initial_balances.push((sender_addr.clone(), (send_amt + send_fee) * 4)); let run_stamp = rand::random(); + let mut rng = rand::thread_rng(); + + let mut buf = [0u8; 2]; + rng.fill_bytes(&mut buf); + let rpc_port = u16::from_be_bytes(buf.try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + let rpc_bind = format!("127.0.0.1:{}", rpc_port); // Setup the new signers that will take over let new_signer_configs = build_signer_config_tomls( &new_signer_private_keys, - &signer_test.running_nodes.conf.node.rpc_bind, + &rpc_bind, Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. &Network::Testnet, "12345", @@ -1427,64 +1450,224 @@ fn signer_set_rollover() { 3000 + num_signers, Some(100_000), None, - Some(9000), + Some(9000 + num_signers), ); - let new_spawned_signers: Vec<_> = (0..num_signers) + let new_spawned_signers: Vec<_> = (0..new_num_signers) .into_iter() .map(|i| { info!("spawning signer"); - let mut signer_config = + let signer_config = SignerConfig::load_from_str(&new_signer_configs[i as usize]).unwrap(); SpawnedSigner::new(signer_config) }) .collect(); - // TODO: may need to modify signer_test to not auto stack and delegate the way it does right now. I think it delegates for 12 reward cycles. and we should delegate only for one before transferring to the new signer set + // Boot with some initial signer set + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + initial_balances, + None, + |_| {}, + |naka_conf| { + for toml in new_signer_configs.clone() { + let signer_config = SignerConfig::load_from_str(&toml).unwrap(); + info!( + "---- Adding signer endpoint to naka conf ({}) ----", + signer_config.endpoint + ); + + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("{}", signer_config.endpoint), + events_keys: vec![ + EventKeyType::StackerDBChunks, + EventKeyType::BlockProposal, + EventKeyType::BurnchainBlocks, + ], + }); + } + naka_conf.node.rpc_bind = rpc_bind.clone(); + }, + ); + assert_eq!( + new_spawned_signers[0].config.node_host, + signer_test.running_nodes.conf.node.rpc_bind + ); + // Only stack for one cycle so that the signer set changes + signer_test.num_stacking_cycles = 1_u64; - // TODO: Advance to the first reward cycle, stacking and delegating to the old signers beforehand + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = Duration::from_secs(20); + + // Verify that naka_conf has our new signer's event observers + for toml in new_signer_configs.clone() { + let signer_config = SignerConfig::load_from_str(&toml).unwrap(); + let endpoint = format!("{}", signer_config.endpoint); + assert!(signer_test + .running_nodes + .conf + .events_observers + .iter() + .any(|observer| observer.endpoint == endpoint)); + } + + // Advance to the first reward cycle, stacking to the old signers beforehand + + info!("---- Booting to epoch 3 -----"); signer_test.boot_to_epoch_3(); - // TODO: verify that the first reward cycle has the old signers in the reward set + // verify that the first reward cycle has the old signers in the reward set let reward_cycle = signer_test.get_current_reward_cycle(); - let old_signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(reward_cycle) + let signer_test_public_keys: Vec<_> = signer_test + .signer_stacks_private_keys .iter() - .map(|id| id.0) + .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) .collect(); - // TODO: manually trigger a stacks transaction and verify that only OLD signer signatures are found in the signed block - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - // submit a tx so that the miner will mine an extra block - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let start_time = Instant::now(); + info!("---- Verifying that the current signers are the old signers ----"); + let current_signers = signer_test.get_reward_set_signers(reward_cycle); + assert_eq!(current_signers.len(), num_signers as usize); + // Verify that the current signers are the same as the old signers + for signer in current_signers.iter() { + assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(!new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } + + info!("---- Mining a block to trigger the signer set -----"); // submit a tx so that the miner will mine an extra block let sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); + submit_tx(&http_origin, &transfer_tx); let mined_block = signer_test.mine_nakamoto_block(short_timeout); - // TODO: verify the mined_block signatures against the OLD signer set (might need to update event to take vector of message signatures?) + let block_sighash = mined_block.signer_signature_hash; + let signer_signatures = mined_block.signer_signature; + + // verify the mined_block signatures against the OLD signer set + for signature in signer_signatures.iter() { + let pk = Secp256k1PublicKey::recover_to_pubkey(block_sighash.bits(), signature) + .expect("FATAL: Failed to recover pubkey from block sighash"); + assert!(signer_test_public_keys.contains(&pk.to_bytes_compressed())); + assert!(!new_signer_public_keys.contains(&pk.to_bytes_compressed())); + } - //TODO: advance to the next reward cycle, stacking and delegating to the new signers beforehand + // advance to the next reward cycle, stacking to the new signers beforehand let reward_cycle = signer_test.get_current_reward_cycle(); - let new_signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - // submit a tx so that the miner will mine an extra block - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let start_time = Instant::now(); - // submit a tx so that the miner will mine an extra block + info!("---- Stacking new signers -----"); + + let burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + for stacker_sk in new_signer_private_keys.iter() { + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(&stacker_sk).bytes, + ); + let pox_addr_tuple: clarity::vm::Value = + pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &stacker_sk, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + 1_u128, + u128::MAX, + 1, + ) + .unwrap() + .to_rsv(); + + let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(burn_block_height as u128), + clarity::vm::Value::UInt(1), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) + .unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + } + + signer_test.mine_nakamoto_block(short_timeout); + + let next_reward_cycle = reward_cycle.saturating_add(1); + + let next_cycle_height = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(next_reward_cycle) + .saturating_add(1); + + info!("---- Mining to next reward set calculation -----"); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_cycle_height.saturating_sub(3), + new_num_signers, + ); + + // Verify that the new reward set is the new signers + let reward_set = signer_test.get_reward_set_signers(next_reward_cycle); + for signer in reward_set.iter() { + assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } + + info!( + "---- Mining to the next reward cycle (block {}) -----", + next_cycle_height + ); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_cycle_height, + new_num_signers, + ); + let new_reward_cycle = signer_test.get_current_reward_cycle(); + assert_eq!(new_reward_cycle, reward_cycle.saturating_add(1)); + + info!("---- Verifying that the current signers are the new signers ----"); + let current_signers = signer_test.get_reward_set_signers(new_reward_cycle); + assert_eq!(current_signers.len(), new_num_signers as usize); + for signer in current_signers.iter() { + assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(!new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } + + info!("---- Mining a block to verify new signer set -----"); let sender_nonce = 1; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); + submit_tx(&http_origin, &transfer_tx); let mined_block = signer_test.mine_nakamoto_block(short_timeout); - // TODO: verify the mined_block signatures against the NEW signer set + + info!("---- Verifying that the new signers signed the block -----"); + let signer_signatures = mined_block.signer_signature; + + // verify the mined_block signatures against the NEW signer set + for signature in signer_signatures.iter() { + let pk = Secp256k1PublicKey::recover_to_pubkey(block_sighash.bits(), signature) + .expect("FATAL: Failed to recover pubkey from block sighash"); + assert!(!signer_test_public_keys.contains(&pk.to_bytes_compressed())); + assert!(new_signer_public_keys.contains(&pk.to_bytes_compressed())); + } signer_test.shutdown(); - // TODO: shutdown the new signers as well + for signer in new_spawned_signers { + assert!(signer.stop().is_none()); + } } diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 9a3af13081..44bbc57228 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -71,6 +71,7 @@ impl SignerTest { &self.signer_stacks_private_keys, &self.signer_stacks_private_keys, &mut self.running_nodes.btc_regtest_controller, + Some(self.num_stacking_cycles), ); let dkg_vote = self.wait_for_dkg(timeout); @@ -493,6 +494,7 @@ fn dkg() { &signer_test.signer_stacks_private_keys, &signer_test.signer_stacks_private_keys, &mut signer_test.running_nodes.btc_regtest_controller, + Some(signer_test.num_stacking_cycles), ); info!("Pox 4 activated and at epoch 3.0 reward set calculation (2nd block of its prepare phase)! Ready for signers to perform DKG and Sign!"); @@ -696,6 +698,7 @@ fn delayed_dkg() { &signer_test.signer_stacks_private_keys, &signer_test.signer_stacks_private_keys, &mut signer_test.running_nodes.btc_regtest_controller, + Some(signer_test.num_stacking_cycles), ); let reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); let public_keys = signer_test.get_signer_public_keys(reward_cycle); From d64405422efa92e7f0f33627de1c0d533148fa34 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 1 Aug 2024 16:21:43 -0700 Subject: [PATCH 0615/1400] fix: revert behavior when verifying naka block in test code --- testnet/stacks-node/src/tests/signer/v0.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c8b98510c9..c7a08c2503 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -141,9 +141,6 @@ impl SignerTest { // Only call after already past the epoch 3.0 boundary fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { info!("------------------------- Try mining one block -------------------------"); - // Get the current signers _before_ the new block - let reward_cycle = self.get_current_reward_cycle(); - let signers = self.get_reward_set_signers(reward_cycle); self.mine_nakamoto_block(timeout); @@ -161,6 +158,9 @@ impl SignerTest { // whenever it has crossed the threshold. assert!(signature.len() >= num_signers * 7 / 10); + let reward_cycle = self.get_current_reward_cycle(); + let signers = self.get_reward_set_signers(reward_cycle); + // Verify that the signers signed the proposed block let mut signer_index = 0; let mut signature_index = 0; From 5853580dd50381a7fd0dbec3bdf7f02e69f4efc4 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 1 Aug 2024 16:33:16 -0700 Subject: [PATCH 0616/1400] fix: issues after merge --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/mod.rs | 10 +++++----- testnet/stacks-node/src/tests/signer/v0.rs | 8 +------- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 5c9de28361..8307053fbf 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -93,6 +93,7 @@ jobs: - tests::signer::v0::bitcoind_forking_test - tests::signer::v0::multiple_miners - tests::signer::v0::mock_sign_epoch_25 + - tests::signer::v0::signer_set_rollover - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 10b0a629a5..35db96c845 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -125,7 +125,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest, wait_on_signers: Option, mut signer_config_modifier: F, - node_config_modifier: G, + mut node_config_modifier: G, btc_miner_pubkeys: &[Secp256k1PublicKey], ) -> Self { // Generate Signer Data @@ -135,7 +135,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest Date: Fri, 2 Aug 2024 11:26:46 -0400 Subject: [PATCH 0617/1400] test(wip): add test to verify deadlock in `process_next_nakamoto_block` --- .../chainstate/nakamoto/coordinator/tests.rs | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index bf1970f7f6..91a8d9f965 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -2453,3 +2453,75 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe fn test_nakamoto_coordinator_10_tenures_and_extensions_10_blocks() { simple_nakamoto_coordinator_10_extended_tenures_10_sortitions(); } + +#[test] +fn process_next_nakamoto_block_deadlock() { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key)); + + let num_stackers: u32 = 4; + let mut signing_key_seed = num_stackers.to_be_bytes().to_vec(); + signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); + let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); + let test_stackers = (0..num_stackers) + .map(|index| TestStacker { + signer_private_key: signing_key.clone(), + stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), + amount: u64::MAX as u128 - 10000, + pox_addr: Some(PoxAddress::Standard( + StacksAddress::new( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + Hash160::from_data(&index.to_be_bytes()), + ), + Some(AddressHashMode::SerializeP2PKH), + )), + max_amount: None, + }) + .collect::>(); + let test_signers = TestSigners::new(vec![signing_key]); + let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 28; + + let mut boot_plan = NakamotoBootPlan::new(function_name!()) + .with_test_stackers(test_stackers.clone()) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key); + boot_plan.pox_constants = pox_constants; + + info!("Creating peer"); + + let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); + + // Lock the sortdb + info!(" ------------------------------- TRYING TO LOCK THE SORTDB"); + let mut sortition_db = peer.sortdb().reopen().unwrap(); + let sort_tx = sortition_db.tx_begin().unwrap(); + + // Start another thread that opens the sortdb, waits 10s, then tries to + // lock the chainstate db. This should cause a deadlock if the block + // processing is not acquiring the locks in the correct order. + info!(" ------------------------------- SPAWNING BLOCKER THREAD"); + let blocker_thread = std::thread::spawn(move || { + // Wait a bit, to ensure the tenure will have grabbed any locks it needs + std::thread::sleep(std::time::Duration::from_secs(10)); + + // Lock the chainstate db + info!(" ------------------------------- TRYING TO LOCK THE CHAINSTATE"); + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let (chainstate_tx, _) = chainstate.chainstate_tx_begin().unwrap(); + + info!(" ------------------------------- SORTDB AND CHAINSTATE LOCKED"); + info!(" ------------------------------- BLOCKER THREAD FINISHED"); + }); + + info!(" ------------------------------- MINING TENURE"); + let (block, burn_height, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); + info!(" ------------------------------- TENURE MINED"); + + // Wait for the blocker thread to finish + blocker_thread.join().unwrap(); +} From 5bb6af63db403afb01a2f3749fb6ce1e4dc586fc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:26:30 -0400 Subject: [PATCH 0618/1400] fix: report _all_ tenure-start blocks for a tenure --- .../src/chainstate/nakamoto/staging_blocks.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 88e1744bb6..1d7b2a8414 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -325,6 +325,24 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { Ok(res) } + /// Get all Nakamoto blocks in a tenure that report being tenure-start blocks + /// (depending on signer behavior, there can be more than one; none are guaranteed to be + /// canonical). + /// + /// Used by the block downloader + pub fn get_nakamoto_tenure_start_blocks( + &self, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let qry = "SELECT data FROM nakamoto_staging_blocks WHERE is_tenure_start = 1 AND consensus_hash = ?1"; + let args = params![consensus_hash]; + let block_data: Vec> = query_rows(self, qry, args)?; + Ok(block_data + .into_iter() + .filter_map(|block_vec| NakamotoBlock::consensus_deserialize(&mut &block_vec[..]).ok()) + .collect()) + } + /// Find the next ready-to-process Nakamoto block, given a connection to the staging blocks DB. /// NOTE: the relevant field queried from `nakamoto_staging_blocks` are updated by a separate /// tx from block-processing, so it's imperative that the thread that calls this function is From a5f12b769e4978c0f611d862cda2750aa6ef3d8e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:27:01 -0400 Subject: [PATCH 0619/1400] chore: more debugging --- stackslib/src/net/chat.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 8d8dc7ca5c..95d6fbac82 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -764,7 +764,7 @@ impl ConversationP2P { if my_epoch <= remote_epoch { // remote node supports same epochs we do debug!( - "Remote peer has epoch {}, which is newer than our epoch {}", + "Remote peer has epoch {}, which is at least as new as our epoch {}", remote_epoch, my_epoch ); return true; @@ -2421,14 +2421,16 @@ impl ConversationP2P { Ok(num_recved) => { total_recved += num_recved; if num_recved > 0 { + debug!("{:?}: received {} bytes", self, num_recved); self.stats.last_recv_time = get_epoch_time_secs(); self.stats.bytes_rx += num_recved as u64; } else { + debug!("{:?}: received {} bytes, stopping", self, num_recved); break; } } Err(net_error::PermanentlyDrained) => { - trace!( + debug!( "{:?}: failed to recv on P2P conversation: PermanentlyDrained", self ); @@ -3022,7 +3024,7 @@ impl ConversationP2P { } } } else { - // no one was waiting for this reply, so just drop it + // message was passed to the relevant message handle debug!( "{:?}: Fulfilled pending message request (type {} seq {})", &self, _msgtype, _seq From 53ef9528ff0ef9527506f2f9d7bc6e794f1339e1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:27:12 -0400 Subject: [PATCH 0620/1400] chore: more debugging --- stackslib/src/net/connection.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 8dc5ad7794..d3a77ebc8d 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -167,8 +167,9 @@ impl NetworkReplyHandle

{ /// is destroyed in the process). pub fn try_recv(mut self) -> Result, net_error>> { if self.deadline > 0 && self.deadline < get_epoch_time_secs() { - test_debug!( - "Reply deadline {} exceeded (now = {})", + debug!( + "Reply deadline for event {} at {} exceeded (now = {})", + self.socket_event_id, self.deadline, get_epoch_time_secs() ); @@ -234,10 +235,9 @@ impl NetworkReplyHandle

{ None } else { // still have data to send, or we will send more. - test_debug!( + debug!( "Still have data to send, drop_on_success = {}, ret = {}", - drop_on_success, - ret + drop_on_success, ret ); Some(fd) } @@ -990,7 +990,7 @@ impl ConnectionInbox

{ || e.kind() == io::ErrorKind::ConnectionReset { // write endpoint is dead - test_debug!("reader was reset: {:?}", &e); + debug!("reader was reset: {:?}", &e); socket_closed = true; blocked = true; Ok(0) @@ -1004,7 +1004,7 @@ impl ConnectionInbox

{ total_read += num_read; if num_read > 0 || total_read > 0 { - trace!("read {} bytes; {} total", num_read, total_read); + debug!("read {} bytes; {} total", num_read, total_read); } if num_read > 0 { From 6d82225fce33ba1dd8d6af2bbc84a94d157467b8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:27:27 -0400 Subject: [PATCH 0621/1400] fix: more debugging, and also, don't add more entries to wanted_tenures if the sortition reward cycle is not equal to our tracked reward cycle --- .../nakamoto/download_state_machine.rs | 217 +++++++++--------- 1 file changed, 108 insertions(+), 109 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 3865e8ee39..d1510af9c1 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -164,11 +164,9 @@ impl NakamotoDownloadStateMachine { .get_block_snapshot_by_height(last_block_height.saturating_sub(1))? .ok_or(DBError::NotFoundError)?; while cursor.block_height >= first_block_height { - test_debug!( + debug!( "Load sortition {}/{} burn height {}", - &cursor.consensus_hash, - &cursor.winning_stacks_block_hash, - cursor.block_height + &cursor.consensus_hash, &cursor.winning_stacks_block_hash, cursor.block_height ); wanted_tenures.push(WantedTenure::new( cursor.consensus_hash, @@ -211,20 +209,16 @@ impl NakamotoDownloadStateMachine { .min(tip.block_height.saturating_add(1)); if highest_tenure_height > last_block_height { - test_debug!( + debug!( "Will NOT update wanted tenures for reward cycle {}: {} > {}", - cur_rc, - highest_tenure_height, - last_block_height + cur_rc, highest_tenure_height, last_block_height ); return Ok(()); } - test_debug!( + debug!( "Update reward cycle sortitions between {} and {} (rc is {})", - first_block_height, - last_block_height, - cur_rc + first_block_height, last_block_height, cur_rc ); // find all sortitions in this reward cycle @@ -276,7 +270,7 @@ impl NakamotoDownloadStateMachine { .saturating_sub(1) .min(tip.block_height.saturating_add(1)); - test_debug!( + debug!( "Load tip sortitions between {} and {} (loaded_so_far = {})", first_block_height, last_block_height, @@ -289,7 +283,7 @@ impl NakamotoDownloadStateMachine { let ih = sortdb.index_handle(&tip.sortition_id); let wanted_tenures = Self::load_wanted_tenures(&ih, first_block_height, last_block_height)?; - test_debug!( + debug!( "Loaded tip sortitions between {} and {} (loaded_so_far = {}): {:?}", first_block_height, last_block_height, @@ -315,7 +309,10 @@ impl NakamotoDownloadStateMachine { stacks_tip: &StacksBlockId, ) -> Result<(), NetError> { for wt in wanted_tenures.iter_mut() { - test_debug!("update_processed_wanted_tenures: consider {:?}", &wt); + debug!( + "update_processed_wanted_tenures: consider {:?} off of {}", + &wt, stacks_tip + ); if wt.processed { continue; } @@ -329,7 +326,7 @@ impl NakamotoDownloadStateMachine { stacks_tip, &wt.tenure_id_consensus_hash, )? { - test_debug!("Tenure {} is now processed", &wt.tenure_id_consensus_hash); + debug!("Tenure {} is now processed", &wt.tenure_id_consensus_hash); wt.processed = true; continue; } @@ -347,7 +344,7 @@ impl NakamotoDownloadStateMachine { chainstate: &StacksChainState, ) -> Result<(), NetError> { if let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_mut() { - test_debug!("update_processed_wanted_tenures: update prev_tenures"); + debug!("update_processed_wanted_tenures: update prev_tenures"); Self::inner_update_processed_wanted_tenures( self.nakamoto_start_height, prev_wanted_tenures, @@ -355,7 +352,7 @@ impl NakamotoDownloadStateMachine { &self.nakamoto_tip, )?; } - test_debug!("update_processed_wanted_tenures: update wanted_tenures"); + debug!("update_processed_wanted_tenures: update wanted_tenures"); Self::inner_update_processed_wanted_tenures( self.nakamoto_start_height, &mut self.wanted_tenures, @@ -377,33 +374,19 @@ impl NakamotoDownloadStateMachine { pub(crate) fn load_tenure_start_blocks( wanted_tenures: &[WantedTenure], chainstate: &mut StacksChainState, - tip_block_id: &StacksBlockId, tenure_start_blocks: &mut HashMap, ) -> Result<(), NetError> { for wt in wanted_tenures { - let Some(tenure_start_block_header) = - NakamotoChainState::get_nakamoto_tenure_start_block_header( - &mut chainstate.index_conn(), - tip_block_id, - &wt.tenure_id_consensus_hash, - )? - else { - test_debug!("No tenure-start block for {}", &wt.tenure_id_consensus_hash); - continue; - }; - let Some((tenure_start_block, _)) = chainstate + let candidate_tenure_start_blocks = chainstate .nakamoto_blocks_db() - .get_nakamoto_block(&tenure_start_block_header.index_block_hash())? - else { - let msg = format!( - "Have header but no block for tenure-start of {} ({})", - &wt.tenure_id_consensus_hash, - &tenure_start_block_header.index_block_hash() + .get_nakamoto_tenure_start_blocks(&wt.tenure_id_consensus_hash)?; + + for candidate_tenure_start_block in candidate_tenure_start_blocks.into_iter() { + tenure_start_blocks.insert( + candidate_tenure_start_block.block_id(), + candidate_tenure_start_block, ); - error!("{}", &msg); - return Err(NetError::ChainstateError(msg)); - }; - tenure_start_blocks.insert(tenure_start_block.block_id(), tenure_start_block); + } } Ok(()) } @@ -416,7 +399,6 @@ impl NakamotoDownloadStateMachine { Self::load_tenure_start_blocks( &self.wanted_tenures, chainstate, - &self.nakamoto_tip, &mut self.tenure_start_blocks, ) } @@ -455,7 +437,7 @@ impl NakamotoDownloadStateMachine { let sort_tip = &network.burnchain_tip; let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - test_debug!("No network inventories"); + debug!("No network inventories"); return Err(NetError::PeerNotConnected); }; @@ -491,7 +473,7 @@ impl NakamotoDownloadStateMachine { invs.inventories.values(), ) } else { - test_debug!("No prev_wanted_tenures yet"); + debug!("No prev_wanted_tenures yet"); true }; @@ -506,7 +488,7 @@ impl NakamotoDownloadStateMachine { .chain(prev_wts.into_iter()) .chain(cur_wts.into_iter()) { - test_debug!("Consider wanted tenure: {:?}", &wt); + debug!("Consider wanted tenure: {:?}", &wt); let wt_rc = sortdb .pox_constants .block_height_to_reward_cycle(sortdb.first_block_height, wt.burn_height) @@ -516,15 +498,16 @@ impl NakamotoDownloadStateMachine { } else if wt_rc == sort_rc { cur_wanted_tenures.push(wt); } else { - test_debug!("Drop wanted tenure: {:?}", &wt); + debug!("Drop wanted tenure: {:?}", &wt); } } prev_wanted_tenures.sort_unstable_by_key(|wt| wt.burn_height); cur_wanted_tenures.sort_unstable_by_key(|wt| wt.burn_height); - test_debug!("prev_wanted_tenures is now {:?}", &prev_wanted_tenures); - test_debug!("wanted_tenures is now {:?}", &cur_wanted_tenures); + debug!("prev_wanted_tenures is now {:?}", &prev_wanted_tenures); + debug!("wanted_tenures is now {:?}", &cur_wanted_tenures); + debug!("set self.reward_cycle = {}", sort_rc); self.prev_wanted_tenures = if prev_wanted_tenures.is_empty() { None @@ -534,13 +517,13 @@ impl NakamotoDownloadStateMachine { self.wanted_tenures = cur_wanted_tenures; self.reward_cycle = sort_rc; } else { - test_debug!( + debug!( "Append {} wanted tenures: {:?}", new_wanted_tenures.len(), &new_wanted_tenures ); self.wanted_tenures.append(&mut new_wanted_tenures); - test_debug!("wanted_tenures is now {:?}", &self.wanted_tenures); + debug!("wanted_tenures is now {:?}", &self.wanted_tenures); } Ok(()) @@ -559,7 +542,7 @@ impl NakamotoDownloadStateMachine { let reorg = PeerNetwork::is_reorg(self.last_sort_tip.as_ref(), sort_tip, sortdb); if reorg { // force a reload - test_debug!("Detected reorg! Refreshing wanted tenures"); + debug!("Detected reorg! Refreshing wanted tenures"); self.prev_wanted_tenures = None; self.wanted_tenures.clear(); } @@ -587,10 +570,9 @@ impl NakamotoDownloadStateMachine { &mut prev_wanted_tenures, )?; - test_debug!( + debug!( "initial prev_wanted_tenures (rc {}): {:?}", - sort_rc, - &prev_wanted_tenures + sort_rc, &prev_wanted_tenures ); self.prev_wanted_tenures = Some(prev_wanted_tenures); } @@ -609,10 +591,9 @@ impl NakamotoDownloadStateMachine { &mut wanted_tenures, )?; - test_debug!( + debug!( "initial wanted_tenures (rc {}): {:?}", - sort_rc, - &wanted_tenures + sort_rc, &wanted_tenures ); self.wanted_tenures = wanted_tenures; self.reward_cycle = sort_rc; @@ -633,6 +614,7 @@ impl NakamotoDownloadStateMachine { inventory_iter: impl Iterator, ) -> bool { if prev_wanted_tenures.is_empty() { + debug!("prev_wanted_tenures is empty, so we have unprocessed tenures"); return true; } @@ -641,19 +623,29 @@ impl NakamotoDownloadStateMachine { // inventory messages for the reward cycle after `prev_wanted_rc`, then the former will be // true let prev_wanted_rc = prev_wanted_tenures - .first() + .last() .map(|wt| { pox_constants - .block_height_to_reward_cycle(first_burn_height, wt.burn_height) + .block_height_to_reward_cycle( + first_burn_height, + wt.burn_height.saturating_sub(1), + ) .expect("FATAL: wanted tenure before system start") }) .unwrap_or(u64::MAX); let cur_wanted_rc = prev_wanted_rc.saturating_add(1); + debug!( + "have_unprocessed_tenures: prev_wanted_rc = {}, cur_wanted_rc = {}", + prev_wanted_rc, cur_wanted_rc + ); + let mut has_prev_inv = false; let mut has_cur_inv = false; + let mut num_invs = 0; for inv in inventory_iter { + num_invs += 1; if prev_wanted_rc < first_nakamoto_rc { // assume the epoch 2.x inventory has this has_prev_inv = true; @@ -670,7 +662,7 @@ impl NakamotoDownloadStateMachine { } if !has_prev_inv || !has_cur_inv { - debug!("No peer has an inventory for either the previous ({}: available = {}) or current ({}: available = {}) wanted tenures", prev_wanted_rc, has_prev_inv, cur_wanted_rc, has_cur_inv); + debug!("No peer has an inventory for either the previous ({}: available = {}) or current ({}: available = {}) wanted tenures. Total inventories: {}", prev_wanted_rc, has_prev_inv, cur_wanted_rc, has_cur_inv, num_invs); return true; } @@ -682,16 +674,26 @@ impl NakamotoDownloadStateMachine { let mut available_considered = 0; for (_naddr, available) in tenure_block_ids.iter() { available_considered += available.len(); + debug!("Consider available tenures from {}", _naddr); for (_ch, tenure_info) in available.iter() { + debug!("Consider tenure info for {}: {:?}", _ch, tenure_info); if tenure_info.start_reward_cycle == prev_wanted_rc || tenure_info.end_reward_cycle == prev_wanted_rc { has_prev_rc_block = true; + debug!( + "Consider tenure info for {}: have a tenure in prev reward cycle {}", + _ch, prev_wanted_rc + ); } if tenure_info.start_reward_cycle == cur_wanted_rc || tenure_info.end_reward_cycle == cur_wanted_rc { has_cur_rc_block = true; + debug!( + "Consider tenure info for {}: have a tenure in cur reward cycle {}", + _ch, cur_wanted_rc + ); } } } @@ -720,14 +722,13 @@ impl NakamotoDownloadStateMachine { // this check is necessary because the check for .processed requires that a // child tenure block has been processed, which isn't guaranteed at a reward // cycle boundary - test_debug!("Tenure {:?} has been fully downloaded", &tenure_info); + debug!("Tenure {:?} has been fully downloaded", &tenure_info); continue; } if !tenure_info.processed { - test_debug!( + debug!( "Tenure {:?} is available from {} but not processed", - &tenure_info, - &_naddr + &tenure_info, &_naddr ); ret = true; } @@ -764,7 +765,7 @@ impl NakamotoDownloadStateMachine { let sort_tip = &network.burnchain_tip; let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - test_debug!("No network inventories"); + debug!("No network inventories"); return Err(NetError::PeerNotConnected); }; @@ -791,7 +792,7 @@ impl NakamotoDownloadStateMachine { .expect("FATAL: burnchain tip is before system start") }; - test_debug!( + debug!( "last_sort_height = {}, sort_rc = {}, next_sort_rc = {}, self.reward_cycle = {}, sort_tip.block_height = {}", last_sort_height, sort_rc, @@ -800,9 +801,9 @@ impl NakamotoDownloadStateMachine { sort_tip.block_height, ); - if sort_rc == next_sort_rc { - // not at a reward cycle boundary, os just extend self.wanted_tenures - test_debug!("Extend wanted tenures since no sort_rc change and we have tenure data"); + if self.reward_cycle == sort_rc { + // not at a reward cycle boundary, so just extend self.wanted_tenures + debug!("Extend wanted tenures since no sort_rc change and we have tenure data"); self.extend_wanted_tenures(network, sortdb)?; self.update_tenure_start_blocks(chainstate)?; return Ok(()); @@ -826,7 +827,7 @@ impl NakamotoDownloadStateMachine { invs.inventories.values(), ) } else { - test_debug!("No prev_wanted_tenures yet"); + debug!("No prev_wanted_tenures yet"); true }; if !can_advance_wanted_tenures { @@ -850,8 +851,8 @@ impl NakamotoDownloadStateMachine { &mut new_prev_wanted_tenures, )?; - test_debug!("new_wanted_tenures is now {:?}", &new_wanted_tenures); - test_debug!( + debug!("new_wanted_tenures is now {:?}", &new_wanted_tenures); + debug!( "new_prev_wanted_tenures is now {:?}", &new_prev_wanted_tenures ); @@ -889,7 +890,7 @@ impl NakamotoDownloadStateMachine { "Peer {} has no inventory for reward cycle {}", naddr, reward_cycle ); - test_debug!("Peer {} has the following inventory data: {:?}", naddr, inv); + debug!("Peer {} has the following inventory data: {:?}", naddr, inv); continue; }; for (i, wt) in wanted_tenures.iter().enumerate() { @@ -905,12 +906,9 @@ impl NakamotoDownloadStateMachine { let bit = u16::try_from(i).expect("FATAL: more sortitions than u16::MAX"); if !rc_inv.get(bit).unwrap_or(false) { // this neighbor does not have this tenure - test_debug!( + debug!( "Peer {} does not have sortition #{} in reward cycle {} (wt {:?})", - naddr, - bit, - reward_cycle, - &wt + naddr, bit, reward_cycle, &wt ); continue; } @@ -1045,7 +1043,7 @@ impl NakamotoDownloadStateMachine { } if Self::count_available_tenure_neighbors(&self.available_tenures) > 0 { // still have requests to try, so don't bother computing a new set of available tenures - test_debug!("Still have requests to try"); + debug!("Still have requests to try"); return; } if self.wanted_tenures.is_empty() { @@ -1054,7 +1052,7 @@ impl NakamotoDownloadStateMachine { } if inventories.is_empty() { // nothing to do - test_debug!("No inventories available"); + debug!("No inventories available"); return; } @@ -1064,7 +1062,7 @@ impl NakamotoDownloadStateMachine { .prev_wanted_tenures .as_ref() .map(|prev_wanted_tenures| { - test_debug!( + debug!( "Load availability for prev_wanted_tenures ({}) at rc {}", prev_wanted_tenures.len(), self.reward_cycle.saturating_sub(1) @@ -1089,7 +1087,7 @@ impl NakamotoDownloadStateMachine { .as_ref() .map(|prev_wanted_tenures| { // have both self.prev_wanted_tenures and self.wanted_tenures - test_debug!("Load tenure block IDs for prev_wanted_tenures ({}) and wanted_tenures ({}) at rc {}", prev_wanted_tenures.len(), self.wanted_tenures.len(), self.reward_cycle.saturating_sub(1)); + debug!("Load tenure block IDs for prev_wanted_tenures ({}) and wanted_tenures ({}) at rc {}", prev_wanted_tenures.len(), self.wanted_tenures.len(), self.reward_cycle.saturating_sub(1)); Self::find_tenure_block_ids( self.reward_cycle.saturating_sub(1), prev_wanted_tenures, @@ -1102,7 +1100,7 @@ impl NakamotoDownloadStateMachine { .unwrap_or(HashMap::new()); let mut tenure_block_ids = { - test_debug!( + debug!( "Load tenure block IDs for wanted_tenures ({}) at rc {}", self.wanted_tenures.len(), self.reward_cycle @@ -1171,9 +1169,9 @@ impl NakamotoDownloadStateMachine { prev_schedule }; - test_debug!("new schedule: {:?}", schedule); - test_debug!("new available: {:?}", &available); - test_debug!("new tenure_block_ids: {:?}", &tenure_block_ids); + debug!("new schedule: {:?}", schedule); + debug!("new available: {:?}", &available); + debug!("new tenure_block_ids: {:?}", &tenure_block_ids); self.tenure_download_schedule = schedule; self.tenure_block_ids = tenure_block_ids; @@ -1217,21 +1215,20 @@ impl NakamotoDownloadStateMachine { inventory_iter: impl Iterator, ) -> bool { if sort_tip.block_height < burnchain_height { - test_debug!( + debug!( "sort_tip {} < burn tip {}", - sort_tip.block_height, - burnchain_height + sort_tip.block_height, burnchain_height ); return false; } if wanted_tenures.is_empty() { - test_debug!("No wanted tenures"); + debug!("No wanted tenures"); return false; } if prev_wanted_tenures.is_empty() { - test_debug!("No prev wanted tenures"); + debug!("No prev wanted tenures"); return false; } @@ -1247,7 +1244,7 @@ impl NakamotoDownloadStateMachine { first_burn_height, inventory_iter, ) { - test_debug!("Still have unprocessed tenures, so we don't need unconfirmed tenures"); + debug!("Still have unprocessed tenures, so we don't need unconfirmed tenures"); return false; } @@ -1263,7 +1260,7 @@ impl NakamotoDownloadStateMachine { if is_available && !wt.processed { // a tenure is available but not yet processed, so we can't yet transition to // fetching unconfirmed tenures (we'd have no way to validate them). - test_debug!( + debug!( "Tenure {} is available but not yet processed", &wt.tenure_id_consensus_hash ); @@ -1331,7 +1328,7 @@ impl NakamotoDownloadStateMachine { highest_processed_block_id.clone(), ); - test_debug!("Request unconfirmed tenure state from neighbor {}", &naddr); + debug!("Request unconfirmed tenure state from neighbor {}", &naddr); downloaders.insert(naddr.clone(), unconfirmed_tenure_download); added += 1; false @@ -1390,7 +1387,7 @@ impl NakamotoDownloadStateMachine { HashMap>, HashMap, ) { - test_debug!("Run unconfirmed tenure downloaders"); + debug!("Run unconfirmed tenure downloaders"); let addrs: Vec<_> = downloaders.keys().map(|addr| addr.clone()).collect(); let mut finished = vec![]; @@ -1419,14 +1416,20 @@ impl NakamotoDownloadStateMachine { // send requests for (naddr, downloader) in downloaders.iter_mut() { if downloader.is_done() { + debug!( + "Downloader for {:?} is done (finished {})", + &downloader.unconfirmed_tenure_id(), + naddr + ); finished.push(naddr.clone()); continue; } if neighbor_rpc.has_inflight(&naddr) { + debug!("Peer {} has an inflight request", naddr); continue; } - test_debug!( + debug!( "Send request to {} for tenure {:?} (state {})", &naddr, &downloader.unconfirmed_tenure_id(), @@ -1455,11 +1458,11 @@ impl NakamotoDownloadStateMachine { // handle responses for (naddr, response) in neighbor_rpc.collect_replies(network) { let Some(downloader) = downloaders.get_mut(&naddr) else { - test_debug!("Got rogue response from {}", &naddr); + debug!("Got rogue response from {}", &naddr); continue; }; - test_debug!("Got response from {}", &naddr); + debug!("Got response from {}", &naddr); let blocks_opt = match downloader.handle_next_download_response( response, sortdb, @@ -1501,7 +1504,7 @@ impl NakamotoDownloadStateMachine { // don't start this unless the downloader is actually done (this should always be // the case, but don't tempt fate with an assert!) if downloader.is_done() { - test_debug!( + debug!( "Will fetch the highest complete tenure from {:?}", &downloader.unconfirmed_tenure_id() ); @@ -1510,9 +1513,7 @@ impl NakamotoDownloadStateMachine { } } } else { - test_debug!( - "Will not make highest-complete tenure downloader (not a Nakamoto tenure)" - ); + debug!("Will not make highest-complete tenure downloader (not a Nakamoto tenure)"); } unconfirmed_blocks.insert(naddr.clone(), blocks); @@ -1676,14 +1677,12 @@ impl NakamotoDownloadStateMachine { debug!("NakamotoDownloadStateMachine in state {}", &self.state); let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - test_debug!("No network inventories"); + debug!("No network inventories"); return HashMap::new(); }; - test_debug!( + debug!( "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}, state={}", - burnchain_height, - network.burnchain_tip.block_height, - &self.state + burnchain_height, network.burnchain_tip.block_height, &self.state ); self.update_available_tenures( &invs.inventories, @@ -1704,7 +1703,7 @@ impl NakamotoDownloadStateMachine { // no longer mutably borrowed. let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - test_debug!("No network inventories"); + debug!("No network inventories"); return HashMap::new(); }; @@ -1759,7 +1758,7 @@ impl NakamotoDownloadStateMachine { // no longer mutably borrowed. let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - test_debug!("No network inventories"); + debug!("No network inventories"); return HashMap::new(); }; @@ -1824,7 +1823,7 @@ impl NakamotoDownloadStateMachine { ibd: bool, ) -> Result>, NetError> { self.nakamoto_tip = network.stacks_tip.block_id(); - test_debug!("Downloader: Nakamoto tip is {:?}", &self.nakamoto_tip); + debug!("Downloader: Nakamoto tip is {:?}", &self.nakamoto_tip); self.update_wanted_tenures(&network, sortdb, chainstate)?; self.update_processed_tenures(chainstate)?; let new_blocks = self.run_downloads(burnchain_height, network, sortdb, chainstate, ibd); From b2cb072658544d93dd71516f6c3390a27e20bac0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:28:25 -0400 Subject: [PATCH 0622/1400] chore: more debugging --- stackslib/src/net/download/nakamoto/tenure.rs | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 5e2e06c41a..21d06d1b2c 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -172,11 +172,12 @@ impl TenureStartEnd { let mut tenure_block_ids = AvailableTenures::new(); let mut last_tenure = 0; let mut last_tenure_ch = None; + debug!("Find available tenures in inventory {:?} rc {}", invs, rc); for (i, wt) in wanted_tenures.iter().enumerate() { // advance to next tenure-start sortition let bit = u16::try_from(i).expect("FATAL: more sortitions than u16::MAX"); if !invbits.get(bit).unwrap_or(false) { - test_debug!("i={} bit not set", i); + debug!("i={} bit not set", i); continue; } @@ -187,12 +188,12 @@ impl TenureStartEnd { let bit = u16::try_from(*j).expect("FATAL: more sortitions than u16::MAX"); invbits.get(bit).unwrap_or(false) }) else { - test_debug!("i={} out of wanted_tenures", i); + debug!("i={} out of wanted_tenures", i); break; }; let Some(wt_start) = wanted_tenures.get(wt_start_idx) else { - test_debug!("i={} no start wanted tenure", i); + debug!("i={} no start wanted tenure", i); break; }; @@ -200,12 +201,12 @@ impl TenureStartEnd { let bit = u16::try_from(*j).expect("FATAL: more sortitions than u16::MAX"); invbits.get(bit).unwrap_or(false) }) else { - test_debug!("i={} out of wanted_tenures", i); + debug!("i={} out of wanted_tenures", i); break; }; let Some(wt_end) = wanted_tenures.get(wt_end_index) else { - test_debug!("i={} no end wanted tenure", i); + debug!("i={} no end wanted tenure", i); break; }; @@ -217,7 +218,7 @@ impl TenureStartEnd { rc, wt.processed, ); - test_debug!( + debug!( "i={}, len={}; {:?}", i, wanted_tenures.len(), @@ -229,7 +230,7 @@ impl TenureStartEnd { let Some(next_wanted_tenures) = next_wanted_tenures else { // nothing more to do - test_debug!("No next_wanted_tenures"); + debug!("No next_wanted_tenures"); return Some(tenure_block_ids); }; @@ -237,10 +238,9 @@ impl TenureStartEnd { // the last tenure derived from it if let Some(last_tenure_ch) = last_tenure_ch.take() { if let Some(last_tenure) = tenure_block_ids.get_mut(&last_tenure_ch) { - test_debug!( + debug!( "Will directly fetch end-block {} for tenure {}", - &last_tenure.end_block_id, - &last_tenure.tenure_id_consensus_hash + &last_tenure.end_block_id, &last_tenure.tenure_id_consensus_hash ); last_tenure.fetch_end_block = true; } @@ -248,7 +248,7 @@ impl TenureStartEnd { let Some(next_invbits) = invs.tenures_inv.get(&rc.saturating_add(1)) else { // nothing more to do - test_debug!("no inventory for cycle {}", rc.saturating_add(1)); + debug!("no inventory for cycle {}", rc.saturating_add(1)); return Some(tenure_block_ids); }; @@ -256,7 +256,7 @@ impl TenureStartEnd { let iter_start = last_tenure; let iterator = wanted_tenures.get(iter_start..).unwrap_or(&[]); for (i, wt) in iterator.iter().enumerate() { - test_debug!( + debug!( "consider next wanted tenure which starts with i={} {:?}", iter_start + i, &wt @@ -265,7 +265,7 @@ impl TenureStartEnd { // advance to next tenure-start sortition let bit = u16::try_from(i + iter_start).expect("FATAL: more sortitions than u16::MAX"); if !invbits.get(bit).unwrap_or(false) { - test_debug!("i={} bit not set", i); + debug!("i={} bit not set", i); continue; } @@ -295,7 +295,7 @@ impl TenureStartEnd { }) }) else { - test_debug!( + debug!( "i={} out of wanted_tenures and next_wanted_tenures", iter_start + i ); @@ -314,7 +314,7 @@ impl TenureStartEnd { None } }) else { - test_debug!("i={} out of next_wanted_tenures", iter_start + i); + debug!("i={} out of next_wanted_tenures", iter_start + i); break; }; @@ -330,7 +330,7 @@ impl TenureStartEnd { ); tenure_start_end.fetch_end_block = true; - test_debug!( + debug!( "i={},len={},next_len={}; {:?}", iter_start + i, wanted_tenures.len(), From 3b2b9f90351fed7e341a451dd537dcc018140761 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:28:42 -0400 Subject: [PATCH 0623/1400] chore: more debugging, and also, don't set the downloader to idle unless it processes the message completely --- .../download/nakamoto/tenure_downloader.rs | 59 +++++++++---------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index c6e5ee0703..7197adf0b2 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -181,12 +181,9 @@ impl NakamotoTenureDownloader { start_signer_keys: RewardSet, end_signer_keys: RewardSet, ) -> Self { - test_debug!( + debug!( "Instantiate downloader to {} for tenure {}: {}-{}", - &naddr, - &tenure_id_consensus_hash, - &tenure_start_block_id, - &tenure_end_block_id, + &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, ); Self { tenure_id_consensus_hash, @@ -270,7 +267,7 @@ impl NakamotoTenureDownloader { self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); } else if let Some(tenure_end_block) = self.tenure_end_block.take() { // we already have the tenure-end block, so immediately proceed to accept it. - test_debug!( + debug!( "Preemptively process tenure-end block {} for tenure {}", tenure_end_block.block_id(), &self.tenure_id_consensus_hash @@ -312,10 +309,9 @@ impl NakamotoTenureDownloader { else { return Err(NetError::InvalidState); }; - test_debug!( + debug!( "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", - &self.naddr, - &end_block_id + &self.naddr, &end_block_id ); self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); Ok(()) @@ -327,10 +323,9 @@ impl NakamotoTenureDownloader { self.state { if wait_deadline < Instant::now() { - test_debug!( + debug!( "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", - &self.naddr, - &end_block_id + &self.naddr, &end_block_id ); self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); } @@ -530,11 +525,9 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidState); }; - test_debug!( + debug!( "Accepted tenure blocks for tenure {} cursor={} ({})", - &self.tenure_id_consensus_hash, - &block_cursor, - count + &self.tenure_id_consensus_hash, &block_cursor, count ); if earliest_block.block_id() != tenure_start_block.block_id() { // still have more blocks to download @@ -572,24 +565,23 @@ impl NakamotoTenureDownloader { ) -> Result, ()> { let request = match self.state { NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { - test_debug!("Request tenure-start block {}", &start_block_id); + debug!("Request tenure-start block {}", &start_block_id); StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) } NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { // we're waiting for some other downloader's block-fetch to complete - test_debug!( + debug!( "Waiting for tenure-end block {} until {:?}", - &_block_id, - _deadline + &_block_id, _deadline ); return Ok(None); } NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { - test_debug!("Request tenure-end block {}", &end_block_id); + debug!("Request tenure-end block {}", &end_block_id); StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) } NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { - test_debug!("Downloading tenure ending at {}", &end_block_id); + debug!("Downloading tenure ending at {}", &end_block_id); StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) } NakamotoTenureDownloadState::Done => { @@ -613,7 +605,7 @@ impl NakamotoTenureDownloader { neighbor_rpc: &mut NeighborRPC, ) -> Result { if neighbor_rpc.has_inflight(&self.naddr) { - test_debug!("Peer {} has an inflight request", &self.naddr); + debug!("Peer {} has an inflight request", &self.naddr); return Ok(true); } if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { @@ -651,10 +643,9 @@ impl NakamotoTenureDownloader { &mut self, response: StacksHttpResponse, ) -> Result>, NetError> { - self.idle = true; match self.state { NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { - test_debug!( + debug!( "Got download response for tenure-start block {}", &_block_id ); @@ -663,23 +654,26 @@ impl NakamotoTenureDownloader { e })?; self.try_accept_tenure_start_block(block)?; + self.idle = true; Ok(None) } NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { - test_debug!("Invalid state -- Got download response for WaitForTenureBlock"); + debug!("Invalid state -- Got download response for WaitForTenureBlock"); + self.idle = true; Err(NetError::InvalidState) } NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { - test_debug!("Got download response to tenure-end block {}", &_block_id); + debug!("Got download response to tenure-end block {}", &_block_id); let block = response.decode_nakamoto_block().map_err(|e| { warn!("Failed to decode response for a Nakamoto block: {:?}", &e); e })?; self.try_accept_tenure_end_block(&block)?; + self.idle = true; Ok(None) } NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { - test_debug!( + debug!( "Got download response for tenure blocks ending at {}", &_end_block_id ); @@ -687,9 +681,14 @@ impl NakamotoTenureDownloader { warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); e })?; - self.try_accept_tenure_blocks(blocks) + let blocks_opt = self.try_accept_tenure_blocks(blocks)?; + self.idle = true; + Ok(blocks_opt) + } + NakamotoTenureDownloadState::Done => { + self.idle = true; + Err(NetError::InvalidState) } - NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), } } From 1c9a7050587c1453576a400b9e18ec48e4d3d1cb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:29:09 -0400 Subject: [PATCH 0624/1400] chore: more debugging --- .../nakamoto/tenure_downloader_set.rs | 95 +++++++++---------- 1 file changed, 44 insertions(+), 51 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 8a154637cf..337c8d1cd6 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -98,10 +98,9 @@ impl NakamotoTenureDownloaderSet { /// Assign the given peer to the given downloader state machine. Allocate a slot for it if /// needed. fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { - test_debug!( + debug!( "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, - &naddr + &downloader.tenure_id_consensus_hash, &naddr ); if let Some(idx) = self.peers.get(&naddr) { self.downloaders[*idx] = Some(downloader); @@ -155,7 +154,7 @@ impl NakamotoTenureDownloaderSet { ) { for (naddr, downloader) in iter { if self.has_downloader(&naddr) { - test_debug!("Already have downloader for {}", &naddr); + debug!("Already have downloader for {}", &naddr); continue; } self.add_downloader(naddr, downloader); @@ -202,7 +201,7 @@ impl NakamotoTenureDownloaderSet { if downloader.is_done() { continue; } - test_debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); + debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); return false; } true @@ -220,10 +219,9 @@ impl NakamotoTenureDownloaderSet { return false; }; - test_debug!( + debug!( "Peer {} already bound to downloader for {}", - &naddr, - &_downloader.tenure_id_consensus_hash + &naddr, &_downloader.tenure_id_consensus_hash ); return true; } @@ -240,11 +238,9 @@ impl NakamotoTenureDownloaderSet { if downloader.naddr != naddr { continue; } - test_debug!( + debug!( "Assign peer {} to work on downloader for {} in state {}", - &naddr, - &downloader.tenure_id_consensus_hash, - &downloader.state + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state ); self.peers.insert(naddr, i); return true; @@ -263,15 +259,14 @@ impl NakamotoTenureDownloaderSet { continue; }; let Some(downloader) = downloader_opt else { - test_debug!("Remove peer {} for null download {}", &naddr, i); + debug!("Remove peer {} for null download {}", &naddr, i); idled.push(naddr.clone()); continue; }; if downloader.idle || downloader.is_waiting() { - test_debug!( + debug!( "Remove idled peer {} for tenure download {}", - &naddr, - &downloader.tenure_id_consensus_hash + &naddr, &downloader.tenure_id_consensus_hash ); idled.push(naddr.clone()); } @@ -317,7 +312,7 @@ impl NakamotoTenureDownloaderSet { &mut self, tenure_start_blocks: &HashMap, ) -> Vec { - test_debug!( + debug!( "handle tenure-end blocks: {:?}", &tenure_start_blocks.keys().collect::>() ); @@ -354,7 +349,7 @@ impl NakamotoTenureDownloaderSet { continue; }; if &downloader.tenure_id_consensus_hash == tenure_id { - test_debug!( + debug!( "Have downloader for tenure {} already (idle={}, waiting={}, state={})", tenure_id, downloader.idle, @@ -407,7 +402,7 @@ impl NakamotoTenureDownloaderSet { if !last_available_tenures.contains(&downloader.tenure_end_block_id) { continue; } - test_debug!( + debug!( "Transition downloader for {} from waiting to fetching", &downloader.tenure_id_consensus_hash ); @@ -431,11 +426,11 @@ impl NakamotoTenureDownloaderSet { count: usize, current_reward_cycles: &BTreeMap, ) { - test_debug!("schedule: {:?}", schedule); - test_debug!("available: {:?}", &available); - test_debug!("tenure_block_ids: {:?}", &tenure_block_ids); - test_debug!("inflight: {}", self.inflight()); - test_debug!( + debug!("schedule: {:?}", schedule); + debug!("available: {:?}", &available); + debug!("tenure_block_ids: {:?}", &tenure_block_ids); + debug!("inflight: {}", self.inflight()); + debug!( "count: {}, running: {}, scheduled: {}", count, self.num_downloaders(), @@ -450,24 +445,24 @@ impl NakamotoTenureDownloaderSet { break; }; if self.completed_tenures.contains(&ch) { - test_debug!("Already successfully downloaded tenure {}", &ch); + debug!("Already successfully downloaded tenure {}", &ch); schedule.pop_front(); continue; } let Some(neighbors) = available.get_mut(ch) else { // not found on any neighbors, so stop trying this tenure - test_debug!("No neighbors have tenure {}", ch); + debug!("No neighbors have tenure {}", ch); schedule.pop_front(); continue; }; if neighbors.is_empty() { // no more neighbors to try - test_debug!("No more neighbors can serve tenure {}", ch); + debug!("No more neighbors can serve tenure {}", ch); schedule.pop_front(); continue; } let Some(naddr) = neighbors.pop() else { - test_debug!("No more neighbors can serve tenure {}", ch); + debug!("No more neighbors can serve tenure {}", ch); schedule.pop_front(); continue; }; @@ -481,20 +476,20 @@ impl NakamotoTenureDownloaderSet { let Some(available_tenures) = tenure_block_ids.get(&naddr) else { // this peer doesn't have any known tenures, so try the others - test_debug!("No tenures available from {}", &naddr); + debug!("No tenures available from {}", &naddr); continue; }; let Some(tenure_info) = available_tenures.get(ch) else { // this peer does not have a tenure start/end block for this tenure, so try the // others. - test_debug!("Neighbor {} does not serve tenure {}", &naddr, ch); + debug!("Neighbor {} does not serve tenure {}", &naddr, ch); continue; }; let Some(Some(start_reward_set)) = current_reward_cycles .get(&tenure_info.start_reward_cycle) .map(|cycle_info| cycle_info.reward_set()) else { - test_debug!( + debug!( "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", tenure_info.start_reward_cycle, &tenure_info @@ -506,7 +501,7 @@ impl NakamotoTenureDownloaderSet { .get(&tenure_info.end_reward_cycle) .map(|cycle_info| cycle_info.reward_set()) else { - test_debug!( + debug!( "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", tenure_info.end_reward_cycle, &tenure_info @@ -515,7 +510,7 @@ impl NakamotoTenureDownloaderSet { continue; }; - test_debug!( + debug!( "Download tenure {} (start={}, end={}) (rc {},{})", &ch, &tenure_info.start_block_id, @@ -532,7 +527,7 @@ impl NakamotoTenureDownloaderSet { end_reward_set.clone(), ); - test_debug!("Request tenure {} from neighbor {}", ch, &naddr); + debug!("Request tenure {} from neighbor {}", ch, &naddr); self.add_downloader(naddr, tenure_download); schedule.pop_front(); } @@ -561,27 +556,25 @@ impl NakamotoTenureDownloaderSet { // send requests for (naddr, index) in self.peers.iter() { if neighbor_rpc.has_inflight(&naddr) { - test_debug!("Peer {} has an inflight request", &naddr); + debug!("Peer {} has an inflight request", &naddr); continue; } let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - test_debug!("No downloader for {}", &naddr); + debug!("No downloader for {}", &naddr); continue; }; if downloader.is_done() { - test_debug!("Downloader for {} is done", &naddr); + debug!("Downloader for {} is done", &naddr); finished.push(naddr.clone()); finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); continue; } - test_debug!( + debug!( "Send request to {} for tenure {} (state {})", - &naddr, - &downloader.tenure_id_consensus_hash, - &downloader.state + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state ); let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - test_debug!("Downloader for {} failed; this peer is dead", &naddr); + debug!("Downloader for {} failed; this peer is dead", &naddr); neighbor_rpc.add_dead(network, naddr); continue; }; @@ -595,12 +588,12 @@ impl NakamotoTenureDownloaderSet { // clear dead, broken, and done for naddr in addrs.iter() { if neighbor_rpc.is_dead_or_broken(network, naddr) { - test_debug!("Remove dead/broken downloader for {}", &naddr); + debug!("Remove dead/broken downloader for {}", &naddr); self.clear_downloader(&naddr); } } for done_naddr in finished.drain(..) { - test_debug!("Remove finished downloader for {}", &done_naddr); + debug!("Remove finished downloader for {}", &done_naddr); self.clear_downloader(&done_naddr); } for done_tenure in finished_tenures.drain(..) { @@ -610,14 +603,14 @@ impl NakamotoTenureDownloaderSet { // handle responses for (naddr, response) in neighbor_rpc.collect_replies(network) { let Some(index) = self.peers.get(&naddr) else { - test_debug!("No downloader for {}", &naddr); + debug!("No downloader for {}", &naddr); continue; }; let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - test_debug!("No downloader for {}", &naddr); + debug!("No downloader for {}", &naddr); continue; }; - test_debug!("Got response from {}", &naddr); + debug!("Got response from {}", &naddr); let Ok(blocks_opt) = downloader .handle_next_download_response(response) @@ -626,7 +619,7 @@ impl NakamotoTenureDownloaderSet { e }) else { - test_debug!("Failed to handle download response from {}", &naddr); + debug!("Failed to handle download response from {}", &naddr); neighbor_rpc.add_dead(network, &naddr); continue; }; @@ -635,7 +628,7 @@ impl NakamotoTenureDownloaderSet { continue; }; - test_debug!( + debug!( "Got {} blocks for tenure {}", blocks.len(), &downloader.tenure_id_consensus_hash @@ -651,12 +644,12 @@ impl NakamotoTenureDownloaderSet { // clear dead, broken, and done for naddr in addrs.iter() { if neighbor_rpc.is_dead_or_broken(network, naddr) { - test_debug!("Remove dead/broken downloader for {}", &naddr); + debug!("Remove dead/broken downloader for {}", &naddr); self.clear_downloader(naddr); } } for done_naddr in finished.drain(..) { - test_debug!("Remove finished downloader for {}", &done_naddr); + debug!("Remove finished downloader for {}", &done_naddr); self.clear_downloader(&done_naddr); } for done_tenure in finished_tenures.drain(..) { From bd918aab999445633587b7bdc4e0eaf33ef73b74 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:29:21 -0400 Subject: [PATCH 0625/1400] fix: a peer isn't broken if it sends us data when we expected a handshake first --- stackslib/src/net/inv/epoch2x.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index b3092d8f12..fc5f073b2e 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -609,7 +609,9 @@ impl NeighborBlockStats { let mut broken = false; let mut stale = false; - if nack_data.error_code == NackErrorCodes::Throttled { + if nack_data.error_code == NackErrorCodes::Throttled + || nack_data.error_code == NackErrorCodes::HandshakeRequired + { // TODO: do something smarter here, like just back off return NodeStatus::Dead; } else if nack_data.error_code == NackErrorCodes::NoSuchBurnchainBlock { @@ -2125,6 +2127,7 @@ impl PeerNetwork { break; } + debug!("Inv sync state is {:?}", &stats.state); let again = match stats.state { InvWorkState::GetPoxInvBegin => self .inv_getpoxinv_begin(sortdb, nk, stats, request_timeout) From 6f7db31987ea43cdc0718aa527bd4df046bbc521 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:29:55 -0400 Subject: [PATCH 0626/1400] fix: try to fetch the reward cycle _after_ our highest one, so the downloader has it on a reward cycle boundary. Don't treat it as an error if the node doesn't have it yet (which it won't, most of the time, except on reward cycle boundaries) --- stackslib/src/net/inv/nakamoto.rs | 63 ++++++++++++++++++++++--------- 1 file changed, 46 insertions(+), 17 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 491d0bcaca..5b09ace396 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -30,8 +30,8 @@ use crate::net::db::PeerDB; use crate::net::neighbors::comms::PeerNetworkComms; use crate::net::p2p::PeerNetwork; use crate::net::{ - Error as NetError, GetNakamotoInvData, NakamotoInvData, NeighborAddress, NeighborComms, - NeighborKey, StacksMessage, StacksMessageType, + Error as NetError, GetNakamotoInvData, NackErrorCodes, NakamotoInvData, NeighborAddress, + NeighborComms, NeighborKey, StacksMessage, StacksMessageType, }; use crate::util_lib::db::Error as DBError; @@ -86,14 +86,14 @@ impl InvTenureInfo { tenure_id_consensus_hash, )? .map(|tenure| { - test_debug!("BlockFound tenure for {}", &tenure_id_consensus_hash); + debug!("BlockFound tenure for {}", &tenure_id_consensus_hash); Self { tenure_id_consensus_hash: tenure.tenure_id_consensus_hash, parent_tenure_id_consensus_hash: tenure.prev_tenure_id_consensus_hash, } }) .or_else(|| { - test_debug!("No BlockFound tenure for {}", &tenure_id_consensus_hash); + debug!("No BlockFound tenure for {}", &tenure_id_consensus_hash); None })) } @@ -224,12 +224,16 @@ impl InvGenerator { }; let parent_sortition_consensus_hash = cur_sortition_info.parent_consensus_hash.clone(); - test_debug!("Get sortition and tenure info for height {}. cur_consensus_hash = {}, cur_tenure_info = {:?}, cur_sortition_info = {:?}", cur_height, &cur_consensus_hash, &cur_tenure_opt, cur_sortition_info); + debug!("Get sortition and tenure info for height {}. cur_consensus_hash = {}, cur_tenure_info = {:?}, cur_sortition_info = {:?}", cur_height, &cur_consensus_hash, &cur_tenure_opt, cur_sortition_info); if let Some(cur_tenure_info) = cur_tenure_opt.as_ref() { // a tenure was active when this sortition happened... if cur_tenure_info.tenure_id_consensus_hash == cur_consensus_hash { // ...and this tenure started in this sortition + debug!( + "Tenure was started for {} (height {})", + cur_consensus_hash, cur_height + ); tenure_status.push(true); cur_tenure_opt = self.get_processed_tenure( chainstate, @@ -238,11 +242,19 @@ impl InvGenerator { )?; } else { // ...but this tenure did not start in this sortition + debug!( + "Tenure was NOT started for {} (bit {})", + cur_consensus_hash, cur_height + ); tenure_status.push(false); } } else { // no active tenure during this sortition. Check the parent sortition to see if a // tenure begain there. + debug!( + "No winning sortition for {} (bit {})", + cur_consensus_hash, cur_height + ); tenure_status.push(false); cur_tenure_opt = self.get_processed_tenure( chainstate, @@ -260,6 +272,10 @@ impl InvGenerator { } tenure_status.reverse(); + debug!( + "Tenure bits off of {} and {}: {:?}", + nakamoto_tip, &tip.consensus_hash, &tenure_status + ); Ok(tenure_status) } } @@ -370,7 +386,7 @@ impl NakamotoTenureInv { /// Adjust the next reward cycle to query. /// Returns the reward cycle to query. pub fn next_reward_cycle(&mut self) -> u64 { - test_debug!("Next reward cycle: {}", self.cur_reward_cycle + 1); + debug!("Next reward cycle: {}", self.cur_reward_cycle + 1); let query_rc = self.cur_reward_cycle; self.cur_reward_cycle = self.cur_reward_cycle.saturating_add(1); query_rc @@ -383,7 +399,7 @@ impl NakamotoTenureInv { if self.start_sync_time + inv_sync_interval <= now && (self.cur_reward_cycle >= cur_rc || !self.online) { - test_debug!("Reset inv comms for {}", &self.neighbor_address); + debug!("Reset inv comms for {}", &self.neighbor_address); self.online = true; self.start_sync_time = now; self.cur_reward_cycle = start_rc; @@ -473,7 +489,11 @@ impl NakamotoTenureInv { StacksMessageType::Nack(nack_data) => { info!("{:?}: remote peer NACKed our GetNakamotoInv", network.get_local_peer(); "error_code" => nack_data.error_code); - self.set_online(false); + + if nack_data.error_code != NackErrorCodes::NoSuchBurnchainBlock { + // any other error besides this one is a problem + self.set_online(false); + } return Ok(false); } _ => { @@ -557,7 +577,7 @@ impl NakamotoInvStateMachine { let reorg = PeerNetwork::is_reorg(self.last_sort_tip.as_ref(), tip, sortdb); if reorg { // drop the last two reward cycles - test_debug!("Detected reorg! Refreshing inventory consensus hashes"); + debug!("Detected reorg! Refreshing inventory consensus hashes"); let highest_rc = self .reward_cycle_consensus_hashes .last_key_value() @@ -585,10 +605,9 @@ impl NakamotoInvStateMachine { ) .expect("FATAL: snapshot occurred before system start"); - test_debug!( + debug!( "Load all reward cycle consensus hashes from {} to {}", - highest_rc, - tip_rc + highest_rc, tip_rc ); for rc in highest_rc..=tip_rc { if self.reward_cycle_consensus_hashes.contains_key(&rc) { @@ -599,7 +618,7 @@ impl NakamotoInvStateMachine { warn!("Failed to load consensus hash for reward cycle {}", rc); return Err(DBError::NotFoundError.into()); }; - test_debug!("Inv reward cycle consensus hash for {} is {}", rc, &ch); + debug!("Inv reward cycle consensus hash for {} is {}", rc, &ch); self.reward_cycle_consensus_hashes.insert(rc, ch); } Ok(tip_rc) @@ -628,6 +647,7 @@ impl NakamotoInvStateMachine { // make sure we know all consensus hashes for all reward cycles. let current_reward_cycle = self.update_reward_cycle_consensus_hashes(&network.burnchain_tip, sortdb)?; + let nakamoto_start_height = network .get_epoch_by_epoch_id(StacksEpochId::Epoch30) .start_height; @@ -639,6 +659,12 @@ impl NakamotoInvStateMachine { // we're updating inventories, so preserve the state we have let mut new_inventories = HashMap::new(); let event_ids: Vec = network.iter_peer_event_ids().map(|e_id| *e_id).collect(); + + debug!( + "Send GetNakamotoInv to up to {} peers (ibd={})", + event_ids.len(), + ibd + ); for event_id in event_ids.into_iter() { let Some(convo) = network.get_p2p_convo(event_id) else { continue; @@ -677,12 +703,15 @@ impl NakamotoInvStateMachine { ) }); - let proceed = inv.getnakamotoinv_begin(network, current_reward_cycle); + // try to get all of the reward cycles we know about, plus the next one. We try to get + // the next one as well in case we're at a reward cycle boundary, but we're not at the + // chain tip -- the block downloader still needs that next inventory to proceed. + let proceed = inv.getnakamotoinv_begin(network, current_reward_cycle.saturating_add(1)); let inv_rc = inv.reward_cycle(); new_inventories.insert(naddr.clone(), inv); if self.comms.has_inflight(&naddr) { - test_debug!( + debug!( "{:?}: still waiting for reply from {}", network.get_local_peer(), &naddr @@ -732,7 +761,7 @@ impl NakamotoInvStateMachine { let num_msgs = replies.len(); for (naddr, reply) in replies.into_iter() { - test_debug!( + debug!( "{:?}: got reply from {}: {:?}", network.get_local_peer(), &naddr, @@ -833,7 +862,7 @@ impl PeerNetwork { /// Return whether or not we learned something pub fn do_network_inv_sync_nakamoto(&mut self, sortdb: &SortitionDB, ibd: bool) -> bool { if cfg!(test) && self.connection_opts.disable_inv_sync { - test_debug!("{:?}: inv sync is disabled", &self.local_peer); + debug!("{:?}: inv sync is disabled", &self.local_peer); return false; } From 4df8d6d869c5a2a70bc9394a75ad01ea3a85d807 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:47:17 -0400 Subject: [PATCH 0627/1400] chore: more debug output --- .../nakamoto/tenure_downloader_unconfirmed.rs | 32 +++++++++---------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index d51e99d5a1..c96f718d2b 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -194,8 +194,8 @@ impl NakamotoUnconfirmedTenureDownloader { return Err(NetError::InvalidState); } - test_debug!("Got tenure info {:?}", remote_tenure_tip); - test_debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); + debug!("Got tenure info {:?}", remote_tenure_tip); + debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); // authenticate consensus hashes against canonical chain history let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( @@ -381,10 +381,9 @@ impl NakamotoUnconfirmedTenureDownloader { ); } - test_debug!( + debug!( "Will validate unconfirmed blocks with reward sets in ({},{})", - parent_tenure_rc, - tenure_rc + parent_tenure_rc, tenure_rc ); self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); @@ -547,7 +546,7 @@ impl NakamotoUnconfirmedTenureDownloader { break; } - test_debug!("Got unconfirmed tenure block {}", &block.header.block_id()); + debug!("Got unconfirmed tenure block {}", &block.header.block_id()); // NOTE: this field can get updated by the downloader while this state-machine is in // this state. @@ -597,7 +596,7 @@ impl NakamotoUnconfirmedTenureDownloader { let highest_processed_block_height = *self.highest_processed_block_height.as_ref().unwrap_or(&0); - test_debug!("Finished receiving unconfirmed tenure"); + debug!("Finished receiving unconfirmed tenure"); return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { blocks .into_iter() @@ -621,7 +620,7 @@ impl NakamotoUnconfirmedTenureDownloader { }; let next_block_id = earliest_block.header.parent_block_id.clone(); - test_debug!( + debug!( "Will resume fetching unconfirmed tenure blocks starting at {}", &next_block_id ); @@ -729,10 +728,9 @@ impl NakamotoUnconfirmedTenureDownloader { return Err(NetError::InvalidState); }; - test_debug!( + debug!( "Create downloader for highest complete tenure {} known by {}", - &tenure_tip.parent_consensus_hash, - &self.naddr, + &tenure_tip.parent_consensus_hash, &self.naddr, ); let ntd = NakamotoTenureDownloader::new( tenure_tip.parent_consensus_hash.clone(), @@ -790,7 +788,7 @@ impl NakamotoUnconfirmedTenureDownloader { neighbor_rpc: &mut NeighborRPC, ) -> Result<(), NetError> { if neighbor_rpc.has_inflight(&self.naddr) { - test_debug!("Peer {} has an inflight request", &self.naddr); + debug!("Peer {} has an inflight request", &self.naddr); return Ok(()); } if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { @@ -831,9 +829,9 @@ impl NakamotoUnconfirmedTenureDownloader { ) -> Result>, NetError> { match &self.state { NakamotoUnconfirmedDownloadState::GetTenureInfo => { - test_debug!("Got tenure-info response"); + debug!("Got tenure-info response"); let remote_tenure_info = response.decode_nakamoto_tenure_info()?; - test_debug!("Got tenure-info response: {:?}", &remote_tenure_info); + debug!("Got tenure-info response: {:?}", &remote_tenure_info); self.try_accept_tenure_info( sortdb, local_sort_tip, @@ -844,16 +842,16 @@ impl NakamotoUnconfirmedTenureDownloader { Ok(None) } NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { - test_debug!("Got tenure start-block response"); + debug!("Got tenure start-block response"); let block = response.decode_nakamoto_block()?; self.try_accept_unconfirmed_tenure_start_block(block)?; Ok(None) } NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { - test_debug!("Got unconfirmed tenure blocks response"); + debug!("Got unconfirmed tenure blocks response"); let blocks = response.decode_nakamoto_tenure()?; let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; - test_debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); + debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); Ok(accepted_opt) } NakamotoUnconfirmedDownloadState::Done => { From 1bbdacf73a09436ddb633c0d68e9edcca6310f71 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:47:45 -0400 Subject: [PATCH 0628/1400] chore: more debug output --- stackslib/src/net/neighbors/comms.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index 31c62a1f8f..8fdf38d87b 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -466,12 +466,19 @@ impl PeerNetworkComms { Ok(None) => { if let Some(rh) = req_opt { // keep trying + debug!("{:?}: keep polling {}", network.get_local_peer(), naddr); inflight.insert(naddr, rh); } continue; } Err(_e) => { // peer was already marked as dead in the given network set + debug!( + "{:?}: peer {} is dead: {:?}", + network.get_local_peer(), + naddr, + &_e + ); continue; } }; From d0d90360e6082fd53e2103a85dcef15ed8b7071c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:47:57 -0400 Subject: [PATCH 0629/1400] chore: more debug output --- stackslib/src/net/neighbors/rpc.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/stackslib/src/net/neighbors/rpc.rs b/stackslib/src/net/neighbors/rpc.rs index c75074222d..9b0d2a1bdd 100644 --- a/stackslib/src/net/neighbors/rpc.rs +++ b/stackslib/src/net/neighbors/rpc.rs @@ -109,16 +109,22 @@ impl NeighborRPC { Ok(Some(response)) => response, Ok(None) => { // keep trying + debug!("Still waiting for next reply from {}", &naddr); inflight.insert(naddr, (event_id, request_opt)); continue; } Err(NetError::WaitingForDNS) => { // keep trying + debug!( + "Could not yet poll next reply from {}: waiting for DNS", + &naddr + ); inflight.insert(naddr, (event_id, request_opt)); continue; } Err(_e) => { // declare this neighbor as dead by default + debug!("Failed to poll next reply from {}: {:?}", &naddr, &_e); dead.push(naddr); continue; } @@ -201,6 +207,10 @@ impl NeighborRPC { }) })?; + debug!( + "Send request to {} on event {}: {:?}", + &naddr, event_id, &request + ); self.state.insert(naddr, (event_id, Some(request))); Ok(()) } From 2ec2bc0d3fb4d1c928cde698a696a65542cf0f67 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:48:16 -0400 Subject: [PATCH 0630/1400] chore: more debug output --- stackslib/src/net/p2p.rs | 127 ++++++++++++++++++++------------------- 1 file changed, 65 insertions(+), 62 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 861a6e6cfa..4a52945521 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -659,11 +659,9 @@ impl PeerNetwork { let (p2p_handle, bound_p2p_addr) = net.bind(my_addr)?; let (http_handle, bound_http_addr) = net.bind(http_addr)?; - test_debug!( + debug!( "{:?}: bound on p2p {:?}, http {:?}", - &self.local_peer, - bound_p2p_addr, - bound_http_addr + &self.local_peer, bound_p2p_addr, bound_http_addr ); self.network = Some(net); @@ -913,6 +911,12 @@ impl PeerNetwork { return Err(e); } Ok(sz) => { + if sz > 0 { + debug!( + "Sent {} bytes on p2p socket {:?} for conversation {:?}", + sz, client_sock, convo + ); + } total_sent += sz; if sz == 0 { break; @@ -1202,7 +1206,7 @@ impl PeerNetwork { let next_event_id = match self.network { None => { - test_debug!("{:?}: network not connected", &self.local_peer); + debug!("{:?}: network not connected", &self.local_peer); return Err(net_error::NotConnected); } Some(ref mut network) => { @@ -1510,7 +1514,7 @@ impl PeerNetwork { (convo.to_neighbor_key(), Some(neighbor)) } None => { - test_debug!( + debug!( "No such neighbor in peer DB, but will ban nevertheless: {:?}", convo.to_neighbor_key() ); @@ -1674,11 +1678,9 @@ impl PeerNetwork { // already connected? if let Some(event_id) = self.get_event_id(&neighbor_key) { - test_debug!( + debug!( "{:?}: already connected to {:?} on event {}", - &self.local_peer, - &neighbor_key, - event_id + &self.local_peer, &neighbor_key, event_id ); return Err(net_error::AlreadyConnected(event_id, neighbor_key.clone())); } @@ -1956,7 +1958,7 @@ impl PeerNetwork { match self.events.get(&peer_key) { None => { // not connected - test_debug!("Could not sign for peer {:?}: not connected", peer_key); + debug!("Could not sign for peer {:?}: not connected", peer_key); Err(net_error::PeerNotConnected) } Some(event_id) => self.sign_for_p2p(*event_id, message_payload), @@ -1976,7 +1978,7 @@ impl PeerNetwork { message_payload, ); } - test_debug!("Could not sign for peer {}: not connected", event_id); + debug!("Could not sign for peer {}: not connected", event_id); Err(net_error::PeerNotConnected) } @@ -1997,7 +1999,7 @@ impl PeerNetwork { message_payload, ); } - test_debug!("Could not sign for peer {}: not connected", event_id); + debug!("Could not sign for peer {}: not connected", event_id); Err(net_error::PeerNotConnected) } @@ -2071,7 +2073,7 @@ impl PeerNetwork { match (self.peers.remove(&event_id), self.sockets.remove(&event_id)) { (Some(convo), Some(sock)) => (convo, sock), (Some(convo), None) => { - test_debug!("{:?}: Rogue socket event {}", &self.local_peer, event_id); + debug!("{:?}: Rogue socket event {}", &self.local_peer, event_id); self.peers.insert(event_id, convo); return Err(net_error::PeerNotConnected); } @@ -2084,7 +2086,7 @@ impl PeerNetwork { return Err(net_error::PeerNotConnected); } (None, None) => { - test_debug!("{:?}: Rogue socket event {}", &self.local_peer, event_id); + debug!("{:?}: Rogue socket event {}", &self.local_peer, event_id); return Err(net_error::PeerNotConnected); } }; @@ -2213,7 +2215,7 @@ impl PeerNetwork { ) { Ok((convo_unhandled, alive)) => (convo_unhandled, alive), Err(_e) => { - test_debug!( + debug!( "{:?}: Connection to {:?} failed: {:?}", &self.local_peer, self.get_p2p_convo(*event_id), @@ -2225,7 +2227,7 @@ impl PeerNetwork { }; if !alive { - test_debug!( + debug!( "{:?}: Connection to {:?} is no longer alive", &self.local_peer, self.get_p2p_convo(*event_id), @@ -2412,11 +2414,9 @@ impl PeerNetwork { } }; if neighbor.allowed < 0 || (neighbor.allowed as u64) > now { - test_debug!( + debug!( "{:?}: event {} is allowed: {:?}", - &self.local_peer, - event_id, - &nk + &self.local_peer, event_id, &nk ); safe.insert(*event_id); } @@ -2503,17 +2503,19 @@ impl PeerNetwork { let mut relay_handles = std::mem::replace(&mut self.relay_handles, HashMap::new()); for (event_id, handle_list) in relay_handles.iter_mut() { if handle_list.len() == 0 { + debug!("No handles for event {}", event_id); drained.push(*event_id); continue; } - test_debug!( + debug!( "Flush {} relay handles to event {}", handle_list.len(), event_id ); while handle_list.len() > 0 { + debug!("Flush {} relay handles", handle_list.len()); let res = self.with_p2p_convo(*event_id, |_network, convo, client_sock| { if let Some(handle) = handle_list.front_mut() { let (num_sent, flushed) = @@ -2525,12 +2527,9 @@ impl PeerNetwork { } }; - test_debug!( + debug!( "Flushed relay handle to {:?} ({:?}): sent={}, flushed={}", - client_sock, - convo, - num_sent, - flushed + client_sock, convo, num_sent, flushed ); return Ok((num_sent, flushed)); } @@ -2541,6 +2540,7 @@ impl PeerNetwork { Ok(Ok(x)) => x, Ok(Err(_)) | Err(_) => { // connection broken; next list + debug!("Relay handle broken to event {}", event_id); broken.push(*event_id); break; } @@ -2548,7 +2548,7 @@ impl PeerNetwork { if !flushed && num_sent == 0 { // blocked on this peer's socket - test_debug!("Relay handle to event {} is blocked", event_id); + debug!("Relay handle to event {} is blocked", event_id); break; } @@ -2582,7 +2582,7 @@ impl PeerNetwork { /// Return true if we finish, and true if we're throttled fn do_network_neighbor_walk(&mut self, ibd: bool) -> bool { if cfg!(test) && self.connection_opts.disable_neighbor_walk { - test_debug!("neighbor walk is disabled"); + debug!("neighbor walk is disabled"); return true; } @@ -2780,7 +2780,7 @@ impl PeerNetwork { fn need_public_ip(&mut self) -> bool { if !self.public_ip_learned { // IP was given, not learned. nothing to do - test_debug!("{:?}: IP address was given to us", &self.local_peer); + debug!("{:?}: IP address was given to us", &self.local_peer); return false; } if self.local_peer.public_ip_address.is_some() @@ -2788,7 +2788,7 @@ impl PeerNetwork { >= get_epoch_time_secs() { // still fresh - test_debug!("{:?}: learned IP address is still fresh", &self.local_peer); + debug!("{:?}: learned IP address is still fresh", &self.local_peer); return false; } let throttle_timeout = if self.local_peer.public_ip_address.is_none() { @@ -2851,7 +2851,7 @@ impl PeerNetwork { match self.do_learn_public_ip() { Ok(b) => { if !b { - test_debug!("{:?}: try do_learn_public_ip again", &self.local_peer); + debug!("{:?}: try do_learn_public_ip again", &self.local_peer); return false; } } @@ -2938,7 +2938,7 @@ impl PeerNetwork { for (_, block, _) in network_result.blocks.iter() { if block_set.contains(&block.block_hash()) { - test_debug!("Duplicate block {}", block.block_hash()); + debug!("Duplicate block {}", block.block_hash()); } block_set.insert(block.block_hash()); } @@ -2946,7 +2946,7 @@ impl PeerNetwork { for (_, mblocks, _) in network_result.confirmed_microblocks.iter() { for mblock in mblocks.iter() { if microblock_set.contains(&mblock.block_hash()) { - test_debug!("Duplicate microblock {}", mblock.block_hash()); + debug!("Duplicate microblock {}", mblock.block_hash()); } microblock_set.insert(mblock.block_hash()); } @@ -3760,7 +3760,7 @@ impl PeerNetwork { } None => { // skip this step -- no DNS client available - test_debug!( + debug!( "{:?}: no DNS client provided; skipping block download", &self.local_peer ); @@ -3866,7 +3866,7 @@ impl PeerNetwork { } None => { // skip this step -- no DNS client available - test_debug!( + debug!( "{:?}: no DNS client provided; skipping block download", &self.local_peer ); @@ -3915,7 +3915,11 @@ impl PeerNetwork { convo.to_neighbor_key(), ), None => { - test_debug!("No such neighbor event={}", event_id); + debug!( + "{:?}: No such neighbor event={}", + self.get_local_peer(), + event_id + ); return None; } }; @@ -3924,10 +3928,9 @@ impl PeerNetwork { let reciprocal_event_id = match self.find_reciprocal_event(event_id) { Some(re) => re, None => { - test_debug!( + debug!( "{:?}: no reciprocal conversation for {:?}", - &self.local_peer, - &neighbor_key + &self.local_peer, &neighbor_key ); return None; } @@ -3941,32 +3944,26 @@ impl PeerNetwork { convo.to_neighbor_key(), ), None => { - test_debug!( + debug!( "{:?}: No reciprocal conversation for {} (event={})", - &self.local_peer, - &neighbor_key, - event_id + &self.local_peer, &neighbor_key, event_id ); return None; } }; if !is_authenticated && !reciprocal_is_authenticated { - test_debug!( + debug!( "{:?}: {:?} and {:?} are not authenticated", - &self.local_peer, - &neighbor_key, - &reciprocal_neighbor_key + &self.local_peer, &neighbor_key, &reciprocal_neighbor_key ); return None; } if !is_outbound && !reciprocal_is_outbound { - test_debug!( + debug!( "{:?}: {:?} and {:?} are not outbound", - &self.local_peer, - &neighbor_key, - &reciprocal_neighbor_key + &self.local_peer, &neighbor_key, &reciprocal_neighbor_key ); return None; } @@ -3994,7 +3991,7 @@ impl PeerNetwork { /// for. Add them to our network pingbacks fn schedule_network_pingbacks(&mut self, event_ids: Vec) { if cfg!(test) && self.connection_opts.disable_pingbacks { - test_debug!("{:?}: pingbacks are disabled for testing", &self.local_peer); + debug!("{:?}: pingbacks are disabled for testing", &self.local_peer); return; } @@ -4076,7 +4073,7 @@ impl PeerNetwork { } } - test_debug!( + debug!( "{:?}: have {} pingbacks scheduled", &self.local_peer, self.walk_pingbacks.len() @@ -4247,7 +4244,7 @@ impl PeerNetwork { .as_stacks_nakamoto() .is_some(), }; - test_debug!( + debug!( "{:?}: Parent Stacks tip off of {} is {:?}", self.get_local_peer(), &stacks_tip_block_id, @@ -4261,7 +4258,7 @@ impl PeerNetwork { if self.current_reward_sets.len() > 3 { self.current_reward_sets.retain(|old_rc, _| { if (*old_rc).saturating_add(2) < rc { - test_debug!("Drop reward cycle info for cycle {}", old_rc); + debug!("Drop reward cycle info for cycle {}", old_rc); return false; } true @@ -4343,10 +4340,9 @@ impl PeerNetwork { anchor_block_hash: anchor_block_header.anchored_header.block_hash(), }; - test_debug!( + debug!( "Store cached reward set for reward cycle {} anchor block {}", - rc, - &rc_info.anchor_block_hash + rc, &rc_info.anchor_block_hash ); self.current_reward_sets.insert(rc, rc_info); } @@ -4469,6 +4465,13 @@ impl PeerNetwork { }; // update cached burnchain view for /v2/info + debug!( + "{:?}: chain view for burn block {} has stacks tip consensus {}", + &self.local_peer, + new_chain_view.burn_block_height, + &new_chain_view.rc_consensus_hash + ); + self.chain_view = new_chain_view; self.chain_view_stable_consensus_hash = new_chain_view_stable_consensus_hash; } @@ -4538,7 +4541,7 @@ impl PeerNetwork { .get_last_selected_anchor_block_txid()? .unwrap_or(Txid([0x00; 32])); - test_debug!( + debug!( "{:?}: chain view is {:?}", &self.get_local_peer(), &self.chain_view @@ -4588,12 +4591,12 @@ impl PeerNetwork { }; self.parent_stacks_tip = parent_stacks_tip; - test_debug!( + debug!( "{:?}: canonical Stacks tip is now {:?}", self.get_local_peer(), &self.stacks_tip ); - test_debug!( + debug!( "{:?}: parent canonical Stacks tip is now {:?}", self.get_local_peer(), &self.parent_stacks_tip From 12a2f48ed2c1dfb28ece0efad0d87a5d6112d203 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:48:28 -0400 Subject: [PATCH 0631/1400] chore: more debug output --- stackslib/src/net/stackerdb/db.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index d95d3ebbdb..1dab3f4052 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -324,6 +324,8 @@ impl<'a> StackerDBTx<'a> { } } + debug!("Reset slot {} of {}", slot_id, smart_contract); + // new slot, or existing slot with a different signer let qry = "INSERT OR REPLACE INTO chunks (stackerdb_id,signer,slot_id,version,write_time,data,data_hash,signature) VALUES (?1,?2,?3,?4,?5,?6,?7,?8)"; let mut stmt = self.sql_tx.prepare(&qry)?; From 0ec2cb53bccca34f295660e67d93b467d2617de1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:48:47 -0400 Subject: [PATCH 0632/1400] fix: if there are no peers, then immediately reset stackerdb sync --- stackslib/src/net/stackerdb/sync.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 8444ed5e55..85e76ea524 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -639,9 +639,10 @@ impl StackerDBSync { self.replicas = replicas; } debug!( - "{:?}: connect_begin: establish StackerDB sessions to {} neighbors", + "{:?}: connect_begin: establish StackerDB sessions to {} neighbors (out of {} p2p peers)", network.get_local_peer(), - self.replicas.len() + self.replicas.len(), + network.get_num_p2p_convos() ); if self.replicas.len() == 0 { // nothing to do @@ -1227,8 +1228,11 @@ impl StackerDBSync { let done = self.connect_begin(network)?; if done { self.state = StackerDBSyncState::ConnectFinish; - blocked = false; + } else { + // no replicas; try again + self.state = StackerDBSyncState::Finished; } + blocked = false; } StackerDBSyncState::ConnectFinish => { let done = self.connect_try_finish(network)?; @@ -1276,6 +1280,11 @@ impl StackerDBSync { { // someone pushed newer chunk data to us, and getting chunks is // enabled, so immediately go request them + debug!( + "{:?}: immediately retry StackerDB GetChunks on {} due to PushChunk NACK", + network.get_local_peer(), + &self.smart_contract_id + ); self.recalculate_chunk_request_schedule(network)?; self.state = StackerDBSyncState::GetChunks; } else { From 58d880e58fa01da4ab4488783ee975b9ede3810e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:49:07 -0400 Subject: [PATCH 0633/1400] chore: API sync --- stackslib/src/net/tests/download/nakamoto.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 9de9fb087b..5937f43384 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -1480,7 +1480,6 @@ fn test_make_tenure_downloaders() { NakamotoDownloadStateMachine::load_tenure_start_blocks( &wanted_tenures, chainstate, - &nakamoto_tip, &mut tenure_start_blocks, ) .unwrap(); From 491877a221d1900f5aa1fec082a9a57085a97188 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:49:22 -0400 Subject: [PATCH 0634/1400] fix: poll every second --- testnet/stacks-node/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index bdf3bd4c3d..d03bd422b2 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1334,7 +1334,7 @@ impl Config { /// the poll time is dependent on the first attempt time. pub fn get_poll_time(&self) -> u64 { let poll_timeout = if self.node.miner { - cmp::min(5000, self.miner.first_attempt_time_ms / 2) + cmp::min(1000, self.miner.first_attempt_time_ms / 2) } else { 5000 }; From 98b25fd5965a9800615d87f5602912acfe9bc7af Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:51:02 -0400 Subject: [PATCH 0635/1400] feat: store block signatures as we get them from other signers, and provide a menas of loading them back --- stacks-signer/src/signerdb.rs | 240 ++++++++++++++++++++++++++++++++-- 1 file changed, 228 insertions(+), 12 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 74cefbc44b..7266e6eb75 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -17,9 +17,10 @@ use std::path::Path; use std::time::SystemTime; -use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote}; +use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::util_lib::db::{ - query_row, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql, Error as DBError, + query_row, query_rows, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql, + Error as DBError, }; use clarity::types::chainstate::BurnchainHeaderHash; use clarity::util::get_epoch_time_secs; @@ -29,11 +30,42 @@ use rusqlite::{ }; use serde::{Deserialize, Serialize}; use slog::{slog_debug, slog_error}; +use stacks_common::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; use stacks_common::types::chainstate::ConsensusHash; use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, error}; use wsts::net::NonceRequest; +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +/// A vote across the signer set for a block +pub struct NakamotoBlockVote { + /// Signer signature hash (i.e. block hash) of the Nakamoto block + pub signer_signature_hash: Sha512Trunc256Sum, + /// Whether or not the block was rejected + pub rejected: bool, +} + +impl StacksMessageCodec for NakamotoBlockVote { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.signer_signature_hash)?; + if self.rejected { + write_next(fd, &1u8)?; + } + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let signer_signature_hash = read_next(fd)?; + let rejected_byte: Option = read_next(fd).ok(); + let rejected = rejected_byte.is_some(); + Ok(Self { + signer_signature_hash, + rejected, + }) + } +} + #[derive(Serialize, Deserialize, Debug, PartialEq, Default)] /// Information specific to Signer V1 pub struct BlockInfoV1 { @@ -153,7 +185,7 @@ pub struct SignerDb { db: Connection, } -static CREATE_BLOCKS_TABLE: &str = " +static CREATE_BLOCKS_TABLE_1: &str = " CREATE TABLE IF NOT EXISTS blocks ( reward_cycle INTEGER NOT NULL, signer_signature_hash TEXT NOT NULL, @@ -165,7 +197,7 @@ CREATE TABLE IF NOT EXISTS blocks ( PRIMARY KEY (reward_cycle, signer_signature_hash) ) STRICT"; -static CREATE_INDEXES: &str = " +static CREATE_INDEXES_1: &str = " CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (signed_over); CREATE INDEX IF NOT EXISTS blocks_consensus_hash ON blocks (consensus_hash); CREATE INDEX IF NOT EXISTS blocks_valid ON blocks ((json_extract(block_info, '$.valid'))); @@ -197,19 +229,66 @@ static DROP_SCHEMA_0: &str = " DROP TABLE IF EXISTS blocks; DROP TABLE IF EXISTS db_config;"; +static DROP_SCHEMA_1: &str = " + DROP TABLE IF EXISTS burn_blocks; + DROP TABLE IF EXISTS signer_states; + DROP TABLE IF EXISTS blocks; + DROP TABLE IF EXISTS db_config;"; + +static CREATE_BLOCKS_TABLE_2: &str = " +CREATE TABLE IF NOT EXISTS blocks ( + reward_cycle INTEGER NOT NULL, + signer_signature_hash TEXT NOT NULL, + block_info TEXT NOT NULL, + consensus_hash TEXT NOT NULL, + signed_over INTEGER NOT NULL, + broadcasted INTEGER NOT NULL, + stacks_height INTEGER NOT NULL, + burn_block_height INTEGER NOT NULL, + PRIMARY KEY (reward_cycle, signer_signature_hash) +) STRICT"; + +static CREATE_BLOCK_SIGNATURES_TABLE: &str = r#" +CREATE TABLE IF NOT EXISTS block_signatures ( + -- The block sighash commits to all of the stacks and burnchain state as of its parent, + -- as well as the tenure itself so there's no need to include the reward cycle. Just + -- the sighash is sufficient to uniquely identify the block across all burnchain, PoX, + -- and stacks forks. + signer_signature_hash TEXT NOT NULL, + -- signtaure itself + signature TEXT NOT NULL, + PRIMARY KEY (signature) +) STRICT;"#; + +static CREATE_INDEXES_2: &str = r#" +CREATE INDEX IF NOT EXISTS block_reward_cycle_and_signature ON block_signatures(signer_signature_hash); +"#; + static SCHEMA_1: &[&str] = &[ DROP_SCHEMA_0, CREATE_DB_CONFIG, CREATE_BURN_STATE_TABLE, - CREATE_BLOCKS_TABLE, + CREATE_BLOCKS_TABLE_1, CREATE_SIGNER_STATE_TABLE, - CREATE_INDEXES, + CREATE_INDEXES_1, "INSERT INTO db_config (version) VALUES (1);", ]; +static SCHEMA_2: &[&str] = &[ + DROP_SCHEMA_1, + CREATE_DB_CONFIG, + CREATE_BURN_STATE_TABLE, + CREATE_BLOCKS_TABLE_2, + CREATE_SIGNER_STATE_TABLE, + CREATE_BLOCK_SIGNATURES_TABLE, + CREATE_INDEXES_1, + CREATE_INDEXES_2, + "INSERT INTO db_config (version) VALUES (2);", +]; + impl SignerDb { /// The current schema version used in this build of the signer binary. - pub const SCHEMA_VERSION: u32 = 1; + pub const SCHEMA_VERSION: u32 = 2; /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path @@ -253,6 +332,20 @@ impl SignerDb { Ok(()) } + /// Migrate from schema 1 to schema 2 + fn schema_2_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 2 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_2.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + /// Either instantiate a new database, or migrate an existing one /// If the detected version of the existing database is 0 (i.e., a pre-migration /// logic DB, the DB will be dropped). @@ -262,7 +355,8 @@ impl SignerDb { let version = Self::get_schema_version(&sql_tx)?; match version { 0 => Self::schema_1_migration(&sql_tx)?, - 1 => break, + 1 => Self::schema_2_migration(&sql_tx)?, + 2 => break, x => return Err(DBError::Other(format!( "Database schema is newer than supported by this binary. Expected version = {}, Database version = {x}", Self::SCHEMA_VERSION, @@ -392,6 +486,7 @@ impl SignerDb { let hash = &block_info.signer_signature_hash(); let block_id = &block_info.block.block_id(); let signed_over = &block_info.signed_over; + let broadcasted = false; let vote = block_info .vote .as_ref() @@ -403,14 +498,16 @@ impl SignerDb { "sighash" => %hash, "block_id" => %block_id, "signed" => %signed_over, + "broadcasted" => %broadcasted, "vote" => vote ); self.db .execute( - "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)", + "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", params![ u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), block_json, signed_over, + &broadcasted, u64_to_sql(block_info.block.header.chain_length)?, block_info.block.header.consensus_hash.to_hex(), ], @@ -427,6 +524,70 @@ impl SignerDb { Ok(result.is_some()) } + + /// Record an observed block signature + pub fn add_block_signature( + &self, + block_sighash: &Sha512Trunc256Sum, + signature: &MessageSignature, + ) -> Result<(), DBError> { + let qry = "INSERT OR REPLACE INTO block_signatures (signer_signature_hash, signature) VALUES (?1, ?2);"; + let args = params![ + block_sighash, + serde_json::to_string(signature).map_err(|e| DBError::SerializationError(e))? + ]; + + debug!("Inserting block signature."; + "sighash" => %block_sighash, + "signature" => %signature); + + self.db.execute(qry, args)?; + Ok(()) + } + + /// Get all signatures for a block + pub fn get_block_signatures( + &self, + block_sighash: &Sha512Trunc256Sum, + ) -> Result, DBError> { + let qry = "SELECT signature FROM block_signatures WHERE signer_signature_hash = ?1"; + let args = params![block_sighash]; + let sigs_txt: Vec = query_rows(&self.db, qry, args)?; + let mut sigs = vec![]; + for sig_txt in sigs_txt.into_iter() { + let sig = serde_json::from_str(&sig_txt).map_err(|_| DBError::ParseError)?; + sigs.push(sig); + } + Ok(sigs) + } + + /// Mark a block as having been broadcasted + pub fn set_block_broadcasted( + &self, + reward_cycle: u64, + block_sighash: &Sha512Trunc256Sum, + ) -> Result<(), DBError> { + let qry = "UPDATE blocks SET broadcasted = 1 WHERE reward_cycle = ?1 AND signer_signature_hash = ?2"; + let args = params![u64_to_sql(reward_cycle)?, block_sighash]; + + debug!("Marking block {} as broadcasted", block_sighash); + self.db.execute(qry, args)?; + Ok(()) + } + + /// Is a block broadcasted already + pub fn is_block_broadcasted( + &self, + reward_cycle: u64, + block_sighash: &Sha512Trunc256Sum, + ) -> Result { + let qry = + "SELECT broadcasted FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2"; + let args = params![u64_to_sql(reward_cycle)?, block_sighash]; + + let broadcasted: i64 = query_row(&self.db, qry, args)?.unwrap_or(0); + Ok(broadcasted != 0) + } } fn try_deserialize(s: Option) -> Result, DBError> @@ -454,13 +615,12 @@ mod tests { use std::fs; use std::path::PathBuf; - use blockstack_lib::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoBlockVote, - }; + use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use clarity::util::secp256k1::MessageSignature; use libsigner::BlockProposal; use super::*; + use crate::signerdb::NakamotoBlockVote; fn _wipe_db(db_path: &PathBuf) { if fs::metadata(db_path).is_ok() { @@ -703,4 +863,60 @@ mod tests { Some("3.45.0".to_string()) ); } + + #[test] + fn add_and_get_block_signatures() { + let db_path = tmp_db_path(); + let db = SignerDb::new(db_path).expect("Failed to create signer db"); + + let block_id = Sha512Trunc256Sum::from_data("foo".as_bytes()); + let sig1 = MessageSignature([0x11; 65]); + let sig2 = MessageSignature([0x22; 65]); + + assert_eq!(db.get_block_signatures(&block_id).unwrap(), vec![]); + + db.add_block_signature(&block_id, &sig1).unwrap(); + assert_eq!( + db.get_block_signatures(&block_id).unwrap(), + vec![sig1.clone()] + ); + + db.add_block_signature(&block_id, &sig2).unwrap(); + assert_eq!( + db.get_block_signatures(&block_id).unwrap(), + vec![sig1.clone(), sig2.clone()] + ); + } + + #[test] + fn test_and_set_block_broadcasted() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + + let (block_info_1, _block_proposal) = create_block_override(|b| { + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.burn_height = 1; + }); + + db.insert_block(&block_info_1) + .expect("Unable to insert block into db"); + + assert!(!db + .is_block_broadcasted( + block_info_1.reward_cycle, + &block_info_1.signer_signature_hash() + ) + .unwrap()); + db.set_block_broadcasted( + block_info_1.reward_cycle, + &block_info_1.signer_signature_hash(), + ) + .unwrap(); + assert!(db + .is_block_broadcasted( + block_info_1.reward_cycle, + &block_info_1.signer_signature_hash() + ) + .unwrap()); + } } From 3da95c6ed6d46c760c89a1271ffc017e3ce0bde2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:51:31 -0400 Subject: [PATCH 0636/1400] feat: watch for signer-post events from other signers, and store their signatures. If we get enough, then put them into the target block's header and upload the signed block to the node --- stacks-signer/src/v0/signer.rs | 229 +++++++++++++++++++++++++++++++-- 1 file changed, 220 insertions(+), 9 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 93927b03fd..ba2559d78b 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -12,9 +12,11 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::{BTreeMap, HashMap}; use std::fmt::Debug; use std::sync::mpsc::Sender; +use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::PrivateKey; @@ -24,6 +26,8 @@ use libsigner::v0::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMe use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; @@ -50,6 +54,8 @@ pub struct Signer { pub signer_addresses: Vec, /// The reward cycle this signer belongs to pub reward_cycle: u64, + /// Reward set signer addresses and their weights + pub signer_weights: HashMap, /// SignerDB for state management pub signer_db: SignerDb, /// Configuration for proposal evaluation @@ -109,13 +115,23 @@ impl SignerTrait for Signer { match event { SignerEvent::BlockValidationResponse(block_validate_response) => { debug!("{self}: Received a block proposal result from the stacks node..."); - self.handle_block_validate_response(block_validate_response) + self.handle_block_validate_response(stacks_client, block_validate_response) } SignerEvent::SignerMessages(_signer_set, messages) => { debug!( - "{self}: Received {} messages from the other signers. Ignoring...", + "{self}: Received {} messages from the other signers", messages.len() ); + // try and gather signatures + for message in messages { + let SignerMessage::BlockResponse(block_response) = message else { + continue; + }; + let BlockResponse::Accepted((block_hash, signature)) = block_response else { + continue; + }; + self.handle_block_signature(stacks_client, block_hash, signature); + } } SignerEvent::MinerMessages(messages, miner_pubkey) => { debug!( @@ -202,16 +218,41 @@ impl From for Signer { let signer_db = SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); let proposal_config = ProposalEvalConfig::from(&signer_config); + + // compute signer addresses *in reward cycle order* + let signer_ids_and_addrs: BTreeMap<_, _> = signer_config + .signer_entries + .signer_ids + .iter() + .map(|(addr, id)| (*id, addr.clone())) + .collect(); + + let signer_addresses: Vec<_> = signer_ids_and_addrs.into_values().collect(); + + let signer_weights = signer_addresses + .iter() + .map(|addr| { + let Some(signer_id) = signer_config.signer_entries.signer_ids.get(addr) else { + panic!("Malformed config: no signer ID for {}", addr); + }; + let Some(key_ids) = signer_config.signer_entries.signer_key_ids.get(signer_id) + else { + panic!( + "Malformed config: no key IDs for signer ID {} ({})", + signer_id, addr + ); + }; + (addr.clone(), key_ids.len()) + }) + .collect(); + Self { private_key: signer_config.stacks_private_key, stackerdb, mainnet: signer_config.mainnet, signer_id: signer_config.signer_id, - signer_addresses: signer_config - .signer_entries - .signer_ids - .into_keys() - .collect(), + signer_addresses, + signer_weights, signer_slot_ids: signer_config.signer_slot_ids.clone(), reward_cycle: signer_config.reward_cycle, signer_db, @@ -260,7 +301,7 @@ impl Signer { ); return; } - // TODO: should add a check to ignore an old burn block height if we know its oudated. Would require us to store the burn block height we last saw on the side. + // TODO: should add a check to ignore an old burn block height if we know its outdated. Would require us to store the burn block height we last saw on the side. // the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); if let Some(block_info) = self @@ -387,8 +428,13 @@ impl Signer { } /// Handle the block validate response returned from our prior calls to submit a block for validation - fn handle_block_validate_response(&mut self, block_validate_response: &BlockValidateResponse) { + fn handle_block_validate_response( + &mut self, + stacks_client: &StacksClient, + block_validate_response: &BlockValidateResponse, + ) { debug!("{self}: Received a block validate response: {block_validate_response:?}"); + let mut signature_opt = None; let (response, block_info) = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { crate::monitoring::increment_block_validation_responses(true); @@ -414,6 +460,8 @@ impl Signer { .private_key .sign(&signer_signature_hash.0) .expect("Failed to sign block"); + + signature_opt = Some(signature.clone()); ( BlockResponse::accepted(signer_signature_hash, signature), block_info, @@ -461,5 +509,168 @@ impl Signer { self.signer_db .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + + if let Some(signature) = signature_opt { + // have to save the signature _after_ the block info + self.handle_block_signature( + stacks_client, + &block_info.signer_signature_hash(), + &signature, + ); + } + } + + /// Compute the signing weight and total weight, given a list of signatures + fn compute_signature_weight( + &self, + block_hash: &Sha512Trunc256Sum, + sigs: &[MessageSignature], + ) -> (u32, u32) { + let signing_weight = sigs.iter().fold(0usize, |signing_weight, sig| { + let weight = if let Ok(public_key) = + Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), sig) + { + let stacker_address = StacksAddress::p2pkh(self.mainnet, &public_key); + let stacker_weight = self.signer_weights.get(&stacker_address).unwrap_or(&0); + *stacker_weight + } else { + 0 + }; + signing_weight.saturating_add(weight) + }); + + let total_weight = self + .signer_weights + .values() + .fold(0usize, |acc, val| acc.saturating_add(*val)); + ( + u32::try_from(signing_weight) + .unwrap_or_else(|_| panic!("FATAL: signing weight exceeds u32::MAX")), + u32::try_from(total_weight) + .unwrap_or_else(|_| panic!("FATAL: total weight exceeds u32::MAX")), + ) + } + + /// Handle an observed signature from another signer + fn handle_block_signature( + &mut self, + stacks_client: &StacksClient, + block_hash: &Sha512Trunc256Sum, + signature: &MessageSignature, + ) { + debug!("{self}: Received a block-accept signature: ({block_hash}, {signature})"); + + // authenticate the signature -- it must be signed by one of the stacking set + let is_valid_sig = self + .signer_addresses + .iter() + .find(|addr| { + let Ok(public_key) = + Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), signature) + else { + return false; + }; + let stacker_address = StacksAddress::p2pkh(true, &public_key); + + // it only matters that the address hash bytes match + stacker_address.bytes == addr.bytes + }) + .is_some(); + + if !is_valid_sig { + debug!("{self}: Receive invalid signature {signature}. Will not store."); + return; + } + + self.signer_db + .add_block_signature(block_hash, signature) + .unwrap_or_else(|_| panic!("{self}: Failed to save block signature")); + + // do we have enough signatures to broadcast? + let signatures = self + .signer_db + .get_block_signatures(block_hash) + .unwrap_or_else(|_| panic!("{self}: Failed to load block signatures")); + + let (signature_weight, total_weight) = + self.compute_signature_weight(block_hash, &signatures); + let min_weight = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight) + .unwrap_or_else(|_| { + panic!("{self}: Failed to compute threshold weight for {total_weight}") + }); + + if min_weight > signature_weight { + debug!( + "{self}: Not enough signatures on block {} (have {}, need at least {}/{})", + block_hash, signature_weight, min_weight, total_weight + ); + return; + } + + // have enough signatures to broadcast! + // have we broadcasted before? + if self + .signer_db + .is_block_broadcasted(self.reward_cycle, block_hash) + .unwrap_or_else(|_| { + panic!("{self}: failed to determine if block {block_hash} was broadcasted") + }) + { + debug!("{self}: will not re-broadcast block {}", block_hash); + return; + } + + let Ok(Some(block_info)) = self + .signer_db + .block_lookup(self.reward_cycle, block_hash) + .map_err(|e| { + warn!("{self}: Failed to load block {block_hash}: {e:?})"); + e + }) + else { + warn!("{self}: No such block {block_hash}"); + return; + }; + + // put signatures in order by signer address (i.e. reward cycle order) + let addrs_to_sigs: HashMap<_, _> = signatures + .into_iter() + .filter_map(|sig| { + let Ok(public_key) = Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), &sig) + else { + return None; + }; + let addr = StacksAddress::p2pkh(self.mainnet, &public_key); + Some((addr, sig)) + }) + .collect(); + + let signatures: Vec<_> = self + .signer_addresses + .iter() + .filter_map(|addr| addrs_to_sigs.get(addr).cloned()) + .collect(); + + let mut block = block_info.block; + block.header.signer_signature = signatures; + + let broadcasted = stacks_client + .post_block(&block) + .map_err(|e| { + warn!( + "{self}: Failed to post block {block_hash} (id {}): {e:?}", + &block.block_id() + ); + e + }) + .is_ok(); + + if broadcasted { + self.signer_db + .set_block_broadcasted(self.reward_cycle, block_hash) + .unwrap_or_else(|_| { + panic!("{self}: failed to determine if block {block_hash} was broadcasted") + }); + } } } From 7b6fce043dad0104f2884771d68a2153bb0d25ce Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:52:08 -0400 Subject: [PATCH 0637/1400] chore: clean up imports --- stacks-signer/src/v1/signer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index f78f3b9e29..c23dbdda90 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -21,7 +21,7 @@ use std::time::Instant; use blockstack_lib::chainstate::burn::ConsensusHashExtensions; use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; -use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote}; +use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::SIGNERS_VOTING_FUNCTION_NAME; use blockstack_lib::chainstate::stacks::StacksTransaction; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; @@ -57,7 +57,7 @@ use crate::chainstate::SortitionsView; use crate::client::{ClientError, SignerSlotID, StacksClient}; use crate::config::SignerConfig; use crate::runloop::{RunLoopCommand, SignerCommand, SignerResult}; -use crate::signerdb::{BlockInfo, SignerDb}; +use crate::signerdb::{BlockInfo, NakamotoBlockVote, SignerDb}; use crate::v1::coordinator::CoordinatorSelector; use crate::Signer as SignerTrait; From d35992a60bf4f34622c4988a71452440d0902e28 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:52:17 -0400 Subject: [PATCH 0638/1400] chore: NakamotoBlockVote belongs in the signer --- stackslib/src/chainstate/nakamoto/mod.rs | 35 ++++++------------------ 1 file changed, 8 insertions(+), 27 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 09794c4775..cc999a2158 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -377,6 +377,10 @@ pub trait StacksDBIndexed { .is_none() { // tenure not started + debug!( + "No tenure-start block for {} off of {}", + tenure_id_consensus_hash, tip + ); return Ok(None); } if self @@ -387,6 +391,10 @@ pub trait StacksDBIndexed { .is_none() { // tenure has started, but is not done yet + debug!( + "Tenure {} not finished off of {}", + tenure_id_consensus_hash, tip + ); return Ok(Some(false)); } @@ -609,33 +617,6 @@ impl FromRow for NakamotoBlockHeader { } } -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -/// A vote across the signer set for a block -pub struct NakamotoBlockVote { - pub signer_signature_hash: Sha512Trunc256Sum, - pub rejected: bool, -} - -impl StacksMessageCodec for NakamotoBlockVote { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.signer_signature_hash)?; - if self.rejected { - write_next(fd, &1u8)?; - } - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let signer_signature_hash = read_next(fd)?; - let rejected_byte: Option = read_next(fd).ok(); - let rejected = rejected_byte.is_some(); - Ok(Self { - signer_signature_hash, - rejected, - }) - } -} - #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct NakamotoBlock { pub header: NakamotoBlockHeader, From 322e04cddaf6bf56d800aef5b9b67d95cb085580 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:52:33 -0400 Subject: [PATCH 0639/1400] chore: pass chainstate to v0 signer loop so we can poll for signatures from a processed block posted by the signers --- testnet/stacks-node/src/nakamoto_node/miner.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 527117fb4d..a1c1fedd95 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -439,14 +439,22 @@ impl BlockMinerThread { }, )?; + let mut chain_state = + neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open chainstate DB. Cannot mine! {e:?}" + )) + })?; + *attempts += 1; - let signature = coordinator.begin_sign_v0( + let signature = coordinator.run_sign_v0( new_block, burn_block_height, *attempts, &tip, &self.burnchain, &sort_db, + &mut chain_state, &stackerdbs, &self.globals.counters, &self.burn_election_block.consensus_hash, From 812deb736fdcd732bd99f47936e2ed9b8a53c530 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 13:53:12 -0400 Subject: [PATCH 0640/1400] feat: look in the chainstate for a staging block posted by signers with the signatures we're waiting for, so we can unblock the miner if the block arrives from any signer before their signatures --- .../src/nakamoto_node/sign_coordinator.rs | 66 +++++++++++++++++-- 1 file changed, 60 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b6e42b87ee..a5e0fc0e8a 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -26,6 +26,7 @@ use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; +use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; use stacks::libstackerdb::StackerDBChunkData; @@ -53,7 +54,7 @@ use crate::Config; /// How long should the coordinator poll on the event receiver before /// waking up to check timeouts? -static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(50); +static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); /// The `SignCoordinator` struct represents a WSTS FIRE coordinator whose /// sole function is to serve as the coordinator for Nakamoto block signing. @@ -202,7 +203,6 @@ impl SignCoordinator { reward_set: &RewardSet, message_key: Scalar, config: &Config, - // v1: bool, ) -> Result { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { @@ -630,8 +630,13 @@ impl SignCoordinator { /// Start gathering signatures for a Nakamoto block. /// This function begins by sending a `BlockProposal` message /// to the signers, and then waits for the signers to respond - /// with their signatures. - pub fn begin_sign_v0( + /// with their signatures. It does so in two ways, concurrently: + /// * It waits for signer StackerDB messages with signatures. If enough signatures can be + /// found, then the block can be broadcast. + /// * It waits for the chainstate to contain the relayed block. If so, then its signatures are + /// loaded and returned. This can happen if the node receives the block via a signer who + /// fetched all signatures and assembled the signature vector, all before we could. + pub fn run_sign_v0( &mut self, block: &NakamotoBlock, burn_block_height: u64, @@ -639,6 +644,7 @@ impl SignCoordinator { burn_tip: &BlockSnapshot, burnchain: &Burnchain, sortdb: &SortitionDB, + chain_state: &mut StacksChainState, stackerdbs: &StackerDBs, counters: &Counters, election_sortition: &ConsensusHash, @@ -699,12 +705,16 @@ impl SignCoordinator { let mut total_weight_signed: u32 = 0; let mut gathered_signatures = BTreeMap::new(); - info!("SignCoordinator: beginning to watch for block signatures."; + info!("SignCoordinator: beginning to watch for block signatures OR posted blocks."; "threshold" => self.weight_threshold, ); let start_ts = Instant::now(); while start_ts.elapsed() <= self.signing_round_timeout { + // one of two things can happen: + // * we get enough signatures from stackerdb from the signers, OR + // * we see our block get processed in our chainstate (meaning, the signers broadcasted + // the block and our node got it and processed it) let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { Ok(event) => event, Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { @@ -716,9 +726,52 @@ impl SignCoordinator { )) } }; + // look in the nakamoto staging db -- a block can only get stored there if it has + // enough signing weight to clear the threshold + if let Ok(Some((block, _sz))) = chain_state + .nakamoto_blocks_db() + .get_nakamoto_block(&block.block_id()) + .map_err(|e| { + warn!( + "Failed to query chainstate for block {}: {:?}", + &block.block_id(), + &e + ); + e + }) + { + debug!("SignCoordinator: Found signatures in relayed block"); + return Ok(block.header.signer_signature); + } + // we don't have the block we ostensibly mined, but perhaps the tenure has advanced + // anyway? If so, then give up. + let canonical_stacks_header = + NakamotoChainState::get_canonical_block_header(chain_state.db(), sortdb) + .map_err(|e| { + let msg = format!("Failed to query canonical stacks tip: {:?}", &e); + warn!("{}", &msg); + NakamotoNodeError::SignerSignatureError(msg) + })? + .ok_or_else(|| { + let msg = "No canonical stacks tip".to_string(); + warn!("{}", &msg); + NakamotoNodeError::SignerSignatureError(msg) + })?; + + if canonical_stacks_header.anchored_header.height() > block.header.chain_length { + info!( + "SignCoordinator: our block {} is superseded by block {}", + block.header.block_id(), + canonical_stacks_header.index_block_hash() + ); + break; + } + + // check to see if this event we got is a signer event let is_signer_event = event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); + if !is_signer_event { debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); continue; @@ -775,6 +828,7 @@ impl SignCoordinator { "Processed signature but didn't validate over the expected block. Returning error."; "signature" => %signature, "block_signer_signature_hash" => %block_sighash, + "response_hash" => %response_hash, "slot_id" => slot_id, ); continue; @@ -809,7 +863,7 @@ impl SignCoordinator { .checked_add(signer_entry.weight) .expect("FATAL: total weight signed exceeds u32::MAX"); } - debug!("Signature Added to block"; + debug!("SignCoordinator: Signature Added to block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, From 522dcf27f35bd0357e2865eac69d5b1e8d3cf70c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 2 Aug 2024 14:06:33 -0400 Subject: [PATCH 0641/1400] feat: timout connections to observers Without a timout for these connections, the node can get stuck here and it is not obvious what is happening. With this timeout, we will see log messages that will point to the problem. --- testnet/stacks-node/src/event_dispatcher.rs | 30 +++++++++++++-------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 5a72e4ca0a..ccff517d7b 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -318,6 +318,7 @@ impl EventObserver { }; let backoff = Duration::from_millis((1.0 * 1_000.0) as u64); + let connection_timeout = Duration::from_secs(5); loop { let body = body.clone(); @@ -326,19 +327,26 @@ impl EventObserver { req.set_body(body); let response = async_std::task::block_on(async { - let stream = match TcpStream::connect(self.endpoint.clone()).await { - Ok(stream) => stream, - Err(err) => { + match async_std::future::timeout( + connection_timeout, + TcpStream::connect(self.endpoint.clone()), + ) + .await + { + Ok(Ok(stream)) => match client::connect(stream, req).await { + Ok(response) => Some(response), + Err(err) => { + warn!("Event dispatcher: rpc invocation failed - {:?}", err); + None + } + }, + Ok(Err(err)) => { warn!("Event dispatcher: connection failed - {:?}", err); - return None; + None } - }; - - match client::connect(stream, req).await { - Ok(response) => Some(response), - Err(err) => { - warn!("Event dispatcher: rpc invocation failed - {:?}", err); - return None; + Err(_) => { + error!("Event dispatcher: connection attempt timed out"); + None } } }); From 69872cf8640f42d780edb1134f2875284db2080f Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 2 Aug 2024 11:07:52 -0700 Subject: [PATCH 0642/1400] feat: fix block-to-reward-cycle lookup logic off-by-one error --- .../src/chainstate/nakamoto/coordinator/mod.rs | 2 +- testnet/stacks-node/src/tests/signer/mod.rs | 13 +++++++------ testnet/stacks-node/src/tests/signer/v0.rs | 4 ++-- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 17cfed5cd6..15cc7f0852 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -546,7 +546,7 @@ pub fn load_nakamoto_reward_set( "burnchain_height" => %anchor_block_sn.block_height); let reward_set = provider.get_reward_set_nakamoto( - prepare_end_height.saturating_sub(1), + prepare_end_height, chain_state, burnchain, sort_db, diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 35db96c845..175022d68c 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -64,7 +64,8 @@ use crate::tests::nakamoto_integrations::{ naka_neon_integration_conf, next_block_and_mine_commit, POX_4_DEFAULT_STACKER_BALANCE, }; use crate::tests::neon_integrations::{ - next_block_and_wait, run_until_burnchain_height, test_observer, wait_for_runloop, + get_chain_info, next_block_and_wait, run_until_burnchain_height, test_observer, + wait_for_runloop, }; use crate::tests::to_addr; use crate::{BitcoinRegtestController, BurnchainController}; @@ -473,15 +474,15 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest u64 { - let block_height = self + let block_height = get_chain_info(&self.running_nodes.conf).burn_block_height; + let rc = self .running_nodes - .btc_regtest_controller - .get_headers_height(); - self.running_nodes .btc_regtest_controller .get_burnchain() .block_height_to_reward_cycle(block_height) - .unwrap() + .unwrap(); + info!("Get current reward cycle: block_height = {block_height}, rc = {rc}"); + rc } fn get_signer_index(&self, reward_cycle: u64) -> SignerSlotID { diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9a6d362e59..f08f0f87ed 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2136,8 +2136,8 @@ fn signer_set_rollover() { let current_signers = signer_test.get_reward_set_signers(new_reward_cycle); assert_eq!(current_signers.len(), new_num_signers as usize); for signer in current_signers.iter() { - assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec())); - assert!(!new_signer_public_keys.contains(&signer.signing_key.to_vec())); + assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); } info!("---- Mining a block to verify new signer set -----"); From caa0c831cd60fc8d6c9380d91d21f9c6de50a7c5 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 2 Aug 2024 15:32:31 -0400 Subject: [PATCH 0643/1400] chore: update deprecated VSCode extension --- .vscode/extensions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/extensions.json b/.vscode/extensions.json index be7e11c2a8..00035443cb 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -2,7 +2,7 @@ "recommendations": [ "rust-lang.rust-analyzer", "vadimcn.vscode-lldb", - "serayuzgur.crates", + "fill-labs.dependi", "editorconfig.editorconfig", ] } From 19076c1e5aa3ee7e032c72320ee2c96aaecf11cd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 2 Aug 2024 15:40:29 -0400 Subject: [PATCH 0644/1400] chore: only timout on the `TcpStream::connect` Don't timeout on the `client::connect` since this has the potential for an observer to receive duplicate events. --- testnet/stacks-node/src/event_dispatcher.rs | 38 ++++++++++----------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index ccff517d7b..58f567b4b7 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -6,7 +6,9 @@ use std::thread::sleep; use std::time::Duration; use async_h1::client; +use async_std::future::timeout; use async_std::net::TcpStream; +use async_std::task; use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; @@ -326,26 +328,24 @@ impl EventObserver { req.append_header("Content-Type", "application/json"); req.set_body(body); - let response = async_std::task::block_on(async { - match async_std::future::timeout( - connection_timeout, - TcpStream::connect(self.endpoint.clone()), - ) - .await - { - Ok(Ok(stream)) => match client::connect(stream, req).await { - Ok(response) => Some(response), - Err(err) => { - warn!("Event dispatcher: rpc invocation failed - {:?}", err); - None + let response = task::block_on(async { + let stream = + match timeout(connection_timeout, TcpStream::connect(&self.endpoint)).await { + Ok(Ok(stream)) => stream, + Ok(Err(err)) => { + warn!("Event dispatcher: connection failed - {:?}", err); + return None; } - }, - Ok(Err(err)) => { - warn!("Event dispatcher: connection failed - {:?}", err); - None - } - Err(_) => { - error!("Event dispatcher: connection attempt timed out"); + Err(_) => { + error!("Event dispatcher: connection attempt timed out"); + return None; + } + }; + + match client::connect(stream, req).await { + Ok(response) => Some(response), + Err(err) => { + warn!("Event dispatcher: rpc invocation failed - {:?}", err); None } } From 74efa2ca2012f0f652ac88c662db5b45849ff4b5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 2 Aug 2024 16:15:28 -0400 Subject: [PATCH 0645/1400] Fix off by one error in mock_sign_epoch_25 Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 42 ++++++++++++---------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2a6cc42078..c2a4d8ce67 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -175,9 +175,8 @@ impl SignerTest { .wrapping_add(reward_cycle_len) .wrapping_add(1); - let next_reward_cycle_boundary = epoch_25_reward_cycle_boundary - .wrapping_add(reward_cycle_len) - .saturating_sub(1); + let next_reward_cycle_boundary = + epoch_25_reward_cycle_boundary.wrapping_add(reward_cycle_len); run_until_burnchain_height( &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.blocks_processed, @@ -2146,7 +2145,6 @@ fn mock_sign_epoch_25() { info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); // Mine until epoch 3.0 and ensure that no more mock signatures are received - let mut reward_cycle = signer_test.get_current_reward_cycle(); let mut stackerdb = StackerDB::new( &signer_test.running_nodes.conf.node.rpc_bind, @@ -2163,19 +2161,33 @@ fn mock_sign_epoch_25() { assert_eq!(signer_slot_ids.len(), num_signers); // Mine until epoch 3.0 and ensure we get a new mock signature per epoch 2.5 sortition let main_poll_time = Instant::now(); - let mut current_burn_block_height = signer_test + while signer_test .running_nodes .btc_regtest_controller - .get_headers_height(); - while current_burn_block_height + 1 < epoch_3_start_height { - current_burn_block_height = signer_test + .get_headers_height() + < epoch_3_start_height + { + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + let current_burn_block_height = signer_test .running_nodes .btc_regtest_controller .get_headers_height(); - let current_reward_cycle = signer_test.get_current_reward_cycle(); - if current_reward_cycle != reward_cycle { - debug!("Rolling over reward cycle to {:?}", current_reward_cycle); - reward_cycle = current_reward_cycle; + if current_burn_block_height + % signer_test + .running_nodes + .conf + .get_burnchain() + .pox_constants + .reward_cycle_length as u64 + == 0 + { + reward_cycle += 1; + debug!("Rolling over reward cycle to {:?}", reward_cycle); stackerdb = StackerDB::new( &signer_test.running_nodes.conf.node.rpc_bind, StacksPrivateKey::new(), // We are just reading so don't care what the key is @@ -2190,12 +2202,6 @@ fn mock_sign_epoch_25() { .collect(); assert_eq!(signer_slot_ids.len(), num_signers); } - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); let mut mock_signatures = vec![]; let mock_poll_time = Instant::now(); debug!("Waiting for mock signatures for burn block height {current_burn_block_height}"); From 7d07a5f80254724b9e06b87de9804e93979dd117 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 16:37:20 -0400 Subject: [PATCH 0646/1400] feat: decode nakamoto block --- stackslib/src/main.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 8660e0e9a7..fa54b34b86 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -55,7 +55,7 @@ use blockstack_lib::chainstate::burn::db::sortdb::{ use blockstack_lib::chainstate::burn::operations::BlockstackOperationType; use blockstack_lib::chainstate::burn::{BlockSnapshot, ConsensusHash}; use blockstack_lib::chainstate::coordinator::{get_reward_cycle_info, OnChainRewardSetProvider}; -use blockstack_lib::chainstate::nakamoto::NakamotoChainState; +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use blockstack_lib::chainstate::stacks::db::blocks::{DummyEventDispatcher, StagingBlock}; use blockstack_lib::chainstate::stacks::db::{ ChainStateBootData, StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo, @@ -243,6 +243,25 @@ fn main() { process::exit(0); } + if argv[1] == "decode-nakamoto-block" { + if argv.len() < 3 { + eprintln!("Usage: {} decode-nakamoto-block BLOCK_HEX", argv[0]); + process::exit(1); + } + + let block_hex = &argv[2]; + let block_data = hex_bytes(block_hex).unwrap_or_else(|_| panic!("Failed to decode hex")); + let block = NakamotoBlock::consensus_deserialize(&mut io::Cursor::new(&block_data)) + .map_err(|_e| { + eprintln!("Failed to decode block"); + process::exit(1); + }) + .unwrap(); + + println!("{:#?}", &block); + process::exit(0); + } + if argv[1] == "decode-net-message" { let data: String = argv[2].clone(); let buf = if data == "-" { From af274c17e93462f599a8bfa8b73b55b287caf975 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 16:37:29 -0400 Subject: [PATCH 0647/1400] fix: fix regressed unit test --- stackslib/src/net/tests/download/nakamoto.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 5937f43384..57bd557186 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -1483,6 +1483,10 @@ fn test_make_tenure_downloaders() { &mut tenure_start_blocks, ) .unwrap(); + + // remove malleablized blocks + tenure_start_blocks.retain(|_, block| block.header.version == 0); + assert_eq!(tenure_start_blocks.len(), wanted_tenures.len()); for wt in wanted_tenures_with_blocks { From 5404200c6676bbd8692016e61aac229bab2484d8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 16:37:29 -0400 Subject: [PATCH 0648/1400] fix: fix regressed unit test --- stackslib/src/net/tests/download/nakamoto.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 5937f43384..57bd557186 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -1483,6 +1483,10 @@ fn test_make_tenure_downloaders() { &mut tenure_start_blocks, ) .unwrap(); + + // remove malleablized blocks + tenure_start_blocks.retain(|_, block| block.header.version == 0); + assert_eq!(tenure_start_blocks.len(), wanted_tenures.len()); for wt in wanted_tenures_with_blocks { From 11feb3dfcb83501ac20d4230f68914a4f629d8f0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 16:39:06 -0400 Subject: [PATCH 0649/1400] chore: info, not debug --- stacks-signer/src/v0/signer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 4a626e8ab1..919864f09d 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -452,7 +452,7 @@ impl Signer { stacks_client: &StacksClient, block_validate_response: &BlockValidateResponse, ) { - debug!("{self}: Received a block validate response: {block_validate_response:?}"); + info!("{self}: Received a block validate response: {block_validate_response:?}"); let mut signature_opt = None; let (response, block_info) = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { From e36bb1ebc86a10c22f7472caf01f86be0668c4c6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 23:32:16 -0400 Subject: [PATCH 0650/1400] chore: more debug output --- stackslib/src/net/download/nakamoto/tenure.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 21d06d1b2c..a2a3b3eddd 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -174,6 +174,8 @@ impl TenureStartEnd { let mut last_tenure_ch = None; debug!("Find available tenures in inventory {:?} rc {}", invs, rc); for (i, wt) in wanted_tenures.iter().enumerate() { + debug!("consider wanted tenure which starts with i={} {:?}", i, &wt); + // advance to next tenure-start sortition let bit = u16::try_from(i).expect("FATAL: more sortitions than u16::MAX"); if !invbits.get(bit).unwrap_or(false) { From d86ff5c628649211cd906aa9f03ad8a778395293 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 23:32:34 -0400 Subject: [PATCH 0651/1400] fix: don't rely on processed tenure cache for inv generation -- it's not coherent with the canonical tip --- stackslib/src/net/inv/nakamoto.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 5b09ace396..9fa43e448d 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -125,9 +125,12 @@ impl InvGenerator { tip_block_id: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result, NetError> { + /* + TODO: marf'ed cache? if let Some(info_opt) = self.processed_tenures.get(&tenure_id_consensus_hash) { return Ok((*info_opt).clone()); }; + */ // not cached so go load it let loaded_info_opt = InvTenureInfo::load(chainstate, tip_block_id, &tenure_id_consensus_hash)?; From 72c5e4863a0cde0aa68358565af6f5f38d623326 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 23:32:55 -0400 Subject: [PATCH 0652/1400] chore: fault injection for block push --- testnet/stacks-node/src/config.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index dc0820a39a..0f13943b1c 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1829,6 +1829,8 @@ pub struct NodeConfig { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: bool, pub require_affirmed_anchor_blocks: bool, + /// Fault injection for failing to push blocks + pub fault_injection_block_push_fail_probability: Option, // fault injection for hiding blocks. // not part of the config file. pub fault_injection_hide_blocks: bool, @@ -2115,6 +2117,7 @@ impl Default for NodeConfig { use_test_genesis_chainstate: None, always_use_affirmation_maps: false, require_affirmed_anchor_blocks: true, + fault_injection_block_push_fail_probability: None, fault_injection_hide_blocks: false, chain_liveness_poll_time_secs: 300, stacker_dbs: vec![], @@ -2572,6 +2575,8 @@ pub struct NodeConfigFile { pub chain_liveness_poll_time_secs: Option, /// Stacker DBs we replicate pub stacker_dbs: Option>, + /// fault injection: fail to push blocks with this probability (0-100) + pub fault_injection_block_push_fail_probability: Option, } impl NodeConfigFile { @@ -2650,6 +2655,14 @@ impl NodeConfigFile { .iter() .filter_map(|contract_id| QualifiedContractIdentifier::parse(contract_id).ok()) .collect(), + fault_injection_block_push_fail_probability: if self + .fault_injection_block_push_fail_probability + .is_some() + { + self.fault_injection_block_push_fail_probability + } else { + default_node_config.fault_injection_block_push_fail_probability + }, }; Ok(node_config) } From b9bf11ae859a1d53f81c46ac50e4d662f91851d8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 2 Aug 2024 23:33:06 -0400 Subject: [PATCH 0653/1400] feat: drop blocks from block push with a configurable probability --- .../stacks-node/src/nakamoto_node/miner.rs | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a1c1fedd95..891ee64717 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -25,6 +25,7 @@ use hashbrown::HashSet; use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; use libsigner::StackerDBSession; +use rand::{thread_rng, Rng}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; @@ -619,8 +620,27 @@ impl BlockMinerThread { return Ok(()); } - // forward to p2p thread + // forward to p2p thread, but do fault injection let block_id = block.block_id(); + let drop_prob = self + .config + .node + .fault_injection_block_push_fail_probability + .unwrap_or(0) + .max(100); + let will_drop = if drop_prob > 0 { + let throw: u8 = thread_rng().gen_range(0..100); + throw < drop_prob + } else { + false + }; + + if will_drop { + info!("Fault injection: drop block {}", &block_id); + return Ok(()); + } + + debug!("Broadcasting block {}", &block_id); if let Err(e) = self.p2p_handle.broadcast_message( vec![], StacksMessageType::NakamotoBlocks(NakamotoBlocksData { From ae946dd344ff90d8d4f84cd2de8474367f60eb09 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 4 Aug 2024 11:26:20 -0400 Subject: [PATCH 0654/1400] test: finish deadlock test --- .../chainstate/nakamoto/coordinator/tests.rs | 45 +++++++++++-------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 91a8d9f965..000ccc59ce 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -2495,33 +2495,40 @@ fn process_next_nakamoto_block_deadlock() { info!("Creating peer"); let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); + let (chainstate, _) = &mut peer + .stacks_node + .as_mut() + .unwrap() + .chainstate + .reopen() + .unwrap(); // Lock the sortdb info!(" ------------------------------- TRYING TO LOCK THE SORTDB"); let mut sortition_db = peer.sortdb().reopen().unwrap(); let sort_tx = sortition_db.tx_begin().unwrap(); + info!(" ------------------------------- SORTDB LOCKED"); - // Start another thread that opens the sortdb, waits 10s, then tries to - // lock the chainstate db. This should cause a deadlock if the block - // processing is not acquiring the locks in the correct order. - info!(" ------------------------------- SPAWNING BLOCKER THREAD"); - let blocker_thread = std::thread::spawn(move || { - // Wait a bit, to ensure the tenure will have grabbed any locks it needs - std::thread::sleep(std::time::Duration::from_secs(10)); + let miner_thread = std::thread::spawn(move || { + info!(" ------------------------------- MINING TENURE"); + let (block, burn_height, ..) = + peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); + peer.try_process_block(&block).unwrap(); + info!(" ------------------------------- TENURE MINED"); + }); - // Lock the chainstate db - info!(" ------------------------------- TRYING TO LOCK THE CHAINSTATE"); - let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; - let (chainstate_tx, _) = chainstate.chainstate_tx_begin().unwrap(); + // Wait a bit, to ensure the tenure will have grabbed any locks it needs + std::thread::sleep(std::time::Duration::from_secs(10)); - info!(" ------------------------------- SORTDB AND CHAINSTATE LOCKED"); - info!(" ------------------------------- BLOCKER THREAD FINISHED"); - }); + // Lock the chainstate db + info!(" ------------------------------- TRYING TO LOCK THE CHAINSTATE"); + let chainstate_tx = chainstate.chainstate_tx_begin().unwrap(); - info!(" ------------------------------- MINING TENURE"); - let (block, burn_height, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); - info!(" ------------------------------- TENURE MINED"); + info!(" ------------------------------- SORTDB AND CHAINSTATE LOCKED"); + drop(chainstate_tx); + drop(sort_tx); + info!(" ------------------------------- MAIN THREAD FINISHED"); - // Wait for the blocker thread to finish - blocker_thread.join().unwrap(); + // Wait for the blocker and miner threads to finish + miner_thread.join().unwrap(); } From b557827cec222552a4a044305642160189f36737 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 4 Aug 2024 12:09:11 -0400 Subject: [PATCH 0655/1400] test: insert stall to ensure proper timing of test With this change in place, `process_next_nakamoto_block_deadlock` will timeout without the fix, and run successfully with the fix in place. --- .../chainstate/nakamoto/coordinator/tests.rs | 20 +++++++++++++------ stackslib/src/chainstate/nakamoto/mod.rs | 15 ++++++++++++++ 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 000ccc59ce..aecf8c62a2 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -48,6 +48,7 @@ use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{ NakamotoBlock, NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConnRef, + TEST_PROCESS_BLOCK_STALL, }; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::pox_4_tests::{get_stacking_minimum, get_tip}; @@ -2495,6 +2496,7 @@ fn process_next_nakamoto_block_deadlock() { info!("Creating peer"); let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); + let mut sortition_db = peer.sortdb().reopen().unwrap(); let (chainstate, _) = &mut peer .stacks_node .as_mut() @@ -2503,20 +2505,26 @@ fn process_next_nakamoto_block_deadlock() { .reopen() .unwrap(); - // Lock the sortdb - info!(" ------------------------------- TRYING TO LOCK THE SORTDB"); - let mut sortition_db = peer.sortdb().reopen().unwrap(); - let sort_tx = sortition_db.tx_begin().unwrap(); - info!(" ------------------------------- SORTDB LOCKED"); + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); let miner_thread = std::thread::spawn(move || { info!(" ------------------------------- MINING TENURE"); let (block, burn_height, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); - peer.try_process_block(&block).unwrap(); info!(" ------------------------------- TENURE MINED"); }); + // Wait a bit, to ensure the miner has reached the stall + std::thread::sleep(std::time::Duration::from_secs(10)); + + // Lock the sortdb + info!(" ------------------------------- TRYING TO LOCK THE SORTDB"); + let sort_tx = sortition_db.tx_begin().unwrap(); + info!(" ------------------------------- SORTDB LOCKED"); + + // Un-stall the block processing + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); + // Wait a bit, to ensure the tenure will have grabbed any locks it needs std::thread::sleep(std::time::Duration::from_secs(10)); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 1fab3ba9b1..0674c49b52 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -270,6 +270,10 @@ lazy_static! { ]; } +// Cause an artifical stall in block-processing, for testing. +#[cfg(any(test, feature = "testing"))] +pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); + /// Trait for common MARF getters between StacksDBConn and StacksDBTx pub trait StacksDBIndexed { fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result, DBError>; @@ -1722,6 +1726,17 @@ impl NakamotoChainState { canonical_sortition_tip: &SortitionId, dispatcher_opt: Option<&'a T>, ) -> Result, ChainstateError> { + #[cfg(any(test, feature = "testing"))] + { + if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Block processing is stalled due to testing directive."); + while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block processing is no longer stalled due to testing directive."); + } + } let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); let Some((next_ready_block, block_size)) = nakamoto_blocks_db.next_ready_nakamoto_block(stacks_chain_state.db())? From 7f9c9bf7a201ce36aa1fd7f3ba947ce55b8a3797 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 5 Aug 2024 10:45:13 -0700 Subject: [PATCH 0656/1400] fix: remove extra logging --- testnet/stacks-node/src/nakamoto_node/miner.rs | 16 ---------------- .../src/nakamoto_node/sign_coordinator.rs | 9 +-------- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 3 files changed, 2 insertions(+), 24 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 29b2195af9..18862630da 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -357,14 +357,6 @@ impl BlockMinerThread { .block_height_to_reward_cycle(burn_election_height) .expect("FATAL: no reward cycle for sortition"); - #[cfg(test)] - { - info!( - "---- Fetching reward info at height {} for cycle {} ----", - burn_election_height, reward_cycle - ); - } - let reward_info = match load_nakamoto_reward_set( reward_cycle, &self.burn_election_block.sortition_id, @@ -393,14 +385,6 @@ impl BlockMinerThread { )); }; - #[cfg(test)] - { - info!( - "---- New reward set has {} signers ----", - reward_set.clone().signers.unwrap_or(vec![]).len(), - ); - } - self.signer_set_cache = Some(reward_set.clone()); Ok(reward_set) } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b933c8f7e2..db442ac46b 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -642,13 +642,6 @@ impl SignCoordinator { counters: &Counters, election_sortition: &ConsensusHash, ) -> Result, NakamotoNodeError> { - #[cfg(test)] - { - info!( - "---- Sign coordinator starting. Burn tip height: {} ----", - burn_tip.block_height - ); - } let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); let sign_iter_id = block_attempt; let reward_cycle_id = burnchain @@ -742,7 +735,7 @@ impl SignCoordinator { continue; }; if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { - info!("Received signer event for other reward cycle. Ignoring."); + debug!("Received signer event for other reward cycle. Ignoring."); continue; }; let slot_ids = modified_slots diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c2a4d8ce67..dd298d27ea 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2080,6 +2080,7 @@ fn empty_sortition() { assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); found_rejection = true; } else { + error!("Unexpected message type: {:?}", message); panic!("Unexpected message type"); } } From 6682ce811cd7b04cf2e1d4a7b8f0bcf602ac1b07 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 5 Aug 2024 13:05:35 -0700 Subject: [PATCH 0657/1400] fix: increase timeout while waiting for mock miner to sync --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 23270867b8..5642017dfd 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7254,7 +7254,7 @@ fn mock_mining() { let mock_miner_timeout = Instant::now(); while follower_naka_mined_blocks.load(Ordering::SeqCst) <= follower_naka_mined_blocks_before { - if mock_miner_timeout.elapsed() >= Duration::from_secs(30) { + if mock_miner_timeout.elapsed() >= Duration::from_secs(60) { panic!( "Timed out waiting for mock miner block {}", follower_naka_mined_blocks_before + 1 From 946769a6de59a1b7a6588ed36a2f79fadbf339e8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:51:04 -0400 Subject: [PATCH 0658/1400] chore: pass sender reference to avoid a clone --- libsigner/src/runloop.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index b0f026f35f..f423c9a8dd 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -53,7 +53,7 @@ pub trait SignerRunLoop { &mut self, event: Option>, cmd: Option, - res: Sender, + res: &Sender, ) -> Option; /// This is the main loop body for the signer. It continuously receives events from @@ -70,6 +70,7 @@ pub trait SignerRunLoop { result_send: Sender, mut event_stop_signaler: EVST, ) -> Option { + info!("Signer runloop begin"); loop { let poll_timeout = self.get_event_timeout(); let next_event_opt = match event_recv.recv_timeout(poll_timeout) { @@ -83,7 +84,7 @@ pub trait SignerRunLoop { // Do not block for commands let next_command_opt = command_recv.try_recv().ok(); if let Some(final_state) = - self.run_one_pass(next_event_opt, next_command_opt, result_send.clone()) + self.run_one_pass(next_event_opt, next_command_opt, &result_send) { info!("Runloop exit; signaling event-receiver to stop"); event_stop_signaler.send(); From 66889deee2e54be0ba5f43f446ec6b6d4ef0e1d3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:51:24 -0400 Subject: [PATCH 0659/1400] chore: pass a reference to the sender end of the runloop result channel to avoid clones --- stacks-signer/src/lib.rs | 2 +- stacks-signer/src/runloop.rs | 4 ++-- stacks-signer/src/v0/signer.rs | 3 ++- stacks-signer/src/v1/signer.rs | 18 +++++++++--------- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 15c0a25c3d..dd94b8f3bb 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -71,7 +71,7 @@ pub trait Signer: Debug + Display { stacks_client: &StacksClient, sortition_state: &mut Option, event: Option<&SignerEvent>, - res: Sender>, + res: &Sender>, current_reward_cycle: u64, ); /// Process a command diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 3e2ff53438..284ea6ce19 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -419,7 +419,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> &mut self, event: Option>, cmd: Option, - res: Sender>, + res: &Sender>, ) -> Option> { debug!( "Running one pass for the signer. state={:?}, cmd={cmd:?}, event={event:?}", @@ -452,7 +452,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> &self.stacks_client, &mut self.sortition_state, event.as_ref(), - res.clone(), + res, current_reward_cycle, ); // After processing event, run the next command for each signer diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 919864f09d..985ddd4a26 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -92,7 +92,7 @@ impl SignerTrait for Signer { stacks_client: &StacksClient, sortition_state: &mut Option, event: Option<&SignerEvent>, - _res: Sender>, + _res: &Sender>, current_reward_cycle: u64, ) { let event_parity = match event { @@ -676,6 +676,7 @@ impl Signer { let mut block = block_info.block; block.header.signer_signature = signatures; + debug!("{self}: Broadcasting Stacks block {} to node", &block.block_id()); let broadcasted = stacks_client .post_block(&block) .map_err(|e| { diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 0d2834ca7e..476bb4feab 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -164,7 +164,7 @@ impl SignerTrait for Signer { stacks_client: &StacksClient, _sortition_state: &mut Option, event: Option<&SignerEvent>, - res: Sender>, + res: &Sender>, current_reward_cycle: u64, ) { let event_parity = match event { @@ -182,13 +182,13 @@ impl SignerTrait for Signer { return; } if self.approved_aggregate_public_key.is_none() { - if let Err(e) = self.refresh_dkg(stacks_client, res.clone(), current_reward_cycle) { + if let Err(e) = self.refresh_dkg(stacks_client, res, current_reward_cycle) { error!("{self}: failed to refresh DKG: {e}"); } } self.refresh_coordinator(); if self.approved_aggregate_public_key.is_none() { - if let Err(e) = self.refresh_dkg(stacks_client, res.clone(), current_reward_cycle) { + if let Err(e) = self.refresh_dkg(stacks_client, res, current_reward_cycle) { error!("{self}: failed to refresh DKG: {e}"); } } @@ -366,7 +366,7 @@ impl Signer { pub fn read_dkg_stackerdb_messages( &mut self, stacks_client: &StacksClient, - res: Sender>, + res: &Sender>, current_reward_cycle: u64, ) -> Result<(), ClientError> { if self.state != State::Uninitialized { @@ -626,7 +626,7 @@ impl Signer { &mut self, stacks_client: &StacksClient, block_validate_response: &BlockValidateResponse, - res: Sender>, + res: &Sender>, current_reward_cycle: u64, ) { let mut block_info = match block_validate_response { @@ -718,7 +718,7 @@ impl Signer { fn handle_signer_messages( &mut self, stacks_client: &StacksClient, - res: Sender>, + res: &Sender>, messages: &[SignerMessage], current_reward_cycle: u64, ) { @@ -761,7 +761,7 @@ impl Signer { fn handle_packets( &mut self, stacks_client: &StacksClient, - res: Sender>, + res: &Sender>, packets: &[Packet], current_reward_cycle: u64, ) { @@ -1435,7 +1435,7 @@ impl Signer { /// Send any operation results across the provided channel fn send_operation_results( &mut self, - res: Sender>, + res: &Sender>, operation_results: Vec, ) { let nmb_results = operation_results.len(); @@ -1469,7 +1469,7 @@ impl Signer { pub fn refresh_dkg( &mut self, stacks_client: &StacksClient, - res: Sender>, + res: &Sender>, current_reward_cycle: u64, ) -> Result<(), ClientError> { // First attempt to retrieve the aggregate key from the contract. From cd4be4c132e77d536fb1ca3e1b800466563d1834 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:51:48 -0400 Subject: [PATCH 0660/1400] fix: pass coordinator communication channels to relayer when processing an uploaded block, so the coordinator can be woken up if the block is new --- stackslib/src/net/api/postblock_v3.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index bcf994d488..df7a7eae73 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -121,7 +121,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { .ok_or(NetError::SendError("`block` not set".into()))?; let response = node - .with_node_state(|network, sortdb, chainstate, _mempool, _rpc_args| { + .with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { let mut handle_conn = sortdb.index_handle_at_tip(); let stacks_tip = network.stacks_tip.block_id(); Relayer::process_new_nakamoto_block( @@ -131,7 +131,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { chainstate, &stacks_tip, &block, - None, + rpc_args.coord_comms, NakamotoBlockObtainMethod::Uploaded, ) }) From 8657a28c35d6b4a5811aed8fda32b6f89f1ca310 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:52:12 -0400 Subject: [PATCH 0661/1400] chore: remove commented-out code --- stackslib/src/net/inv/nakamoto.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 9fa43e448d..0a2ea4dc63 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -125,12 +125,7 @@ impl InvGenerator { tip_block_id: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result, NetError> { - /* - TODO: marf'ed cache? - if let Some(info_opt) = self.processed_tenures.get(&tenure_id_consensus_hash) { - return Ok((*info_opt).clone()); - }; - */ + // TODO: MARF-aware cache // not cached so go load it let loaded_info_opt = InvTenureInfo::load(chainstate, tip_block_id, &tenure_id_consensus_hash)?; From 0f2a1d8525d1732ff7700b79b37936a5437b9321 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:52:22 -0400 Subject: [PATCH 0662/1400] fix: pass coordinator comms to RPCHandlerArgs --- stackslib/src/net/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index da323be3e7..65a598afff 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -96,6 +96,8 @@ use crate::util_lib::boot::boot_code_tx_auth; use crate::util_lib::db::{DBConn, Error as db_error}; use crate::util_lib::strings::UrlString; +use crate::chainstate::coordinator::comm::CoordinatorChannels; + /// Implements RPC API pub mod api; /// Implements `ASEntry4` object, which is used in db.rs to store the AS number of an IP address. @@ -631,6 +633,8 @@ pub struct RPCHandlerArgs<'a> { pub fee_estimator: Option<&'a dyn FeeEstimator>, /// tx runtime cost metric pub cost_metric: Option<&'a dyn CostMetric>, + /// coordinator channels + pub coord_comms: Option<&'a CoordinatorChannels> } impl<'a> RPCHandlerArgs<'a> { From d108bece302a5f0bd827017c363f70dfbd08002d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:52:41 -0400 Subject: [PATCH 0663/1400] chore: more information on handling a nakamoto block --- stackslib/src/net/relay.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 32dc7d065a..92a1ebb480 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -805,9 +805,10 @@ impl Relayer { obtained_method: NakamotoBlockObtainMethod, ) -> Result { debug!( - "Handle incoming Nakamoto block {}/{}", + "Handle incoming Nakamoto block {}/{} obtained via {}", &block.header.consensus_hash, - &block.header.block_hash() + &block.header.block_hash(), + &obtained_method, ); // do we have this block? don't lock the DB needlessly if so. From d1e33c630fdd6199770764319f462dbbef7a034e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:52:59 -0400 Subject: [PATCH 0664/1400] feat: StacksTipChanged and SignersRejected error variants --- testnet/stacks-node/src/nakamoto_node.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index d9f44cc67b..cde9c1da1f 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -76,6 +76,10 @@ pub enum Error { SnapshotNotFoundForChainTip, /// The burnchain tip changed while this operation was in progress BurnchainTipChanged, + /// The Stacks tip changed while this operation was in progress + StacksTipChanged, + /// Signers rejected a block + SignersRejected, /// Error while spawning a subordinate thread SpawnError(std::io::Error), /// Injected testing errors From eb3041eca5f117d72eca3f8318d2558b67f2dccd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:53:18 -0400 Subject: [PATCH 0665/1400] feat: signers rejecting or the signers choosing a different stacks tip are reasons to abort mining --- testnet/stacks-node/src/nakamoto_node/miner.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 42b2de7966..7e4e3408fa 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -308,8 +308,20 @@ impl BlockMinerThread { ) { Ok(x) => x, Err(e) => { - error!("Error while gathering signatures: {e:?}. Will try mining again."); - continue; + match e { + NakamotoNodeError::StacksTipChanged => { + info!("Stacks tip changed while waiting for signatures"); + return Err(e); + } + NakamotoNodeError::BurnchainTipChanged => { + info!("Burnchain tip changed while waiting for signatures"); + return Err(e); + } + _ => { + error!("Error while gathering signatures: {e:?}. Will try mining again."); + continue; + } + } } }; @@ -661,7 +673,7 @@ impl BlockMinerThread { .node .fault_injection_block_push_fail_probability .unwrap_or(0) - .max(100); + .min(100); let will_drop = if drop_prob > 0 { let throw: u8 = thread_rng().gen_range(0..100); throw < drop_prob From c2d61dd2bdd4fa50d91d37f82f8e8e02b531c65b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:53:40 -0400 Subject: [PATCH 0666/1400] chore: pass coordiantor comms to RPCHandlerArgs --- testnet/stacks-node/src/nakamoto_node/peer.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 1fd5325623..7483ce5115 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -266,6 +266,7 @@ impl PeerThread { cost_estimator: Some(cost_estimator.as_ref()), cost_metric: Some(cost_metric.as_ref()), fee_estimator: fee_estimator.map(|boxed_estimator| boxed_estimator.as_ref()), + coord_comms: Some(&self.globals.coord_comms), ..RPCHandlerArgs::default() }; self.net.run( From e8a0db6912d5db2fcd0281f725cd39636386ed97 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:54:00 -0400 Subject: [PATCH 0667/1400] fix: partial fix for #5046 --- .../stacks-node/src/nakamoto_node/relayer.rs | 31 +++++++++++++++++-- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 8a5b75463b..3ee862364c 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -416,8 +416,30 @@ impl RelayerThread { MinerDirective::StopTenure } } else { - MinerDirective::ContinueTenure { - new_burn_view: consensus_hash, + let ih = self.sortdb.index_handle(&sn.sortition_id); + let parent_sn = ih.get_last_snapshot_with_sortition(sn.block_height).expect( + "FATAL: failed to query sortition DB for last snapshot with non-empty tenure", + ); + + let parent_epoch = + SortitionDB::get_stacks_epoch(self.sortdb.conn(), parent_sn.block_height) + .expect("FATAL: failed to query sortiiton DB for epoch") + .expect("FATAL: no epoch defined for existing sortition"); + + let cur_epoch = SortitionDB::get_stacks_epoch(self.sortdb.conn(), sn.block_height) + .expect("FATAL: failed to query sortition DB for epoch") + .expect("FATAL: no epoch defined for existing sortition"); + + if parent_epoch.epoch_id != cur_epoch.epoch_id { + // this is the first-ever sortition, so definitely mine + MinerDirective::BeginTenure { + parent_tenure_start: committed_index_hash, + burnchain_tip: sn, + } + } else { + MinerDirective::ContinueTenure { + new_burn_view: consensus_hash, + } } }; Ok(directive) @@ -748,7 +770,10 @@ impl RelayerThread { )?; let new_miner_handle = std::thread::Builder::new() - .name(format!("miner.{parent_tenure_start}")) + .name(format!( + "miner.{parent_tenure_start} (bound ({},{})", + &self.config.node.p2p_bind, &self.config.node.rpc_bind + )) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || new_miner_state.run_miner(prior_tenure_thread)) .map_err(|e| { From c499dc324d4c0ff5efa5da78c42814798c2ae9e2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:54:50 -0400 Subject: [PATCH 0668/1400] feat: if enough signers reject a block, abandon attempts to wait for signatures --- .../src/nakamoto_node/sign_coordinator.rs | 59 ++++++++++++++++--- 1 file changed, 52 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 5f2aba5b8f..d53dfa5319 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -71,6 +71,7 @@ pub struct SignCoordinator { signing_round_timeout: Duration, signer_entries: HashMap, weight_threshold: u32, + total_weight: u32, pub next_signer_bitvec: BitVec<4000>, } @@ -300,6 +301,7 @@ impl SignCoordinator { next_signer_bitvec, signer_entries: signer_public_keys, weight_threshold: threshold, + total_weight, }; return Ok(sign_coordinator); } @@ -321,6 +323,7 @@ impl SignCoordinator { next_signer_bitvec, signer_entries: signer_public_keys, weight_threshold: threshold, + total_weight, }) } @@ -409,6 +412,7 @@ impl SignCoordinator { } #[cfg_attr(test, mutants::skip)] + #[cfg(any(test, feature = "testing"))] pub fn begin_sign_v1( &mut self, block: &NakamotoBlock, @@ -703,6 +707,7 @@ impl SignCoordinator { }; let mut total_weight_signed: u32 = 0; + let mut total_reject_weight: u32 = 0; let mut gathered_signatures = BTreeMap::new(); info!("SignCoordinator: beginning to watch for block signatures OR posted blocks."; @@ -726,9 +731,10 @@ impl SignCoordinator { )) } }; + // look in the nakamoto staging db -- a block can only get stored there if it has // enough signing weight to clear the threshold - if let Ok(Some((block, _sz))) = chain_state + if let Ok(Some((stored_block, _sz))) = chain_state .nakamoto_blocks_db() .get_nakamoto_block(&block.block_id()) .map_err(|e| { @@ -741,7 +747,7 @@ impl SignCoordinator { }) { debug!("SignCoordinator: Found signatures in relayed block"); - return Ok(block.header.signer_signature); + return Ok(stored_block.header.signer_signature); } // we don't have the block we ostensibly mined, but perhaps the tenure has advanced @@ -759,13 +765,20 @@ impl SignCoordinator { NakamotoNodeError::SignerSignatureError(msg) })?; - if canonical_stacks_header.anchored_header.height() > block.header.chain_length { + debug!( + "run_sign_v0: our canonical tip is currently {}/{}", + &canonical_stacks_header.consensus_hash, + &canonical_stacks_header.anchored_header.block_hash() + ); + if canonical_stacks_header.anchored_header.height() >= block.header.chain_length + && canonical_stacks_header.index_block_hash() != block.header.block_id() + { info!( - "SignCoordinator: our block {} is superseded by block {}", + "SignCoordinator: our block {} is superceded by block {}", block.header.block_id(), canonical_stacks_header.index_block_hash() ); - break; + return Err(NakamotoNodeError::StacksTipChanged); } // check to see if this event we got is a signer event @@ -809,8 +822,40 @@ impl SignCoordinator { response_hash, signature, ))) => (response_hash, signature), - SignerMessageV0::BlockResponse(BlockResponse::Rejected(_)) => { - debug!("Received rejected block response. Ignoring."); + SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { + let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { + return Err(NakamotoNodeError::SignerSignatureError( + "Signer entry not found".into(), + )); + }; + if rejected_data.signer_signature_hash + == block.header.signer_signature_hash() + { + debug!( + "Signer {} rejected our block {}/{}", + slot_id, + &block.header.consensus_hash, + &block.header.block_hash() + ); + total_reject_weight = total_reject_weight + .checked_add(signer_entry.weight) + .expect("FATAL: total weight rejected exceeds u32::MAX"); + + if total_reject_weight.saturating_add(self.weight_threshold) + > self.total_weight + { + debug!( + "{}/{} signers vote to reject our block {}/{}", + total_reject_weight, + self.total_weight, + &block.header.consensus_hash, + &block.header.block_hash() + ); + return Err(NakamotoNodeError::SignersRejected); + } + } else { + debug!("Received rejected block response for a block besides my own. Ignoring."); + } continue; } SignerMessageV0::BlockProposal(_) => { From 7c43e3e92e061b2704f086080de7320280cdd7f3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:55:07 -0400 Subject: [PATCH 0669/1400] chore: debug signer tests --- testnet/stacks-node/src/tests/signer/mod.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 7fe508407b..7ba63d4738 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -220,6 +220,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest Vec { - debug!("Waiting for Status..."); + debug!( + "Waiting for Status from {} signers...", + self.spawned_signers.len() + ); let now = std::time::Instant::now(); let mut states = Vec::with_capacity(self.spawned_signers.len()); for signer in self.spawned_signers.iter() { @@ -251,6 +256,11 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { + debug!( + "wait_for_states: got {}-th status, {:?}", + states.len(), + &state_info + ); states.push(state_info); } } From bd0ecd8bfc95e31fa456fbfa27f3e6d473dfd103 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:55:18 -0400 Subject: [PATCH 0670/1400] chore: longer timeout for waiting for signers to respond to status checks --- testnet/stacks-node/src/tests/signer/v0.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 351620f36a..07b0e89528 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -182,7 +182,7 @@ impl SignerTest { ); debug!("Waiting for signer set calculation."); let mut reward_set_calculated = false; - let short_timeout = Duration::from_secs(30); + let short_timeout = Duration::from_secs(60); let now = std::time::Instant::now(); // Make sure the signer set is calculated before continuing or signers may not // recognize that they are registered signers in the subsequent burn block event @@ -253,7 +253,7 @@ impl SignerTest { ); debug!("Waiting for signer set calculation."); let mut reward_set_calculated = false; - let short_timeout = Duration::from_secs(30); + let short_timeout = Duration::from_secs(60); let now = std::time::Instant::now(); // Make sure the signer set is calculated before continuing or signers may not // recognize that they are registered signers in the subsequent burn block event @@ -1265,7 +1265,15 @@ fn multiple_miners() { if btc_blocks_mined > max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } + let info_1 = get_chain_info(&conf); + let info_2 = get_chain_info(&conf_node_2); + + info!( + "Issue next block-build request\ninfo 1: {:?}\ninfo 2: {:?}\n", + &info_1, &info_2 + ); signer_test.mine_block_wait_on_processing(Duration::from_secs(30)); + btc_blocks_mined += 1; let blocks = get_nakamoto_headers(&conf); // for this test, there should be one block per tenure From 3f776a8a6acc570f2444ecb024a4cf88e560a8bc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 5 Aug 2024 23:58:00 -0400 Subject: [PATCH 0671/1400] chore: cargo fmt --- stacks-signer/src/v0/signer.rs | 5 ++++- stackslib/src/net/mod.rs | 5 ++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 985ddd4a26..df42315635 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -676,7 +676,10 @@ impl Signer { let mut block = block_info.block; block.header.signer_signature = signatures; - debug!("{self}: Broadcasting Stacks block {} to node", &block.block_id()); + debug!( + "{self}: Broadcasting Stacks block {} to node", + &block.block_id() + ); let broadcasted = stacks_client .post_block(&block) .map_err(|e| { diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 65a598afff..3ba4292f1c 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -64,6 +64,7 @@ use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::{Error as burnchain_error, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::{ConsensusHash, Opcodes}; +use crate::chainstate::coordinator::comm::CoordinatorChannels; use crate::chainstate::coordinator::Error as coordinator_error; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::boot::{ @@ -96,8 +97,6 @@ use crate::util_lib::boot::boot_code_tx_auth; use crate::util_lib::db::{DBConn, Error as db_error}; use crate::util_lib::strings::UrlString; -use crate::chainstate::coordinator::comm::CoordinatorChannels; - /// Implements RPC API pub mod api; /// Implements `ASEntry4` object, which is used in db.rs to store the AS number of an IP address. @@ -634,7 +633,7 @@ pub struct RPCHandlerArgs<'a> { /// tx runtime cost metric pub cost_metric: Option<&'a dyn CostMetric>, /// coordinator channels - pub coord_comms: Option<&'a CoordinatorChannels> + pub coord_comms: Option<&'a CoordinatorChannels>, } impl<'a> RPCHandlerArgs<'a> { From bef8c905d69996b18d7fe92db95bb1b2cccee85e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 5 Aug 2024 23:59:33 -0400 Subject: [PATCH 0672/1400] refactor: pull stalling logic out into functions --- .../chainstate/nakamoto/coordinator/tests.rs | 7 +++-- stackslib/src/chainstate/nakamoto/mod.rs | 31 +++++++++++++------ 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index aecf8c62a2..ae6ce99112 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -47,7 +47,8 @@ use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConnRef, + disable_process_block_stall, enable_process_block_stall, NakamotoBlock, + NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConnRef, TEST_PROCESS_BLOCK_STALL, }; use crate::chainstate::stacks::address::PoxAddress; @@ -2505,7 +2506,7 @@ fn process_next_nakamoto_block_deadlock() { .reopen() .unwrap(); - TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); + enable_process_block_stall(); let miner_thread = std::thread::spawn(move || { info!(" ------------------------------- MINING TENURE"); @@ -2523,7 +2524,7 @@ fn process_next_nakamoto_block_deadlock() { info!(" ------------------------------- SORTDB LOCKED"); // Un-stall the block processing - TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); + disable_process_block_stall(); // Wait a bit, to ensure the tenure will have grabbed any locks it needs std::thread::sleep(std::time::Duration::from_secs(10)); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 0674c49b52..b2106d8e75 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -274,6 +274,25 @@ lazy_static! { #[cfg(any(test, feature = "testing"))] pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); +fn stall_block_processing() { + if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Block processing is stalled due to testing directive."); + while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block processing is no longer stalled due to testing directive."); + } +} + +pub fn enable_process_block_stall() { + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); +} + +pub fn disable_process_block_stall() { + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); +} + /// Trait for common MARF getters between StacksDBConn and StacksDBTx pub trait StacksDBIndexed { fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result, DBError>; @@ -1727,16 +1746,8 @@ impl NakamotoChainState { dispatcher_opt: Option<&'a T>, ) -> Result, ChainstateError> { #[cfg(any(test, feature = "testing"))] - { - if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Block processing is stalled due to testing directive."); - while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Block processing is no longer stalled due to testing directive."); - } - } + stall_block_processing(); + let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); let Some((next_ready_block, block_size)) = nakamoto_blocks_db.next_ready_nakamoto_block(stacks_chain_state.db())? From 54ec4f5a67e130d21f644e99311c60a81cad7aaa Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 6 Aug 2024 07:34:59 -0700 Subject: [PATCH 0673/1400] fix: remove unnecessary clones --- stacks-signer/src/lib.rs | 2 +- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index abc2db331b..13b14bd358 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -147,7 +147,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner cmd_send, res_recv, _phantom: std::marker::PhantomData, - config: config.clone(), + config, } } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5642017dfd..13de8a350c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -105,7 +105,7 @@ use crate::tests::neon_integrations::{ }; use crate::tests::{ get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, - set_random_binds, to_addr, + to_addr, }; use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index dd298d27ea..2bd54ecff7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2393,7 +2393,7 @@ fn signer_set_rollover() { let short_timeout = Duration::from_secs(20); // Verify that naka_conf has our new signer's event observers - for toml in new_signer_configs.clone() { + for toml in &new_signer_configs { let signer_config = SignerConfig::load_from_str(&toml).unwrap(); let endpoint = format!("{}", signer_config.endpoint); assert!(signer_test From a04a5133ea797d65a5b05fd32ebf8f810a217950 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 6 Aug 2024 10:48:23 -0400 Subject: [PATCH 0674/1400] fix: fix build error --- libsigner/src/tests/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libsigner/src/tests/mod.rs b/libsigner/src/tests/mod.rs index c584572ba7..fbe1e59089 100644 --- a/libsigner/src/tests/mod.rs +++ b/libsigner/src/tests/mod.rs @@ -76,7 +76,7 @@ impl SignerRunLoop>, Command, T> for Sim &mut self, event: Option>, _cmd: Option, - _res: Sender>>, + _res: &Sender>>, ) -> Option>> { debug!("Got event: {:?}", &event); if let Some(event) = event { From 7dbeeea4e603ac4045cb137ad013f03850c389f2 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 6 Aug 2024 10:31:06 -0700 Subject: [PATCH 0675/1400] default to localhost for burnchain config samples --- testnet/stacks-node/conf/mainnet-follower-conf.toml | 2 +- testnet/stacks-node/conf/mainnet-mockminer-conf.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index ba42fb6657..143c604a12 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -8,7 +8,7 @@ prometheus_bind = "0.0.0.0:9153" [burnchain] chain = "bitcoin" mode = "mainnet" -peer_host = "bitcoin.hiro.so" +peer_host = "127.0.0.1" # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index 75785454dc..9c3f609c7c 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -10,4 +10,4 @@ prometheus_bind = "0.0.0.0:9153" [burnchain] chain = "bitcoin" mode = "mainnet" -peer_host = "bitcoin.hiro.so" +peer_host = "127.0.0.1" From 588ec07c1e0f1279cc112468e29791bdcd35c370 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 6 Aug 2024 12:46:51 -0700 Subject: [PATCH 0676/1400] update pgp key for encrypting messages --- SECURITY.md | 65 +++++++++++------------------------------------------ 1 file changed, 13 insertions(+), 52 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index e9001abe0a..d3d4ada23d 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -21,61 +21,22 @@ You may also contact us with any questions or to report a vulnerabilty using the | Name | Fingerprint | | ------- | ------------------ | -| security@stacks.org | 8A8B 3C3B 803A 0603 8FB5 3F69 357C 80AB 0885 87A | +| security@stacks.org | ABA3 7FA3 6DBB A591 B0E5 5949 0E94 D065 B32E C7E6 | ``` -----BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBGSBJLgBEACb6bnuvchM5wzmCBh8tvb5Fc90AGmUC9Bfiw85kTNr5N+6Y+fj -Gcyy2ohUEh+5hQE2pJLYzWcEM8ZFomxuwuDkjEkwJHnMofTwPgeP5P9CJUgcOHDa -v/mzzSXze8nhcADiez6QMP1f1m32FoeLpjFyAPkxSzGDv1L8gMHCJn/d1lZyyl52 -1VO6kN6eazEuA9fCCK+ZjUWz5pZCs6QVQ2+3clOoEX+ycomult4/yJhwMHucIPbL -uUGJvpKXkHEi05G2H57mz8sHvz0euRNGTiEUQeVIzaLUmUuViij9KsKk0DSGj3yq -kI+zOcgjAGTMSK00i6bdBV+XZfZlg0uIATr7EGHnb3Lzbvn8lfo/3jaJlQu5elEf -ZlA2nE2dPUfhzY3t8GoroHrbqJaJFd9eZtfTMzwW11KdOzqa0V5FRUkxcBIb81+p -jb2o/YKGWPExX2cHOTYmUdQFM6AtLpif4pMeV11d52vy8LCsjZDwUSZM6lmcg+rL -o2dbBgLvBblHXRtS4UFvx7tHitl5DOk5ZZik3r3jWQmAUXVDBBpq2gaVkponliYv -iVeG+mRLoe+qpvQRMCaw5Rlth0MhqQ26tmpGUIavaFbDqARC8FeIfdov6bUP5/sJ -gaktJrED5T5hNks/N661/AJ8N7JCHJx1exW4TK052PZ2/hHxNSuUEm96VwARAQAB -tClzZWN1cml0eUBzdGFja3Mub3JnIDxzZWN1cml0eUBzdGFja3Mub3JnPokCVAQT -AQgAPhYhBIqLPDuAOgYDj7U/aTV8gKsIhYegBQJkgSS4AhsDBQkHhh87BQsJCAcC -BhUKCQgLAgQWAgMBAh4BAheAAAoJEDV8gKsIhYegWg8P/RsoODRC8QWYnc5oq2Yb -cJSR/0uRcWZVZC/guC553ax89Aro50YsWvd8Z2uakuKKRoc8aPfC4SL1Mufrncwo -9/pIoiB9NQhTAbnp7hBnF5dnIX+Jq4lQIqwG5E36juNiU23qglx3ZZxM5wZrkRi0 -5lsFHpjU4XRkaNgNs6vyiHmtzyR+iESEBY9szfWCRTK8DgOJPLrfDAnc5JWTq7iL -H8pUpClo5p0XFN39lgdhbEISRXaMqY0HJqAI9JKE5UxxRG2uuGbdeHTYu6ji+gz+ -g+gooyVYIVzXVAJHgD9tDsazD+n61epglF0qK0hb+NaRL/2F6KBpmpzY+iDmDkPu -5TTybS52Cm8zSUAsk5w/GSnknep929Cj5hhaD9ijHcLEV0VKSiN0edIPe+Nd57KK -sfggS4l8deD1OjcTxhawRiaKcthdWjm15DzP9WuYEURSpJZAmdSd5Cqx3bSconhW -iYjxAlgZb7t/OJr6N6YQZjga14kwjxia94WNiTz2UQLr/vYAJWQj9RypxL0IrFwr -pJcFnLKec68jLk8opg4LrY9O/gKHQuPDT1EEQ4ssknJAzKxCwrOwCrDvuIzeqzIx -L1mBAtCzF4Q/J1BlmFEIZ7022BycpzPL0VuOrgmyQ6FzEqiKme7Vy/PVWN7H7HhC -qmL2/g9lYt0+gPZazsc8f3NGuQINBGSBJLgBEADTnQe5mX60ClQqigDjAhypkFZY -6k1V850Gp93YsfMYMgzLcyywMo25RT904AF0X72mjF82YZmzOE/b1oSF4ns3nBIg -vCIiEsWTtFMZgerWKcHlYPE0VWR4iGC5DiOLbmrECPQ0JucEErJZWvypgot2R3p/ -hAkEV0CjZp8qObgBf+ViZmfMAkclVtJ5AFB0SQjx6m4ounpKV9euO2db302oMIbM -ssM1F2Dsl7oicAreHOdVZ5OLUkk5nrXmLHtIt6QppPVbWkJA9ArBwAHZ39vLQTBZ -YbehZxWDxzW/HK00CEzb70BwK0HZYFYt9lQwGRUou8dvtk3+nFRsfpAlFoHSLXjp -N+uZBnqQhUeyzT81PkavHpAGTq5ExgT13nyE9vJCPuf5lpthuWiUQYBHu5tUym6G -vHRpT1OyqcbUQUlS+iK24dwxglk2S/NYYOsKyRJ8AhLFQGqMHxlpqNsQ5wxFthZo -ayiP7CwaJFfB5TUe4zWpbMM545BPNQodcB8Njb62tj0ZoAgEbhXerMGrVfUGf6AL -FxcyGhGpjkRI4+e8HfDpiObMw2notIUMXJoYQv3Yf7X/n8QPX2EZDaB8dG43r2Hh -EeEDi6+WOI77LtdVDck71ZXqLukCrusO9HZ6GlB0ohqndRgueGztP82Af3W74Ohj -dEOcK0HC26dKPWhk2wARAQABiQI8BBgBCAAmFiEEios8O4A6BgOPtT9pNXyAqwiF -h6AFAmSBJLgCGwwFCQeGHzsACgkQNXyAqwiFh6CT4A//aOMVH/XIXngvfC/xOdDy -3JnZLtu4kmLfcvxbqEGrNhz1AW4t0Uivt9dgBb4VemgQajhYZyjdLgFhYGvCf446 -V1C79qWa1lwESmSWL63+rXNZMNV+siqnVhICrXw4FhCKP2tfnZ5uT03qTbu0S+9N -4bARjXkfYSxhVqeGmO/ZwuuHXQUojt/XNWBFbbKKM1Y6PlvfWrmX/S2cDAf0QgBd -MMLu7phbUjMzQDsenwiueWaRvDnsQB5GzwOiJheQuKLS1rYlJGnW2cwqjQtQnnC3 -YVb4iCialhAL/GWwjR/r7a6ZxuAB0j2zjKsaxtEMoTaVX3EW3Aoy73dvew0wyakq -OCchiIIJVvB6uXGufqAVVBJAgG7MQIEZLt7M6YSu0gYTdsEnNo7WZYMsX+/NGQ8G -5hguIJZl3MRtax1yPK0e0ergaDaetAhfWwQH2ltAVQColm3LfuLpcyoxYMhdiN86 -ggy4c1t0dS8owuAEdoKScOkOdENYEGF4mkd7nLkU5miaOMxg2NO9prCSpwwxDtt3 -XLkl0yw+0W0rM2Wu5pC0Xw21Cva+uBm3+kfyIRqrtc1Vb3ZrGKzCNQcAvvxq9XM5 -VeE6JLwVj8OP1TFuwmpJJeD5LTZDT0SvmjRB8OuxLwEHHjYtdm0ae0n2Cbou9Y0X -hmf6grobEcyS0PCsLHn3r7Y= -=/YN2 +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mDMEZrJ2wBYJKwYBBAHaRw8BAQdADVWSZGbVgc0SE8XmXkRonl85wXrPHkl9bN0B +jKFBIRS0KXNlY3VyaXR5QHN0YWNrcy5vcmcgPHNlY3VyaXR5QHN0YWNrcy5vcmc+ +iJAEExYIADgWIQSro3+jbbulkbDlWUkOlNBlsy7H5gUCZrJ2wAIbAwULCQgHAgYV +CgkICwIEFgIDAQIeAQIXgAAKCRAOlNBlsy7H5tznAQC6iKqtjCqn2RjtCkr2V6xe +kCe92RfwWsG0415jVpVlDgEA350TCqIT1Jwyqz2aNT2TQ9F6fyKzAiNpLVRImOLH +4Aq4OARmsnbAEgorBgEEAZdVAQUBAQdAvwusRitvUX9hSC8NKS48VTT3LVvZvn87 +JQXRc2CngAEDAQgHiHgEGBYIACAWIQSro3+jbbulkbDlWUkOlNBlsy7H5gUCZrJ2 +wAIbDAAKCRAOlNBlsy7H5oCNAQDae9VhB98HMOvZ99ZuSEyLqXxKjK7xT2P0y1Tm +GuUnNAEAhI+1BjFvO/Hy50DcZTmHWvHJ6/dzibw5Ah+oE458IQo= +=yhSO -----END PGP PUBLIC KEY BLOCK----- ``` From d165ed38eafe8426928b9eef9960f3beea67e91b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 6 Aug 2024 14:57:21 -0500 Subject: [PATCH 0677/1400] ci: fix the empty_sortitions signers test in CI --- testnet/stacks-node/src/tests/signer/v0.rs | 45 ++++++++++++---------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 351620f36a..c48e5bcfc8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2054,35 +2054,38 @@ fn empty_sortition() { .collect(); assert_eq!(signer_slot_ids.len(), num_signers); - // The miner's proposed block should get rejected by the signers - let start_polling = Instant::now(); - let mut found_rejection = false; - while !found_rejection { - std::thread::sleep(Duration::from_secs(1)); - let messages: Vec = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::BlockResponse) - .expect("Failed to get BlockResponse stackerdb session"), - &signer_slot_ids, - ) - .expect("Failed to get message from stackerdb"); - for message in messages { + // The miner's proposed block should get rejected by all the signers + let mut found_rejections = Vec::new(); + wait_for(short_timeout.as_secs(), || { + for slot_id in signer_slot_ids.iter() { + if found_rejections.contains(slot_id) { + continue; + } + let mut latest_msgs = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &[*slot_id] + ).expect("Failed to get message from stackerdb"); + assert!(latest_msgs.len() <= 1); + let Some(latest_msg) = latest_msgs.pop() else { + info!("No message yet from slot #{slot_id}, will wait to try again"); + continue; + }; if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { reason_code, .. - })) = message + })) = latest_msg { assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); - found_rejection = true; + found_rejections.push(*slot_id); } else { - panic!("Unexpected message type"); + info!("Latest message from slot #{slot_id} isn't a block rejection, will wait to see if the signer updates to a rejection"); } } - assert!( - start_polling.elapsed() <= short_timeout, - "Timed out after waiting for response from signer" - ); - } + // wait until we've found rejections for all the signers + Ok(found_rejections.len() == signer_slot_ids.len()) + }).unwrap(); signer_test.shutdown(); } From 571dfee691e1eb8a6f5978331296156104d62c4a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 6 Aug 2024 20:59:51 -0400 Subject: [PATCH 0678/1400] chore: resolve feature flag issue --- .../chainstate/nakamoto/coordinator/tests.rs | 5 +-- stackslib/src/chainstate/nakamoto/mod.rs | 40 ++++++++++--------- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index ae6ce99112..569114aa12 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -44,12 +44,11 @@ use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; use crate::chainstate::nakamoto::test_signers::TestSigners; +use crate::chainstate::nakamoto::test_stall::*; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{ - disable_process_block_stall, enable_process_block_stall, NakamotoBlock, - NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConnRef, - TEST_PROCESS_BLOCK_STALL, + NakamotoBlock, NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConnRef, }; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::pox_4_tests::{get_stacking_minimum, get_tip}; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index b2106d8e75..d059a96cb6 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -270,27 +270,29 @@ lazy_static! { ]; } -// Cause an artifical stall in block-processing, for testing. -#[cfg(any(test, feature = "testing"))] -pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); - -fn stall_block_processing() { - if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Block processing is stalled due to testing directive."); - while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); +#[cfg(test)] +mod test_stall { + pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex> = + std::sync::Mutex::new(None); + + pub fn stall_block_processing() { + if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Block processing is stalled due to testing directive."); + while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block processing is no longer stalled due to testing directive."); } - info!("Block processing is no longer stalled due to testing directive."); } -} -pub fn enable_process_block_stall() { - TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); -} + pub fn enable_process_block_stall() { + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); + } -pub fn disable_process_block_stall() { - TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); + pub fn disable_process_block_stall() { + TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); + } } /// Trait for common MARF getters between StacksDBConn and StacksDBTx @@ -1745,8 +1747,8 @@ impl NakamotoChainState { canonical_sortition_tip: &SortitionId, dispatcher_opt: Option<&'a T>, ) -> Result, ChainstateError> { - #[cfg(any(test, feature = "testing"))] - stall_block_processing(); + #[cfg(test)] + test_stall::stall_block_processing(); let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); let Some((next_ready_block, block_size)) = From e7bd86c6aad868a0c98f25c0e221ef300899d07f Mon Sep 17 00:00:00 2001 From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com> Date: Wed, 7 Aug 2024 17:41:47 +0200 Subject: [PATCH 0679/1400] fix: handle empty type_args in define-trait definition --- clarity/src/vm/types/signatures.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 293c36fd5a..9ba833ba7f 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -1655,6 +1655,9 @@ impl TypeSignature { epoch: StacksEpochId, clarity_version: ClarityVersion, ) -> Result> { + if type_args.is_empty() { + return Err(CheckErrors::InvalidTypeDescription); + } let mut trait_signature: BTreeMap = BTreeMap::new(); let functions_types = type_args[0] .match_list() From 9ab4f034737e9bdf585ad95531271a2d42cc7601 Mon Sep 17 00:00:00 2001 From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com> Date: Wed, 7 Aug 2024 20:05:29 +0300 Subject: [PATCH 0680/1400] feat: remove `unit-tests` from the `check-tests` job --- .github/workflows/stacks-core-tests.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index 70ef457ce7..98eb5cf92c 100644 --- a/.github/workflows/stacks-core-tests.yml +++ b/.github/workflows/stacks-core-tests.yml @@ -187,7 +187,6 @@ jobs: if: always() needs: - full-genesis - - unit-tests - open-api-validation - core-contracts-clarinet-test steps: From 530b6894d1c85a267e61e9e8008971cbca285247 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 7 Aug 2024 21:16:15 +0300 Subject: [PATCH 0681/1400] docs & script for running cargo mutants locally on CI limitation --- contrib/tools/local-mutation-testing.sh | 84 ++++++++++++++++++ docs/mutation-testing.md | 109 ++++++++++++++++++++++++ 2 files changed, 193 insertions(+) create mode 100644 contrib/tools/local-mutation-testing.sh create mode 100644 docs/mutation-testing.md diff --git a/contrib/tools/local-mutation-testing.sh b/contrib/tools/local-mutation-testing.sh new file mode 100644 index 0000000000..a3d563682c --- /dev/null +++ b/contrib/tools/local-mutation-testing.sh @@ -0,0 +1,84 @@ +#!/bin/bash + +set -euo pipefail + +# Install cargo-mutants +cargo install --version 24.7.1 cargo-mutants --locked + +# Create diff file between current branch and develop branch +git diff origin/develop...HEAD > git.diff + +# Remove git diff files about removed/renamed files +awk ' + /^diff --git/ { + diff_line = $0 + getline + if ($0 !~ /^(deleted file mode|similarity index)/) { + print diff_line + print + } + } + !/^(diff --git|deleted file mode|similarity index|rename from|rename to)/ {print} +' git.diff > processed.diff + +# Extract mutants based on the processed diff +cargo mutants --in-diff processed.diff --list > all_mutants.txt + +# Create a directory for organizing mutants +mkdir -p mutants_by_package + +# Organize mutants into files based on their main folder +while IFS= read -r line; do + package=$(echo "$line" | cut -d'/' -f1) + + case $package in + "stackslib") + echo "$line" >> "mutants_by_package/stackslib.txt" + ;; + "testnet") + echo "$line" >> "mutants_by_package/stacks-node.txt" + ;; + "stacks-signer") + echo "$line" >> "mutants_by_package/stacks-signer.txt" + ;; + *) + echo "$line" >> "mutants_by_package/small-packages.txt" + ;; + esac +done < all_mutants.txt + +# Function to run mutants for a package +run_mutants() { + local package=$1 + local threshold=$2 + local output_dir=$3 + local mutant_file="mutants_by_package/${package}.txt" + + if [ ! -f "$mutant_file" ]; then + echo "No mutants found for $package" + return + fi + + local regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "$mutant_file" | paste -sd'|' -) + local mutant_count=$(cargo mutants -F "$regex_pattern" -E ": replace .{1,2} with .{1,2} in " --list | wc -l) + + if [ "$mutant_count" -gt "$threshold" ]; then + echo "Running mutants for $package ($mutant_count mutants)" + cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + -F "$regex_pattern" \ + -E ": replace .{1,2} with .{1,2} in " \ + --output "$output_dir" \ + --test-tool=nextest \ + --package "$package" \ + -- --all-targets --test-threads 1 + + echo $? > "${output_dir}/exit_code.txt" + else + echo "Skipping $package, only $mutant_count mutants (threshold: $threshold)" + fi +} + +# Run mutants for each wanted package +run_mutants "stacks-signer" 500 "./stacks-signer_mutants" +run_mutants "stacks-node" 540 "./stacks-node_mutants" +run_mutants "stackslib" 72 "./stackslib_mutants" diff --git a/docs/mutation-testing.md b/docs/mutation-testing.md new file mode 100644 index 0000000000..7c635f3915 --- /dev/null +++ b/docs/mutation-testing.md @@ -0,0 +1,109 @@ +# Mutation Testing + +This document describes how to run mutation testing locally to mimic the outcome of a PR, without the CI limitation it provides by timing out after 6 hours. +[Here is the script](../contrib/tools/local-mutation-testing.sh) to run mutation locally running the mutants created by the changes between the current branch and develop. It does automatically all the steps explained below. + +From the root level of the stacks-core repository run +```sh +./contrib/tools/local-mutation-testing.sh +``` + +## Prerequirements + +Install the cargo mutants library +```sh +cargo install --version 24.7.1 cargo-mutants --locked +``` + + +## Steps +1. be on source branch you would use for the PR. +2. create diff file comparing this branch with the `develop` branch + ```sh + git diff origin/develop..HEAD > git.diff + ``` +3. clean up the diff file and create auxiliary files + ```sh + awk ' + /^diff --git/ { + diff_line = $0 + getline + if ($0 !~ /^(deleted file mode|similarity index)/) { + print diff_line + print + } + } + !/^(diff --git|deleted file mode|similarity index|rename from|rename to)/ {print} + ' git.diff > processed.diff + + # Extract mutants based on the processed diff + cargo mutants --in-diff processed.diff --list > all_mutants.txt + + # Create a directory for organizing mutants + mkdir -p mutants_by_package + + # Organize mutants into files based on their main folder + while IFS= read -r line; do + package=$(echo "$line" | cut -d'/' -f1) + + case $package in + "stackslib") + echo "$line" >> "mutants_by_package/stackslib.txt" + ;; + "testnet") + echo "$line" >> "mutants_by_package/stacks-node.txt" + ;; + "stacks-signer") + echo "$line" >> "mutants_by_package/stacks-signer.txt" + ;; + *) + echo "$line" >> "mutants_by_package/small-packages.txt" + ;; + esac + done < all_mutants.txt + ``` +4. based on the package required to run the mutants for + a. stackslib package + ```sh + regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stackslib.txt" | paste -sd'|' -) + + cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + -F "$regex_pattern" \ + -E ": replace .{1,2} with .{1,2} in " \ + --output "./stackslib_mutants" \ + --test-tool=nextest \ + -- --all-targets --test-threads 1 + ``` + b. stacks-node (testnet) package + ```sh + regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/testnet.txt" | paste -sd'|' -) + + cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + -F "$regex_pattern" \ + -E ": replace .{1,2} with .{1,2} in " \ + --output "./testnet_mutants" \ + --test-tool=nextest \ + -- --all-targets --test-threads 1 + ``` + c. stacks-signer + ```sh + regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stacks-signer.txt" | paste -sd'|' -) + + cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + -F "$regex_pattern" \ + -E ": replace .{1,2} with .{1,2} in " \ + --output "./stacks-signer_mutants" \ + --test-tool=nextest \ + -- --all-targets --test-threads 1 + ``` + d. all other packages combined + ```sh + regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/small-packages.txt" | paste -sd'|' -) + + cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + -F "$regex_pattern" \ + -E ": replace .{1,2} with .{1,2} in " \ + --output "./small-packages_mutants" \ + --test-tool=nextest \ + -- --all-targets --test-threads 1 + ``` From 14594ba59e9c95964704b37c15bc3a3189fe1a29 Mon Sep 17 00:00:00 2001 From: ASuciuX <151519329+ASuciuX@users.noreply.github.com> Date: Wed, 7 Aug 2024 21:31:13 +0300 Subject: [PATCH 0682/1400] Update docs/mutation-testing.md Co-authored-by: wileyj <2847772+wileyj@users.noreply.github.com> --- docs/mutation-testing.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/mutation-testing.md b/docs/mutation-testing.md index 7c635f3915..c94b5ca36c 100644 --- a/docs/mutation-testing.md +++ b/docs/mutation-testing.md @@ -1,7 +1,7 @@ # Mutation Testing This document describes how to run mutation testing locally to mimic the outcome of a PR, without the CI limitation it provides by timing out after 6 hours. -[Here is the script](../contrib/tools/local-mutation-testing.sh) to run mutation locally running the mutants created by the changes between the current branch and develop. It does automatically all the steps explained below. +[Here is the script](../contrib/tools/local-mutation-testing.sh) to run the tests locally by running the mutants created by the changes between `HEAD` and develop. It does automatically all the steps explained below. From the root level of the stacks-core repository run ```sh From e21a0f1940f9aef73783e387448237e9e847d097 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 7 Aug 2024 22:08:34 +0300 Subject: [PATCH 0683/1400] changed script to be executable and add capitalization to the numbered lists --- contrib/tools/local-mutation-testing.sh | 0 docs/mutation-testing.md | 19 ++++++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) mode change 100644 => 100755 contrib/tools/local-mutation-testing.sh diff --git a/contrib/tools/local-mutation-testing.sh b/contrib/tools/local-mutation-testing.sh old mode 100644 new mode 100755 diff --git a/docs/mutation-testing.md b/docs/mutation-testing.md index 7c635f3915..383b3ff8eb 100644 --- a/docs/mutation-testing.md +++ b/docs/mutation-testing.md @@ -1,7 +1,8 @@ # Mutation Testing This document describes how to run mutation testing locally to mimic the outcome of a PR, without the CI limitation it provides by timing out after 6 hours. -[Here is the script](../contrib/tools/local-mutation-testing.sh) to run mutation locally running the mutants created by the changes between the current branch and develop. It does automatically all the steps explained below. +[Here is the script](../contrib/tools/local-mutation-testing.sh) to run mutation locally running the mutants created by the changes between the current branch and develop. +It does automatically all the steps explained below. From the root level of the stacks-core repository run ```sh @@ -17,12 +18,12 @@ cargo install --version 24.7.1 cargo-mutants --locked ## Steps -1. be on source branch you would use for the PR. -2. create diff file comparing this branch with the `develop` branch +1. Be on source branch you would use for the PR. +2. Create diff file comparing this branch with the `develop` branch ```sh git diff origin/develop..HEAD > git.diff ``` -3. clean up the diff file and create auxiliary files +3. Clean up the diff file and create auxiliary files ```sh awk ' /^diff --git/ { @@ -62,8 +63,8 @@ cargo install --version 24.7.1 cargo-mutants --locked esac done < all_mutants.txt ``` -4. based on the package required to run the mutants for - a. stackslib package +4. Based on the package required to run the mutants for + a. Stackslib package ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stackslib.txt" | paste -sd'|' -) @@ -74,7 +75,7 @@ cargo install --version 24.7.1 cargo-mutants --locked --test-tool=nextest \ -- --all-targets --test-threads 1 ``` - b. stacks-node (testnet) package + b. Stacks-node (testnet) package ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/testnet.txt" | paste -sd'|' -) @@ -85,7 +86,7 @@ cargo install --version 24.7.1 cargo-mutants --locked --test-tool=nextest \ -- --all-targets --test-threads 1 ``` - c. stacks-signer + c. Stacks-signer ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stacks-signer.txt" | paste -sd'|' -) @@ -96,7 +97,7 @@ cargo install --version 24.7.1 cargo-mutants --locked --test-tool=nextest \ -- --all-targets --test-threads 1 ``` - d. all other packages combined + d. All other packages combined ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/small-packages.txt" | paste -sd'|' -) From f1dcde6939ab04f8683b597c5fa278548d08c26e Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 7 Aug 2024 13:22:03 -0700 Subject: [PATCH 0684/1400] removing chain directive from sample configs --- testnet/stacks-node/conf/mainnet-follower-conf.toml | 1 - testnet/stacks-node/conf/mainnet-miner-conf.toml | 1 - testnet/stacks-node/conf/mainnet-mockminer-conf.toml | 1 - testnet/stacks-node/conf/testnet-follower-conf.toml | 1 - testnet/stacks-node/conf/testnet-miner-conf.toml | 1 - 5 files changed, 5 deletions(-) diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 143c604a12..c309463389 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -6,7 +6,6 @@ bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a60592 prometheus_bind = "0.0.0.0:9153" [burnchain] -chain = "bitcoin" mode = "mainnet" peer_host = "127.0.0.1" diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/testnet/stacks-node/conf/mainnet-miner-conf.toml index fc526f0878..4d258b33f0 100644 --- a/testnet/stacks-node/conf/mainnet-miner-conf.toml +++ b/testnet/stacks-node/conf/mainnet-miner-conf.toml @@ -10,7 +10,6 @@ miner = true mine_microblocks = false # Disable microblocks (ref: https://github.com/stacks-network/stacks-core/pull/4561 ) [burnchain] -chain = "bitcoin" mode = "mainnet" peer_host = "127.0.0.1" username = "" diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index 9c3f609c7c..8e966a8a0f 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -8,6 +8,5 @@ bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a60592 prometheus_bind = "0.0.0.0:9153" [burnchain] -chain = "bitcoin" mode = "mainnet" peer_host = "127.0.0.1" diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index f5fb2c04b0..de0973f2c7 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -6,7 +6,6 @@ bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a9568 prometheus_bind = "0.0.0.0:9153" [burnchain] -chain = "bitcoin" mode = "krypton" peer_host = "bitcoin.regtest.hiro.so" peer_port = 18444 diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml index e565fd0ee2..9b0d88ad42 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -6,7 +6,6 @@ bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a9568 prometheus_bind = "0.0.0.0:9153" [burnchain] -chain = "bitcoin" mode = "krypton" peer_host = "127.0.0.1" username = "" From 1c79018e40f9b093ab918bdbb15cbab7018e3606 Mon Sep 17 00:00:00 2001 From: Hugo C <911307+hugocaillard@users.noreply.github.com> Date: Thu, 8 Aug 2024 14:52:00 +0200 Subject: [PATCH 0685/1400] refactor: addess review Co-authored-by: Aaron Blankstein --- clarity/src/vm/types/signatures.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 9ba833ba7f..a214c79b40 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -1655,11 +1655,10 @@ impl TypeSignature { epoch: StacksEpochId, clarity_version: ClarityVersion, ) -> Result> { - if type_args.is_empty() { - return Err(CheckErrors::InvalidTypeDescription); - } let mut trait_signature: BTreeMap = BTreeMap::new(); - let functions_types = type_args[0] + let functions_types = type_args + .get(0) + .ok_or_else(|| CheckErrors::InvalidTypeDescription)? .match_list() .ok_or(CheckErrors::DefineTraitBadSignature)?; From 66793b11aea36e0dbe8460b409c18e90aa5659bb Mon Sep 17 00:00:00 2001 From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com> Date: Thu, 8 Aug 2024 14:52:58 +0200 Subject: [PATCH 0686/1400] refactor: format --- clarity/src/vm/types/signatures.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index a214c79b40..280258e026 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -1657,8 +1657,8 @@ impl TypeSignature { ) -> Result> { let mut trait_signature: BTreeMap = BTreeMap::new(); let functions_types = type_args - .get(0) - .ok_or_else(|| CheckErrors::InvalidTypeDescription)? + .get(0) + .ok_or_else(|| CheckErrors::InvalidTypeDescription)? .match_list() .ok_or(CheckErrors::DefineTraitBadSignature)?; From 09a8427d96d32235cccb524f85e2bdfd522a4840 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:46:18 -0400 Subject: [PATCH 0687/1400] chore: fault-injection to disable block broadcast --- stacks-signer/src/config.rs | 2 ++ stacks-signer/src/runloop.rs | 1 + stacks-signer/src/v0/signer.rs | 8 ++++++++ 3 files changed, 11 insertions(+) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 66cf5a5f7d..68f6141ee8 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -157,6 +157,8 @@ pub struct SignerConfig { pub first_proposal_burn_block_timing: Duration, /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, + /// Broadcast a block to the node if we gather enough signatures from other signers + pub broadcast_signed_blocks: bool, } /// The parsed configuration for the signer diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 284ea6ce19..b3d467fb00 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -271,6 +271,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, + broadcast_signed_blocks: true, }) } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index df42315635..4aac1b11df 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -63,6 +63,8 @@ pub struct Signer { pub signer_db: SignerDb, /// Configuration for proposal evaluation pub proposal_config: ProposalEvalConfig, + /// Whether or not to broadcast signed blocks if we gather all signatures + pub broadcast_signed_blocks: bool, } impl std::fmt::Display for Signer { @@ -276,6 +278,7 @@ impl From for Signer { reward_cycle: signer_config.reward_cycle, signer_db, proposal_config, + broadcast_signed_blocks: signer_config.broadcast_signed_blocks, } } } @@ -580,6 +583,11 @@ impl Signer { block_hash: &Sha512Trunc256Sum, signature: &MessageSignature, ) { + if !self.broadcast_signed_blocks { + debug!("{self}: Will ignore block-accept signature, since configured not to broadcast signed blocks"); + return; + } + debug!("{self}: Received a block-accept signature: ({block_hash}, {signature})"); // authenticate the signature -- it must be signed by one of the stacking set From 2e9541e89fac7569172ef9ed12b5e5c0cbe0186d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:46:39 -0400 Subject: [PATCH 0688/1400] chore: copyright statements and docstrings for event dispatcher --- testnet/stacks-node/src/config.rs | 16 +++++++++ testnet/stacks-node/src/event_dispatcher.rs | 36 +++++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 0f13943b1c..f5c7c7bfbd 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1,3 +1,19 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use std::collections::{HashMap, HashSet}; use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; use std::path::PathBuf; diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index f0ae639e27..0a60bd3593 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -1,3 +1,19 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::sync::mpsc::{channel, Receiver, Sender}; @@ -673,19 +689,39 @@ impl EventObserver { } } +/// Events received from block-processing. +/// Stacks events are structured as JSON, and are grouped by topic. An event observer can +/// subscribe to one or more specific event streams, or the "any" stream to receive all of them. #[derive(Clone)] pub struct EventDispatcher { + /// List of configured event observers to which events will be posted. + /// The fields below this contain indexes into this list. registered_observers: Vec, + /// Smart contract-specific events, keyed by (contract-id, event-name). Values are indexes into `registered_observers`. contract_events_observers_lookup: HashMap<(QualifiedContractIdentifier, String), HashSet>, + /// Asset event observers, keyed by fully-qualified asset identifier. Values are indexes into + /// `registered_observers. assets_observers_lookup: HashMap>, + /// Index into `registered_observers` that will receive burn block events burn_block_observers_lookup: HashSet, + /// Index into `registered_observers` that will receive mempool events mempool_observers_lookup: HashSet, + /// Index into `registered_observers` that will receive microblock events microblock_observers_lookup: HashSet, + /// Index into `registered_observers` that will receive STX events stx_observers_lookup: HashSet, + /// Index into `registered_observers` that will receive all events any_event_observers_lookup: HashSet, + /// Index into `registered_observers` that will receive block miner events (Stacks 2.5 and + /// lower) miner_observers_lookup: HashSet, + /// Index into `registered_observers` that will receive microblock miner events (Stacks 2.5 and + /// lower) mined_microblocks_observers_lookup: HashSet, + /// Index into `registered_observers` that will receive StackerDB events stackerdb_observers_lookup: HashSet, + /// Index into `registered_observers` that will receive block proposal events (Nakamoto and + /// later) block_proposal_observers_lookup: HashSet, } From ca66c0609162dc89d636b8332bd386026a9b292d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:46:56 -0400 Subject: [PATCH 0689/1400] feat: allow the initiative-raiser to list their caller ID --- testnet/stacks-node/src/globals.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index 675a747480..b1ddf2e82b 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -71,7 +71,7 @@ pub struct Globals { previous_best_tips: Arc>>, /// Initiative flag. /// Raised when the main loop should wake up and do something. - initiative: Arc>, + initiative: Arc>>, } // Need to manually implement Clone, because [derive(Clone)] requires @@ -123,7 +123,7 @@ impl Globals { start_mining_height: Arc::new(Mutex::new(start_mining_height)), estimated_winning_probs: Arc::new(Mutex::new(HashMap::new())), previous_best_tips: Arc::new(Mutex::new(BTreeMap::new())), - initiative: Arc::new(Mutex::new(false)), + initiative: Arc::new(Mutex::new(None)), } } @@ -435,10 +435,10 @@ impl Globals { } /// Raise the initiative flag - pub fn raise_initiative(&self) { + pub fn raise_initiative(&self, raiser: String) { match self.initiative.lock() { Ok(mut initiative) => { - *initiative = true; + *initiative = Some(raiser); } Err(_e) => { error!("FATAL: failed to lock initiative"); @@ -448,11 +448,10 @@ impl Globals { } /// Clear the initiative flag and return its value - pub fn take_initiative(&self) -> bool { + pub fn take_initiative(&self) -> Option { match self.initiative.lock() { Ok(mut initiative) => { - let ret = *initiative; - *initiative = false; + let ret = (*initiative).take(); ret } Err(_e) => { From daf0257e0de66fd9eaa74c08858c29265ec1dc3d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:47:26 -0400 Subject: [PATCH 0690/1400] fix: don't raise initiative needlessly --- testnet/stacks-node/src/nakamoto_node.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index cde9c1da1f..6e57b8023e 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -275,7 +275,6 @@ impl StacksNode { )) .map_err(|_| Error::ChannelClosed)?; - self.globals.raise_initiative(); Ok(()) } From fbf83379fb5a970b74942a0c0ff56cc5852ed16a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:47:46 -0400 Subject: [PATCH 0691/1400] chore: refactor fault-injection code into its own methods; add fault-injection for block-processing after block-broadcast --- .../stacks-node/src/nakamoto_node/miner.rs | 104 +++++++++++++----- 1 file changed, 77 insertions(+), 27 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 7e4e3408fa..cd9d821d6b 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -63,6 +63,8 @@ use crate::run_loop::RegisteredKey; #[cfg(test)] pub static TEST_BROADCAST_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); #[cfg(test)] +pub static TEST_BLOCK_ANNOUNCE_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(test)] pub static TEST_SKIP_P2P_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); /// If the miner was interrupted while mining a block, how long should the @@ -181,6 +183,67 @@ impl BlockMinerThread { } } + #[cfg(test)] + fn fault_injection_block_broadcast_stall(new_block: &NakamotoBlock) { + if *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Fault injection: Broadcasting is stalled due to testing directive."; + "stacks_block_id" => %new_block.block_id(), + "stacks_block_hash" => %new_block.header.block_hash(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + while *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Fault injection: Broadcasting is no longer stalled due to testing directive."; + "block_id" => %new_block.block_id(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + } + } + + #[cfg(not(test))] + fn fault_injection_block_broadcast_stall(_ignored: &NakamotoBlock) {} + + #[cfg(test)] + fn fault_injection_block_announce_stall(new_block: &NakamotoBlock) { + if *TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Fault injection: Block announcement is stalled due to testing directive."; + "stacks_block_id" => %new_block.block_id(), + "stacks_block_hash" => %new_block.header.block_hash(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + while *TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Fault injection: Block announcement is no longer stalled due to testing directive."; + "block_id" => %new_block.block_id(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + } + } + + #[cfg(not(test))] + fn fault_injection_block_announce_stall(_ignored: &NakamotoBlock) {} + + #[cfg(test)] + fn fault_injection_skip_block_broadcast() -> bool { + if *TEST_SKIP_P2P_BROADCAST.lock().unwrap() == Some(true) { + return true; + } + false + } + + #[cfg(not(test))] + fn fault_injection_skip_block_broadcast() -> bool { + false + } + /// Stop a miner tenure by blocking the miner and then joining the tenure thread pub fn stop_miner( globals: &Globals, @@ -279,27 +342,7 @@ impl BlockMinerThread { }; if let Some(mut new_block) = new_block { - #[cfg(test)] - { - if *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Broadcasting is stalled due to testing directive."; - "stacks_block_id" => %new_block.block_id(), - "stacks_block_hash" => %new_block.header.block_hash(), - "height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash - ); - while *TEST_BROADCAST_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Broadcasting is no longer stalled due to testing directive."; - "block_id" => %new_block.block_id(), - "height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash - ); - } - } - + Self::fault_injection_block_broadcast_stall(&new_block); let (reward_set, signer_signature) = match self.gather_signatures( &mut new_block, self.burn_block.block_height, @@ -338,14 +381,20 @@ impl BlockMinerThread { "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); - self.globals.coord().announce_new_stacks_block(); } + // update mined-block counters and mined-tenure counters self.globals.counters.bump_naka_mined_blocks(); if self.mined_blocks.is_empty() { // this is the first block of the tenure, bump tenure counter self.globals.counters.bump_naka_mined_tenures(); } + + // wake up chains coordinator + Self::fault_injection_block_announce_stall(&new_block); + self.globals.coord().announce_new_stacks_block(); + + // store mined block self.mined_blocks.push(new_block); } @@ -638,11 +687,12 @@ impl BlockMinerThread { block: &NakamotoBlock, reward_set: RewardSet, ) -> Result<(), ChainstateError> { - #[cfg(test)] - { - if *TEST_SKIP_P2P_BROADCAST.lock().unwrap() == Some(true) { - return Ok(()); - } + if Self::fault_injection_skip_block_broadcast() { + warn!( + "Fault injection: Skipping block broadcast for {}", + block.block_id() + ); + return Ok(()); } let mut sortition_handle = sort_db.index_handle_at_ch(&block.header.consensus_hash)?; From 497c773be31f7ad0d71fd382f82e49885f028810 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:48:18 -0400 Subject: [PATCH 0692/1400] chore: API sync --- testnet/stacks-node/src/nakamoto_node/peer.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 7483ce5115..b825cfe46f 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -352,7 +352,9 @@ impl PeerThread { "P2P: Dispatched result to Relayer! {} results remaining", self.results_with_data.len() ); - self.globals.raise_initiative(); + self.globals.raise_initiative( + "PeerThread::run_one_pass() with data-bearing network result".to_string(), + ); } } From ee0f56bb019861bf536bf0f7a274ba0e113145aa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:49:38 -0400 Subject: [PATCH 0693/1400] chore: some refactoring to separate miner directive choice from sortition handling (so the former can be tested in a unit test) --- .../stacks-node/src/nakamoto_node/relayer.rs | 184 ++++++++++++------ 1 file changed, 125 insertions(+), 59 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 3ee862364c..888f0548bb 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -375,39 +375,16 @@ impl RelayerThread { } } - /// Given the pointer to a recently processed sortition, see if we won the sortition. - /// - /// Returns a directive to the relayer thread to either start, stop, or continue a tenure. - pub fn process_sortition( - &mut self, - consensus_hash: ConsensusHash, - burn_hash: BurnchainHeaderHash, + /// Choose a miner directive based on the outcome of a sortition + pub(crate) fn choose_miner_directive( + config: &Config, + sortdb: &SortitionDB, + sn: BlockSnapshot, + won_sortition: bool, committed_index_hash: StacksBlockId, - ) -> Result { - let sn = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &consensus_hash) - .expect("FATAL: failed to query sortition DB") - .expect("FATAL: unknown consensus hash"); - - self.globals.set_last_sortition(sn.clone()); - - let won_sortition = sn.sortition && self.last_commits.remove(&sn.winning_block_txid); - - info!( - "Relayer: Process sortition"; - "sortition_ch" => %consensus_hash, - "burn_hash" => %burn_hash, - "burn_height" => sn.block_height, - "winning_txid" => %sn.winning_block_txid, - "committed_parent" => %committed_index_hash, - "won_sortition?" => won_sortition, - ); - - if won_sortition { - increment_stx_blocks_mined_counter(); - } - + ) -> MinerDirective { let directive = if sn.sortition { - if won_sortition || self.config.get_node_config(false).mock_mining { + if won_sortition || config.get_node_config(false).mock_mining { MinerDirective::BeginTenure { parent_tenure_start: committed_index_hash, burnchain_tip: sn, @@ -416,17 +393,16 @@ impl RelayerThread { MinerDirective::StopTenure } } else { - let ih = self.sortdb.index_handle(&sn.sortition_id); + let ih = sortdb.index_handle(&sn.sortition_id); let parent_sn = ih.get_last_snapshot_with_sortition(sn.block_height).expect( "FATAL: failed to query sortition DB for last snapshot with non-empty tenure", ); - let parent_epoch = - SortitionDB::get_stacks_epoch(self.sortdb.conn(), parent_sn.block_height) - .expect("FATAL: failed to query sortiiton DB for epoch") - .expect("FATAL: no epoch defined for existing sortition"); + let parent_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height) + .expect("FATAL: failed to query sortiiton DB for epoch") + .expect("FATAL: no epoch defined for existing sortition"); - let cur_epoch = SortitionDB::get_stacks_epoch(self.sortdb.conn(), sn.block_height) + let cur_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sn.block_height) .expect("FATAL: failed to query sortition DB for epoch") .expect("FATAL: no epoch defined for existing sortition"); @@ -438,11 +414,65 @@ impl RelayerThread { } } else { MinerDirective::ContinueTenure { - new_burn_view: consensus_hash, + new_burn_view: sn.consensus_hash, } } }; - Ok(directive) + directive + } + + /// Given the pointer to a recently processed sortition, see if we won the sortition, and + /// determine what miner action (if any) to take. + /// + /// Returns a directive to the relayer thread to either start, stop, or continue a tenure, if + /// this sortition matches the sortition tip. + /// + /// Otherwise, returns None, meaning no action will be taken. + fn process_sortition( + &mut self, + consensus_hash: ConsensusHash, + burn_hash: BurnchainHeaderHash, + committed_index_hash: StacksBlockId, + ) -> Result, NakamotoNodeError> { + let sn = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &consensus_hash) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: unknown consensus hash"); + + // always clear this even if this isn't the latest sortition + let won_sortition = sn.sortition && self.last_commits.remove(&sn.winning_block_txid); + if won_sortition { + increment_stx_blocks_mined_counter(); + } + self.globals.set_last_sortition(sn.clone()); + + info!( + "Relayer: Process sortition"; + "sortition_ch" => %consensus_hash, + "burn_hash" => %burn_hash, + "burn_height" => sn.block_height, + "winning_txid" => %sn.winning_block_txid, + "committed_parent" => %committed_index_hash, + "won_sortition?" => won_sortition, + ); + + let cur_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) + .expect("FATAL: failed to query sortition DB"); + + if cur_sn.consensus_hash != consensus_hash { + info!("Relayer: Current sortition {} is ahead of processed sortition {}; taking no action", &cur_sn.consensus_hash, consensus_hash); + self.globals + .raise_initiative("process_sortition".to_string()); + return Ok(None); + } + + let directive = Self::choose_miner_directive( + &self.config, + &self.sortdb, + sn, + won_sortition, + committed_index_hash, + ); + Ok(Some(directive)) } /// Constructs and returns a LeaderKeyRegisterOp out of the provided params @@ -899,11 +929,17 @@ impl RelayerThread { burn_hash: BurnchainHeaderHash, committed_index_hash: StacksBlockId, ) -> bool { - let Ok(miner_instruction) = - self.process_sortition(consensus_hash, burn_hash, committed_index_hash) - else { - return false; - }; + let miner_instruction = + match self.process_sortition(consensus_hash, burn_hash, committed_index_hash) { + Ok(Some(miner_instruction)) => miner_instruction, + Ok(None) => { + return true; + } + Err(e) => { + warn!("Relayer: process_sortition returned {:?}", &e); + return false; + } + }; match miner_instruction { MinerDirective::BeginTenure { @@ -946,6 +982,22 @@ impl RelayerThread { true } + #[cfg(test)] + fn fault_injection_skip_block_commit(&self) -> bool { + self.globals + .counters + .naka_skip_commit_op + .0 + .lock() + .unwrap() + .unwrap_or(false) + } + + #[cfg(not(test))] + fn fault_injection_skip_block_commit(&self) -> bool { + false + } + /// Generate and submit the next block-commit, and record it locally fn issue_block_commit( &mut self, @@ -953,20 +1005,25 @@ impl RelayerThread { tip_block_bh: BlockHeaderHash, ) -> Result<(), NakamotoNodeError> { let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?; - #[cfg(test)] - { - if self - .globals - .counters - .naka_skip_commit_op - .0 - .lock() - .unwrap() - .unwrap_or(false) - { - warn!("Relayer: not submitting block-commit to bitcoin network due to test directive."); - return Ok(()); - } + if self.fault_injection_skip_block_commit() { + warn!("Relayer: not submitting block-commit to bitcoin network due to test directive."); + return Ok(()); + } + + // last chance -- is this still the stacks tip? + let (cur_stacks_tip_ch, cur_stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap_or_else( + |e| { + panic!("Failed to load canonical stacks tip: {:?}", &e); + }, + ); + + if cur_stacks_tip_ch != tip_block_ch || cur_stacks_tip_bh != tip_block_bh { + info!( + "Stacks tip changed prior to commit: {}/{} != {}/{}", + &cur_stacks_tip_ch, &cur_stacks_tip_bh, &tip_block_ch, &tip_block_bh + ); + return Err(NakamotoNodeError::StacksTipChanged); } // sign and broadcast @@ -990,6 +1047,7 @@ impl RelayerThread { "Relayer: Submitted block-commit"; "tip_consensus_hash" => %tip_block_ch, "tip_block_hash" => %tip_block_bh, + "tip_block_id" => %StacksBlockId::new(&tip_block_ch, &tip_block_bh), "txid" => %txid, ); @@ -1005,6 +1063,7 @@ impl RelayerThread { /// Determine what the relayer should do to advance the chain. /// * If this isn't a miner, then it's always nothing. /// * Otherwise, if we haven't done so already, go register a VRF public key + /// * If the stacks chain tip or burnchain tip has changed, then issue a block-commit fn initiative(&mut self) -> Option { if !self.is_miner { return None; @@ -1066,6 +1125,8 @@ impl RelayerThread { debug!("Relayer: initiative to commit"; "sortititon tip" => %sort_tip.consensus_hash, "stacks tip" => %stacks_tip, + "stacks_tip_ch" => %stacks_tip_ch, + "stacks_tip_bh" => %stacks_tip_bh, "last-commit burn view" => %self.last_committed.as_ref().map(|cmt| cmt.get_burn_tip().consensus_hash.to_string()).unwrap_or("(not set)".to_string()), "last-commit ongoing tenure" => %self.last_committed.as_ref().map(|cmt| cmt.get_tenure_id().to_string()).unwrap_or("(not set)".to_string()), "burnchain view changed?" => %burnchain_changed, @@ -1095,7 +1156,7 @@ impl RelayerThread { while self.globals.keep_running() { let raised_initiative = self.globals.take_initiative(); let timed_out = Instant::now() >= self.next_initiative; - let directive = if raised_initiative || timed_out { + let directive = if raised_initiative.is_some() || timed_out { self.next_initiative = Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); self.initiative() @@ -1119,6 +1180,11 @@ impl RelayerThread { } }; + debug!("Relayer: main loop directive"; + "directive" => %directive, + "raised_initiative" => %raised_initiative.unwrap_or("relay_rcv".to_string()), + "timed_out" => %timed_out); + if !self.handle_directive(directive) { break; } From 45333547bc9d6a840f1ac0252396a4fd055d0291 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:50:48 -0400 Subject: [PATCH 0694/1400] feat: get event dispatcher --- testnet/stacks-node/src/run_loop/boot_nakamoto.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 97bf8dd4e0..0d509b297c 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -26,6 +26,7 @@ use stacks::core::StacksEpochExtension; use stacks::net::p2p::PeerNetwork; use stacks_common::types::{StacksEpoch, StacksEpochId}; +use crate::event_dispatcher::EventDispatcher; use crate::globals::NeonGlobals; use crate::neon::Counters; use crate::neon_node::LeaderKeyRegistrationState; @@ -124,6 +125,14 @@ impl BootRunLoop { } } + /// Get the event dispatcher + pub fn get_event_dispatcher(&self) -> EventDispatcher { + match &self.active_loop { + InnerLoops::Epoch2(x) => x.get_event_dispatcher(), + InnerLoops::Epoch3(x) => x.get_event_dispatcher(), + } + } + /// The main entry point for the run loop. This starts either a 2.x-neon or 3.x-nakamoto /// node depending on the current burnchain height. pub fn start(&mut self, burnchain_opt: Option, mine_start: u64) { From aa9144622d3efaccf1659e98f5c314d6d38930b5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:51:05 -0400 Subject: [PATCH 0695/1400] chore: API sync --- testnet/stacks-node/src/run_loop/nakamoto.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 3ecd4f1e7d..44a6c0fba9 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -711,7 +711,7 @@ impl RunLoop { sortition_db_height ); last_tenure_sortition_height = sortition_db_height; - globals.raise_initiative(); + globals.raise_initiative("runloop-synced".to_string()); } } } From f845f8b7bf5572303ccc349ac63647e5e575b974 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 10:51:14 -0400 Subject: [PATCH 0696/1400] fix: fix forked-tenure-is-ignored test --- .../src/tests/nakamoto_integrations.rs | 99 ++++++++++++++++--- 1 file changed, 84 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b8cb47d579..9534daf158 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -94,7 +94,7 @@ use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; -use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; +use crate::nakamoto_node::miner::{TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL}; use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; @@ -3774,16 +3774,26 @@ fn forked_tenure_is_ignored() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - info!("Starting tenure A."); + info!("Starting Tenure A."); wait_for_first_naka_block_commit(60, &commits_submitted); // In the next block, the miner should win the tenure and submit a stacks block let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); - Ok(commits_count > commits_before && blocks_count > blocks_before) + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(commits_count > commits_before + 1 + && blocks_count > blocks_before + && blocks_processed > blocks_processed_before) }) .unwrap(); @@ -3791,16 +3801,23 @@ fn forked_tenure_is_ignored() { .unwrap() .unwrap(); - // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted + info!("Tenure A block: {}", &block_tenure_a.index_block_hash()); + + // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted. + // Stall the miner thread; only wait until the number of submitted commits increases. TEST_BROADCAST_STALL.lock().unwrap().replace(true); + TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(true); let blocks_before = mined_blocks.load(Ordering::SeqCst); let commits_before = commits_submitted.load(Ordering::SeqCst); - info!("Starting tenure B."); + + info!("Starting Tenure B."); + next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); Ok(commits_count > commits_before) }) .unwrap(); + signer_vote_if_needed( &btc_regtest_controller, &naka_conf, @@ -3808,13 +3825,15 @@ fn forked_tenure_is_ignored() { &signers, ); - info!("Commit op is submitted; unpause tenure B's block"); + info!("Commit op is submitted; unpause Tenure B's block"); - // Unpause the broadcast of Tenure B's block, do not submit commits. + // Unpause the broadcast of Tenure B's block, do not submit commits, and do not allow blocks to + // be processed test_skip_commit_op.0.lock().unwrap().replace(true); TEST_BROADCAST_STALL.lock().unwrap().replace(false); - // Wait for a stacks block to be broadcasted + // Wait for a stacks block to be broadcasted. + // However, it will not be processed. let start_time = Instant::now(); while mined_blocks.load(Ordering::SeqCst) <= blocks_before { assert!( @@ -3824,31 +3843,65 @@ fn forked_tenure_is_ignored() { thread::sleep(Duration::from_secs(1)); } - info!("Tenure B broadcasted a block. Issue the next bitcon block and unstall block commits."); - let block_tenure_b = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + sleep_ms(1000); + + info!("Tenure B broadcasted but did not process a block. Issue the next bitcon block and unstall block commits."); + + // the block will be stored, not processed, so load it out of staging + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("Failed to get sortition tip"); + + let block_tenure_b = chainstate + .nakamoto_blocks_db() + .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash) .unwrap() + .get(0) + .cloned() .unwrap(); + let blocks = test_observer::get_mined_nakamoto_blocks(); let block_b = blocks.last().unwrap(); + info!("Tenure B tip block: {}", &block_tenure_b.block_id()); + info!("Tenure B last block: {}", &block_b.block_id); - info!("Starting tenure C."); - // Submit a block commit op for tenure C + // Block B was built atop block A + assert_eq!( + block_tenure_b.header.chain_length, + block_tenure_a.stacks_block_height + 1 + ); + + info!("Starting Tenure C."); + + // Submit a block commit op for tenure C. + // It should also build on block A, since the node has paused processing of block B. let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); next_block_and(&mut btc_regtest_controller, 60, || { test_skip_commit_op.0.lock().unwrap().replace(false); + TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false); let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); Ok(commits_count > commits_before && blocks_count > blocks_before) }) .unwrap(); + // allow blocks B and C to be processed + sleep_ms(1000); + info!("Tenure C produced a block!"); let block_tenure_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); let blocks = test_observer::get_mined_nakamoto_blocks(); let block_c = blocks.last().unwrap(); + info!("Tenure C tip block: {}", &block_tenure_c.index_block_hash()); + info!("Tenure C last block: {}", &block_c.block_id); + + // Block C was built AFTER Block B was built, but BEFORE it was broadcasted (processed), so it should be built off of Block A + assert_eq!( + block_tenure_c.stacks_block_height, + block_tenure_a.stacks_block_height + 1 + ); // Now let's produce a second block for tenure C and ensure it builds off of block C. let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -3869,6 +3922,9 @@ fn forked_tenure_is_ignored() { thread::sleep(Duration::from_secs(1)); } + // give C's second block a moment to process + sleep_ms(1000); + info!("Tenure C produced a second block!"); let block_2_tenure_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -3877,6 +3933,12 @@ fn forked_tenure_is_ignored() { let blocks = test_observer::get_mined_nakamoto_blocks(); let block_2_c = blocks.last().unwrap(); + info!( + "Tenure C tip block: {}", + &block_2_tenure_c.index_block_hash() + ); + info!("Tenure C last block: {}", &block_2_c.block_id); + info!("Starting tenure D."); // Submit a block commit op for tenure D and mine a stacks block let commits_before = commits_submitted.load(Ordering::SeqCst); @@ -3888,18 +3950,25 @@ fn forked_tenure_is_ignored() { }) .unwrap(); + // give tenure D's block a moment to process + sleep_ms(1000); + let block_tenure_d = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); let blocks = test_observer::get_mined_nakamoto_blocks(); let block_d = blocks.last().unwrap(); - assert_ne!(block_tenure_b, block_tenure_a); - assert_ne!(block_tenure_b, block_tenure_c); + + info!("Tenure D tip block: {}", block_tenure_d.index_block_hash()); + info!("Tenure D last block: {}", block_d.block_id); + + assert_ne!(block_tenure_b.block_id(), block_tenure_a.index_block_hash()); + assert_ne!(block_tenure_b.block_id(), block_tenure_c.index_block_hash()); assert_ne!(block_tenure_c, block_tenure_a); // Block B was built atop block A assert_eq!( - block_tenure_b.stacks_block_height, + block_tenure_b.header.chain_length, block_tenure_a.stacks_block_height + 1 ); assert_eq!( From 7b483f5e60cb1b36ccc0e07010ca4b339d4ef32b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 11:06:24 -0400 Subject: [PATCH 0697/1400] chore: cargo fmt --- .../stacks-node/src/nakamoto_node/miner.rs | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 0525701bfe..43a5c51040 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -349,22 +349,22 @@ impl BlockMinerThread { &mut attempts, ) { Ok(x) => x, - Err(e) => match e { - NakamotoNodeError::StacksTipChanged => { - info!("Stacks tip changed while waiting for signatures"); - return Err(e); - } - NakamotoNodeError::BurnchainTipChanged => { - info!("Burnchain tip changed while waiting for signatures"); - return Err(e); - } - _ => { - error!( - "Error while gathering signatures: {e:?}. Will try mining again." - ); - continue; + Err(e) => { + match e { + NakamotoNodeError::StacksTipChanged => { + info!("Stacks tip changed while waiting for signatures"); + return Err(e); + } + NakamotoNodeError::BurnchainTipChanged => { + info!("Burnchain tip changed while waiting for signatures"); + return Err(e); + } + _ => { + error!("Error while gathering signatures: {e:?}. Will try mining again."); + continue; + } } - }, + } }; new_block.header.signer_signature = signer_signature; From 1ad9e5e383d40f8a8461d7ecca6f8de24ccb9bf8 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 8 Aug 2024 18:53:53 +0300 Subject: [PATCH 0698/1400] add `||true` so it runs all packages, not just the first one found --- contrib/tools/local-mutation-testing.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/contrib/tools/local-mutation-testing.sh b/contrib/tools/local-mutation-testing.sh index a3d563682c..80f3e16e3f 100755 --- a/contrib/tools/local-mutation-testing.sh +++ b/contrib/tools/local-mutation-testing.sh @@ -56,7 +56,7 @@ run_mutants() { if [ ! -f "$mutant_file" ]; then echo "No mutants found for $package" - return + return 0 fi local regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "$mutant_file" | paste -sd'|' -) @@ -70,15 +70,17 @@ run_mutants() { --output "$output_dir" \ --test-tool=nextest \ --package "$package" \ - -- --all-targets --test-threads 1 + -- --all-targets --test-threads 1 || true echo $? > "${output_dir}/exit_code.txt" else echo "Skipping $package, only $mutant_count mutants (threshold: $threshold)" fi + + return 0 } # Run mutants for each wanted package -run_mutants "stacks-signer" 500 "./stacks-signer_mutants" -run_mutants "stacks-node" 540 "./stacks-node_mutants" -run_mutants "stackslib" 72 "./stackslib_mutants" +run_mutants "stacks-signer" 500 "./stacks-signer_mutants" || true +run_mutants "stacks-node" 540 "./stacks-node_mutants" || true +run_mutants "stackslib" 72 "./stackslib_mutants" || true From 9d137db19ebb5ffa8107fbace17c0253d2655067 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 8 Aug 2024 18:57:02 +0300 Subject: [PATCH 0699/1400] rust backtrace and bitcoind_test added --- contrib/tools/local-mutation-testing.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/tools/local-mutation-testing.sh b/contrib/tools/local-mutation-testing.sh index 80f3e16e3f..41592b5030 100755 --- a/contrib/tools/local-mutation-testing.sh +++ b/contrib/tools/local-mutation-testing.sh @@ -64,7 +64,7 @@ run_mutants() { if [ "$mutant_count" -gt "$threshold" ]; then echo "Running mutants for $package ($mutant_count mutants)" - cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + RUST_BACKTRACE=1 BITCOIND_TEST=1 cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ -F "$regex_pattern" \ -E ": replace .{1,2} with .{1,2} in " \ --output "$output_dir" \ From a68c4c951a1fbe9df5220838798c9029412ea980 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 12:50:19 -0400 Subject: [PATCH 0700/1400] fix: include broadcast_signed_blocks --- stacks-signer/src/client/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 71720a015b..ba68976d90 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -567,6 +567,7 @@ pub(crate) mod tests { db_path: config.db_path.clone(), first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, block_proposal_timeout: config.block_proposal_timeout, + broadcast_signed_blocks: true } } From f129669465da76626c57f4045b5d39fa09610b58 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 8 Aug 2024 20:31:53 +0300 Subject: [PATCH 0701/1400] add example to run one mutant --- docs/mutation-testing.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/mutation-testing.md b/docs/mutation-testing.md index 45110ce7aa..e75ba75bdf 100644 --- a/docs/mutation-testing.md +++ b/docs/mutation-testing.md @@ -108,3 +108,20 @@ cargo install --version 24.7.1 cargo-mutants --locked --test-tool=nextest \ -- --all-targets --test-threads 1 ``` + +## How to run one specific mutant to test it + +Example of output which had a missing mutant +```sh +MISSED stacks-signer/src/runloop.rs:424:9: replace >::run_one_pass -> Option> with None in 3.0s build + 9.3s test +``` + +Example of fix for it +```sh +RUST_BACKTRACE=1 BITCOIND_TEST=1 cargo mutants -vV -F "replace process_stackerdb_event" -E ": replace >::run_one_pass -> Option> with None in " --test-tool=nextest -- --run-ignored all --fail-fast --test-threads 1 +``` + +General command to run +```sh +RUST_BACKTRACE=1 BITCOIND_TEST=1 cargo mutants -vV -F "replace process_stackerdb_event" -E ": replace [modify this] with [modify this] in " --test-tool=nextest -- --run-ignored all --fail-fast --test-threads 1 +``` From 151878bdfd8cfb3ae50ce98e0432754b8b8a993e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 8 Aug 2024 13:50:56 -0400 Subject: [PATCH 0702/1400] fix: cargo fmt --- stacks-signer/src/client/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index ba68976d90..e62fff0d5e 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -567,7 +567,7 @@ pub(crate) mod tests { db_path: config.db_path.clone(), first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, block_proposal_timeout: config.block_proposal_timeout, - broadcast_signed_blocks: true + broadcast_signed_blocks: true, } } From 5715f6796ab8457831752af510bf88a076afbd0d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 8 Aug 2024 14:06:26 -0500 Subject: [PATCH 0703/1400] fix: a handful of issues causing timing-related test failures in CI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * make `not_available_try_again` error in `GetStackers`, and make it transient in the signer binary * make signer binary timeout on retries in client * update signer outer runloop to differentiate between 'not in signer set' and 'have not loaded info yet' * update signer outer runloop to handle errors and non-presence differently in the signer config refresh * update signer outer runloop to perform signer config refresh on the current cycle (if not loaded yet) and on the next cycle (if in prepare phase for the next cycle). This was causing an issue on exactly the first cycle of Nakamoto, because the signer set cannot be loaded for the first cycle until after the prepare phase * update the signer outer runloop to check the node’s block height on event receipt as well * update the testing harnesses to wait and check more appropriately for status checks from signers, etc. --- libsigner/src/runloop.rs | 5 +- stacks-signer/src/client/mod.rs | 3 + stacks-signer/src/client/stacks_client.rs | 38 ++- stacks-signer/src/lib.rs | 2 - stacks-signer/src/runloop.rs | 250 ++++++++++++------ stacks-signer/src/v0/signer.rs | 4 - stacks-signer/src/v1/signer.rs | 22 +- stackslib/src/net/api/getstackers.rs | 51 +++- .../src/nakamoto_node/sign_coordinator.rs | 3 +- .../src/tests/nakamoto_integrations.rs | 139 ++++++---- testnet/stacks-node/src/tests/signer/mod.rs | 127 ++++++--- testnet/stacks-node/src/tests/signer/v0.rs | 75 ++---- 12 files changed, 450 insertions(+), 269 deletions(-) diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index e548db89e3..bf786888c1 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -246,13 +246,14 @@ impl< let (event_send, event_recv) = channel(); event_receiver.add_consumer(event_send); + let bind_port = bind_addr.port(); event_receiver.bind(bind_addr)?; let stop_signaler = event_receiver.get_stop_signaler()?; let mut ret_stop_signaler = event_receiver.get_stop_signaler()?; // start a thread for the event receiver let event_thread = thread::Builder::new() - .name("event_receiver".to_string()) + .name(format!("event_receiver:{bind_port}")) .stack_size(THREAD_STACK_SIZE) .spawn(move || event_receiver.main_loop()) .map_err(|e| { @@ -262,7 +263,7 @@ impl< // start receiving events and doing stuff with them let runloop_thread = thread::Builder::new() - .name(format!("signer_runloop:{}", bind_addr.port())) + .name(format!("signer_runloop:{bind_port}")) .stack_size(THREAD_STACK_SIZE) .spawn(move || { signer_loop.main_loop(event_recv, command_receiver, result_sender, stop_signaler) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 71720a015b..d2afbeb175 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -34,6 +34,8 @@ use stacks_common::debug; const BACKOFF_INITIAL_INTERVAL: u64 = 128; /// Backoff timer max interval in milliseconds const BACKOFF_MAX_INTERVAL: u64 = 16384; +/// Backoff timer max elapsed seconds +const BACKOFF_MAX_ELAPSED: u64 = 5; #[derive(thiserror::Error, Debug)] /// Client error type @@ -109,6 +111,7 @@ where let backoff_timer = backoff::ExponentialBackoffBuilder::new() .with_initial_interval(Duration::from_millis(BACKOFF_INITIAL_INTERVAL)) .with_max_interval(Duration::from_millis(BACKOFF_MAX_INTERVAL)) + .with_max_elapsed_time(Some(Duration::from_secs(BACKOFF_MAX_ELAPSED))) .build(); backoff::retry_notify(backoff_timer, request_fn, notify).map_err(|_| ClientError::RetryTimeout) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index b6337364db..e41485ea40 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -44,6 +44,7 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use libsigner::v0::messages::PeerInfo; use reqwest::header::AUTHORIZATION; +use serde::Deserialize; use serde_json::json; use slog::{slog_debug, slog_warn}; use stacks_common::codec::StacksMessageCodec; @@ -80,6 +81,12 @@ pub struct StacksClient { auth_password: String, } +#[derive(Deserialize)] +struct GetStackersErrorResp { + err_type: String, + err_msg: String, +} + impl From<&GlobalConfig> for StacksClient { fn from(config: &GlobalConfig) -> Self { Self { @@ -514,23 +521,38 @@ impl StacksClient { &self, reward_cycle: u64, ) -> Result>, ClientError> { - debug!("Getting reward set for reward cycle {reward_cycle}..."); let timer = crate::monitoring::new_rpc_call_timer( &self.reward_set_path(reward_cycle), &self.http_origin, ); let send_request = || { - self.stacks_node_client + let response = self + .stacks_node_client .get(self.reward_set_path(reward_cycle)) .send() - .map_err(backoff::Error::transient) + .map_err(|e| backoff::Error::transient(e.into()))?; + let status = response.status(); + if status.is_success() { + return response + .json() + .map_err(|e| backoff::Error::permanent(e.into())); + } + let error_data = response.json::().map_err(|e| { + warn!("Failed to parse the GetStackers error response: {e}"); + backoff::Error::permanent(e.into()) + })?; + if error_data.err_type == "not_available_try_again" { + return Err(backoff::Error::transient(ClientError::NoSortitionOnChain)); + } else { + warn!("Got error response ({status}): {}", error_data.err_msg); + return Err(backoff::Error::permanent(ClientError::RequestFailure( + status, + ))); + } }; - let response = retry_with_exponential_backoff(send_request)?; + let stackers_response = + retry_with_exponential_backoff::<_, ClientError, GetStackersResponse>(send_request)?; timer.stop_and_record(); - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - let stackers_response = response.json::()?; Ok(stackers_response.stacker_set.signers) } diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 13b14bd358..2cbdc579c9 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -61,8 +61,6 @@ use crate::runloop::{RunLoop, RunLoopCommand}; pub trait Signer: Debug + Display { /// Create a new `Signer` instance fn new(config: SignerConfig) -> Self; - /// Update the `Signer` instance's with the next reward cycle data `SignerConfig` - fn update_signer(&mut self, next_signer_config: &SignerConfig); /// Get the reward cycle of the signer fn reward_cycle(&self) -> u64; /// Process an event diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 3e2ff53438..cd8bf5972d 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -138,6 +138,58 @@ impl RewardCycleInfo { } } +/// The configuration state for a reward cycle. +/// Allows us to track if we've registered a signer for a cycle or not +/// and to differentiate between being unregistered and simply not configured +pub enum ConfiguredSigner +where + Signer: SignerTrait, + T: StacksMessageCodec + Clone + Send + Debug, +{ + /// Signer is registered for the cycle and ready to process messages + RegisteredSigner(Signer), + /// The signer runloop isn't registered for this cycle (i.e., we've checked the + /// the signer set and we're not in it) + NotRegistered { + /// the cycle number we're not registered for + cycle: u64, + /// Phantom data for the message codec + _phantom_state: std::marker::PhantomData, + }, +} + +impl, T: StacksMessageCodec + Clone + Send + Debug> std::fmt::Display + for ConfiguredSigner +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::RegisteredSigner(s) => write!(f, "{s}"), + Self::NotRegistered { cycle, .. } => write!(f, "NotRegistered in Cycle #{cycle}"), + } + } +} + +impl, T: StacksMessageCodec + Clone + Send + Debug> + ConfiguredSigner +{ + /// Create a `NotRegistered` instance of the enum (so that callers do not need + /// to supply phantom_state data). + pub fn not_registered(cycle: u64) -> Self { + Self::NotRegistered { + cycle, + _phantom_state: std::marker::PhantomData, + } + } + + /// The reward cycle this signer is configured for + pub fn reward_cycle(&self) -> u64 { + match self { + ConfiguredSigner::RegisteredSigner(s) => s.reward_cycle(), + ConfiguredSigner::NotRegistered { cycle, .. } => *cycle, + } + } +} + /// The runloop for the stacks signer pub struct RunLoop where @@ -150,7 +202,7 @@ where pub stacks_client: StacksClient, /// The internal signer for an odd or even reward cycle /// Keyed by reward cycle % 2 - pub stacks_signers: HashMap, + pub stacks_signers: HashMap>, /// The state of the runloop pub state: State, /// The commands received thus far @@ -159,8 +211,6 @@ where pub current_reward_cycle_info: Option, /// Cache sortitin data from `stacks-node` pub sortition_state: Option, - /// Phantom data for the message codec - _phantom_data: std::marker::PhantomData, } impl, T: StacksMessageCodec + Clone + Send + Debug> RunLoop { @@ -175,7 +225,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo commands: VecDeque::new(), current_reward_cycle_info: None, sortition_state: None, - _phantom_data: std::marker::PhantomData, } } /// Get the registered signers for a specific reward cycle @@ -222,25 +271,40 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo Ok(signer_slot_ids) } /// Get a signer configuration for a specific reward cycle from the stacks node - fn get_signer_config(&mut self, reward_cycle: u64) -> Option { + fn get_signer_config( + &mut self, + reward_cycle: u64, + ) -> Result, ClientError> { // We can only register for a reward cycle if a reward set exists. - let signer_entries = self.get_parsed_reward_set(reward_cycle).ok()??; - let signer_slot_ids = self - .get_parsed_signer_slots(&self.stacks_client, reward_cycle) - .ok()?; + let signer_entries = match self.get_parsed_reward_set(reward_cycle) { + Ok(Some(x)) => x, + Ok(None) => return Ok(None), + Err(e) => { + warn!("Error while fetching reward set {reward_cycle}: {e:?}"); + return Err(e); + } + }; + let signer_slot_ids = match self.get_parsed_signer_slots(&self.stacks_client, reward_cycle) + { + Ok(x) => x, + Err(e) => { + warn!("Error while fetching stackerdb slots {reward_cycle}: {e:?}"); + return Err(e); + } + }; let current_addr = self.stacks_client.get_signer_address(); let Some(signer_slot_id) = signer_slot_ids.get(current_addr) else { warn!( "Signer {current_addr} was not found in stacker db. Must not be registered for this reward cycle {reward_cycle}." ); - return None; + return Ok(None); }; let Some(signer_id) = signer_entries.signer_ids.get(current_addr) else { warn!( "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}." ); - return None; + return Ok(None); }; info!( "Signer #{signer_id} ({current_addr}) is registered for reward cycle {reward_cycle}." @@ -250,7 +314,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo .get(signer_id) .cloned() .unwrap_or_default(); - Some(SignerConfig { + Ok(Some(SignerConfig { reward_cycle, signer_id: *signer_id, signer_slot_id: *signer_slot_id, @@ -271,32 +335,30 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, - }) + })) } /// Refresh signer configuration for a specific reward cycle fn refresh_signer_config(&mut self, reward_cycle: u64) { let reward_index = reward_cycle % 2; - if let Some(new_signer_config) = self.get_signer_config(reward_cycle) { - let signer_id = new_signer_config.signer_id; - debug!("Signer is registered for reward cycle {reward_cycle} as signer #{signer_id}. Initializing signer state."); - if reward_cycle != 0 { - let prior_reward_cycle = reward_cycle.saturating_sub(1); - let prior_reward_set = prior_reward_cycle % 2; - if let Some(signer) = self.stacks_signers.get_mut(&prior_reward_set) { - if signer.reward_cycle() == prior_reward_cycle { - // The signers have been calculated for the next reward cycle. Update the current one - debug!("{signer}: Next reward cycle ({reward_cycle}) signer set calculated. Reconfiguring current reward cycle signer."); - signer.update_signer(&new_signer_config); - } - } + let new_signer_config = match self.get_signer_config(reward_cycle) { + Ok(Some(new_signer_config)) => { + let signer_id = new_signer_config.signer_id; + let new_signer = Signer::new(new_signer_config); + info!("{new_signer} Signer is registered for reward cycle {reward_cycle} as signer #{signer_id}. Initialized signer state."); + ConfiguredSigner::RegisteredSigner(new_signer) } - let new_signer = Signer::new(new_signer_config); - info!("{new_signer} initialized."); - self.stacks_signers.insert(reward_index, new_signer); - } else { - warn!("Signer is not registered for reward cycle {reward_cycle}. Waiting for confirmed registration..."); - } + Ok(None) => { + warn!("Signer is not registered for reward cycle {reward_cycle}"); + ConfiguredSigner::not_registered(reward_cycle) + } + Err(e) => { + warn!("Failed to get the reward set info: {e}. Will try again later."); + return; + } + }; + + self.stacks_signers.insert(reward_index, new_signer_config); } fn initialize_runloop(&mut self) -> Result<(), ClientError> { @@ -322,7 +384,11 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo Ok(()) } - fn refresh_runloop(&mut self, current_burn_block_height: u64) -> Result<(), ClientError> { + fn refresh_runloop(&mut self, ev_burn_block_height: u64) -> Result<(), ClientError> { + let current_burn_block_height = std::cmp::max( + self.stacks_client.get_peer_info()?.burn_block_height, + ev_burn_block_height, + ); let reward_cycle_info = self .current_reward_cycle_info .as_mut() @@ -332,48 +398,44 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo // First ensure we refresh our view of the current reward cycle information if block_reward_cycle != current_reward_cycle { - let new_reward_cycle_info = retry_with_exponential_backoff(|| { - let info = self - .stacks_client - .get_current_reward_cycle_info() - .map_err(backoff::Error::transient)?; - if info.reward_cycle < block_reward_cycle { - // If the stacks-node is still processing the burn block, the /v2/pox endpoint - // may return the previous reward cycle. In this case, we should retry. - return Err(backoff::Error::transient(ClientError::InvalidResponse( - format!("Received reward cycle ({}) does not match the expected reward cycle ({}) for block {}.", - info.reward_cycle, - block_reward_cycle, - current_burn_block_height - ), - ))); - } - Ok(info) - })?; + let new_reward_cycle_info = RewardCycleInfo { + reward_cycle: block_reward_cycle, + reward_cycle_length: reward_cycle_info.reward_cycle_length, + prepare_phase_block_length: reward_cycle_info.prepare_phase_block_length, + first_burnchain_block_height: reward_cycle_info.first_burnchain_block_height, + last_burnchain_block_height: current_burn_block_height, + }; *reward_cycle_info = new_reward_cycle_info; } + let reward_cycle_before_refresh = current_reward_cycle; let current_reward_cycle = reward_cycle_info.reward_cycle; - // We should only attempt to refresh the signer if we are not configured for the next reward cycle yet and we received a new burn block for its prepare phase - if reward_cycle_info.is_in_next_prepare_phase(current_burn_block_height) { - let next_reward_cycle = current_reward_cycle.saturating_add(1); - if self - .stacks_signers - .get(&(next_reward_cycle % 2)) - .map(|signer| signer.reward_cycle() != next_reward_cycle) - .unwrap_or(true) - { - info!("Received a new burnchain block height ({current_burn_block_height}) in the prepare phase of the next reward cycle ({next_reward_cycle}). Checking for signer registration..."); + let is_in_next_prepare_phase = + reward_cycle_info.is_in_next_prepare_phase(current_burn_block_height); + let next_reward_cycle = current_reward_cycle.saturating_add(1); + + info!( + "Refreshing runloop with new burn block event"; + "latest_node_burn_ht" => current_burn_block_height, + "event_ht" => ev_burn_block_height, + "reward_cycle_before_refresh" => reward_cycle_before_refresh, + "current_reward_cycle" => current_reward_cycle, + "configured_for_current" => Self::is_configured_for_cycle(&self.stacks_signers, current_reward_cycle), + "configured_for_next" => Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle), + "is_in_next_prepare_phase" => is_in_next_prepare_phase, + ); + + // Check if we need to refresh the signers: + // need to refresh the current signer if we are not configured for the current reward cycle + // need to refresh the next signer if we're not configured for the next reward cycle, and we're in the prepare phase + if !Self::is_configured_for_cycle(&self.stacks_signers, current_reward_cycle) { + self.refresh_signer_config(current_reward_cycle); + } + if is_in_next_prepare_phase { + if !Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle) { self.refresh_signer_config(next_reward_cycle); } - } else { - info!("Received a new burnchain block height ({current_burn_block_height}) but not in prepare phase."; - "reward_cycle" => reward_cycle_info.reward_cycle, - "reward_cycle_length" => reward_cycle_info.reward_cycle_length, - "prepare_phase_block_length" => reward_cycle_info.prepare_phase_block_length, - "first_burnchain_block_height" => reward_cycle_info.first_burnchain_block_height, - "last_burnchain_block_height" => reward_cycle_info.last_burnchain_block_height, - ); } + self.cleanup_stale_signers(current_reward_cycle); if self.stacks_signers.is_empty() { self.state = State::NoRegisteredSigners; @@ -383,6 +445,16 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo Ok(()) } + fn is_configured_for_cycle( + stacks_signers: &HashMap>, + reward_cycle: u64, + ) -> bool { + let Some(signer) = stacks_signers.get(&(reward_cycle % 2)) else { + return false; + }; + signer.reward_cycle() == reward_cycle + } + fn cleanup_stale_signers(&mut self, current_reward_cycle: u64) { let mut to_delete = Vec::new(); for (idx, signer) in &mut self.stacks_signers { @@ -390,7 +462,13 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo let next_reward_cycle = reward_cycle.wrapping_add(1); let stale = match next_reward_cycle.cmp(¤t_reward_cycle) { std::cmp::Ordering::Less => true, // We are more than one reward cycle behind, so we are stale - std::cmp::Ordering::Equal => !signer.has_pending_blocks(), // We are the next reward cycle, so check if we have any pending blocks to process + std::cmp::Ordering::Equal => { + // We are the next reward cycle, so check if we were registered and have any pending blocks to process + match signer { + ConfiguredSigner::RegisteredSigner(signer) => !signer.has_pending_blocks(), + _ => true, + } + } std::cmp::Ordering::Greater => false, // We are the current reward cycle, so we are not stale }; if stale { @@ -425,6 +503,19 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> "Running one pass for the signer. state={:?}, cmd={cmd:?}, event={event:?}", self.state ); + // This is the only event that we respond to from the outer signer runloop + if let Some(SignerEvent::StatusCheck) = event { + info!("Signer status check requested: {:?}.", self.state); + if let Err(e) = res.send(vec![StateInfo { + runloop_state: self.state, + reward_cycle_info: self.current_reward_cycle_info, + } + .into()]) + { + error!("Failed to send status check result: {e}."); + } + } + if let Some(cmd) = cmd { self.commands.push_back(cmd); } @@ -447,7 +538,12 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> .as_ref() .expect("FATAL: cannot be an initialized signer with no reward cycle info.") .reward_cycle; - for signer in self.stacks_signers.values_mut() { + for configured_signer in self.stacks_signers.values_mut() { + let ConfiguredSigner::RegisteredSigner(ref mut signer) = configured_signer else { + debug!("{configured_signer}: Not configured for cycle, ignoring events for cycle"); + continue; + }; + signer.process_event( &self.stacks_client, &mut self.sortition_state, @@ -466,18 +562,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> let next_reward_cycle = current_reward_cycle.saturating_add(1); info!("Signer is not registered for the current reward cycle ({current_reward_cycle}). Reward set is not yet determined or signer is not registered for the upcoming reward cycle ({next_reward_cycle})."); } - // This is the only event that we respond to from the outer signer runloop - if let Some(SignerEvent::StatusCheck) = event { - info!("Signer status check requested: {:?}.", self.state); - if let Err(e) = res.send(vec![StateInfo { - runloop_state: self.state, - reward_cycle_info: self.current_reward_cycle_info, - } - .into()]) - { - error!("Failed to send status check result: {e}."); - } - } None } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index d81fbbd814..c32af06f3f 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -71,10 +71,6 @@ impl SignerTrait for Signer { Self::from(config) } - /// Refresh the next signer data from the given configuration data - fn update_signer(&mut self, _new_signer_config: &SignerConfig) { - // do nothing - } /// Return the reward cycle of the signer fn reward_cycle(&self) -> u64 { self.reward_cycle diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 18e31946c0..8212586beb 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -143,16 +143,6 @@ impl SignerTrait for Signer { Self::from(config) } - /// Refresh the next signer data from the given configuration data - fn update_signer(&mut self, new_signer_config: &SignerConfig) { - self.next_signer_addresses = new_signer_config - .signer_entries - .signer_ids - .keys() - .copied() - .collect(); - self.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); - } /// Return the reward cycle of the signer fn reward_cycle(&self) -> u64 { self.reward_cycle @@ -356,6 +346,18 @@ impl Signer { } } + /// Refresh the next signer data from the given configuration data + #[allow(dead_code)] + fn update_signer(&mut self, new_signer_config: &SignerConfig) { + self.next_signer_addresses = new_signer_config + .signer_entries + .signer_ids + .keys() + .copied() + .collect(); + self.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); + } + /// Get the current coordinator for executing DKG /// This will always use the coordinator selector to determine the coordinator fn get_coordinator_dkg(&self) -> (u32, PublicKey) { diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 4546b66fc9..afcea6b551 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -51,6 +51,35 @@ pub struct GetStackersResponse { pub stacker_set: RewardSet, } +pub enum GetStackersErrors { + NotAvailableYet(crate::chainstate::coordinator::Error), + Other(String), +} + +impl GetStackersErrors { + pub fn error_type_string(&self) -> &'static str { + match self { + GetStackersErrors::NotAvailableYet(_) => "not_available_try_again", + GetStackersErrors::Other(_) => "other", + } + } +} + +impl From<&str> for GetStackersErrors { + fn from(value: &str) -> Self { + GetStackersErrors::Other(value.into()) + } +} + +impl std::fmt::Display for GetStackersErrors { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GetStackersErrors::NotAvailableYet(e) => write!(f, "Could not read reward set. Prepare phase may not have started for this cycle yet. Err = {e:?}"), + GetStackersErrors::Other(msg) => write!(f, "{msg}") + } + } +} + impl GetStackersResponse { pub fn load( sortdb: &SortitionDB, @@ -58,7 +87,7 @@ impl GetStackersResponse { tip: &StacksBlockId, burnchain: &Burnchain, cycle_number: u64, - ) -> Result { + ) -> Result { let cycle_start_height = burnchain.reward_cycle_to_block_height(cycle_number); let pox_contract_name = burnchain @@ -74,16 +103,9 @@ impl GetStackersResponse { } let provider = OnChainRewardSetProvider::new(); - let stacker_set = provider.read_reward_set_nakamoto( - cycle_start_height, - chainstate, - burnchain, - sortdb, - tip, - true, - ).map_err( - |e| format!("Could not read reward set. Prepare phase may not have started for this cycle yet. Cycle = {cycle_number}, Err = {e:?}") - )?; + let stacker_set = provider + .read_reward_set_nakamoto(cycle_start_height, chainstate, burnchain, sortdb, tip, true) + .map_err(GetStackersErrors::NotAvailableYet)?; Ok(Self { stacker_set }) } @@ -173,10 +195,13 @@ impl RPCRequestHandler for GetStackersRequestHandler { let response = match stacker_response { Ok(response) => response, - Err(err_str) => { + Err(error) => { return StacksHttpResponse::new_error( &preamble, - &HttpBadRequest::new_json(json!({"response": "error", "err_msg": err_str})), + &HttpBadRequest::new_json(json!({ + "response": "error", + "err_type": error.error_type_string(), + "err_msg": error.to_string()})), ) .try_into_contents() .map_err(NetError::from) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index db442ac46b..914c2efb1a 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -775,9 +775,10 @@ impl SignCoordinator { let block_sighash = block.header.signer_signature_hash(); if block_sighash != response_hash { warn!( - "Processed signature but didn't validate over the expected block. Returning error."; + "Processed signature for a different block. Will try to continue."; "signature" => %signature, "block_signer_signature_hash" => %block_sighash, + "response_hash" => %response_hash, "slot_id" => slot_id, "reward_cycle_id" => reward_cycle_id, "response_hash" => %response_hash diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 13de8a350c..d07f6b91d6 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -673,54 +673,95 @@ pub fn next_block_and_mine_commit( coord_channels: &Arc>, commits_submitted: &Arc, ) -> Result<(), String> { - let commits_submitted = commits_submitted.clone(); - let blocks_processed_before = coord_channels - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - let commits_before = commits_submitted.load(Ordering::SeqCst); - let mut block_processed_time: Option = None; - let mut commit_sent_time: Option = None; + next_block_and_wait_for_commits( + btc_controller, + timeout_secs, + &[coord_channels], + &[commits_submitted], + ) +} + +/// Mine a bitcoin block, and wait until: +/// (1) a new block has been processed by the coordinator +/// (2) 2 block commits have been issued ** or ** more than 10 seconds have +/// passed since (1) occurred +/// This waits for this check to pass on *all* supplied channels +pub fn next_block_and_wait_for_commits( + btc_controller: &mut BitcoinRegtestController, + timeout_secs: u64, + coord_channels: &[&Arc>], + commits_submitted: &[&Arc], +) -> Result<(), String> { + let commits_submitted: Vec<_> = commits_submitted.iter().cloned().collect(); + let blocks_processed_before: Vec<_> = coord_channels + .iter() + .map(|x| { + x.lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed() + }) + .collect(); + let commits_before: Vec<_> = commits_submitted + .iter() + .map(|x| x.load(Ordering::SeqCst)) + .collect(); + + let mut block_processed_time: Vec> = + (0..commits_before.len()).map(|_| None).collect(); + let mut commit_sent_time: Vec> = + (0..commits_before.len()).map(|_| None).collect(); next_block_and(btc_controller, timeout_secs, || { - let commits_sent = commits_submitted.load(Ordering::SeqCst); - let blocks_processed = coord_channels - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - let now = Instant::now(); - if blocks_processed > blocks_processed_before && block_processed_time.is_none() { - block_processed_time.replace(now); - } - if commits_sent > commits_before && commit_sent_time.is_none() { - commit_sent_time.replace(now); - } - if blocks_processed > blocks_processed_before { - let block_processed_time = block_processed_time - .as_ref() - .ok_or("TEST-ERROR: Processed time wasn't set")?; - if commits_sent <= commits_before { - return Ok(false); - } - let commit_sent_time = commit_sent_time - .as_ref() - .ok_or("TEST-ERROR: Processed time wasn't set")?; - // try to ensure the commit was sent after the block was processed - if commit_sent_time > block_processed_time { - return Ok(true); + for i in 0..commits_submitted.len() { + let commits_sent = commits_submitted[i].load(Ordering::SeqCst); + let blocks_processed = coord_channels[i] + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + let now = Instant::now(); + if blocks_processed > blocks_processed_before[i] && block_processed_time[i].is_none() { + block_processed_time[i].replace(now); } - // if two commits have been sent, one of them must have been after - if commits_sent >= commits_before + 2 { - return Ok(true); + if commits_sent > commits_before[i] && commit_sent_time[i].is_none() { + commit_sent_time[i].replace(now); } - // otherwise, just timeout if the commit was sent and its been long enough - // for a new commit pass to have occurred - if block_processed_time.elapsed() > Duration::from_secs(10) { - return Ok(true); + } + + for i in 0..commits_submitted.len() { + let blocks_processed = coord_channels[i] + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + let commits_sent = commits_submitted[i].load(Ordering::SeqCst); + + if blocks_processed > blocks_processed_before[i] { + let block_processed_time = block_processed_time[i] + .as_ref() + .ok_or("TEST-ERROR: Processed time wasn't set")?; + if commits_sent <= commits_before[i] { + return Ok(false); + } + let commit_sent_time = commit_sent_time[i] + .as_ref() + .ok_or("TEST-ERROR: Processed time wasn't set")?; + // try to ensure the commit was sent after the block was processed + if commit_sent_time > block_processed_time { + continue; + } + // if two commits have been sent, one of them must have been after + if commits_sent >= commits_before[i] + 2 { + continue; + } + // otherwise, just timeout if the commit was sent and its been long enough + // for a new commit pass to have occurred + if block_processed_time.elapsed() > Duration::from_secs(10) { + continue; + } + return Ok(false); + } else { + return Ok(false); } - Ok(false) - } else { - Ok(false) } + Ok(true) }) } @@ -1196,15 +1237,11 @@ pub fn boot_to_epoch_3_reward_set( btc_regtest_controller, num_stacking_cycles, ); - let epoch_3_reward_set_calculation = - btc_regtest_controller.get_headers_height().wrapping_add(1); - run_until_burnchain_height( - btc_regtest_controller, - &blocks_processed, - epoch_3_reward_set_calculation, - &naka_conf, + next_block_and_wait(btc_regtest_controller, &blocks_processed); + info!( + "Bootstrapped to Epoch 3.0 reward set calculation height: {}", + get_chain_info(naka_conf).burn_block_height ); - info!("Bootstrapped to Epoch 3.0 reward set calculation height: {epoch_3_reward_set_calculation}."); } /// Wait for a block commit, without producing a block diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 31afcf1300..fe7bf77104 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -15,6 +15,7 @@ mod v0; mod v1; +use std::collections::HashSet; // Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify @@ -51,17 +52,19 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum}; use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::runloop::{SignerResult, StateInfo}; +use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; use wsts::state_machine::PublicKeys; +use super::nakamoto_integrations::wait_for; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::neon::{Counters, TestFlag}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::{ - naka_neon_integration_conf, next_block_and_mine_commit, POX_4_DEFAULT_STACKER_BALANCE, + naka_neon_integration_conf, next_block_and_mine_commit, next_block_and_wait_for_commits, + POX_4_DEFAULT_STACKER_BALANCE, }; use crate::tests::neon_integrations::{ get_chain_info, next_block_and_wait, run_until_burnchain_height, test_observer, @@ -222,8 +225,12 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest) { + for signer_ix in 0..self.spawned_signers.len() { + if exclude.contains(&signer_ix) { + continue; + } + let port = 3000 + signer_ix; let endpoint = format!("http://localhost:{}", port); let path = format!("{endpoint}/status"); let client = reqwest::blocking::Client::new(); @@ -235,39 +242,78 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Vec { - debug!("Waiting for Status..."); - let now = std::time::Instant::now(); - let mut states = Vec::with_capacity(self.spawned_signers.len()); - for signer in self.spawned_signers.iter() { - let old_len = states.len(); - loop { - assert!( - now.elapsed() < timeout, - "Timed out waiting for state checks" - ); - let results = signer - .res_recv - .recv_timeout(timeout) - .expect("failed to recv state results"); - for result in results { - match result { - SignerResult::OperationResult(_operation) => { - panic!("Recieved an operation result."); - } - SignerResult::StatusCheck(state_info) => { - states.push(state_info); - } - } + pub fn wait_for_registered(&mut self, timeout_secs: u64) { + let mut finished_signers = HashSet::new(); + wait_for(timeout_secs, || { + self.send_status_request(&finished_signers); + thread::sleep(Duration::from_secs(1)); + let latest_states = self.get_states(&finished_signers); + for (ix, state) in latest_states.iter().enumerate() { + let Some(state) = state else { continue; }; + if state.runloop_state == State::RegisteredSigners { + finished_signers.insert(ix); + } else { + warn!("Signer #{ix} returned state = {:?}, will try to wait for a registered signers state from them.", state.runloop_state); + } + } + info!("Finished signers: {:?}", finished_signers.iter().collect::>()); + Ok(finished_signers.len() == self.spawned_signers.len()) + }).unwrap(); + } + + pub fn wait_for_cycle(&mut self, timeout_secs: u64, reward_cycle: u64) { + let mut finished_signers = HashSet::new(); + wait_for(timeout_secs, || { + self.send_status_request(&finished_signers); + thread::sleep(Duration::from_secs(1)); + let latest_states = self.get_states(&finished_signers); + for (ix, state) in latest_states.iter().enumerate() { + let Some(state) = state else { continue; }; + let Some(reward_cycle_info) = state.reward_cycle_info else { continue; }; + if reward_cycle_info.reward_cycle == reward_cycle { + finished_signers.insert(ix); + } else { + warn!("Signer #{ix} returned state = {:?}, will try to wait for a cycle = {} state from them.", state, reward_cycle); + } + } + info!("Finished signers: {:?}", finished_signers.iter().collect::>()); + Ok(finished_signers.len() == self.spawned_signers.len()) + }).unwrap(); + } + + /// Get status check results (if returned) from each signer without blocking + /// Returns Some() or None() for each signer, in order of `self.spawned_signers` + pub fn get_states(&mut self, exclude: &HashSet) -> Vec> { + let mut output = Vec::new(); + for (ix, signer) in self.spawned_signers.iter().enumerate() { + if exclude.contains(&ix) { + output.push(None); + continue; + } + let Ok(mut results) = signer.res_recv.try_recv() else { + debug!("Could not receive latest state from signer #{ix}"); + output.push(None); + continue; + }; + if results.len() > 1 { + warn!("Received multiple states from the signer receiver: this test function assumes it should only ever receive 1"); + panic!(); + } + let Some(result) = results.pop() else { + debug!("Could not receive latest state from signer #{ix}"); + output.push(None); + continue; + }; + match result { + SignerResult::OperationResult(_operation) => { + panic!("Recieved an operation result."); } - if states.len() > old_len { - break; + SignerResult::StatusCheck(state_info) => { + output.push(Some(state_info)); } } } - debug!("Finished waiting for state checks!"); - states + output } fn nmb_blocks_to_reward_set_calculation(&mut self) -> u64 { @@ -337,18 +383,21 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>], + commits_submitted: &[&Arc], + timeout: Duration, + ) { let blocks_len = test_observer::get_blocks().len(); let mined_block_time = Instant::now(); - next_block_and_mine_commit( + next_block_and_wait_for_commits( &mut self.running_nodes.btc_regtest_controller, timeout.as_secs(), - &self.running_nodes.coord_channel, - &commits_submitted, + coord_channels, + commits_submitted, ) .unwrap(); - let t_start = Instant::now(); while test_observer::get_blocks().len() <= blocks_len { assert!( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2ee2e417ee..92c1c78287 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -212,22 +212,7 @@ impl SignerTest { &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.blocks_processed, ); - let now = std::time::Instant::now(); - loop { - self.send_status_request(); - let states = self.wait_for_states(short_timeout); - if states - .iter() - .all(|state_info| state_info.runloop_state == State::RegisteredSigners) - { - break; - } - assert!( - now.elapsed() < short_timeout, - "Timed out waiting for signers to be registered" - ); - std::thread::sleep(Duration::from_secs(1)); - } + self.wait_for_registered(30); debug!("Signers initialized"); info!("Advancing to the first full Epoch 2.5 reward cycle boundary..."); @@ -255,7 +240,7 @@ impl SignerTest { &mut self.running_nodes.btc_regtest_controller, Some(self.num_stacking_cycles), ); - debug!("Waiting for signer set calculation."); + info!("Waiting for signer set calculation."); let mut reward_set_calculated = false; let short_timeout = Duration::from_secs(30); let now = std::time::Instant::now(); @@ -277,31 +262,16 @@ impl SignerTest { "Timed out waiting for reward set calculation" ); } - debug!("Signer set calculated"); + info!("Signer set calculated"); // Manually consume one more block to ensure signers refresh their state - debug!("Waiting for signers to initialize."); + info!("Waiting for signers to initialize."); next_block_and_wait( &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.blocks_processed, ); - let now = std::time::Instant::now(); - loop { - self.send_status_request(); - let states = self.wait_for_states(short_timeout); - if states - .iter() - .all(|state_info| state_info.runloop_state == State::RegisteredSigners) - { - break; - } - assert!( - now.elapsed() < short_timeout, - "Timed out waiting for signers to be registered" - ); - std::thread::sleep(Duration::from_secs(1)); - } - debug!("Singers initialized"); + self.wait_for_registered(30); + info!("Signers initialized"); self.run_until_epoch_3_boundary(); @@ -1244,6 +1214,11 @@ fn multiple_miners() { ); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + .. + } = run_loop_2.counters(); let _run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) @@ -1260,6 +1235,8 @@ fn multiple_miners() { // is that we keep track of how many tenures each miner produced, and once enough sortitions // have been produced such that each miner has produced 3 tenures, we stop and check the // results at the end + let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); @@ -1270,7 +1247,11 @@ fn multiple_miners() { if btc_blocks_mined > max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } - signer_test.mine_block_wait_on_processing(Duration::from_secs(30)); + signer_test.mine_block_wait_on_processing( + &[&rl1_coord_channels, &rl2_coord_channels], + &[&rl1_commits, &rl2_commits], + Duration::from_secs(30), + ); btc_blocks_mined += 1; let blocks = get_nakamoto_headers(&conf); // for this test, there should be one block per tenure @@ -1785,25 +1766,7 @@ fn end_of_tenure() { std::thread::sleep(Duration::from_millis(100)); } - let now = std::time::Instant::now(); - // Wait for the signer to process the burn blocks and fully enter the next reward cycle - loop { - signer_test.send_status_request(); - let states = signer_test.wait_for_states(short_timeout); - if states.iter().all(|state_info| { - state_info - .reward_cycle_info - .map(|info| info.reward_cycle == final_reward_cycle) - .unwrap_or(false) - }) { - break; - } - assert!( - now.elapsed() < short_timeout, - "Timed out waiting for signers to be in the next reward cycle" - ); - std::thread::sleep(Duration::from_millis(100)); - } + signer_test.wait_for_cycle(30, final_reward_cycle); info!("Block proposed and burn blocks consumed. Verifying that stacks block is still not processed"); From 5cc974e2ddce40d4fa25a8a30ac71572b71fc2d6 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 8 Aug 2024 22:12:34 +0300 Subject: [PATCH 0704/1400] adjust the code blocks with line breaks for readability --- contrib/tools/local-mutation-testing.sh | 3 ++- docs/mutation-testing.md | 23 +++++++++++++++++++++-- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/contrib/tools/local-mutation-testing.sh b/contrib/tools/local-mutation-testing.sh index 41592b5030..11da6810e5 100755 --- a/contrib/tools/local-mutation-testing.sh +++ b/contrib/tools/local-mutation-testing.sh @@ -64,7 +64,8 @@ run_mutants() { if [ "$mutant_count" -gt "$threshold" ]; then echo "Running mutants for $package ($mutant_count mutants)" - RUST_BACKTRACE=1 BITCOIND_TEST=1 cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ + RUST_BACKTRACE=1 BITCOIND_TEST=1 \ + cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ -F "$regex_pattern" \ -E ": replace .{1,2} with .{1,2} in " \ --output "$output_dir" \ diff --git a/docs/mutation-testing.md b/docs/mutation-testing.md index e75ba75bdf..85fcd89a7f 100644 --- a/docs/mutation-testing.md +++ b/docs/mutation-testing.md @@ -68,6 +68,7 @@ cargo install --version 24.7.1 cargo-mutants --locked ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stackslib.txt" | paste -sd'|' -) + RUST_BACKTRACE=1 BITCOIND_TEST=1 \ cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ -F "$regex_pattern" \ -E ": replace .{1,2} with .{1,2} in " \ @@ -79,6 +80,7 @@ cargo install --version 24.7.1 cargo-mutants --locked ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/testnet.txt" | paste -sd'|' -) + RUST_BACKTRACE=1 BITCOIND_TEST=1 \ cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ -F "$regex_pattern" \ -E ": replace .{1,2} with .{1,2} in " \ @@ -90,6 +92,7 @@ cargo install --version 24.7.1 cargo-mutants --locked ```sh regex_pattern=$(sed 's/[][()\.^$*+?{}|]/\\&/g' "mutants_by_package/stacks-signer.txt" | paste -sd'|' -) + RUST_BACKTRACE=1 BITCOIND_TEST=1 \ cargo mutants --timeout-multiplier 1.5 --no-shuffle -vV \ -F "$regex_pattern" \ -E ": replace .{1,2} with .{1,2} in " \ @@ -118,10 +121,26 @@ MISSED stacks-signer/src/runloop.rs:424:9: replace >::run_one_pass -> Option> with None in " --test-tool=nextest -- --run-ignored all --fail-fast --test-threads 1 +RUST_BACKTRACE=1 BITCOIND_TEST=1 \ +cargo mutants -vV \ + -F "replace process_stackerdb_event" \ + -E ": replace >::run_one_pass -> Option> with None in " \ + --test-tool=nextest \ + -- \ + --run-ignored all \ + --fail-fast \ + --test-threads 1 ``` General command to run ```sh -RUST_BACKTRACE=1 BITCOIND_TEST=1 cargo mutants -vV -F "replace process_stackerdb_event" -E ": replace [modify this] with [modify this] in " --test-tool=nextest -- --run-ignored all --fail-fast --test-threads 1 +RUST_BACKTRACE=1 BITCOIND_TEST=1 \ +cargo mutants -vV \ + -F "replace process_stackerdb_event" \ + -E ": replace [modify this] with [modify this] in " \ + --test-tool=nextest \ + -- \ + --run-ignored all \ + --fail-fast \ + --test-threads 1 ``` From 98d05a4b7d2c7b0111b76c0ec94b67851ddb47b7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 8 Aug 2024 16:00:01 -0500 Subject: [PATCH 0705/1400] test: add test for reloading signer config when reward set unavailable at start of prepare phase --- .github/workflows/bitcoin-tests.yml | 1 + .../src/tests/nakamoto_integrations.rs | 61 ++++++--- testnet/stacks-node/src/tests/signer/v0.rs | 118 +++++++++++++++++- 3 files changed, 163 insertions(+), 17 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index cd0fda6665..e14934558a 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -95,6 +95,7 @@ jobs: - tests::signer::v0::mock_sign_epoch_25 - tests::signer::v0::signer_set_rollover - tests::signer::v0::miner_forking + - tests::signer::v0::reloads_signer_set_in - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index d07f6b91d6..62afa03ac4 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1073,11 +1073,7 @@ fn signer_vote_if_needed( } } -/// -/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order -/// for pox-4 to activate -/// * `signer_pks` - must be the same size as `stacker_sks` -pub fn boot_to_epoch_3_reward_set_calculation_boundary( +pub fn setup_epoch_3_reward_set( naka_conf: &Config, blocks_processed: &Arc, stacker_sks: &[StacksPrivateKey], @@ -1099,9 +1095,6 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( ); let epoch_3_reward_cycle_boundary = epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); - let epoch_3_reward_set_calculation_boundary = epoch_3_reward_cycle_boundary - .saturating_sub(prepare_phase_len) - .wrapping_add(1); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); next_block_and_wait(btc_regtest_controller, &blocks_processed); next_block_and_wait(btc_regtest_controller, &blocks_processed); @@ -1115,13 +1108,13 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( .block_height_to_reward_cycle(block_height) .unwrap(); let lock_period: u128 = num_stacking_cycles.unwrap_or(12_u64).into(); - debug!("Test Cycle Info"; - "prepare_phase_len" => {prepare_phase_len}, - "reward_cycle_len" => {reward_cycle_len}, - "block_height" => {block_height}, - "reward_cycle" => {reward_cycle}, - "epoch_3_reward_cycle_boundary" => {epoch_3_reward_cycle_boundary}, - "epoch_3_start_height" => {epoch_3_start_height}, + info!("Test Cycle Info"; + "prepare_phase_len" => {prepare_phase_len}, + "reward_cycle_len" => {reward_cycle_len}, + "block_height" => {block_height}, + "reward_cycle" => {reward_cycle}, + "epoch_3_reward_cycle_boundary" => {epoch_3_reward_cycle_boundary}, + "epoch_3_start_height" => {epoch_3_start_height}, ); for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { let pox_addr = PoxAddress::from_legacy( @@ -1165,6 +1158,44 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( ); submit_tx(&http_origin, &stacking_tx); } +} + +/// +/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order +/// for pox-4 to activate +/// * `signer_pks` - must be the same size as `stacker_sks` +pub fn boot_to_epoch_3_reward_set_calculation_boundary( + naka_conf: &Config, + blocks_processed: &Arc, + stacker_sks: &[StacksPrivateKey], + signer_sks: &[StacksPrivateKey], + btc_regtest_controller: &mut BitcoinRegtestController, + num_stacking_cycles: Option, +) { + setup_epoch_3_reward_set( + naka_conf, + blocks_processed, + stacker_sks, + signer_sks, + btc_regtest_controller, + num_stacking_cycles, + ); + + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; + let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; + + let epoch_3_start_height = epoch_3.start_height; + assert!( + epoch_3_start_height > 0, + "Epoch 3.0 start height must be greater than 0" + ); + let epoch_3_reward_cycle_boundary = + epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); + let epoch_3_reward_set_calculation_boundary = epoch_3_reward_cycle_boundary + .saturating_sub(prepare_phase_len) + .saturating_add(1); run_until_burnchain_height( btc_regtest_controller, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 92c1c78287..f589416746 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -61,8 +61,9 @@ use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, wait_for, - POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, + next_block_and, setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, + POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -744,6 +745,119 @@ struct TenureForkingResult { mined_d: MinedNakamotoBlockEvent, } +#[test] +#[ignore] +/// Test to make sure that the signers are capable of reloading their reward set +/// if the stacks-node doesn't have it available at the first block of a prepare phase (e.g., if there was no block) +fn reloads_signer_set_in() { + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |_config| {}, + |_| {}, + &[], + ); + + setup_epoch_3_reward_set( + &signer_test.running_nodes.conf, + &signer_test.running_nodes.blocks_processed, + &signer_test.signer_stacks_private_keys, + &signer_test.signer_stacks_private_keys, + &mut signer_test.running_nodes.btc_regtest_controller, + Some(signer_test.num_stacking_cycles), + ); + + let naka_conf = &signer_test.running_nodes.conf; + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; + let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; + + let epoch_3_start_height = epoch_3.start_height; + assert!( + epoch_3_start_height > 0, + "Epoch 3.0 start height must be greater than 0" + ); + let epoch_3_reward_cycle_boundary = + epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); + let before_epoch_3_reward_set_calculation = + epoch_3_reward_cycle_boundary.saturating_sub(prepare_phase_len); + run_until_burnchain_height( + &mut signer_test.running_nodes.btc_regtest_controller, + &signer_test.running_nodes.blocks_processed, + before_epoch_3_reward_set_calculation, + naka_conf, + ); + + info!("Waiting for signer set calculation."); + let mut reward_set_calculated = false; + let short_timeout = Duration::from_secs(30); + let now = std::time::Instant::now(); + // Make sure the signer set is calculated before continuing or signers may not + // recognize that they are registered signers in the subsequent burn block event + let reward_cycle = signer_test.get_current_reward_cycle() + 1; + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + while !reward_set_calculated { + let reward_set = signer_test + .stacks_client + .get_reward_set_signers(reward_cycle) + .expect("Failed to check if reward set is calculated"); + reward_set_calculated = reward_set.is_some(); + if reward_set_calculated { + info!("Signer set: {:?}", reward_set.unwrap()); + } + std::thread::sleep(Duration::from_secs(1)); + assert!( + now.elapsed() < short_timeout, + "Timed out waiting for reward set calculation" + ); + } + info!("Signer set calculated"); + + // Manually consume one more block to ensure signers refresh their state + info!("Waiting for signers to initialize."); + next_block_and_wait( + &mut signer_test.running_nodes.btc_regtest_controller, + &signer_test.running_nodes.blocks_processed, + ); + signer_test.wait_for_registered(30); + info!("Signers initialized"); + + signer_test.run_until_epoch_3_boundary(); + + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + + info!("Waiting 1 burnchain block for miner VRF key confirmation"); + // Wait one block to confirm the VRF register, wait until a block commit is submitted + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count >= 1) + }, + ) + .unwrap(); + info!("Ready to mine Nakamoto blocks!"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + signer_test.shutdown(); +} + /// This test spins up a nakamoto-neon node. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches /// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop From 7d82eefde8d51df91a839c17c162ea8db5c5c164 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Thu, 8 Aug 2024 14:41:36 -0700 Subject: [PATCH 0706/1400] Update signer changelog --- stacks-signer/CHANGELOG.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 6b28b15e8f..1476d56ad0 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -7,6 +7,25 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +### Added + +### Changed + +## [2.5.0.0.5.2-rc1] + +### Added + +- Signer set handoff integration test (#5037) +- Add mock signing (#5020) +- Add versioning info set at build-time (#5016) + +### Changed + +- Fix out of sync `RPCPeerInfo` with stacks-node (#5033, #5014, #4999) +- Logging Improvements (#5025) +- Timeout empty sortition (#5003) +- Enum for version specific data (#4981) + ## [2.5.0.0.5.1] ### Added From 9f4b56ebc6eb088c0fa1234f0d55f75d1e95b006 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 9 Aug 2024 09:03:23 -0500 Subject: [PATCH 0707/1400] test: add coverage for mutants --- stacks-signer/src/client/stacks_client.rs | 4 +-- stackslib/src/net/api/getstackers.rs | 35 +++++++++++++++++-- .../src/nakamoto_node/sign_coordinator.rs | 3 ++ 3 files changed, 38 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index e41485ea40..223455c72d 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -33,7 +33,7 @@ use blockstack_lib::net::api::get_tenures_fork_info::{ use blockstack_lib::net::api::getaccount::AccountEntryResponse; use blockstack_lib::net::api::getpoxinfo::RPCPoxInfoData; use blockstack_lib::net::api::getsortition::{SortitionInfo, RPC_SORTITION_INFO_PATH}; -use blockstack_lib::net::api::getstackers::GetStackersResponse; +use blockstack_lib::net::api::getstackers::{GetStackersErrors, GetStackersResponse}; use blockstack_lib::net::api::postblock::StacksBlockAcceptedData; use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; use blockstack_lib::net::api::postblock_v3; @@ -541,7 +541,7 @@ impl StacksClient { warn!("Failed to parse the GetStackers error response: {e}"); backoff::Error::permanent(e.into()) })?; - if error_data.err_type == "not_available_try_again" { + if &error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { return Err(backoff::Error::transient(ClientError::NoSortitionOnChain)); } else { warn!("Got error response ({status}): {}", error_data.err_msg); diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index afcea6b551..4fd4234070 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -57,10 +57,13 @@ pub enum GetStackersErrors { } impl GetStackersErrors { + pub const NOT_AVAILABLE_ERR_TYPE: &'static str = "not_available_try_again"; + pub const OTHER_ERR_TYPE: &'static str = "other"; + pub fn error_type_string(&self) -> &'static str { match self { - GetStackersErrors::NotAvailableYet(_) => "not_available_try_again", - GetStackersErrors::Other(_) => "other", + Self::NotAvailableYet(_) => Self::NOT_AVAILABLE_ERR_TYPE, + Self::Other(_) => Self::OTHER_ERR_TYPE, } } } @@ -252,3 +255,31 @@ impl StacksHttpResponse { Ok(response) } } + +#[cfg(test)] +mod test { + use super::GetStackersErrors; + + #[test] + // Test the formatting and error type strings of GetStackersErrors + fn get_stackers_errors() { + let not_available_err = GetStackersErrors::NotAvailableYet( + crate::chainstate::coordinator::Error::PoXNotProcessedYet, + ); + let other_err = GetStackersErrors::Other("foo".into()); + + assert_eq!( + not_available_err.error_type_string(), + GetStackersErrors::NOT_AVAILABLE_ERR_TYPE + ); + assert_eq!( + other_err.error_type_string(), + GetStackersErrors::OTHER_ERR_TYPE + ); + + assert!(not_available_err + .to_string() + .starts_with("Could not read reward set")); + assert_eq!(other_err.to_string(), "foo".to_string()); + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 914c2efb1a..6a5f026a16 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -631,6 +631,9 @@ impl SignCoordinator { /// This function begins by sending a `BlockProposal` message /// to the signers, and then waits for the signers to respond /// with their signatures. + // Mutants skip here: this function is covered via integration tests, + // which the mutation testing does not see. + #[cfg_attr(test, mutants::skip)] pub fn begin_sign_v0( &mut self, block: &NakamotoBlock, From e6e89552d4dc1dab00c16b7207724b6a1e8f5e33 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:24:36 -0400 Subject: [PATCH 0708/1400] fix: broadcast-signed-block fault injection in the global signer config, so we can set it in tests --- stacks-signer/src/config.rs | 3 +++ stacks-signer/src/runloop.rs | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 68f6141ee8..037e8af773 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -203,6 +203,8 @@ pub struct GlobalConfig { pub first_proposal_burn_block_timing: Duration, /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, + /// Broadcast a block to the node if we gather enough signatures from other signers + pub broadcast_signed_blocks: bool, } /// Internal struct for loading up the config file @@ -359,6 +361,7 @@ impl TryFrom for GlobalConfig { metrics_endpoint, first_proposal_burn_block_timing, block_proposal_timeout, + broadcast_signed_blocks: true, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 58502314bb..9e1083047b 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -335,7 +335,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, - broadcast_signed_blocks: true, + broadcast_signed_blocks: self.config.broadcast_signed_blocks, })) } From 1fe36734885a4dda7f9cfa125ff5dc2ba2a67825 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:25:05 -0400 Subject: [PATCH 0709/1400] fix: address #5046 completely --- .../stacks-node/src/nakamoto_node/relayer.rs | 77 +++++++++++-------- 1 file changed, 43 insertions(+), 34 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index d5e873a54d..add33424ad 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -375,47 +375,56 @@ impl RelayerThread { } } - /// Choose a miner directive based on the outcome of a sortition + /// Choose a miner directive based on the outcome of a sortition. + /// We won't always be able to mine -- for example, this could be an empty sortition, but the + /// parent block could be an epoch 2 block. In this case, the right thing to do is to wait for + /// the next block-commit. pub(crate) fn choose_miner_directive( config: &Config, sortdb: &SortitionDB, sn: BlockSnapshot, won_sortition: bool, committed_index_hash: StacksBlockId, - ) -> MinerDirective { + ) -> Option { let directive = if sn.sortition { - if won_sortition || config.get_node_config(false).mock_mining { - MinerDirective::BeginTenure { - parent_tenure_start: committed_index_hash, - burnchain_tip: sn, - } - } else { - MinerDirective::StopTenure - } + Some( + if won_sortition || config.get_node_config(false).mock_mining { + MinerDirective::BeginTenure { + parent_tenure_start: committed_index_hash, + burnchain_tip: sn, + } + } else { + MinerDirective::StopTenure + }, + ) } else { - let ih = sortdb.index_handle(&sn.sortition_id); - let parent_sn = ih.get_last_snapshot_with_sortition(sn.block_height).expect( - "FATAL: failed to query sortition DB for last snapshot with non-empty tenure", - ); - - let parent_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height) - .expect("FATAL: failed to query sortiiton DB for epoch") - .expect("FATAL: no epoch defined for existing sortition"); - - let cur_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sn.block_height) - .expect("FATAL: failed to query sortition DB for epoch") - .expect("FATAL: no epoch defined for existing sortition"); - - if parent_epoch.epoch_id != cur_epoch.epoch_id { - // this is the first-ever sortition, so definitely mine - MinerDirective::BeginTenure { - parent_tenure_start: committed_index_hash, - burnchain_tip: sn, - } + // find out what epoch the Stacks tip is in. + // If it's in epoch 2.x, then we must always begin a new tenure, but we can't do so + // right now since this sortition has no winner. + let (cur_stacks_tip_ch, _cur_stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) + .expect("FATAL: failed to query sortition DB for stacks tip"); + + let stacks_tip_sn = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cur_stacks_tip_ch) + .expect("FATAL: failed to query sortiiton DB for epoch") + .expect("FATAL: no sortition for canonical stacks tip"); + + let cur_epoch = + SortitionDB::get_stacks_epoch(sortdb.conn(), stacks_tip_sn.block_height) + .expect("FATAL: failed to query sortition DB for epoch") + .expect("FATAL: no epoch defined for existing sortition"); + + if cur_epoch.epoch_id != StacksEpochId::Epoch30 { + debug!( + "As of sortition {}, there has not yet been a Nakamoto tip. Cannot mine.", + &stacks_tip_sn.consensus_hash + ); + None } else { - MinerDirective::ContinueTenure { + Some(MinerDirective::ContinueTenure { new_burn_view: sn.consensus_hash, - } + }) } }; directive @@ -425,7 +434,7 @@ impl RelayerThread { /// determine what miner action (if any) to take. /// /// Returns a directive to the relayer thread to either start, stop, or continue a tenure, if - /// this sortition matches the sortition tip. + /// this sortition matches the sortition tip and we have a parent to build atop. /// /// Otherwise, returns None, meaning no action will be taken. fn process_sortition( @@ -465,14 +474,14 @@ impl RelayerThread { return Ok(None); } - let directive = Self::choose_miner_directive( + let directive_opt = Self::choose_miner_directive( &self.config, &self.sortdb, sn, won_sortition, committed_index_hash, ); - Ok(Some(directive)) + Ok(directive_opt) } /// Constructs and returns a LeaderKeyRegisterOp out of the provided params From f7f12932a76263847c9a10f0e547ff941664d398 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:26:06 -0400 Subject: [PATCH 0710/1400] fix: counter for rejected blocks, and also, the canonical stacks tip doesn't have any bearing on whether or not we should keep mining, so don't check it --- .../src/nakamoto_node/sign_coordinator.rs | 33 ++----------------- 1 file changed, 2 insertions(+), 31 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 97b64cbacd..d57639cd1d 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -683,6 +683,7 @@ impl SignCoordinator { ) .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; counters.bump_naka_proposed_blocks(); + #[cfg(test)] { info!( @@ -749,37 +750,6 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } - // we don't have the block we ostensibly mined, but perhaps the tenure has advanced - // anyway? If so, then give up. - let canonical_stacks_header = - NakamotoChainState::get_canonical_block_header(chain_state.db(), sortdb) - .map_err(|e| { - let msg = format!("Failed to query canonical stacks tip: {:?}", &e); - warn!("{}", &msg); - NakamotoNodeError::SignerSignatureError(msg) - })? - .ok_or_else(|| { - let msg = "No canonical stacks tip".to_string(); - warn!("{}", &msg); - NakamotoNodeError::SignerSignatureError(msg) - })?; - - debug!( - "run_sign_v0: our canonical tip is currently {}/{}", - &canonical_stacks_header.consensus_hash, - &canonical_stacks_header.anchored_header.block_hash() - ); - if canonical_stacks_header.anchored_header.height() >= block.header.chain_length - && canonical_stacks_header.index_block_hash() != block.header.block_id() - { - info!( - "SignCoordinator: our block {} is superceded by block {}", - block.header.block_id(), - canonical_stacks_header.index_block_hash() - ); - return Err(NakamotoNodeError::StacksTipChanged); - } - // check to see if this event we got is a signer event let is_signer_event = event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); @@ -850,6 +820,7 @@ impl SignCoordinator { &block.header.consensus_hash, &block.header.block_hash() ); + counters.bump_naka_rejected_blocks(); return Err(NakamotoNodeError::SignersRejected); } } else { From 744bc37be88e423ec49b9ab933ef9a571b4840ea Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:39:24 -0400 Subject: [PATCH 0711/1400] feat: signer-rejected blocks counter --- testnet/stacks-node/src/run_loop/neon.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 663c14e27b..ecf541a0de 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -104,6 +104,7 @@ pub struct Counters { pub naka_submitted_vrfs: RunLoopCounter, pub naka_submitted_commits: RunLoopCounter, pub naka_mined_blocks: RunLoopCounter, + pub naka_rejected_blocks: RunLoopCounter, pub naka_proposed_blocks: RunLoopCounter, pub naka_mined_tenures: RunLoopCounter, @@ -168,6 +169,10 @@ impl Counters { Counters::inc(&self.naka_proposed_blocks); } + pub fn bump_naka_rejected_blocks(&self) { + Counters::inc(&self.naka_rejected_blocks); + } + pub fn bump_naka_mined_tenures(&self) { Counters::inc(&self.naka_mined_tenures); } From 30603691824234fc7434db15790b96c1478e4bf3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:39:38 -0400 Subject: [PATCH 0712/1400] chore: don't crash if we ask for /v2/info while the node is re-binding on the nakamoto transition --- testnet/stacks-node/src/tests/neon_integrations.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index ac6a3ea978..8a1a08b5dc 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -694,7 +694,10 @@ pub fn run_until_burnchain_height( if !next_result { return false; } - let tip_info = get_chain_info(&conf); + let Ok(tip_info) = get_chain_info_result(&conf) else { + sleep_ms(1000); + continue; + }; current_height = tip_info.burn_block_height; } From 1bd92ba3fb53e4bc6b43834aaf3432b1adaff5ce Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:39:57 -0400 Subject: [PATCH 0713/1400] chore: track rejected blocks --- testnet/stacks-node/src/tests/signer/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 5d5f8d0c43..3c5aec785e 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -85,6 +85,7 @@ pub struct RunningNodes { pub blocks_processed: Arc, pub nakamoto_blocks_proposed: Arc, pub nakamoto_blocks_mined: Arc, + pub nakamoto_blocks_rejected: Arc, pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, @@ -747,6 +748,7 @@ fn setup_stx_btc_node ()>( naka_submitted_commits: commits_submitted, naka_proposed_blocks: naka_blocks_proposed, naka_mined_blocks: naka_blocks_mined, + naka_rejected_blocks: naka_blocks_rejected, naka_skip_commit_op: nakamoto_test_skip_commit_op, .. } = run_loop.counters(); @@ -780,6 +782,7 @@ fn setup_stx_btc_node ()>( blocks_processed: blocks_processed.0, nakamoto_blocks_proposed: naka_blocks_proposed.0, nakamoto_blocks_mined: naka_blocks_mined.0, + nakamoto_blocks_rejected: naka_blocks_rejected.0, nakamoto_test_skip_commit_op, coord_channel, conf: naka_conf, From 3ca62eec07fb5df86e03ea9f4d391034effd29a0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 10:40:11 -0400 Subject: [PATCH 0714/1400] fix: get some of the forking tests to work --- testnet/stacks-node/src/tests/signer/v0.rs | 127 ++++++++++++++++++--- 1 file changed, 112 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 69d4bc68da..34aeafdc31 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -32,7 +32,7 @@ use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; -use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; +use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; @@ -46,6 +46,7 @@ use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, }; use stacks_common::bitvec::BitVec; +use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; @@ -57,7 +58,7 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::MinedNakamotoBlockEvent; -use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; +use crate::nakamoto_node::miner::{TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL}; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ @@ -776,6 +777,9 @@ fn forked_tenure_testing( |config| { // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; + // don't allow signers to post signed blocks (limits the amount of fault injection we + // need) + config.broadcast_signed_blocks = false; }, |_| {}, &[], @@ -783,6 +787,7 @@ fn forked_tenure_testing( let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); + sleep_ms(1000); info!("------------------------- Reached Epoch 3.0 -------------------------"); let naka_conf = signer_test.running_nodes.conf.clone(); @@ -799,8 +804,14 @@ fn forked_tenure_testing( let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let proposed_blocks = signer_test.running_nodes.nakamoto_blocks_proposed.clone(); + let rejected_blocks = signer_test.running_nodes.nakamoto_blocks_rejected.clone(); + let coord_channel = signer_test.running_nodes.coord_channel.clone(); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); - info!("Starting tenure A."); + info!("Starting Tenure A."); // In the next block, the miner should win the tenure and submit a stacks block let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -810,20 +821,30 @@ fn forked_tenure_testing( || { let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); - Ok(commits_count > commits_before && blocks_count > blocks_before) + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(commits_count > commits_before + && blocks_count > blocks_before + && blocks_processed > blocks_processed_before) }, ) .unwrap(); + sleep_ms(1000); + let tip_a = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted TEST_BROADCAST_STALL.lock().unwrap().replace(true); + TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(true); let blocks_before = mined_blocks.load(Ordering::SeqCst); let commits_before = commits_submitted.load(Ordering::SeqCst); - info!("Starting tenure B."); + + info!("Starting Tenure B."); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -837,6 +858,7 @@ fn forked_tenure_testing( info!("Commit op is submitted; unpause tenure B's block"); // Unpause the broadcast of Tenure B's block, do not submit commits. + // However, do not allow B to be processed just yet signer_test .running_nodes .nakamoto_test_skip_commit_op @@ -858,13 +880,52 @@ fn forked_tenure_testing( info!("Tenure B broadcasted a block. Wait {post_btc_block_pause:?}, issue the next bitcon block, and un-stall block commits."); thread::sleep(post_btc_block_pause); - let tip_b = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + + // the block will be stored, not processed, so load it out of staging + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("Failed to get sortition tip"); + + let tip_b_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash) .unwrap() + .get(0) + .cloned() .unwrap(); + + // synthesize a StacksHeaderInfo from this unprocessed block + let tip_b = StacksHeaderInfo { + anchored_header: StacksBlockHeaderTypes::Nakamoto(tip_b_block.header.clone()), + microblock_tail: None, + stacks_block_height: tip_b_block.header.chain_length.into(), + index_root: tip_b_block.header.state_index_root.clone(), + consensus_hash: tip_b_block.header.consensus_hash.clone(), + burn_header_hash: tip_sn.burn_header_hash.clone(), + burn_header_height: tip_sn.block_height as u32, + burn_header_timestamp: tip_sn.burn_header_timestamp, + anchored_block_size: tip_b_block.serialize_to_vec().len() as u64, + burn_view: Some(tip_b_block.header.consensus_hash.clone()), + }; + let blocks = test_observer::get_mined_nakamoto_blocks(); let mined_b = blocks.last().unwrap().clone(); - info!("Starting tenure C."); + // Block B was built atop block A + assert_eq!(tip_b.stacks_block_height, tip_a.stacks_block_height + 1); + assert_eq!( + mined_b.parent_block_id, + tip_a.index_block_hash().to_string() + ); + assert_ne!(tip_b, tip_a); + + if !expect_tenure_c { + // allow B to process, so it'll be distinct from C + TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false); + sleep_ms(1000); + } + + info!("Starting Tenure C."); + // Submit a block commit op for tenure C let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = if expect_tenure_c { @@ -872,6 +933,8 @@ fn forked_tenure_testing( } else { proposed_blocks.load(Ordering::SeqCst) }; + let rejected_before = rejected_blocks.load(Ordering::SeqCst); + next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -883,17 +946,38 @@ fn forked_tenure_testing( .lock() .unwrap() .replace(false); + let commits_count = commits_submitted.load(Ordering::SeqCst); - let blocks_count = if expect_tenure_c { - mined_blocks.load(Ordering::SeqCst) + if commits_count > commits_before { + // now allow block B to process. + TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false); + } + let rejected_count = rejected_blocks.load(Ordering::SeqCst); + let (blocks_count, rbf_count, has_reject_count) = if expect_tenure_c { + // if tenure C is going to be canonical, then we expect the miner to RBF its commit + // once (i.e. for the block it mines and gets signed), and we expect zero + // rejections. + (mined_blocks.load(Ordering::SeqCst), 1, true) } else { - proposed_blocks.load(Ordering::SeqCst) + // if tenure C is NOT going to be canonical, then we expect no RBFs (since the + // miner can't get its block signed), and we expect at least one rejection + ( + proposed_blocks.load(Ordering::SeqCst), + 0, + rejected_count > rejected_before, + ) }; - Ok(commits_count > commits_before && blocks_count > blocks_before) + + Ok(commits_count > commits_before + rbf_count + && blocks_count > blocks_before + && has_reject_count) }, ) .unwrap(); + // allow blocks B and C to be processed + sleep_ms(1000); + info!("Tenure C produced (or proposed) a block!"); let tip_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() @@ -902,6 +986,9 @@ fn forked_tenure_testing( let blocks = test_observer::get_mined_nakamoto_blocks(); let mined_c = blocks.last().unwrap().clone(); + assert_eq!(tip_b, tip_c); + assert_ne!(tip_c, tip_a); + let (tip_c_2, mined_c_2) = if !expect_tenure_c { (None, None) } else { @@ -922,6 +1009,9 @@ fn forked_tenure_testing( thread::sleep(Duration::from_secs(1)); } + // give C's second block a moment to process + sleep_ms(1000); + info!("Tenure C produced a second block!"); let block_2_tenure_c = @@ -933,7 +1023,11 @@ fn forked_tenure_testing( (Some(block_2_tenure_c), Some(block_2_c)) }; - info!("Starting tenure D."); + // allow block C2 to be processed + sleep_ms(1000); + + info!("Starting Tenure D."); + // Submit a block commit op for tenure D and mine a stacks block let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -948,6 +1042,9 @@ fn forked_tenure_testing( ) .unwrap(); + // allow block D to be processed + sleep_ms(1000); + let tip_d = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); @@ -1247,7 +1344,7 @@ fn multiple_miners() { if btc_blocks_mined > max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } - + let info_1 = get_chain_info(&conf); let info_2 = get_chain_info(&conf_node_2); @@ -1255,13 +1352,13 @@ fn multiple_miners() { "Issue next block-build request\ninfo 1: {:?}\ninfo 2: {:?}\n", &info_1, &info_2 ); - + signer_test.mine_block_wait_on_processing( &[&rl1_coord_channels, &rl2_coord_channels], &[&rl1_commits, &rl2_commits], Duration::from_secs(30), ); - + btc_blocks_mined += 1; let blocks = get_nakamoto_headers(&conf); // for this test, there should be one block per tenure From 40d96e839e88af57a09c0c24fb27f701c95fe805 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 11:31:16 -0400 Subject: [PATCH 0715/1400] fix: assert_ne, not assert_eq --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 34aeafdc31..11625e01bf 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -986,7 +986,7 @@ fn forked_tenure_testing( let blocks = test_observer::get_mined_nakamoto_blocks(); let mined_c = blocks.last().unwrap().clone(); - assert_eq!(tip_b, tip_c); + assert_ne!(tip_b, tip_c); assert_ne!(tip_c, tip_a); let (tip_c_2, mined_c_2) = if !expect_tenure_c { From e0773044a6135c7925f5358d5c488068bd50385b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 9 Aug 2024 13:39:18 -0500 Subject: [PATCH 0716/1400] ci: attempt to fix naka mock mining test --- .../stacks-node/src/run_loop/boot_nakamoto.rs | 7 +++- .../src/tests/nakamoto_integrations.rs | 40 +++++++++---------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 97bf8dd4e0..0f6c3d6388 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -160,7 +160,12 @@ impl BootRunLoop { info!("Shutting down epoch-2/3 transition thread"); return; } - info!("Reached Epoch-3.0 boundary, starting nakamoto node"); + + info!( + "Reached Epoch-3.0 boundary, starting nakamoto node"; + "with_neon_data" => data_to_naka.is_some(), + "with_p2p_stack" => data_to_naka.as_ref().map(|x| x.peer_network.is_some()).unwrap_or(false) + ); termination_switch.store(true, Ordering::SeqCst); let naka = NakaRunLoop::new( self.config.clone(), diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 62afa03ac4..f3cf76af04 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7127,19 +7127,31 @@ fn mock_mining() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); - let tenure_count = 5; - let inter_blocks_per_tenure = 9; + let tenure_count = 3; + let inter_blocks_per_tenure = 3; // setup sender + recipient for some test stx transfers // these are necessary for the interim blocks to get mined at all let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let localhost = "127.0.0.1"; + naka_conf.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + naka_conf.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + naka_conf.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + naka_conf.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.add_initial_balance( PrincipalData::from(sender_addr.clone()).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, @@ -7212,11 +7224,7 @@ fn mock_mining() { blind_signer(&naka_conf, &signers, proposals_submitted); // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); + wait_for_first_naka_block_commit(60, &commits_submitted); let mut follower_conf = naka_conf.clone(); follower_conf.node.mock_mining = true; @@ -7225,18 +7233,10 @@ fn mock_mining() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); - - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - - let localhost = "127.0.0.1"; - follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); - follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + follower_conf.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + follower_conf.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + follower_conf.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); let node_info = get_chain_info(&naka_conf); follower_conf.node.add_bootstrap_node( From b9d332b471424a8197746e2495f72da4ae9df0c0 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 5 Aug 2024 13:02:43 -0400 Subject: [PATCH 0717/1400] fix: Warnings after Rust 1.80 update --- clarity/Cargo.toml | 2 ++ clarity/src/vm/database/key_value_wrapper.rs | 16 ++++++++-------- stacks-common/Cargo.toml | 4 ++++ stacks-common/src/deps_common/bech32/mod.rs | 16 ++++++++-------- stacks-common/src/deps_common/httparse/mod.rs | 1 - stackslib/src/clarity_cli.rs | 3 --- testnet/stacks-node/Cargo.toml | 1 + 7 files changed, 23 insertions(+), 20 deletions(-) diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 2e35c06473..284e856e49 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -55,3 +55,5 @@ developer-mode = ["stacks_common/developer-mode"] slog_json = ["stacks_common/slog_json"] testing = ["canonical"] devtools = [] +rollback_value_check = [] +disable-costs = [] diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index 69eb74b39e..3fd845f92f 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -31,15 +31,15 @@ use crate::vm::types::{ }; use crate::vm::{StacksEpoch, Value}; -#[cfg(rollback_value_check)] +#[cfg(feature = "rollback_value_check")] type RollbackValueCheck = String; -#[cfg(not(rollback_value_check))] +#[cfg(not(feature = "rollback_value_check"))] type RollbackValueCheck = (); -#[cfg(not(rollback_value_check))] +#[cfg(not(feature = "rollback_value_check"))] fn rollback_value_check(_value: &str, _check: &RollbackValueCheck) {} -#[cfg(not(rollback_value_check))] +#[cfg(not(feature = "rollback_value_check"))] fn rollback_edits_push(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, _value: &str) { edits.push((key, ())); } @@ -47,7 +47,7 @@ fn rollback_edits_push(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, _val // wrapper -- i.e., when committing to the underlying store. for the _unchecked_ implementation // this is used to get the edit _value_ out of the lookupmap, for used in the subsequent `put_all` // command. -#[cfg(not(rollback_value_check))] +#[cfg(not(feature = "rollback_value_check"))] fn rollback_check_pre_bottom_commit( edits: Vec<(T, RollbackValueCheck)>, lookup_map: &mut HashMap>, @@ -71,11 +71,11 @@ where output } -#[cfg(rollback_value_check)] +#[cfg(feature = "rollback_value_check")] fn rollback_value_check(value: &String, check: &RollbackValueCheck) { assert_eq!(value, check) } -#[cfg(rollback_value_check)] +#[cfg(feature = "rollback_value_check")] fn rollback_edits_push(edits: &mut Vec<(T, RollbackValueCheck)>, key: T, value: &String) where T: Eq + Hash + Clone, @@ -84,7 +84,7 @@ where } // this function is used to check the lookup map when committing at the "bottom" of the // wrapper -- i.e., when committing to the underlying store. -#[cfg(rollback_value_check)] +#[cfg(feature = "rollback_value_check")] fn rollback_check_pre_bottom_commit( edits: Vec<(T, RollbackValueCheck)>, lookup_map: &mut HashMap>, diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 57c1407fa8..b91f63ff99 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -75,6 +75,10 @@ canonical = ["rusqlite"] developer-mode = [] slog_json = ["slog-json"] testing = ["canonical"] +serde = [] +clippy = [] +bech32_std = [] +bech32_strict = [] [target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } diff --git a/stacks-common/src/deps_common/bech32/mod.rs b/stacks-common/src/deps_common/bech32/mod.rs index 99f95e9cd6..655f2b1a82 100644 --- a/stacks-common/src/deps_common/bech32/mod.rs +++ b/stacks-common/src/deps_common/bech32/mod.rs @@ -30,7 +30,7 @@ //! has more details. //! #![cfg_attr( - feature = "std", + feature = "bech32_std", doc = " # Examples ``` @@ -54,20 +54,20 @@ assert_eq!(variant, Variant::Bech32); #![deny(non_camel_case_types)] #![deny(non_snake_case)] #![deny(unused_mut)] -#![cfg_attr(feature = "strict", deny(warnings))] +#![cfg_attr(feature = "bech32_strict", deny(warnings))] -#[cfg(all(not(feature = "std"), not(test)))] +#[cfg(all(not(feature = "bech32_std"), not(test)))] extern crate alloc; -#[cfg(any(test, feature = "std"))] +#[cfg(any(test, feature = "bech32_std"))] extern crate core; -#[cfg(all(not(feature = "std"), not(test)))] +#[cfg(all(not(feature = "bech32_std"), not(test)))] use alloc::borrow::Cow; -#[cfg(all(not(feature = "std"), not(test)))] +#[cfg(all(not(feature = "bech32_std"), not(test)))] use alloc::{string::String, vec::Vec}; use core::{fmt, mem}; -#[cfg(any(feature = "std", test))] +#[cfg(any(feature = "bech32_std", test))] use std::borrow::Cow; /// Integer in the range `0..32` @@ -690,7 +690,7 @@ impl fmt::Display for Error { } } -#[cfg(any(feature = "std", test))] +#[cfg(any(feature = "bech32_std", test))] impl std::error::Error for Error { fn description(&self) -> &str { match *self { diff --git a/stacks-common/src/deps_common/httparse/mod.rs b/stacks-common/src/deps_common/httparse/mod.rs index 90a08bf2f1..67ca2c52cd 100644 --- a/stacks-common/src/deps_common/httparse/mod.rs +++ b/stacks-common/src/deps_common/httparse/mod.rs @@ -22,7 +22,6 @@ #![cfg_attr(test, deny(warnings))] // we can't upgrade while supporting Rust 1.3 #![allow(deprecated)] -#![cfg_attr(httparse_min_2018, allow(rust_2018_idioms))] //! # httparse //! diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 53b53c36b7..21cf55dea6 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -106,7 +106,6 @@ macro_rules! panic_test { }; } -#[cfg_attr(tarpaulin, skip)] fn print_usage(invoked_by: &str) { eprintln!( "Usage: {} [command] @@ -129,7 +128,6 @@ where command is one of: panic_test!() } -#[cfg_attr(tarpaulin, skip)] fn friendly_expect(input: Result, msg: &str) -> A { input.unwrap_or_else(|e| { eprintln!("{}\nCaused by: {}", msg, e); @@ -137,7 +135,6 @@ fn friendly_expect(input: Result, msg: &str) -> A }) } -#[cfg_attr(tarpaulin, skip)] fn friendly_expect_opt(input: Option, msg: &str) -> A { input.unwrap_or_else(|| { eprintln!("{}", msg); diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index ba674dbaac..aa72f814db 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -63,3 +63,4 @@ monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stack slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"] prod-genesis-chainstate = [] default = [] +testing = [] From d64e443d6ba2f2bf6a2247a4cf24a1ff5ff96096 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 6 Aug 2024 10:43:15 -0400 Subject: [PATCH 0718/1400] chore: Remove `clippy` feature flag --- stacks-common/Cargo.toml | 1 - stacks-common/src/deps_common/bitcoin/mod.rs | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index b91f63ff99..d5bfeb44e9 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -76,7 +76,6 @@ developer-mode = [] slog_json = ["slog-json"] testing = ["canonical"] serde = [] -clippy = [] bech32_std = [] bech32_strict = [] diff --git a/stacks-common/src/deps_common/bitcoin/mod.rs b/stacks-common/src/deps_common/bitcoin/mod.rs index 890825ea98..b70da5deb2 100644 --- a/stacks-common/src/deps_common/bitcoin/mod.rs +++ b/stacks-common/src/deps_common/bitcoin/mod.rs @@ -26,8 +26,8 @@ //! // Clippy flags -#![cfg_attr(feature = "clippy", allow(needless_range_loop))] // suggests making a big mess of array newtypes -#![cfg_attr(feature = "clippy", allow(extend_from_slice))] // `extend_from_slice` only available since 1.6 +#![allow(clippy::needless_range_loop)] // suggests making a big mess of array newtypes +#![allow(clippy::extend_from_slice)] // `extend_from_slice` only available since 1.6 // Coding conventions #![deny(non_upper_case_globals)] From b800565f53cd345c47f3b6231eaa57aa02d3ad43 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 9 Aug 2024 14:43:11 -0500 Subject: [PATCH 0719/1400] ci: add mutants skip --- testnet/stacks-node/src/run_loop/boot_nakamoto.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 0f6c3d6388..b78d857d59 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -140,6 +140,10 @@ impl BootRunLoop { naka_loop.start(burnchain_opt, mine_start, None) } + // configuring mutants::skip -- this function is covered through integration tests (this function + // is pretty definitionally an integration, so thats unavoidable), and the integration tests + // do not get counted in mutants coverage. + #[cfg_attr(test, mutants::skip)] fn start_from_neon(&mut self, burnchain_opt: Option, mine_start: u64) { let InnerLoops::Epoch2(ref mut neon_loop) = self.active_loop else { panic!("FATAL: unexpectedly invoked start_from_neon when active loop wasn't neon"); From 8430618aff74a2c1a1365fba885358a55723c9bf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 16:54:02 -0400 Subject: [PATCH 0720/1400] fix: fix remaining integration tests (all of which failed due not waiting around long enough for a Nakamoto block to process) --- .../src/tests/nakamoto_integrations.rs | 6 ++++ testnet/stacks-node/src/tests/signer/v0.rs | 34 +++++++++++++++---- 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 22fde68793..9a72b7b57e 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5720,6 +5720,7 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); + sleep_ms(5_000); info!("Pausing commit ops to trigger a tenure extend."); test_skip_commit_op.0.lock().unwrap().replace(true); @@ -5732,6 +5733,7 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); + sleep_ms(5_000); // Submit a TX let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); @@ -5766,6 +5768,7 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); + sleep_ms(5_000); next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); @@ -5775,6 +5778,7 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); + sleep_ms(5_000); info!("Resuming commit ops to mine regular tenures."); test_skip_commit_op.0.lock().unwrap().replace(false); @@ -5802,6 +5806,8 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); + + sleep_ms(5_000); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1d1f3bc638..041e7f373a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -46,6 +46,7 @@ use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, }; use stacks_common::bitvec::BitVec; +use stacks_common::types::chainstate::TrieHash; use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; @@ -541,6 +542,9 @@ fn miner_gather_signatures() { signer_test.boot_to_epoch_3(); let timeout = Duration::from_secs(30); + // give the system a chance to mine a Nakamoto block + sleep_ms(30_000); + info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); @@ -626,8 +630,14 @@ fn forked_tenure_invalid() { } let result = forked_tenure_testing(Duration::from_secs(5), Duration::from_secs(7), false); - assert_ne!(result.tip_b, result.tip_a); - assert_eq!(result.tip_b, result.tip_c); + assert_ne!( + result.tip_b.index_block_hash(), + result.tip_a.index_block_hash() + ); + assert_eq!( + result.tip_b.index_block_hash(), + result.tip_c.index_block_hash() + ); assert_ne!(result.tip_c, result.tip_a); // Block B was built atop block A @@ -661,7 +671,10 @@ fn forked_tenure_invalid() { // Tenure D should continue progress assert_ne!(result.tip_c, result.tip_d); - assert_ne!(result.tip_b, result.tip_d); + assert_ne!( + result.tip_b.index_block_hash(), + result.tip_d.index_block_hash() + ); assert_ne!(result.tip_a, result.tip_d); // Tenure D builds off of Tenure B @@ -1012,7 +1025,7 @@ fn forked_tenure_testing( anchored_header: StacksBlockHeaderTypes::Nakamoto(tip_b_block.header.clone()), microblock_tail: None, stacks_block_height: tip_b_block.header.chain_length.into(), - index_root: tip_b_block.header.state_index_root.clone(), + index_root: TrieHash([0x00; 32]), // we can't know this yet since the block hasn't been processed consensus_hash: tip_b_block.header.consensus_hash.clone(), burn_header_hash: tip_sn.burn_header_hash.clone(), burn_header_height: tip_sn.block_height as u32, @@ -1063,7 +1076,7 @@ fn forked_tenure_testing( let commits_count = commits_submitted.load(Ordering::SeqCst); if commits_count > commits_before { - // now allow block B to process. + // now allow block B to process if it hasn't already. TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false); } let rejected_count = rejected_blocks.load(Ordering::SeqCst); @@ -1100,7 +1113,11 @@ fn forked_tenure_testing( let blocks = test_observer::get_mined_nakamoto_blocks(); let mined_c = blocks.last().unwrap().clone(); - assert_ne!(tip_b, tip_c); + if expect_tenure_c { + assert_ne!(tip_b.index_block_hash(), tip_c.index_block_hash()); + } else { + assert_eq!(tip_b.index_block_hash(), tip_c.index_block_hash()); + } assert_ne!(tip_c, tip_a); let (tip_c_2, mined_c_2) = if !expect_tenure_c { @@ -1893,6 +1910,9 @@ fn end_of_tenure() { .reward_cycle_to_block_height(final_reward_cycle) - 2; + // give the system a chance to mine a Nakamoto block + sleep_ms(30_000); + info!("------------------------- Test Mine to Next Reward Cycle Boundary -------------------------"); signer_test.run_until_burnchain_height_nakamoto( long_timeout, @@ -1969,6 +1989,8 @@ fn end_of_tenure() { ) .unwrap(); } + + sleep_ms(10_000); assert_eq!(signer_test.get_current_reward_cycle(), final_reward_cycle); while test_observer::get_burn_blocks() From 6e6e69d47b097454371a9da4a5b22fdf11ad8141 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 22:31:38 -0400 Subject: [PATCH 0721/1400] feat: enable test for #4998 --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index e14934558a..84acefd639 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -96,6 +96,7 @@ jobs: - tests::signer::v0::signer_set_rollover - tests::signer::v0::miner_forking - tests::signer::v0::reloads_signer_set_in + - tests::signer::v0::signers_broadcast_signed_blocks - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state From f2a3a9998b24fea877329916a7e14bd2c67ae4d0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 22:31:51 -0400 Subject: [PATCH 0722/1400] chore: make block accept/reject an INFO-level log item --- stackslib/src/net/relay.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 92a1ebb480..dca8738d3d 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -936,14 +936,14 @@ impl Relayer { staging_db_tx.commit()?; if accepted { - debug!("{}", &accept_msg); + info!("{}", &accept_msg); if let Some(coord_comms) = coord_comms { if !coord_comms.announce_new_stacks_block() { return Err(chainstate_error::NetError(net_error::CoordinatorClosed)); } } } else { - debug!("{}", &reject_msg); + info!("{}", &reject_msg); } Ok(accepted) From 87940805270f2a3af182e3f950d9a2f13d3e3e97 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 22:32:09 -0400 Subject: [PATCH 0723/1400] feat: fault injection to ignore a signer signature --- .../src/nakamoto_node/sign_coordinator.rs | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 98090b2082..87afd617fa 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -52,6 +52,11 @@ use crate::event_dispatcher::STACKER_DB_CHANNEL; use crate::neon::Counters; use crate::Config; +/// Fault injection flag to prevent the miner from seeing enough signer signatures. +/// Used to test that the signers will broadcast a block if it gets enough signatures +#[cfg(test)] +pub static TEST_IGNORE_SIGNERS: std::sync::Mutex> = std::sync::Mutex::new(None); + /// How long should the coordinator poll on the event receiver before /// waking up to check timeouts? static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); @@ -631,6 +636,20 @@ impl SignCoordinator { )) } + /// Do we ignore signer signatures? + #[cfg(test)] + fn fault_injection_ignore_signatures() -> bool { + if *TEST_IGNORE_SIGNERS.lock().unwrap() == Some(true) { + return true; + } + false + } + + #[cfg(not(test))] + fn fault_injection_ignore_signatures() -> bool { + false + } + /// Start gathering signatures for a Nakamoto block. /// This function begins by sending a `BlockProposal` message /// to the signers, and then waits for the signers to respond @@ -750,6 +769,7 @@ impl SignCoordinator { }) { debug!("SignCoordinator: Found signatures in relayed block"); + counters.bump_naka_signer_pushed_blocks(); return Ok(stored_block.header.signer_signature); } @@ -887,6 +907,21 @@ impl SignCoordinator { .checked_add(signer_entry.weight) .expect("FATAL: total weight signed exceeds u32::MAX"); } + + if Self::fault_injection_ignore_signatures() { + debug!("SignCoordinator: fault injection: ignoring well-formed signature for block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => total_weight_signed, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); + continue; + } + debug!("SignCoordinator: Signature Added to block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), From aa0cb7441f1155cc09fd8554a73818724074ec65 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 22:32:25 -0400 Subject: [PATCH 0724/1400] feat: counter for signer-pushed blocks --- testnet/stacks-node/src/run_loop/neon.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index ecf541a0de..5d5ff3653d 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -107,6 +107,7 @@ pub struct Counters { pub naka_rejected_blocks: RunLoopCounter, pub naka_proposed_blocks: RunLoopCounter, pub naka_mined_tenures: RunLoopCounter, + pub naka_signer_pushed_blocks: RunLoopCounter, #[cfg(test)] pub naka_skip_commit_op: TestFlag, @@ -173,6 +174,10 @@ impl Counters { Counters::inc(&self.naka_rejected_blocks); } + pub fn bump_naka_signer_pushed_blocks(&self) { + Counters::inc(&self.naka_signer_pushed_blocks); + } + pub fn bump_naka_mined_tenures(&self) { Counters::inc(&self.naka_mined_tenures); } From 9ea5e1d32811c74069bc810fdd6d5244ce62a480 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 22:32:39 -0400 Subject: [PATCH 0725/1400] chore: push through counter for signer-pushed blocks --- testnet/stacks-node/src/tests/signer/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 3c5aec785e..a5973569a1 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -86,6 +86,7 @@ pub struct RunningNodes { pub nakamoto_blocks_proposed: Arc, pub nakamoto_blocks_mined: Arc, pub nakamoto_blocks_rejected: Arc, + pub nakamoto_blocks_signer_pushed: Arc, pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, @@ -750,6 +751,7 @@ fn setup_stx_btc_node ()>( naka_mined_blocks: naka_blocks_mined, naka_rejected_blocks: naka_blocks_rejected, naka_skip_commit_op: nakamoto_test_skip_commit_op, + naka_signer_pushed_blocks, .. } = run_loop.counters(); @@ -783,6 +785,7 @@ fn setup_stx_btc_node ()>( nakamoto_blocks_proposed: naka_blocks_proposed.0, nakamoto_blocks_mined: naka_blocks_mined.0, nakamoto_blocks_rejected: naka_blocks_rejected.0, + nakamoto_blocks_signer_pushed: naka_signer_pushed_blocks.0, nakamoto_test_skip_commit_op, coord_channel, conf: naka_conf, From 69e1a7649ce0016d7eccff67cb4bc400b59cce59 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 9 Aug 2024 22:32:54 -0400 Subject: [PATCH 0726/1400] feat: add test coverage for #4998 and #5048 --- testnet/stacks-node/src/tests/signer/v0.rs | 111 ++++++++++++++++++++- 1 file changed, 109 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 041e7f373a..5e366ba488 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -60,6 +60,7 @@ use super::SignerTest; use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::{TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL}; +use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ @@ -2134,10 +2135,105 @@ fn retry_on_timeout() { signer_test.shutdown(); } +#[test] +#[ignore] +/// This test checks that the signers will broadcast a block once they receive enough signatures. +fn signers_broadcast_signed_blocks() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(5)), + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + sleep_ms(10_000); + + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + sleep_ms(10_000); + + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + let signer_pushed_before = signer_test + .running_nodes + .nakamoto_blocks_signer_pushed + .load(Ordering::SeqCst); + + let info_before = get_chain_info(&signer_test.running_nodes.conf); + + // submit a tx so that the miner will mine a block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + debug!("Transaction sent; waiting for block-mining"); + + let start = Instant::now(); + let duration = 60; + loop { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + let signer_pushed = signer_test + .running_nodes + .nakamoto_blocks_signer_pushed + .load(Ordering::SeqCst); + + let info = get_chain_info(&signer_test.running_nodes.conf); + if blocks_mined > blocks_before + && signer_pushed > signer_pushed_before + && info.stacks_tip_height > info_before.stacks_tip_height + { + break; + } + + debug!( + "blocks_mined: {},{}, signers_pushed: {},{}, stacks_tip_height: {},{}", + blocks_mined, + blocks_before, + signer_pushed, + signer_pushed_before, + info.stacks_tip_height, + info_before.stacks_tip_height + ); + + std::thread::sleep(Duration::from_millis(100)); + if start.elapsed() >= Duration::from_secs(duration) { + panic!("Timed out"); + } + } + + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks the behaviour of signers when a sortition is empty. Specifically: /// - An empty sortition will cause the signers to mark a miner as misbehaving once a timeout is exceeded. +/// - The miner will stop trying to mine once it sees a threshold of signers reject the block /// - The empty sortition will trigger the miner to attempt a tenure extend. /// - Signers will accept the tenure extend and sign subsequent blocks built off the old sortition fn empty_sortition() { @@ -2238,6 +2334,11 @@ fn empty_sortition() { .load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before); + let rejected_before = signer_test + .running_nodes + .nakamoto_blocks_rejected + .load(Ordering::SeqCst); + // submit a tx so that the miner will mine an extra block let sender_nonce = 0; let transfer_tx = @@ -2294,8 +2395,14 @@ fn empty_sortition() { info!("Latest message from slot #{slot_id} isn't a block rejection, will wait to see if the signer updates to a rejection"); } } - // wait until we've found rejections for all the signers - Ok(found_rejections.len() == signer_slot_ids.len()) + let rejections = signer_test + .running_nodes + .nakamoto_blocks_rejected + .load(Ordering::SeqCst); + + // wait until we've found rejections for all the signers, and the miner has confirmed that + // the signers have rejected the block + Ok(found_rejections.len() == signer_slot_ids.len() && rejections > rejected_before) }).unwrap(); signer_test.shutdown(); } From 753a87ec453c0a0cc0a233c2abb630def2284978 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 12 Aug 2024 16:46:38 -0400 Subject: [PATCH 0727/1400] test: add test with multiple miners mining multiple blocks per tenure --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 251 +++++++++++++++++++++ 2 files changed, 252 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index e14934558a..b46da01d6b 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -105,6 +105,7 @@ jobs: - tests::nakamoto_integrations::continue_tenure_extend - tests::nakamoto_integrations::mock_mining - tests::nakamoto_integrations::multiple_miners + - tests::signer::v0::multiple_miners_with_nakamoto_blocks # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f589416746..1ae75fdda3 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2643,3 +2643,254 @@ fn signer_set_rollover() { assert!(signer.stop().is_none()); } } + +#[test] +#[ignore] +// This test involves two miners, each mining tenures with 6 blocks each. +fn multiple_miners_with_nakamoto_blocks() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let max_nakamoto_tenures = 20; + let inter_blocks_per_tenure = 5; + + // setup sender + recipient for a test stx transfer + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let mut node_2_listeners = Vec::new(); + + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![( + sender_addr.clone(), + (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, + )], + Some(Duration::from_secs(15)), + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + ); + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_mined_blocks: blocks_mined2, + .. + } = run_loop_2.counters(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); + let mut btc_blocks_mined = 0; + let mut miner_1_tenures = 0; + let mut miner_2_tenures = 0; + while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { + if btc_blocks_mined > max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + signer_test.mine_block_wait_on_processing( + &[&rl1_coord_channels, &rl2_coord_channels], + &[&rl1_commits, &rl2_commits], + Duration::from_secs(30), + ); + btc_blocks_mined += 1; + + // wait for the new block to be processed + loop { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + + info!( + "Nakamoto blocks mined: {}", + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst) + ); + + // mine the interim blocks + info!("Mining interim blocks"); + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + // submit a tx so that the miner will mine an extra block + let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + info!( + "Mined interim block {}:{}", + btc_blocks_mined, interim_block_ix + ); + } + + let blocks = get_nakamoto_headers(&conf); + let mut seen_burn_hashes = HashSet::new(); + miner_1_tenures = 0; + miner_2_tenures = 0; + for header in blocks.iter() { + if seen_burn_hashes.contains(&header.burn_header_hash) { + continue; + } + seen_burn_hashes.insert(header.burn_header_hash.clone()); + + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + if miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_1_tenures += 1; + } + if miner_2_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_2_tenures += 1; + } + } + info!( + "Miner 1 tenures: {}, Miner 2 tenures: {}", + miner_1_tenures, miner_2_tenures + ); + } + + info!( + "New chain info 1: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_1_height, peer_2_height); + assert_eq!( + peer_1_height, + pre_nakamoto_peer_1_height + btc_blocks_mined * (inter_blocks_per_tenure + 1) + ); + assert_eq!( + btc_blocks_mined, + u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() + ); + + signer_test.shutdown(); +} From a574768cadf3d6c3d2d307db273ad223df3a96ff Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 13 Aug 2024 08:19:24 -0400 Subject: [PATCH 0728/1400] Add pre_nakamoto_miner_messaging option to MinerConfig Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/config.rs | 5 +++++ testnet/stacks-node/src/tests/signer/v0.rs | 3 +-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 4eef0bbdd0..04def859d1 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2332,6 +2332,8 @@ pub struct MinerConfig { pub max_reorg_depth: u64, /// Amount of time while mining in nakamoto to wait for signers to respond to a proposed block pub wait_on_signers: Duration, + /// Whether to send miner messages in Epoch 2.5 through the .miners contract. This is used for testing. + pub pre_nakamoto_miner_messaging: bool, } impl Default for MinerConfig { @@ -2362,6 +2364,7 @@ impl Default for MinerConfig { max_reorg_depth: 3, // TODO: update to a sane value based on stackerdb benchmarking wait_on_signers: Duration::from_secs(200), + pre_nakamoto_miner_messaging: true, } } } @@ -2693,6 +2696,7 @@ pub struct MinerConfigFile { pub filter_origins: Option, pub max_reorg_depth: Option, pub wait_on_signers_ms: Option, + pub pre_nakamoto_miner_messaging: Option, } impl MinerConfigFile { @@ -2795,6 +2799,7 @@ impl MinerConfigFile { .wait_on_signers_ms .map(Duration::from_millis) .unwrap_or(miner_default_config.wait_on_signers), + pre_nakamoto_miner_messaging: self.pre_nakamoto_miner_messaging.unwrap_or(true) }) } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f589416746..705cb2e011 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -49,7 +49,6 @@ use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::runloop::State; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -61,7 +60,7 @@ use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; From 4e37d0bdaec28f27c2a252a1b6b9226cbc0a8e02 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 13 Aug 2024 15:23:58 -0400 Subject: [PATCH 0729/1400] Have miners respond to mock signature messages in epoch 2.5 via stackerdb Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 84 +++++++++- testnet/stacks-node/src/config.rs | 2 +- .../src/nakamoto_node/sign_coordinator.rs | 4 + testnet/stacks-node/src/neon_node.rs | 147 +++++++++++++++++- testnet/stacks-node/src/tests/signer/v0.rs | 5 +- 5 files changed, 232 insertions(+), 10 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 7d411f89b5..af7c38e22a 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -88,9 +88,18 @@ MinerSlotID { /// Block proposal from the miner BlockProposal = 0, /// Block pushed from the miner - BlockPushed = 1 + BlockPushed = 1, + /// Mock message from the miner + MockMinerMessage = 2 }); +#[cfg_attr(test, mutants::skip)] +impl Display for MinerSlotID { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}({})", self, self.to_u8()) + } +} + impl MessageSlotIDTrait for MessageSlotID { fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) @@ -116,7 +125,9 @@ SignerMessageTypePrefix { /// Block Pushed message from miners BlockPushed = 2, /// Mock Signature message from Epoch 2.5 signers - MockSignature = 3 + MockSignature = 3, + /// Mock Pre-Nakamoto message from Epoch 2.5 miners + MockMinerMessage = 4 }); #[cfg_attr(test, mutants::skip)] @@ -160,6 +171,7 @@ impl From<&SignerMessage> for SignerMessageTypePrefix { SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, SignerMessage::BlockPushed(_) => SignerMessageTypePrefix::BlockPushed, SignerMessage::MockSignature(_) => SignerMessageTypePrefix::MockSignature, + SignerMessage::MockMinerMessage(_) => SignerMessageTypePrefix::MockMinerMessage, } } } @@ -175,6 +187,8 @@ pub enum SignerMessage { BlockPushed(NakamotoBlock), /// A mock signature from the epoch 2.5 signers MockSignature(MockSignature), + /// A mock message from the epoch 2.5 miners + MockMinerMessage(MockMinerMessage), } impl SignerMessage { @@ -184,7 +198,7 @@ impl SignerMessage { #[cfg_attr(test, mutants::skip)] pub fn msg_id(&self) -> Option { match self { - Self::BlockProposal(_) | Self::BlockPushed(_) => None, + Self::BlockProposal(_) | Self::BlockPushed(_) | Self::MockMinerMessage(_) => None, Self::BlockResponse(_) => Some(MessageSlotID::BlockResponse), Self::MockSignature(_) => Some(MessageSlotID::MockSignature), } @@ -201,6 +215,7 @@ impl StacksMessageCodec for SignerMessage { SignerMessage::BlockResponse(block_response) => block_response.consensus_serialize(fd), SignerMessage::BlockPushed(block) => block.consensus_serialize(fd), SignerMessage::MockSignature(signature) => signature.consensus_serialize(fd), + SignerMessage::MockMinerMessage(message) => message.consensus_serialize(fd), }?; Ok(()) } @@ -226,6 +241,10 @@ impl StacksMessageCodec for SignerMessage { let signature = StacksMessageCodec::consensus_deserialize(fd)?; SignerMessage::MockSignature(signature) } + SignerMessageTypePrefix::MockMinerMessage => { + let message = StacksMessageCodec::consensus_deserialize(fd)?; + SignerMessage::MockMinerMessage(message) + } }; Ok(message) } @@ -441,6 +460,43 @@ impl StacksMessageCodec for MockSignature { } } +/// A mock message for the stacks node to be used for mock mining messages +/// This is only used by Epoch 2.5 miners to simulate miners responding to mock signatures +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MockMinerMessage { + /// The view of the stacks node peer information at the time of the mock signature + pub peer_info: PeerInfo, + /// The burn block height of the miner's tenure + pub tenure_burn_block_height: u64, + /// The chain id for the mock signature + pub chain_id: u32, + /// The mock signatures that the miner received + pub mock_signatures: Vec, +} + +impl StacksMessageCodec for MockMinerMessage { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + self.peer_info.consensus_serialize(fd)?; + write_next(fd, &self.tenure_burn_block_height)?; + write_next(fd, &self.chain_id)?; + write_next(fd, &self.mock_signatures)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let peer_info = PeerInfo::consensus_deserialize(fd)?; + let tenure_burn_block_height = read_next::(fd)?; + let chain_id = read_next::(fd)?; + let mock_signatures = read_next::, _>(fd)?; + Ok(Self { + peer_info, + tenure_burn_block_height, + chain_id, + mock_signatures, + }) + } +} + define_u8_enum!( /// Enum representing the reject code type prefix RejectCodeTypePrefix { @@ -940,4 +996,26 @@ mod test { .expect("Failed to deserialize MockSignData"); assert_eq!(sign_data, deserialized_data); } + + #[test] + fn serde_mock_miner_message() { + let mock_signature_1 = MockSignature { + signature: MessageSignature::empty(), + sign_data: random_mock_sign_data(), + }; + let mock_signature_2 = MockSignature { + signature: MessageSignature::empty(), + sign_data: random_mock_sign_data(), + }; + let mock_miner_message = MockMinerMessage { + peer_info: random_peer_data(), + tenure_burn_block_height: thread_rng().next_u64(), + chain_id: thread_rng().gen_range(0..=1), + mock_signatures: vec![mock_signature_1, mock_signature_2], + }; + let serialized_data = mock_miner_message.serialize_to_vec(); + let deserialized_data = read_next::(&mut &serialized_data[..]) + .expect("Failed to deserialize MockSignData"); + assert_eq!(mock_miner_message, deserialized_data); + } } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 04def859d1..4528e07222 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2799,7 +2799,7 @@ impl MinerConfigFile { .wait_on_signers_ms .map(Duration::from_millis) .unwrap_or(miner_default_config.wait_on_signers), - pre_nakamoto_miner_messaging: self.pre_nakamoto_miner_messaging.unwrap_or(true) + pre_nakamoto_miner_messaging: self.pre_nakamoto_miner_messaging.unwrap_or(true), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 6a5f026a16..b366d93132 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -774,6 +774,10 @@ impl SignCoordinator { debug!("Received mock signature message. Ignoring."); continue; } + SignerMessageV0::MockMinerMessage(_) => { + debug!("Received mock miner message. Ignoring."); + continue; + } }; let block_sighash = block.header.signer_signature_hash(); if block_sighash != response_hash { diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 8c3c4ed179..f3170a1c00 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -147,9 +147,14 @@ use std::thread::JoinHandle; use std::time::Duration; use std::{fs, mem, thread}; +use clarity::boot_util::boot_code_id; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use libsigner::v0::messages::{ + MessageSlotID, MinerSlotID, MockMinerMessage, MockSignature, PeerInfo, SignerMessage, +}; +use libsigner::StackerDBSession; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::{Burnchain, BurnchainSigner, PoxConstants, Txid}; @@ -164,10 +169,11 @@ use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::blocks::StagingBlock; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY}; use stacks::chainstate::stacks::miner::{ - signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, StacksMicroblockBuilder, + signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, StacksMicroblockBuilder }; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockBuilder, StacksBlockHeader, @@ -178,7 +184,6 @@ use stacks::core::mempool::MemPoolDB; use stacks::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, STACKS_EPOCH_3_0_MARKER}; use stacks::cost_estimates::metrics::{CostMetric, UnitMetric}; use stacks::cost_estimates::{CostEstimator, FeeEstimator, UnitEstimator}; -use stacks::monitoring; use stacks::monitoring::{increment_stx_blocks_mined_counter, update_active_miners_count_gauge}; use stacks::net::atlas::{AtlasConfig, AtlasDB}; use stacks::net::db::{LocalPeer, PeerDB}; @@ -190,6 +195,7 @@ use stacks::net::{ Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, }; use stacks::util_lib::strings::{UrlString, VecDisplay}; +use stacks::{monitoring, version_string}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, @@ -210,6 +216,7 @@ use crate::burnchains::make_bitcoin_indexer; use crate::chain_data::MinerStats; use crate::config::NodeConfig; use crate::globals::{NeonGlobals as Globals, RelayerDirective}; +use crate::nakamoto_node::sign_coordinator::SignCoordinator; use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; use crate::ChainTip; @@ -2255,6 +2262,133 @@ impl BlockMinerThread { return false; } + /// Read any mock signatures from stackerdb and respond to them + pub fn respond_to_mock_signatures(&mut self) -> Result<(), ChainstateError> { + let miner_config = self.config.get_miner_config(); + if miner_config.pre_nakamoto_miner_messaging { + debug!("Pre-Nakamoto miner messaging is disabled"); + return Ok(()); + } + + let burn_db_path = self.config.get_burn_db_file_path(); + let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + + let target_epoch_id = + SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height + 1)? + .expect("FATAL: no epoch defined") + .epoch_id; + if target_epoch_id != StacksEpochId::Epoch25 { + debug!("Mock signing is disabled for non-epoch 2.5 blocks."; + "target_epoch_id" => target_epoch_id.to_string() + ); + return Ok(()); + } + // Retrieve any MockSignatures from stackerdb + let mut mock_signatures = Vec::new(); + let reward_cycle = self + .burnchain + .block_height_to_reward_cycle(self.burn_block.block_height) + .expect("BUG: block commit exists before first block height"); + let signers_contract_id = MessageSlotID::MockSignature + .stacker_db_contract(self.config.is_mainnet(), reward_cycle); + // Get the slots for every signer + let stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), false)?; + let slot_ids: Vec<_> = stackerdbs + .get_signers(&signers_contract_id) + .expect("FATAL: could not get signers from stacker DB") + .into_iter() + .enumerate() + .map(|(slot_id, _)| { + u32::try_from(slot_id).expect("FATAL: too many signers to fit into u32 range") + }) + .collect(); + let chunks = stackerdbs.get_latest_chunks(&signers_contract_id, &slot_ids)?; + for chunk in chunks { + if let Some(chunk) = chunk { + match MockSignature::consensus_deserialize(&mut chunk.as_slice()) { + Ok(mock_signature) => { + if mock_signature.sign_data.event_burn_block_height + == self.burn_block.block_height + { + mock_signatures.push(mock_signature); + } + } + Err(e) => { + warn!("Failed to deserialize mock signature: {:?}", &e); + continue; + } + } + } + } + info!( + "Miner responding to {} mock signatures", + mock_signatures.len() + ); + let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); + let mut miners_stackerdb = + StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); + + let p2p_net = StacksNode::setup_peer_network( + &self.config, + &self.config.atlas, + self.burnchain.clone(), + ); + + let server_version = version_string( + "stacks-node", + option_env!("STACKS_NODE_VERSION") + .or(option_env!("CARGO_PKG_VERSION")) + .unwrap_or("0.0.0.0"), + ); + let stacks_tip_height = p2p_net.stacks_tip.height; + let stacks_tip = p2p_net.stacks_tip.block_hash.clone(); + let stacks_tip_consensus_hash = p2p_net.stacks_tip.consensus_hash.clone(); + let pox_consensus = p2p_net.burnchain_tip.consensus_hash.clone(); + let burn_block_height = p2p_net.chain_view.burn_block_height; + + let peer_info = PeerInfo { + burn_block_height, + stacks_tip_consensus_hash, + stacks_tip, + stacks_tip_height, + pox_consensus, + server_version, + }; + + info!("Responding to mock signatures for burn block {:?}", &self.burn_block.block_height; + "stacks_tip_consensus_hash" => ?peer_info.stacks_tip_consensus_hash.clone(), + "stacks_tip" => ?peer_info.stacks_tip.clone(), + "peer_burn_block_height" => peer_info.burn_block_height, + "pox_consensus" => ?peer_info.pox_consensus.clone(), + "server_version" => peer_info.server_version.clone(), + "chain_id" => self.config.burnchain.chain_id + ); + let message = MockMinerMessage { + peer_info, + tenure_burn_block_height: self.burn_block.block_height, + chain_id: self.config.burnchain.chain_id, + mock_signatures, + }; + let sort_db = SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) + .expect("FATAL: failed to open burnchain DB"); + + if let Err(e) = SignCoordinator::send_miners_message( + &miner_config.mining_key.expect("BUG: no mining key"), + &sort_db, + &self.burn_block, + &stackerdbs, + SignerMessage::MockMinerMessage(message), + MinerSlotID::MockMinerMessage, + self.config.is_mainnet(), + &mut miners_stackerdb, + &self.burn_block.consensus_hash, + ) { + warn!("Failed to send mock miner message: {:?}", &e); + } + Ok(()) + } + // TODO: add tests from mutation testing results #4871 #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a @@ -3595,7 +3729,14 @@ impl RelayerThread { if let Ok(miner_handle) = thread::Builder::new() .name(format!("miner-block-{}", self.local_peer.data_url)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) - .spawn(move || miner_thread_state.run_tenure()) + .spawn(move || { + let result = miner_thread_state.run_tenure(); + if let Err(e) = miner_thread_state.respond_to_mock_signatures() { + warn!("Failed to respond to mock signatures: {}", e); + } + result + + }) .map_err(|e| { error!("Relayer: Failed to start tenure thread: {:?}", &e); e diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 705cb2e011..4b3fea46a0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -60,9 +60,8 @@ use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, - next_block_and, setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, + wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, From c6c3aa4dafaecd87dd1af7deb02a16cb9a834dd1 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 13 Aug 2024 15:25:48 -0400 Subject: [PATCH 0730/1400] Rust fmt Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index f3170a1c00..6cbbd3b9f6 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -173,7 +173,7 @@ use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::blocks::StagingBlock; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY}; use stacks::chainstate::stacks::miner::{ - signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, StacksMicroblockBuilder + signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, StacksMicroblockBuilder, }; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockBuilder, StacksBlockHeader, @@ -3735,8 +3735,7 @@ impl RelayerThread { warn!("Failed to respond to mock signatures: {}", e); } result - - }) + }) .map_err(|e| { error!("Relayer: Failed to start tenure thread: {:?}", &e); e From 8c68cb284bbf34c8c4537276bcccc80347436654 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 14 Aug 2024 12:57:21 -0400 Subject: [PATCH 0731/1400] test: add partial_tenure_fork test --- .github/workflows/bitcoin-tests.yml | 1 + stackslib/src/net/relay.rs | 39 +++ testnet/stacks-node/src/tests/signer/v0.rs | 277 +++++++++++++++++++-- 3 files changed, 301 insertions(+), 16 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index b46da01d6b..52635b7abb 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -106,6 +106,7 @@ jobs: - tests::nakamoto_integrations::mock_mining - tests::nakamoto_integrations::multiple_miners - tests::signer::v0::multiple_miners_with_nakamoto_blocks + - tests::signer::v0::partial_tenure_fork # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 32dc7d065a..ab59632ea7 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -70,6 +70,40 @@ pub const MAX_RECENT_MESSAGES: usize = 256; pub const MAX_RECENT_MESSAGE_AGE: usize = 600; // seconds; equal to the expected epoch length pub const RELAY_DUPLICATE_INFERENCE_WARMUP: usize = 128; +#[cfg(any(test, feature = "testing"))] +pub mod fault_injection { + use std::path::Path; + + static IGNORE_BLOCK: std::sync::Mutex> = std::sync::Mutex::new(None); + + pub fn ignore_block(height: u64, working_dir: &str) -> bool { + if let Some((ignore_height, ignore_dir)) = &*IGNORE_BLOCK.lock().unwrap() { + let working_dir_path = Path::new(working_dir); + let ignore_dir_path = Path::new(ignore_dir); + + let ignore = *ignore_height == height && working_dir_path.starts_with(ignore_dir_path); + if ignore { + warn!("Fault injection: ignore block at height {}", height); + } + return ignore; + } + false + } + + pub fn set_ignore_block(height: u64, working_dir: &str) { + warn!( + "Fault injection: set ignore block at height {} for working directory {}", + height, working_dir + ); + *IGNORE_BLOCK.lock().unwrap() = Some((height, working_dir.to_string())); + } + + pub fn clear_ignore_block() { + warn!("Fault injection: clear ignore block"); + *IGNORE_BLOCK.lock().unwrap() = None; + } +} + pub struct Relayer { /// Connection to the p2p thread p2p: NetworkHandle, @@ -810,6 +844,11 @@ impl Relayer { &block.header.block_hash() ); + #[cfg(any(test, feature = "testing"))] + if fault_injection::ignore_block(block.header.chain_length, &burnchain.working_dir) { + return Ok(false); + } + // do we have this block? don't lock the DB needlessly if so. if chainstate .nakamoto_blocks_db() diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1ae75fdda3..3cf7a14804 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -37,6 +37,7 @@ use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; +use stacks::net::relay::fault_injection::{clear_ignore_block, set_ignore_block}; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::hash::MerkleHashFunc; @@ -49,7 +50,6 @@ use stacks_common::bitvec::BitVec; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::runloop::State; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -61,9 +61,8 @@ use crate::nakamoto_node::miner::TEST_BROADCAST_STALL; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, - next_block_and, setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, + wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -2797,14 +2796,12 @@ fn multiple_miners_with_nakamoto_blocks() { btc_blocks_mined += 1; // wait for the new block to be processed - loop { + wait_for(60, || { let blocks_processed = blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); info!( "Nakamoto blocks mined: {}", @@ -2822,14 +2819,12 @@ fn multiple_miners_with_nakamoto_blocks() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); submit_tx(&http_origin, &transfer_tx); - loop { + wait_for(60, || { let blocks_processed = blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); info!( "Mined interim block {}:{}", btc_blocks_mined, interim_block_ix @@ -2894,3 +2889,253 @@ fn multiple_miners_with_nakamoto_blocks() { signer_test.shutdown(); } + +#[test] +#[ignore] +// This test involves two miners, 1 and 2. During miner 1's first tenure, miner +// 2 is forced to ignore one of the blocks in that tenure. The next time miner +// 2 mines a block, it should attempt to fork the chain at that point. The test +// verifies that the fork is not successful and that miner 1 is able to +// continue mining after this fork attempt. +fn partial_tenure_fork() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let max_nakamoto_tenures = 20; + let inter_blocks_per_tenure = 5; + + // setup sender + recipient for a test stx transfer + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + + // All signers are listening to node 1 + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![( + sender_addr.clone(), + (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, + )], + Some(Duration::from_secs(15)), + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + }, + &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + ); + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let Counters { + naka_mined_blocks: blocks_mined2, + naka_proposed_blocks: blocks_proposed2, + .. + } = run_loop_2.counters(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + let mut btc_blocks_mined = 0; + let mut miner_1_tenures = 0u64; + let mut miner_2_tenures = 0u64; + let mut fork_initiated = false; + let mut min_miner_1_tenures = u64::MAX; + let mut min_miner_2_tenures = u64::MAX; + let mut ignore_block = 0; + + while !(miner_1_tenures >= min_miner_1_tenures && miner_2_tenures >= min_miner_2_tenures) { + if btc_blocks_mined > max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + + // Mine a block and wait for it to be processed, unless we are in a + // forked tenure, in which case, just wait for the block proposal + let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + + Ok((fork_initiated && proposed_2 > proposed_before_2) + || mined_1 > mined_before_1 + || mined_2 > mined_before_2) + }, + ) + .unwrap(); + btc_blocks_mined += 1; + + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let miner = if mined_1 > mined_before_1 { 1 } else { 2 }; + + if miner == 1 && miner_1_tenures == 0 { + // Setup miner 2 to ignore a block in this tenure + ignore_block = pre_nakamoto_peer_1_height + + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) + + 3; + set_ignore_block(ignore_block, &conf_node_2.node.working_dir); + + // Ensure that miner 2 runs at least one more tenure + min_miner_2_tenures = miner_2_tenures + 1; + fork_initiated = true; + } + if miner == 2 && miner_2_tenures == min_miner_2_tenures { + // This is the forking tenure. Ensure that miner 1 runs one more + // tenure after this to validate that it continues to build off of + // the proper block. + min_miner_1_tenures = miner_1_tenures + 1; + } + + // mine (or attempt to mine) the interim blocks + info!("Mining interim blocks"); + for interim_block_ix in 0..inter_blocks_per_tenure { + let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + + // submit a tx so that the miner will mine an extra block + let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + wait_for(60, || { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + + Ok((fork_initiated && proposed_2 > proposed_before_2) + || mined_1 > mined_before_1 + || mined_2 > mined_before_2) + }) + .unwrap(); + info!( + "Attempted to mine interim block {}:{}", + btc_blocks_mined, interim_block_ix + ); + } + + if miner == 1 { + miner_1_tenures += 1; + } else { + miner_2_tenures += 1; + } + info!( + "Miner 1 tenures: {}, Miner 2 tenures: {}", + miner_1_tenures, miner_2_tenures + ); + + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + if miner == 1 { + assert_eq!(mined_1, mined_before_1 + inter_blocks_per_tenure + 1); + } else { + if miner_2_tenures < min_miner_2_tenures { + assert_eq!(mined_2, mined_before_2 + inter_blocks_per_tenure + 1); + } else if miner_2_tenures == min_miner_2_tenures { + // If this is the forking tenure, miner 2 should have mined 0 blocks + assert_eq!(mined_2, mined_before_2); + + // Clear the ignore block + clear_ignore_block(); + } + } + } + + info!( + "New chain info 1: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_2_height, ignore_block - 1); + assert_eq!( + peer_1_height, + pre_nakamoto_peer_1_height + + (miner_1_tenures + min_miner_2_tenures - 1) * (inter_blocks_per_tenure + 1) + ); + assert_eq!( + btc_blocks_mined, + u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() + ); + + signer_test.shutdown(); +} From dc5b1170c0136ed483b9fe9cf729a668ec0fc351 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 14 Aug 2024 12:58:14 -0400 Subject: [PATCH 0732/1400] refactor: clean up in block processing stall code --- .../src/chainstate/nakamoto/coordinator/tests.rs | 2 +- stackslib/src/chainstate/nakamoto/mod.rs | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 569114aa12..b9c994e3e0 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -41,10 +41,10 @@ use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::operations::{BlockstackOperationType, LeaderBlockCommitOp}; use crate::chainstate::coordinator::tests::{p2pkh_from, pox_addr_from}; use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; +use crate::chainstate::nakamoto::fault_injection::*; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; use crate::chainstate::nakamoto::test_signers::TestSigners; -use crate::chainstate::nakamoto::test_stall::*; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{ diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d059a96cb6..536819e72a 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -271,15 +271,14 @@ lazy_static! { } #[cfg(test)] -mod test_stall { - pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex> = - std::sync::Mutex::new(None); +mod fault_injection { + static PROCESS_BLOCK_STALL: std::sync::Mutex = std::sync::Mutex::new(false); pub fn stall_block_processing() { - if *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + if *PROCESS_BLOCK_STALL.lock().unwrap() { // Do an extra check just so we don't log EVERY time. warn!("Block processing is stalled due to testing directive."); - while *TEST_PROCESS_BLOCK_STALL.lock().unwrap() == Some(true) { + while *PROCESS_BLOCK_STALL.lock().unwrap() { std::thread::sleep(std::time::Duration::from_millis(10)); } info!("Block processing is no longer stalled due to testing directive."); @@ -287,11 +286,11 @@ mod test_stall { } pub fn enable_process_block_stall() { - TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(true); + *PROCESS_BLOCK_STALL.lock().unwrap() = true; } pub fn disable_process_block_stall() { - TEST_PROCESS_BLOCK_STALL.lock().unwrap().replace(false); + *PROCESS_BLOCK_STALL.lock().unwrap() = false; } } @@ -1748,7 +1747,7 @@ impl NakamotoChainState { dispatcher_opt: Option<&'a T>, ) -> Result, ChainstateError> { #[cfg(test)] - test_stall::stall_block_processing(); + fault_injection::stall_block_processing(); let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); let Some((next_ready_block, block_size)) = From 78e3189c5780345283083c22f71ca208137b6ffa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:00:24 -0400 Subject: [PATCH 0733/1400] fix: /v3/block_proposal --- docs/rpc-endpoints.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index 6163f27b75..eea916a781 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -427,7 +427,7 @@ Determine whether a given trait is implemented within the specified contract (ei See OpenAPI [spec](./rpc/openapi.yaml) for details. -### POST /v2/block_proposal +### POST /v3/block_proposal Used by miner to validate a proposed Stacks block using JSON encoding. From 8dcd82b245b6508dedc34d90b7f0f56e58e7daf5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:00:44 -0400 Subject: [PATCH 0734/1400] feat: use broadcast=1 on block upload and use /v3/ endpoint --- stacks-signer/src/client/stacks_client.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 223455c72d..a2e995ae9c 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -685,8 +685,9 @@ impl StacksClient { pub fn post_block(&self, block: &NakamotoBlock) -> Result { let response = self .stacks_node_client - .post(format!("{}{}", self.http_origin, postblock_v3::PATH)) + .post(format!("{}{}?broadcast=1", self.http_origin, postblock_v3::PATH)) .header("Content-Type", "application/octet-stream") + .header(AUTHORIZATION, self.auth_password.clone()) .body(block.serialize_to_vec()) .send()?; if !response.status().is_success() { @@ -789,7 +790,7 @@ impl StacksClient { } fn block_proposal_path(&self) -> String { - format!("{}/v2/block_proposal", self.http_origin) + format!("{}/v3/block_proposal", self.http_origin) } fn sortition_info_path(&self) -> String { @@ -814,7 +815,7 @@ impl StacksClient { } fn reward_set_path(&self, reward_cycle: u64) -> String { - format!("{}/v2/stacker_set/{reward_cycle}", self.http_origin) + format!("{}/v3/stacker_set/{reward_cycle}", self.http_origin) } fn fees_transaction_path(&self) -> String { From f3fe79155873640303539d740d47614468a0474a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:01:05 -0400 Subject: [PATCH 0735/1400] fix: store broadcast timestamp and don't allow it to be overwritten --- stacks-signer/src/signerdb.rs | 65 +++++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 26 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 86e9928c26..4964f549d4 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -242,7 +242,7 @@ CREATE TABLE IF NOT EXISTS blocks ( block_info TEXT NOT NULL, consensus_hash TEXT NOT NULL, signed_over INTEGER NOT NULL, - broadcasted INTEGER NOT NULL, + broadcasted INTEGER, stacks_height INTEGER NOT NULL, burn_block_height INTEGER NOT NULL, PRIMARY KEY (reward_cycle, signer_signature_hash) @@ -261,7 +261,7 @@ CREATE TABLE IF NOT EXISTS block_signatures ( ) STRICT;"#; static CREATE_INDEXES_2: &str = r#" -CREATE INDEX IF NOT EXISTS block_reward_cycle_and_signature ON block_signatures(signer_signature_hash); +CREATE INDEX IF NOT EXISTS block_signatures_on_signer_signature_hash ON block_signatures(signer_signature_hash); "#; static SCHEMA_1: &[&str] = &[ @@ -479,18 +479,18 @@ impl SignerDb { } /// Insert or replace a block into the database. - /// `hash` is the `signer_signature_hash` of the block. + /// Preserves the `broadcast` column if replacing an existing block. pub fn insert_block(&mut self, block_info: &BlockInfo) -> Result<(), DBError> { let block_json = serde_json::to_string(&block_info).expect("Unable to serialize block info"); let hash = &block_info.signer_signature_hash(); let block_id = &block_info.block.block_id(); let signed_over = &block_info.signed_over; - let broadcasted = false; let vote = block_info .vote .as_ref() .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" }); + let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, &hash)?; debug!("Inserting block_info."; "reward_cycle" => %block_info.reward_cycle, @@ -498,7 +498,7 @@ impl SignerDb { "sighash" => %hash, "block_id" => %block_id, "signed" => %signed_over, - "broadcasted" => %broadcasted, + "broadcasted" => ?broadcasted, "vote" => vote ); self.db @@ -553,12 +553,12 @@ impl SignerDb { let qry = "SELECT signature FROM block_signatures WHERE signer_signature_hash = ?1"; let args = params![block_sighash]; let sigs_txt: Vec = query_rows(&self.db, qry, args)?; - let mut sigs = vec![]; - for sig_txt in sigs_txt.into_iter() { - let sig = serde_json::from_str(&sig_txt).map_err(|_| DBError::ParseError)?; - sigs.push(sig); - } - Ok(sigs) + sigs_txt + .into_iter() + .map(|sig_txt| { + serde_json::from_str(&sig_txt).map_err(|_| DBError::ParseError) + }) + .collect() } /// Mark a block as having been broadcasted @@ -566,27 +566,33 @@ impl SignerDb { &self, reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, + ts: u64 ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = 1 WHERE reward_cycle = ?1 AND signer_signature_hash = ?2"; - let args = params![u64_to_sql(reward_cycle)?, block_sighash]; + let qry = "UPDATE blocks SET broadcasted = ?1 WHERE reward_cycle = ?2 AND signer_signature_hash = ?3"; + let args = params![u64_to_sql(ts)?, u64_to_sql(reward_cycle)?, block_sighash]; - debug!("Marking block {} as broadcasted", block_sighash); + debug!("Marking block {} as broadcasted at {}", block_sighash, ts); self.db.execute(qry, args)?; Ok(()) } - /// Is a block broadcasted already - pub fn is_block_broadcasted( + /// Get the timestamp at which the block was broadcasted. + pub fn get_block_broadcasted( &self, reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, - ) -> Result { + ) -> Result, DBError> { let qry = - "SELECT broadcasted FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2"; + "SELECT IFNULL(broadcasted,0) AS broadcasted FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2"; let args = params![u64_to_sql(reward_cycle)?, block_sighash]; - let broadcasted: i64 = query_row(&self.db, qry, args)?.unwrap_or(0); - Ok(broadcasted != 0) + let Some(broadcasted): Option = query_row(&self.db, qry, args)? else { + return Ok(None); + }; + if broadcasted == 0 { + return Ok(None); + } + Ok(u64::try_from(broadcasted).ok()) } } @@ -901,22 +907,29 @@ mod tests { db.insert_block(&block_info_1) .expect("Unable to insert block into db"); - assert!(!db - .is_block_broadcasted( + assert!(db + .get_block_broadcasted( block_info_1.reward_cycle, &block_info_1.signer_signature_hash() ) - .unwrap()); + .unwrap() + .is_none()); db.set_block_broadcasted( block_info_1.reward_cycle, &block_info_1.signer_signature_hash(), + 12345 ) .unwrap(); - assert!(db - .is_block_broadcasted( + db.insert_block(&block_info_1) + .expect("Unable to insert block into db a second time"); + + assert_eq!(db + .get_block_broadcasted( block_info_1.reward_cycle, &block_info_1.signer_signature_hash() ) - .unwrap()); + .unwrap() + .unwrap(), + 12345); } } From c41cf4b455d7039ea31c73db4fc5fe308f417766 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:01:23 -0400 Subject: [PATCH 0736/1400] refactor: separate signature weights and total weights calculations --- stacks-signer/src/v0/signer.rs | 129 ++++++++++++++++++--------------- 1 file changed, 71 insertions(+), 58 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 78fb181a0f..1bf94e38b5 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -31,6 +31,7 @@ use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; +use stacks_common::util::get_epoch_time_secs; use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; @@ -452,8 +453,7 @@ impl Signer { block_validate_response: &BlockValidateResponse, ) { info!("{self}: Received a block validate response: {block_validate_response:?}"); - let mut signature_opt = None; - let (response, block_info) = match block_validate_response { + let (response, block_info, signature_opt) = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { crate::monitoring::increment_block_validation_responses(true); let signer_signature_hash = block_validate_ok.signer_signature_hash; @@ -479,10 +479,10 @@ impl Signer { .sign(&signer_signature_hash.0) .expect("Failed to sign block"); - signature_opt = Some(signature.clone()); ( BlockResponse::accepted(signer_signature_hash, signature), block_info, + Some(signature.clone()) ) } BlockValidateResponse::Reject(block_validate_reject) => { @@ -507,6 +507,7 @@ impl Signer { ( BlockResponse::from(block_validate_reject.clone()), block_info, + None ) } }; @@ -541,35 +542,27 @@ impl Signer { } } - /// Compute the signing weight and total weight, given a list of signatures - fn compute_signature_weight( + /// Compute the signing weight, given a list of signatures + fn compute_signature_signing_weight<'a>( &self, - block_hash: &Sha512Trunc256Sum, - sigs: &[MessageSignature], - ) -> (u32, u32) { - let signing_weight = sigs.iter().fold(0usize, |signing_weight, sig| { - let weight = if let Ok(public_key) = - Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), sig) - { - let stacker_address = StacksAddress::p2pkh(self.mainnet, &public_key); - let stacker_weight = self.signer_weights.get(&stacker_address).unwrap_or(&0); - *stacker_weight - } else { - 0 - }; - signing_weight.saturating_add(weight) + addrs: impl Iterator + ) -> u32 { + let signing_weight = addrs.fold(0usize, |signing_weight, stacker_address| { + let stacker_weight = self.signer_weights.get(&stacker_address).unwrap_or(&0); + signing_weight.saturating_add(*stacker_weight) }); + u32::try_from(signing_weight) + .unwrap_or_else(|_| panic!("FATAL: signing weight exceeds u32::MAX")) + } + /// Compute the total signing weight + fn compute_signature_total_weight(&self) -> u32 { let total_weight = self .signer_weights .values() .fold(0usize, |acc, val| acc.saturating_add(*val)); - ( - u32::try_from(signing_weight) - .unwrap_or_else(|_| panic!("FATAL: signing weight exceeds u32::MAX")), - u32::try_from(total_weight) - .unwrap_or_else(|_| panic!("FATAL: total weight exceeds u32::MAX")), - ) + u32::try_from(total_weight) + .unwrap_or_else(|_| panic!("FATAL: total weight exceeds u32::MAX")) } /// Handle an observed signature from another signer @@ -586,16 +579,34 @@ impl Signer { debug!("{self}: Received a block-accept signature: ({block_hash}, {signature})"); + // have we broadcasted before? + if let Some(ts) = self + .signer_db + .get_block_broadcasted(self.reward_cycle, block_hash) + .unwrap_or_else(|_| { + panic!("{self}: failed to determine if block {block_hash} was broadcasted") + }) + { + debug!("{self}: have already broadcasted block {} at {}, so will not re-attempt", block_hash, ts); + return; + } + + // recover public key + let Ok(public_key) = + Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), signature) + else { + debug!("{self}: Received unrecovarable signature. Will not store."; + "signature" => %signature, + "block_hash" => %block_hash); + + return; + }; + // authenticate the signature -- it must be signed by one of the stacking set let is_valid_sig = self .signer_addresses .iter() .find(|addr| { - let Ok(public_key) = - Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), signature) - else { - return false; - }; let stacker_address = StacksAddress::p2pkh(true, &public_key); // it only matters that the address hash bytes match @@ -608,18 +619,34 @@ impl Signer { return; } + // signature is valid! store it self.signer_db .add_block_signature(block_hash, signature) .unwrap_or_else(|_| panic!("{self}: Failed to save block signature")); // do we have enough signatures to broadcast? + // i.e. is the threshold reached? let signatures = self .signer_db .get_block_signatures(block_hash) .unwrap_or_else(|_| panic!("{self}: Failed to load block signatures")); - let (signature_weight, total_weight) = - self.compute_signature_weight(block_hash, &signatures); + // put signatures in order by signer address (i.e. reward cycle order) + let addrs_to_sigs: HashMap<_, _> = signatures + .into_iter() + .filter_map(|sig| { + let Ok(public_key) = Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), &sig) + else { + return None; + }; + let addr = StacksAddress::p2pkh(self.mainnet, &public_key); + Some((addr, sig)) + }) + .collect(); + + let signature_weight = self.compute_signature_signing_weight(addrs_to_sigs.keys()); + let total_weight = self.compute_signature_total_weight(); + let min_weight = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight) .unwrap_or_else(|_| { panic!("{self}: Failed to compute threshold weight for {total_weight}") @@ -634,19 +661,7 @@ impl Signer { } // have enough signatures to broadcast! - // have we broadcasted before? - if self - .signer_db - .is_block_broadcasted(self.reward_cycle, block_hash) - .unwrap_or_else(|_| { - panic!("{self}: failed to determine if block {block_hash} was broadcasted") - }) - { - debug!("{self}: will not re-broadcast block {}", block_hash); - return; - } - - let Ok(Some(block_info)) = self + let Ok(Some(mut block_info)) = self .signer_db .block_lookup(self.reward_cycle, block_hash) .map_err(|e| { @@ -658,19 +673,17 @@ impl Signer { return; }; - // put signatures in order by signer address (i.e. reward cycle order) - let addrs_to_sigs: HashMap<_, _> = signatures - .into_iter() - .filter_map(|sig| { - let Ok(public_key) = Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), &sig) - else { - return None; - }; - let addr = StacksAddress::p2pkh(self.mainnet, &public_key); - Some((addr, sig)) - }) - .collect(); + // record time at which we reached the threshold + block_info.signed_group = Some(get_epoch_time_secs()); + let _ = self + .signer_db + .insert_block(&block_info) + .map_err(|e| { + warn!("Failed to set group threshold signature timestamp for {}: {:?}", block_hash, &e); + e + }); + // collect signatures for the block let signatures: Vec<_> = self .signer_addresses .iter() @@ -697,7 +710,7 @@ impl Signer { if broadcasted { self.signer_db - .set_block_broadcasted(self.reward_cycle, block_hash) + .set_block_broadcasted(self.reward_cycle, block_hash, get_epoch_time_secs()) .unwrap_or_else(|_| { panic!("{self}: failed to determine if block {block_hash} was broadcasted") }); From b9f5291bc7cd7b2503f110f166ad2acd752e3c97 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:01:44 -0400 Subject: [PATCH 0737/1400] refactor: log errors in deserialization within a closure --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 1d7b2a8414..931a00777b 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -339,7 +339,16 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { let block_data: Vec> = query_rows(self, qry, args)?; Ok(block_data .into_iter() - .filter_map(|block_vec| NakamotoBlock::consensus_deserialize(&mut &block_vec[..]).ok()) + .filter_map(|block_vec| { + NakamotoBlock::consensus_deserialize(&mut &block_vec[..]) + .map_err(|e| { + error!("Failed to deserialize block from DB, likely database corruption"; + "consensus_hash" => %consensus_hash, + "error" => ?e); + e + }) + .ok() + }) .collect()) } From 3aabc4db4e9c2c6619e5ccf66d690985034311a8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:02:10 -0400 Subject: [PATCH 0738/1400] fix: /v3/ for 3.0-only endpoints --- stackslib/src/net/api/getstackers.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 4fd4234070..69961dbe14 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -121,11 +121,11 @@ impl HttpRequest for GetStackersRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v2/stacker_set/(?P[0-9]{1,20})$"#).unwrap() + Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,20})$"#).unwrap() } fn metrics_identifier(&self) -> &str { - "/v2/stacker_set/:cycle_num" + "/v3/stacker_set/:cycle_num" } /// Try to decode this request. @@ -239,7 +239,7 @@ impl StacksHttpRequest { StacksHttpRequest::new_for_peer( host, "GET".into(), - format!("/v2/stacker_set/{cycle_num}"), + format!("/v3/stacker_set/{cycle_num}"), HttpRequestContents::new().for_tip(tip_req), ) .expect("FATAL: failed to construct request from infallible data") From 3dc662115441eee3ed528869562e5f0220794c5e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:02:30 -0400 Subject: [PATCH 0739/1400] refactor: auth_token --- stackslib/src/net/api/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index d256c15b97..4405f49a25 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -129,9 +129,9 @@ impl StacksHttp { ); self.register_rpc_endpoint(postblock::RPCPostBlockRequestHandler::new()); self.register_rpc_endpoint(postblock_proposal::RPCBlockProposalRequestHandler::new( - self.block_proposal_token.clone(), + self.auth_token.clone(), )); - self.register_rpc_endpoint(postblock_v3::RPCPostBlockRequestHandler::default()); + self.register_rpc_endpoint(postblock_v3::RPCPostBlockRequestHandler::new(self.auth_token.clone())); self.register_rpc_endpoint(postfeerate::RPCPostFeeRateRequestHandler::new()); self.register_rpc_endpoint(postmempoolquery::RPCMempoolQueryRequestHandler::new()); self.register_rpc_endpoint(postmicroblock::RPCPostMicroblockRequestHandler::new()); From 2ac05375a4eea4ebbfcf16eeed2e64a7155970b7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:02:46 -0400 Subject: [PATCH 0740/1400] fix: /v3/ for v3-only endpoints --- stackslib/src/net/api/postblock_proposal.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 6c1d5526b5..043c316565 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -163,7 +163,7 @@ impl From> for BlockValidateRespons } } -/// Represents a block proposed to the `v2/block_proposal` endpoint for validation +/// Represents a block proposed to the `v3/block_proposal` endpoint for validation #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct NakamotoBlockProposal { /// Proposed block @@ -431,11 +431,11 @@ impl HttpRequest for RPCBlockProposalRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v2/block_proposal$"#).unwrap() + Regex::new(r#"^/v3/block_proposal$"#).unwrap() } fn metrics_identifier(&self) -> &str { - "/v2/block_proposal" + "/v3/block_proposal" } /// Try to decode this request. From d14466b56c8be2f3333ed9fcbbff0656f613df06 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:03:03 -0400 Subject: [PATCH 0741/1400] feat: if broadcast=1 is given, and the auth token is set, then broadcast the block on the p2p network even if we already have it locally --- stackslib/src/net/api/postblock_v3.rs | 57 ++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index df7a7eae73..4eeb68750e 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -31,14 +31,26 @@ use crate::net::httpcore::{ use crate::net::relay::Relayer; use crate::net::{Error as NetError, NakamotoBlocksData, StacksMessageType, StacksNodeState}; +use url::form_urlencoded; + pub static PATH: &'static str = "/v3/blocks/upload/"; #[derive(Clone, Default)] pub struct RPCPostBlockRequestHandler { pub block: Option, + pub auth: Option, + pub broadcast: Option } impl RPCPostBlockRequestHandler { + pub fn new(auth: Option) -> Self { + Self { + block: None, + auth, + broadcast: None + } + } + /// Decode a bare block from the body fn parse_postblock_octets(mut body: &[u8]) -> Result { let block = NakamotoBlock::consensus_deserialize(&mut body).map_err(|e| { @@ -87,6 +99,31 @@ impl HttpRequest for RPCPostBlockRequestHandler { )); } + // if broadcast=1 is set, then the requester must be authenticated + let mut broadcast = false; + let mut authenticated = false; + + // look for authorization header + if let Some(password) = &self.auth { + if let Some(auth_header) = preamble.headers.get("authorization") { + if auth_header != password { + return Err(Error::Http(401, "Unauthorized".into())); + } + authenticated = true; + } + } + + // see if broadcast=1 is set + for (key, value) in form_urlencoded::parse(query.as_ref().unwrap_or(&"").as_bytes()) { + if key == "broadcast" { + broadcast = broadcast || value == "1"; + } + } + + if broadcast && !authenticated { + return Err(Error::Http(401, "Unauthorized".into())); + } + if Some(HttpContentType::Bytes) != preamble.content_type || preamble.content_type.is_none() { return Err(Error::DecodeError( @@ -97,6 +134,7 @@ impl HttpRequest for RPCPostBlockRequestHandler { let block = Self::parse_postblock_octets(body)?; self.block = Some(block); + self.broadcast = Some(broadcast); Ok(HttpRequestContents::new().query_string(query)) } } @@ -105,6 +143,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { /// Reset internal state fn restart(&mut self) { self.block = None; + self.broadcast = None; } /// Make the response @@ -124,7 +163,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { .with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { let mut handle_conn = sortdb.index_handle_at_tip(); let stacks_tip = network.stacks_tip.block_id(); - Relayer::process_new_nakamoto_block( + Relayer::process_new_nakamoto_block_ext( &network.burnchain, &sortdb, &mut handle_conn, @@ -133,6 +172,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { &block, rpc_args.coord_comms, NakamotoBlockObtainMethod::Uploaded, + self.broadcast.unwrap_or(false) ) }) .map_err(|e| { @@ -186,4 +226,19 @@ impl StacksHttpRequest { ) .expect("FATAL: failed to construct request from infallible data") } + + /// Make a new post-block request, with intent to broadcast + pub fn new_post_block_v3_broadcast(host: PeerHost, block: &NakamotoBlock, auth: &str) -> StacksHttpRequest { + let mut request = StacksHttpRequest::new_for_peer( + host, + "POST".into(), + PATH.into(), + HttpRequestContents::new() + .query_arg("broadcast".into(), "1".into()) + .payload_stacks(block), + ) + .expect("FATAL: failed to construct request from infallible data"); + request.add_header("authorization".into(), auth.into()); + request + } } From 00d6ca4a7fda46ae1e105429dd19ebd24187f22e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:03:31 -0400 Subject: [PATCH 0742/1400] fix: wrong API path --- stackslib/src/net/api/poststackerdbchunk.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/poststackerdbchunk.rs b/stackslib/src/net/api/poststackerdbchunk.rs index b3c9420602..affcc8dc1b 100644 --- a/stackslib/src/net/api/poststackerdbchunk.rs +++ b/stackslib/src/net/api/poststackerdbchunk.rs @@ -83,7 +83,7 @@ impl HttpRequest for RPCPostStackerDBChunkRequestHandler { } fn metrics_identifier(&self) -> &str { - "/v2/block_proposal/:principal/:contract_name/chunks" + "/v2/stackerdb/:principal/:contract_name/chunks" } /// Try to decode this request. From 0b8bb80a0b965d5b13165316ac2e85835135e0db Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:03:42 -0400 Subject: [PATCH 0743/1400] refactor: auth_token --- stackslib/src/net/api/tests/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index f0a537d045..b02bb53bb8 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -265,7 +265,7 @@ impl<'a> TestRPC<'a> { runtime: 2000000, }; peer_1_config.connection_opts.maximum_call_argument_size = 4096; - peer_1_config.connection_opts.block_proposal_token = Some("password".to_string()); + peer_1_config.connection_opts.auth_token = Some("password".to_string()); peer_2_config.connection_opts.read_only_call_limit = ExecutionCost { write_length: 0, @@ -275,7 +275,7 @@ impl<'a> TestRPC<'a> { runtime: 2000000, }; peer_2_config.connection_opts.maximum_call_argument_size = 4096; - peer_2_config.connection_opts.block_proposal_token = Some("password".to_string()); + peer_2_config.connection_opts.auth_token = Some("password".to_string()); // stacker DBs get initialized thru reconfiguration when the above block gets processed peer_1_config.add_stacker_db( From 9d649ac9aa9455fdf85b83313c80919099ef68d6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:03:56 -0400 Subject: [PATCH 0744/1400] fix: /v2 to /v3 --- stackslib/src/net/api/tests/postblock_proposal.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 6ab465a683..391afc949f 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -69,7 +69,7 @@ fn test_try_parse_request() { let mut request = StacksHttpRequest::new_for_peer( addr.into(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); @@ -320,7 +320,7 @@ fn test_try_make_response() { let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); @@ -340,7 +340,7 @@ fn test_try_make_response() { let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); @@ -360,7 +360,7 @@ fn test_try_make_response() { let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); From d9170614dde7450b5d3970259c6cf03545589516 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:04:12 -0400 Subject: [PATCH 0745/1400] feat: test coverage for new broadcast=1 directive --- stackslib/src/net/api/tests/postblock_v3.rs | 81 ++++++++++++++++++++- 1 file changed, 79 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/tests/postblock_v3.rs b/stackslib/src/net/api/tests/postblock_v3.rs index e68d334239..0764953c6e 100644 --- a/stackslib/src/net/api/tests/postblock_v3.rs +++ b/stackslib/src/net/api/tests/postblock_v3.rs @@ -44,7 +44,7 @@ fn parse_request() { let bytes = request.try_serialize().unwrap(); let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); - let mut handler = postblock_v3::RPCPostBlockRequestHandler::default(); + let mut handler = postblock_v3::RPCPostBlockRequestHandler::new(Some("12345".to_string())); let mut parsed_request = http .handle_try_parse_request( &mut handler, @@ -60,9 +60,36 @@ fn parse_request() { let (preamble, _contents) = parsed_request.destruct(); assert_eq!(&preamble, request.preamble()); + assert_eq!(handler.broadcast, Some(false)); handler.restart(); assert!(handler.block.is_none()); + assert!(handler.broadcast.is_none()); + + // try to authenticate + let block = make_codec_test_nakamoto_block(StacksEpochId::Epoch30, &miner_sk); + let request = StacksHttpRequest::new_post_block_v3_broadcast(addr.into(), &block, "12345"); + let bytes = request.try_serialize().unwrap(); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + parsed_request.clear_headers(); + parsed_request.add_header("authorization".into(), "12345".into()); + let (preamble, _contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); + assert_eq!(handler.broadcast, Some(true)); + + handler.restart(); + assert!(handler.block.is_none()); + assert!(handler.broadcast.is_none()); // try to deal with an invalid block let mut bad_block = block.clone(); @@ -72,7 +99,6 @@ fn parse_request() { let request = StacksHttpRequest::new_post_block_v3(addr.into(), &bad_block); let bytes = request.try_serialize().unwrap(); let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); - let mut handler = postblock_v3::RPCPostBlockRequestHandler::default(); match http.handle_try_parse_request( &mut handler, &parsed_preamble.expect_request(), @@ -83,6 +109,57 @@ fn parse_request() { panic!("worked with bad block"); } } + + handler.restart(); + assert!(handler.block.is_none()); + assert!(handler.broadcast.is_none()); + + // deal with bad authentication + let request = StacksHttpRequest::new_post_block_v3_broadcast(addr.into(), &block, "wrong password"); + let bytes = request.try_serialize().unwrap(); + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let bad_response = http.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ); + match bad_response { + Err(crate::net::Error::Http(crate::net::http::Error::Http(err_code, message))) => { + assert_eq!(err_code, 401); + assert_eq!(message, "Unauthorized"); + } + x => { + error!("Expected HTTP 401, got {:?}", &x); + panic!("expected error"); + } + } + + handler.restart(); + assert!(handler.block.is_none()); + assert!(handler.broadcast.is_none()); + + // deal with missing authorization + let mut request = StacksHttpRequest::new_post_block_v3(addr.into(), &block); + let path = request.request_path(); + request.preamble_mut().path_and_query_str = format!("{}?broadcast=1", &path); + + let bytes = request.try_serialize().unwrap(); + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let bad_response = http.handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ); + match bad_response { + Err(crate::net::Error::Http(crate::net::http::Error::Http(err_code, message))) => { + assert_eq!(err_code, 401); + assert_eq!(message, "Unauthorized"); + } + x => { + error!("Expected HTTP 401, got {:?}", &x); + panic!("expected error"); + } + } } #[test] From b76641e3f65b94d984d37825ae199a5a024328b9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:04:28 -0400 Subject: [PATCH 0746/1400] fix: auth_token --- stackslib/src/net/connection.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index d3a77ebc8d..36b1fc18ff 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -398,8 +398,8 @@ pub struct ConnectionOptions { /// maximum number of confirmations for a nakamoto block's sortition for which it will be /// pushed pub max_nakamoto_block_relay_age: u64, - /// The authorization token to enable the block proposal RPC endpoint - pub block_proposal_token: Option, + /// The authorization token to enable privileged RPC endpoints + pub auth_token: Option, // fault injection /// Disable neighbor walk and discovery @@ -521,7 +521,7 @@ impl std::default::Default for ConnectionOptions { socket_send_buffer_size: 16384, // Linux default private_neighbors: true, max_nakamoto_block_relay_age: 6, - block_proposal_token: None, + auth_token: None, // no faults on by default disable_neighbor_walk: false, From 4b1e50b5416b478b1fef233eaa33fbbb3b2255b9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:04:48 -0400 Subject: [PATCH 0747/1400] fix: off-by-one calculation in next reward cycle --- .../src/net/download/nakamoto/download_state_machine.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index d1510af9c1..9d30fa50e3 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -774,7 +774,7 @@ impl NakamotoDownloadStateMachine { let last_sort_height = last_sort_height_opt.unwrap_or(sort_tip.block_height); let sort_rc = sortdb .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, last_sort_height) + .block_height_to_reward_cycle(sortdb.first_block_height, last_sort_height.saturating_add(1)) .expect("FATAL: burnchain tip is before system start"); let next_sort_rc = if last_sort_height == sort_tip.block_height { @@ -782,13 +782,13 @@ impl NakamotoDownloadStateMachine { .pox_constants .block_height_to_reward_cycle( sortdb.first_block_height, - sort_tip.block_height.saturating_add(1), + sort_tip.block_height.saturating_add(2), ) .expect("FATAL: burnchain tip is before system start") } else { sortdb .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height) + .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height.saturating_add(1)) .expect("FATAL: burnchain tip is before system start") }; From 2ac0332b3dc235a38348316643e94caf3cfa31ed Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:05:06 -0400 Subject: [PATCH 0748/1400] refactor: set self.idle in one place --- .../src/net/download/nakamoto/tenure_downloader.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 7197adf0b2..74b7084a84 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -643,7 +643,7 @@ impl NakamotoTenureDownloader { &mut self, response: StacksHttpResponse, ) -> Result>, NetError> { - match self.state { + let handle_result = match self.state { NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { debug!( "Got download response for tenure-start block {}", @@ -654,12 +654,10 @@ impl NakamotoTenureDownloader { e })?; self.try_accept_tenure_start_block(block)?; - self.idle = true; Ok(None) } NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { debug!("Invalid state -- Got download response for WaitForTenureBlock"); - self.idle = true; Err(NetError::InvalidState) } NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { @@ -669,7 +667,6 @@ impl NakamotoTenureDownloader { e })?; self.try_accept_tenure_end_block(&block)?; - self.idle = true; Ok(None) } NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { @@ -682,14 +679,14 @@ impl NakamotoTenureDownloader { e })?; let blocks_opt = self.try_accept_tenure_blocks(blocks)?; - self.idle = true; Ok(blocks_opt) } NakamotoTenureDownloadState::Done => { - self.idle = true; Err(NetError::InvalidState) } - } + }; + self.idle = true; + handle_result } pub fn is_done(&self) -> bool { From c1fd6fee874f378144da6512ce97edeac7bff653 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:05:22 -0400 Subject: [PATCH 0749/1400] fix: structured logging --- .../nakamoto/tenure_downloader_set.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 337c8d1cd6..28a40e7eb5 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -214,6 +214,7 @@ impl NakamotoTenureDownloaderSet { /// Returns true if the peer gets scheduled. /// Returns false if not. pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { + debug!("Try resume {}", &naddr); if let Some(idx) = self.peers.get(&naddr) { let Some(Some(_downloader)) = self.downloaders.get(*idx) else { return false; @@ -426,16 +427,14 @@ impl NakamotoTenureDownloaderSet { count: usize, current_reward_cycles: &BTreeMap, ) { - debug!("schedule: {:?}", schedule); - debug!("available: {:?}", &available); - debug!("tenure_block_ids: {:?}", &tenure_block_ids); - debug!("inflight: {}", self.inflight()); - debug!( - "count: {}, running: {}, scheduled: {}", - count, - self.num_downloaders(), - self.num_scheduled_downloaders() - ); + debug!("make_tenure_downloaders"; + "schedule" => ?schedule, + "available" => ?available, + "tenure_block_ids" => ?tenure_block_ids, + "inflight" => %self.inflight(), + "count" => count, + "running" => self.num_downloaders(), + "scheduled" => self.num_scheduled_downloaders()); self.clear_finished_downloaders(); self.clear_available_peers(); From cddf696d4e0cb2739d58aa094e4f8f88b87c3109 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:05:38 -0400 Subject: [PATCH 0750/1400] chore: auth_token --- stackslib/src/net/httpcore.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index dec51df42a..88ee0365b2 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -888,8 +888,8 @@ pub struct StacksHttp { pub maximum_call_argument_size: u32, /// Maximum execution budget of a read-only call pub read_only_call_limit: ExecutionCost, - /// The authorization token to enable the block proposal RPC endpoint - pub block_proposal_token: Option, + /// The authorization token to enable access to privileged features, such as the block proposal RPC endpoint + pub auth_token: Option, } impl StacksHttp { @@ -905,7 +905,7 @@ impl StacksHttp { request_handlers: vec![], maximum_call_argument_size: conn_opts.maximum_call_argument_size, read_only_call_limit: conn_opts.read_only_call_limit.clone(), - block_proposal_token: conn_opts.block_proposal_token.clone(), + auth_token: conn_opts.auth_token.clone(), }; http.register_rpc_methods(); http From b41846b8b004cd48494c255bc7f0ac684930c07a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:05:50 -0400 Subject: [PATCH 0751/1400] fix: restart inv sync if burnchain tip changes --- stackslib/src/net/inv/nakamoto.rs | 50 +++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 0a2ea4dc63..3a27c1072d 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -397,13 +397,19 @@ impl NakamotoTenureInv { if self.start_sync_time + inv_sync_interval <= now && (self.cur_reward_cycle >= cur_rc || !self.online) { - debug!("Reset inv comms for {}", &self.neighbor_address); - self.online = true; - self.start_sync_time = now; - self.cur_reward_cycle = start_rc; + self.reset_comms(start_rc); } } + /// Reset synchronization state for this peer in the last reward cycle. + /// Called as part of processing a new burnchain block + pub fn reset_comms(&mut self, start_rc: u64) { + debug!("Reset inv comms for {}", &self.neighbor_address); + self.online = true; + self.start_sync_time = get_epoch_time_secs(); + self.cur_reward_cycle = start_rc; + } + /// Get the reward cycle we're sync'ing for pub fn reward_cycle(&self) -> u64 { self.cur_reward_cycle @@ -506,6 +512,19 @@ impl NakamotoTenureInv { } } } + + /// Get the burnchain tip reward cycle for purposes of inv sync + fn get_current_reward_cycle(tip: &BlockSnapshot, sortdb: &SortitionDB) -> u64 { + // NOTE: reward cycles start when (sortition_height % reward_cycle_len) == 1, not 0, but + // .block_height_to_reward_cycle does not account for this. + sortdb + .pox_constants + .block_height_to_reward_cycle( + sortdb.first_block_height, + tip.block_height.saturating_sub(1), + ) + .expect("FATAL: snapshot occurred before system start") + } } /// Nakamoto inventory state machine @@ -593,15 +612,7 @@ impl NakamotoInvStateMachine { .map(|(highest_rc, _)| *highest_rc) .unwrap_or(0); - // NOTE: reward cycles start when (sortition_height % reward_cycle_len) == 1, not 0, but - // .block_height_to_reward_cycle does not account for this. - let tip_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - tip.block_height.saturating_sub(1), - ) - .expect("FATAL: snapshot occurred before system start"); + let tip_rc = NakamotoTenureInv::get_current_reward_cycle(tip, sortdb); debug!( "Load all reward cycle consensus hashes from {} to {}", @@ -794,7 +805,20 @@ impl NakamotoInvStateMachine { Ok((num_msgs, learned)) } + /// Top-level state machine execution pub fn run(&mut self, network: &mut PeerNetwork, sortdb: &SortitionDB, ibd: bool) -> bool { + // if the burnchain tip has changed, then force all communications to reset for the current + // reward cycle in order to hasten block download + if let Some(last_sort_tip) = self.last_sort_tip.as_ref() { + if last_sort_tip.consensus_hash != network.burnchain_tip.consensus_hash { + debug!("Forcibly restarting all Nakamoto inventory comms due to burnchain tip change ({} != {})", &last_sort_tip.consensus_hash, &network.burnchain_tip.consensus_hash); + let tip_rc = NakamotoTenureInv::get_current_reward_cycle(&network.burnchain_tip, sortdb); + for inv_state in self.inventories.values_mut() { + inv_state.reset_comms(tip_rc.saturating_sub(1)); + } + } + } + if let Err(e) = self.process_getnakamotoinv_begins(network, sortdb, ibd) { warn!( "{:?}: Failed to begin Nakamoto tenure inventory sync: {:?}", From 21fc76d4d95d9ccdfff0c3871161f0507d8b39eb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:06:05 -0400 Subject: [PATCH 0752/1400] feat: allow forced block broadcast --- stackslib/src/net/relay.rs | 51 +++++++++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index dca8738d3d..ff71feda9d 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -790,11 +790,44 @@ impl Relayer { } Ok(res) } + + /// Wrapper around inner_process_new_nakamoto_block + pub fn process_new_nakamoto_block( + burnchain: &Burnchain, + sortdb: &SortitionDB, + sort_handle: &mut SortitionHandleConn, + chainstate: &mut StacksChainState, + stacks_tip: &StacksBlockId, + block: &NakamotoBlock, + coord_comms: Option<&CoordinatorChannels>, + obtained_method: NakamotoBlockObtainMethod, + ) -> Result { + Self::process_new_nakamoto_block_ext( + burnchain, + sortdb, + sort_handle, + chainstate, + stacks_tip, + block, + coord_comms, + obtained_method, + false + ) + } /// Insert a staging Nakamoto block that got relayed to us somehow -- e.g. uploaded via http, /// downloaded by us, or pushed via p2p. - /// Return Ok(true) if we stored it, Ok(false) if we didn't - pub fn process_new_nakamoto_block( + /// Return Ok(true) if we should broadcast the block. If force_broadcast is true, then this + /// function will return Ok(true) even if we already have the block. + /// Return Ok(false) if we should not broadcast it (e.g. we already have it, it was invalid, + /// etc.) + /// Return Err(..) in the following cases, beyond DB errors: + /// * If the block is from a tenure we don't recognize + /// * If we're not in the Nakamoto epoch + /// * If the reward cycle info could not be determined + /// * If there was an unrecognized signer + /// * If the coordinator is closed, and `coord_comms` is Some(..) + pub fn process_new_nakamoto_block_ext( burnchain: &Burnchain, sortdb: &SortitionDB, sort_handle: &mut SortitionHandleConn, @@ -803,6 +836,7 @@ impl Relayer { block: &NakamotoBlock, coord_comms: Option<&CoordinatorChannels>, obtained_method: NakamotoBlockObtainMethod, + force_broadcast: bool ) -> Result { debug!( "Handle incoming Nakamoto block {}/{} obtained via {}", @@ -825,8 +859,16 @@ impl Relayer { e })? { - debug!("Already have Nakamoto block {}", &block.header.block_id()); - return Ok(false); + if force_broadcast { + // it's possible that the signer sent this block to us, in which case, we should + // broadcast it + debug!("Already have Nakamoto block {}, but broadcasting anyway", &block.header.block_id()); + return Ok(true); + } + else { + debug!("Already have Nakamoto block {}", &block.header.block_id()); + return Ok(false); + } } let block_sn = @@ -2541,6 +2583,7 @@ impl Relayer { accepted_blocks: Vec, force_send: bool, ) { + // TODO: we don't relay HTTP-uploaded blocks :( debug!( "{:?}: relay {} sets of Nakamoto blocks", _local_peer, From 62004f92f0f0e4779a834276f57a78be13da4426 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:06:23 -0400 Subject: [PATCH 0753/1400] refactor: auth_token --- stackslib/src/net/tests/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 05477bb08c..8372116ced 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -352,7 +352,7 @@ impl NakamotoBootPlan { peer_config .initial_balances .append(&mut self.initial_balances.clone()); - peer_config.connection_opts.block_proposal_token = Some("password".to_string()); + peer_config.connection_opts.auth_token = Some("password".to_string()); // Create some balances for test Stackers // They need their stacking amount + enough to pay fees From c9ec4fb9871e0809710088c2258ddd5e52d5254b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:06:40 -0400 Subject: [PATCH 0754/1400] chore: auth_token, and fix poll timeout --- testnet/stacks-node/src/config.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f5c7c7bfbd..256011001d 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -221,12 +221,12 @@ mod tests { } #[test] - fn should_load_block_proposal_token() { + fn should_load_auth_token() { let config = Config::from_config_file( ConfigFile::from_str( r#" [connection_options] - block_proposal_token = "password" + auth_token = "password" "#, ) .unwrap(), @@ -235,7 +235,7 @@ mod tests { .expect("Expected to be able to parse block proposal token from file"); assert_eq!( - config.connection_options.block_proposal_token, + config.connection_options.auth_token, Some("password".to_string()) ); } @@ -1368,7 +1368,7 @@ impl Config { let poll_timeout = if self.node.miner { cmp::min(1000, self.miner.first_attempt_time_ms / 2) } else { - 5000 + 1000 }; poll_timeout } @@ -2425,7 +2425,7 @@ pub struct ConnectionOptionsFile { pub force_disconnect_interval: Option, pub antientropy_public: Option, pub private_neighbors: Option, - pub block_proposal_token: Option, + pub auth_token: Option, pub antientropy_retry: Option, } @@ -2551,7 +2551,7 @@ impl ConnectionOptionsFile { max_sockets: self.max_sockets.unwrap_or(800) as usize, antientropy_public: self.antientropy_public.unwrap_or(true), private_neighbors: self.private_neighbors.unwrap_or(true), - block_proposal_token: self.block_proposal_token, + auth_token: self.auth_token, antientropy_retry: self.antientropy_retry.unwrap_or(default.antientropy_retry), ..default }) From f5df7efe46c3d0dc63885ae509fd779af4e95bbf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:06:55 -0400 Subject: [PATCH 0755/1400] refactor: separate fault injection into its own function --- .../stacks-node/src/nakamoto_node/miner.rs | 37 +++++++++++-------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 43a5c51040..8036389d53 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -678,6 +678,24 @@ impl BlockMinerThread { Ok(filtered_transactions.into_values().collect()) } + /// Fault injection -- possibly fail to broadcast + /// Return true to drop the block + fn fault_injection_broadcast_fail(&self) -> bool { + let drop_prob = self + .config + .node + .fault_injection_block_push_fail_probability + .unwrap_or(0) + .min(100); + let will_drop = if drop_prob > 0 { + let throw: u8 = thread_rng().gen_range(0..100); + throw < drop_prob + } else { + false + }; + will_drop + } + /// Store a block to the chainstate, and if successful (it should be since we mined it), /// broadcast it via the p2p network. fn broadcast_p2p( @@ -717,25 +735,12 @@ impl BlockMinerThread { } // forward to p2p thread, but do fault injection - let block_id = block.block_id(); - let drop_prob = self - .config - .node - .fault_injection_block_push_fail_probability - .unwrap_or(0) - .min(100); - let will_drop = if drop_prob > 0 { - let throw: u8 = thread_rng().gen_range(0..100); - throw < drop_prob - } else { - false - }; - - if will_drop { - info!("Fault injection: drop block {}", &block_id); + if self.fault_injection_broadcast_fail() { + info!("Fault injection: drop block {}", &block.block_id()); return Ok(()); } + let block_id = block.block_id(); debug!("Broadcasting block {}", &block_id); if let Err(e) = self.p2p_handle.broadcast_message( vec![], From e1046fd8f7a2cdfaec7a4ca86c24df686944f454 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:07:11 -0400 Subject: [PATCH 0756/1400] fix: use get_poll_time() --- testnet/stacks-node/src/nakamoto_node/peer.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index b825cfe46f..facb1dd835 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -190,8 +190,7 @@ impl PeerThread { info!("`PeerNetwork::bind()` skipped, already bound"); } - let poll_timeout = cmp::min(1000, config.miner.first_attempt_time_ms / 2); - + let poll_timeout = config.get_poll_time(); PeerThread { config, net, From 3150d4c092b5922638fed1c26672ccaf64684b70 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:07:35 -0400 Subject: [PATCH 0757/1400] chore: awaken coordinator thread on new sortition since a stacks block may be buffered --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index add33424ad..148c80d030 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -415,7 +415,7 @@ impl RelayerThread { .expect("FATAL: failed to query sortition DB for epoch") .expect("FATAL: no epoch defined for existing sortition"); - if cur_epoch.epoch_id != StacksEpochId::Epoch30 { + if cur_epoch.epoch_id < StacksEpochId::Epoch30 { debug!( "As of sortition {}, there has not yet been a Nakamoto tip. Cannot mine.", &stacks_tip_sn.consensus_hash @@ -454,6 +454,9 @@ impl RelayerThread { } self.globals.set_last_sortition(sn.clone()); + // there may be a bufferred stacks block to process, so wake up the coordinator to check + self.globals.coord_comms.announce_new_stacks_block(); + info!( "Relayer: Process sortition"; "sortition_ch" => %consensus_hash, From c436e0e6aca4673314880873ac547f8222de4acf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:08:01 -0400 Subject: [PATCH 0758/1400] refactor: info/warn instead of debug, and short return for invalid sig --- .../src/nakamoto_node/sign_coordinator.rs | 81 ++++++++++--------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 87afd617fa..0bba347795 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -738,22 +738,6 @@ impl SignCoordinator { let start_ts = Instant::now(); while start_ts.elapsed() <= self.signing_round_timeout { - // one of two things can happen: - // * we get enough signatures from stackerdb from the signers, OR - // * we see our block get processed in our chainstate (meaning, the signers broadcasted - // the block and our node got it and processed it) - let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { - Ok(event) => event, - Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { - continue; - } - Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "StackerDB event receiver disconnected".into(), - )) - } - }; - // look in the nakamoto staging db -- a block can only get stored there if it has // enough signing weight to clear the threshold if let Ok(Some((stored_block, _sz))) = chain_state @@ -773,6 +757,22 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } + // one of two things can happen: + // * we get enough signatures from stackerdb from the signers, OR + // * we see our block get processed in our chainstate (meaning, the signers broadcasted + // the block and our node got it and processed it) + let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { + Ok(event) => event, + Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { + continue; + } + Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "StackerDB event receiver disconnected".into(), + )) + } + }; + // check to see if this event we got is a signer event let is_signer_event = event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); @@ -821,33 +821,34 @@ impl SignCoordinator { )); }; if rejected_data.signer_signature_hash - == block.header.signer_signature_hash() + != block.header.signer_signature_hash() + { + debug!("Received rejected block response for a block besides my own. Ignoring."); + continue; + } + + debug!( + "Signer {} rejected our block {}/{}", + slot_id, + &block.header.consensus_hash, + &block.header.block_hash() + ); + total_reject_weight = total_reject_weight + .checked_add(signer_entry.weight) + .expect("FATAL: total weight rejected exceeds u32::MAX"); + + if total_reject_weight.saturating_add(self.weight_threshold) + > self.total_weight { debug!( - "Signer {} rejected our block {}/{}", - slot_id, + "{}/{} signers vote to reject our block {}/{}", + total_reject_weight, + self.total_weight, &block.header.consensus_hash, &block.header.block_hash() ); - total_reject_weight = total_reject_weight - .checked_add(signer_entry.weight) - .expect("FATAL: total weight rejected exceeds u32::MAX"); - - if total_reject_weight.saturating_add(self.weight_threshold) - > self.total_weight - { - debug!( - "{}/{} signers vote to reject our block {}/{}", - total_reject_weight, - self.total_weight, - &block.header.consensus_hash, - &block.header.block_hash() - ); - counters.bump_naka_rejected_blocks(); - return Err(NakamotoNodeError::SignersRejected); - } - } else { - debug!("Received rejected block response for a block besides my own. Ignoring."); + counters.bump_naka_rejected_blocks(); + return Err(NakamotoNodeError::SignersRejected); } continue; } @@ -909,7 +910,7 @@ impl SignCoordinator { } if Self::fault_injection_ignore_signatures() { - debug!("SignCoordinator: fault injection: ignoring well-formed signature for block"; + warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, @@ -922,7 +923,7 @@ impl SignCoordinator { continue; } - debug!("SignCoordinator: Signature Added to block"; + info!("SignCoordinator: Signature Added to block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, From 5341074571d1b21072faced75dabcab624cf88b3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:08:20 -0400 Subject: [PATCH 0759/1400] refactor: auth_token --- .../stacks-node/src/tests/nakamoto_integrations.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 1596a0bad9..df8c512756 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -231,7 +231,7 @@ impl TestSigningChannel { pub fn get_stacker_set(http_origin: &str, cycle: u64) -> GetStackersResponse { let client = reqwest::blocking::Client::new(); - let path = format!("{http_origin}/v2/stacker_set/{cycle}"); + let path = format!("{http_origin}/v3/stacker_set/{cycle}"); let res = client .get(&path) .send() @@ -2302,7 +2302,7 @@ fn correct_burn_outs() { run_loop_thread.join().unwrap(); } -/// Test `/v2/block_proposal` API endpoint +/// Test `/v3/block_proposal` API endpoint /// /// This endpoint allows miners to propose Nakamoto blocks to a node, /// and test if they would be accepted or rejected @@ -2315,7 +2315,7 @@ fn block_proposal_api_endpoint() { let (mut conf, _miner_account) = naka_neon_integration_conf(None); let password = "12345".to_string(); - conf.connection_options.block_proposal_token = Some(password.clone()); + conf.connection_options.auth_token = Some(password.clone()); let account_keys = add_initial_balances(&mut conf, 10, 1_000_000); let stacker_sk = setup_stacker(&mut conf); let sender_signer_sk = Secp256k1PrivateKey::new(); @@ -2539,7 +2539,7 @@ fn block_proposal_api_endpoint() { .expect("Failed to build `reqwest::Client`"); // Build URL let http_origin = format!("http://{}", &conf.node.rpc_bind); - let path = format!("{http_origin}/v2/block_proposal"); + let path = format!("{http_origin}/v3/block_proposal"); let mut hold_proposal_mutex = Some(test_observer::PROPOSAL_RESPONSES.lock().unwrap()); for (ix, (test_description, block_proposal, expected_http_code, _)) in @@ -4494,7 +4494,7 @@ fn nakamoto_attempt_time() { let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let password = "12345".to_string(); - naka_conf.connection_options.block_proposal_token = Some(password.clone()); + naka_conf.connection_options.auth_token = Some(password.clone()); // Use fixed timing params for this test let nakamoto_attempt_time_ms = 20_000; naka_conf.miner.nakamoto_attempt_time_ms = nakamoto_attempt_time_ms; @@ -5165,7 +5165,7 @@ fn signer_chainstate() { socket, naka_conf .connection_options - .block_proposal_token + .auth_token .clone() .unwrap_or("".into()), false, From 3fe4266784ebf8a58e93a69115711d02333804f4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:08:31 -0400 Subject: [PATCH 0760/1400] refactor: /v3 --- testnet/stacks-node/src/tests/neon_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 8a1a08b5dc..ee4aff890c 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1397,7 +1397,7 @@ pub fn get_contract_src( pub fn get_stacker_set(http_origin: &str, reward_cycle: u64) -> GetStackersResponse { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/stacker_set/{}", http_origin, reward_cycle); + let path = format!("{}/v3/stacker_set/{}", http_origin, reward_cycle); let res = client.get(&path).send().unwrap(); info!("Got stacker_set response {:?}", &res); From e9bf54d97ed15266fb99bdd3c9b38937d0ac0482 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 14 Aug 2024 14:08:40 -0400 Subject: [PATCH 0761/1400] refactor: auth_token --- testnet/stacks-node/src/tests/signer/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a5973569a1..91371578cb 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -155,7 +155,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Date: Wed, 14 Aug 2024 14:13:01 -0400 Subject: [PATCH 0762/1400] chore: cargo fmt --- stacks-signer/src/client/stacks_client.rs | 6 +++- stacks-signer/src/signerdb.rs | 19 ++++++------ stacks-signer/src/v0/signer.rs | 30 ++++++++++--------- stackslib/src/net/api/mod.rs | 4 ++- stackslib/src/net/api/postblock_v3.rs | 17 ++++++----- stackslib/src/net/api/tests/postblock_v3.rs | 7 +++-- .../nakamoto/download_state_machine.rs | 10 +++++-- .../download/nakamoto/tenure_downloader.rs | 4 +-- stackslib/src/net/inv/nakamoto.rs | 3 +- stackslib/src/net/relay.rs | 14 +++++---- 10 files changed, 66 insertions(+), 48 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index a2e995ae9c..0aeb30bb6e 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -685,7 +685,11 @@ impl StacksClient { pub fn post_block(&self, block: &NakamotoBlock) -> Result { let response = self .stacks_node_client - .post(format!("{}{}?broadcast=1", self.http_origin, postblock_v3::PATH)) + .post(format!( + "{}{}?broadcast=1", + self.http_origin, + postblock_v3::PATH + )) .header("Content-Type", "application/octet-stream") .header(AUTHORIZATION, self.auth_password.clone()) .body(block.serialize_to_vec()) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 4964f549d4..2d2e9cc22a 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -554,11 +554,9 @@ impl SignerDb { let args = params![block_sighash]; let sigs_txt: Vec = query_rows(&self.db, qry, args)?; sigs_txt - .into_iter() - .map(|sig_txt| { - serde_json::from_str(&sig_txt).map_err(|_| DBError::ParseError) - }) - .collect() + .into_iter() + .map(|sig_txt| serde_json::from_str(&sig_txt).map_err(|_| DBError::ParseError)) + .collect() } /// Mark a block as having been broadcasted @@ -566,7 +564,7 @@ impl SignerDb { &self, reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, - ts: u64 + ts: u64, ) -> Result<(), DBError> { let qry = "UPDATE blocks SET broadcasted = ?1 WHERE reward_cycle = ?2 AND signer_signature_hash = ?3"; let args = params![u64_to_sql(ts)?, u64_to_sql(reward_cycle)?, block_sighash]; @@ -917,19 +915,20 @@ mod tests { db.set_block_broadcasted( block_info_1.reward_cycle, &block_info_1.signer_signature_hash(), - 12345 + 12345, ) .unwrap(); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); - assert_eq!(db - .get_block_broadcasted( + assert_eq!( + db.get_block_broadcasted( block_info_1.reward_cycle, &block_info_1.signer_signature_hash() ) .unwrap() .unwrap(), - 12345); + 12345 + ); } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 1bf94e38b5..e51d96d933 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -29,9 +29,9 @@ use libsigner::v0::messages::{ use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; -use stacks_common::util::get_epoch_time_secs; use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; @@ -482,7 +482,7 @@ impl Signer { ( BlockResponse::accepted(signer_signature_hash, signature), block_info, - Some(signature.clone()) + Some(signature.clone()), ) } BlockValidateResponse::Reject(block_validate_reject) => { @@ -507,7 +507,7 @@ impl Signer { ( BlockResponse::from(block_validate_reject.clone()), block_info, - None + None, ) } }; @@ -545,7 +545,7 @@ impl Signer { /// Compute the signing weight, given a list of signatures fn compute_signature_signing_weight<'a>( &self, - addrs: impl Iterator + addrs: impl Iterator, ) -> u32 { let signing_weight = addrs.fold(0usize, |signing_weight, stacker_address| { let stacker_weight = self.signer_weights.get(&stacker_address).unwrap_or(&0); @@ -587,13 +587,15 @@ impl Signer { panic!("{self}: failed to determine if block {block_hash} was broadcasted") }) { - debug!("{self}: have already broadcasted block {} at {}, so will not re-attempt", block_hash, ts); + debug!( + "{self}: have already broadcasted block {} at {}, so will not re-attempt", + block_hash, ts + ); return; } // recover public key - let Ok(public_key) = - Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), signature) + let Ok(public_key) = Secp256k1PublicKey::recover_to_pubkey(block_hash.bits(), signature) else { debug!("{self}: Received unrecovarable signature. Will not store."; "signature" => %signature, @@ -675,13 +677,13 @@ impl Signer { // record time at which we reached the threshold block_info.signed_group = Some(get_epoch_time_secs()); - let _ = self - .signer_db - .insert_block(&block_info) - .map_err(|e| { - warn!("Failed to set group threshold signature timestamp for {}: {:?}", block_hash, &e); - e - }); + let _ = self.signer_db.insert_block(&block_info).map_err(|e| { + warn!( + "Failed to set group threshold signature timestamp for {}: {:?}", + block_hash, &e + ); + e + }); // collect signatures for the block let signatures: Vec<_> = self diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 4405f49a25..5bbc6281a2 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -131,7 +131,9 @@ impl StacksHttp { self.register_rpc_endpoint(postblock_proposal::RPCBlockProposalRequestHandler::new( self.auth_token.clone(), )); - self.register_rpc_endpoint(postblock_v3::RPCPostBlockRequestHandler::new(self.auth_token.clone())); + self.register_rpc_endpoint(postblock_v3::RPCPostBlockRequestHandler::new( + self.auth_token.clone(), + )); self.register_rpc_endpoint(postfeerate::RPCPostFeeRateRequestHandler::new()); self.register_rpc_endpoint(postmempoolquery::RPCMempoolQueryRequestHandler::new()); self.register_rpc_endpoint(postmicroblock::RPCPostMicroblockRequestHandler::new()); diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index 4eeb68750e..39ff26087f 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -16,6 +16,7 @@ use regex::{Captures, Regex}; use stacks_common::codec::{Error as CodecError, StacksMessageCodec, MAX_PAYLOAD_LEN}; use stacks_common::types::net::PeerHost; +use url::form_urlencoded; use super::postblock::StacksBlockAcceptedData; use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; @@ -31,15 +32,13 @@ use crate::net::httpcore::{ use crate::net::relay::Relayer; use crate::net::{Error as NetError, NakamotoBlocksData, StacksMessageType, StacksNodeState}; -use url::form_urlencoded; - pub static PATH: &'static str = "/v3/blocks/upload/"; #[derive(Clone, Default)] pub struct RPCPostBlockRequestHandler { pub block: Option, pub auth: Option, - pub broadcast: Option + pub broadcast: Option, } impl RPCPostBlockRequestHandler { @@ -47,7 +46,7 @@ impl RPCPostBlockRequestHandler { Self { block: None, auth, - broadcast: None + broadcast: None, } } @@ -172,7 +171,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { &block, rpc_args.coord_comms, NakamotoBlockObtainMethod::Uploaded, - self.broadcast.unwrap_or(false) + self.broadcast.unwrap_or(false), ) }) .map_err(|e| { @@ -226,9 +225,13 @@ impl StacksHttpRequest { ) .expect("FATAL: failed to construct request from infallible data") } - + /// Make a new post-block request, with intent to broadcast - pub fn new_post_block_v3_broadcast(host: PeerHost, block: &NakamotoBlock, auth: &str) -> StacksHttpRequest { + pub fn new_post_block_v3_broadcast( + host: PeerHost, + block: &NakamotoBlock, + auth: &str, + ) -> StacksHttpRequest { let mut request = StacksHttpRequest::new_for_peer( host, "POST".into(), diff --git a/stackslib/src/net/api/tests/postblock_v3.rs b/stackslib/src/net/api/tests/postblock_v3.rs index 0764953c6e..5cc652fc83 100644 --- a/stackslib/src/net/api/tests/postblock_v3.rs +++ b/stackslib/src/net/api/tests/postblock_v3.rs @@ -79,14 +79,14 @@ fn parse_request() { &bytes[offset..], ) .unwrap(); - + parsed_request.clear_headers(); parsed_request.add_header("authorization".into(), "12345".into()); let (preamble, _contents) = parsed_request.destruct(); assert_eq!(&preamble, request.preamble()); assert_eq!(handler.broadcast, Some(true)); - + handler.restart(); assert!(handler.block.is_none()); assert!(handler.broadcast.is_none()); @@ -115,7 +115,8 @@ fn parse_request() { assert!(handler.broadcast.is_none()); // deal with bad authentication - let request = StacksHttpRequest::new_post_block_v3_broadcast(addr.into(), &block, "wrong password"); + let request = + StacksHttpRequest::new_post_block_v3_broadcast(addr.into(), &block, "wrong password"); let bytes = request.try_serialize().unwrap(); let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); let bad_response = http.handle_try_parse_request( diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 9d30fa50e3..6e298470e0 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -774,7 +774,10 @@ impl NakamotoDownloadStateMachine { let last_sort_height = last_sort_height_opt.unwrap_or(sort_tip.block_height); let sort_rc = sortdb .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, last_sort_height.saturating_add(1)) + .block_height_to_reward_cycle( + sortdb.first_block_height, + last_sort_height.saturating_add(1), + ) .expect("FATAL: burnchain tip is before system start"); let next_sort_rc = if last_sort_height == sort_tip.block_height { @@ -788,7 +791,10 @@ impl NakamotoDownloadStateMachine { } else { sortdb .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height.saturating_add(1)) + .block_height_to_reward_cycle( + sortdb.first_block_height, + sort_tip.block_height.saturating_add(1), + ) .expect("FATAL: burnchain tip is before system start") }; diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 74b7084a84..f7fb970bb6 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -681,9 +681,7 @@ impl NakamotoTenureDownloader { let blocks_opt = self.try_accept_tenure_blocks(blocks)?; Ok(blocks_opt) } - NakamotoTenureDownloadState::Done => { - Err(NetError::InvalidState) - } + NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), }; self.idle = true; handle_result diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 3a27c1072d..d01e8625a1 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -812,7 +812,8 @@ impl NakamotoInvStateMachine { if let Some(last_sort_tip) = self.last_sort_tip.as_ref() { if last_sort_tip.consensus_hash != network.burnchain_tip.consensus_hash { debug!("Forcibly restarting all Nakamoto inventory comms due to burnchain tip change ({} != {})", &last_sort_tip.consensus_hash, &network.burnchain_tip.consensus_hash); - let tip_rc = NakamotoTenureInv::get_current_reward_cycle(&network.burnchain_tip, sortdb); + let tip_rc = + NakamotoTenureInv::get_current_reward_cycle(&network.burnchain_tip, sortdb); for inv_state in self.inventories.values_mut() { inv_state.reset_comms(tip_rc.saturating_sub(1)); } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index ff71feda9d..0496909973 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -790,7 +790,7 @@ impl Relayer { } Ok(res) } - + /// Wrapper around inner_process_new_nakamoto_block pub fn process_new_nakamoto_block( burnchain: &Burnchain, @@ -811,7 +811,7 @@ impl Relayer { block, coord_comms, obtained_method, - false + false, ) } @@ -836,7 +836,7 @@ impl Relayer { block: &NakamotoBlock, coord_comms: Option<&CoordinatorChannels>, obtained_method: NakamotoBlockObtainMethod, - force_broadcast: bool + force_broadcast: bool, ) -> Result { debug!( "Handle incoming Nakamoto block {}/{} obtained via {}", @@ -862,10 +862,12 @@ impl Relayer { if force_broadcast { // it's possible that the signer sent this block to us, in which case, we should // broadcast it - debug!("Already have Nakamoto block {}, but broadcasting anyway", &block.header.block_id()); + debug!( + "Already have Nakamoto block {}, but broadcasting anyway", + &block.header.block_id() + ); return Ok(true); - } - else { + } else { debug!("Already have Nakamoto block {}", &block.header.block_id()); return Ok(false); } From 5d77eabf6c33cf1fed539cbb9bf8afdf236db742 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 14 Aug 2024 15:20:53 -0400 Subject: [PATCH 0763/1400] WIP: add test for mock miner messages. Failing to write to miner slot Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 11 +- stackslib/src/chainstate/nakamoto/mod.rs | 1 - testnet/stacks-node/src/neon_node.rs | 72 +++++++----- testnet/stacks-node/src/tests/signer/v0.rs | 122 ++++++++++++++++++++- 4 files changed, 167 insertions(+), 39 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index af7c38e22a..117a8c4912 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -88,18 +88,9 @@ MinerSlotID { /// Block proposal from the miner BlockProposal = 0, /// Block pushed from the miner - BlockPushed = 1, - /// Mock message from the miner - MockMinerMessage = 2 + BlockPushed = 1 }); -#[cfg_attr(test, mutants::skip)] -impl Display for MinerSlotID { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}({})", self, self.to_u8()) - } -} - impl MessageSlotIDTrait for MessageSlotID { fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d059a96cb6..7e70c9940c 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -4210,7 +4210,6 @@ impl NakamotoChainState { "stackerdb_slots" => ?stackerdb_config.signers, "queried_sortition" => %election_sortition, "sortition_hashes" => ?miners_info.get_sortitions()); - return Ok(None); } let slot_id_range = signer_ranges.swap_remove(signer_ix); diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 6cbbd3b9f6..6e17328f6d 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -154,7 +154,7 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use libsigner::v0::messages::{ MessageSlotID, MinerSlotID, MockMinerMessage, MockSignature, PeerInfo, SignerMessage, }; -use libsigner::StackerDBSession; +use libsigner::{SignerSession, StackerDBSession}; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::{Burnchain, BurnchainSigner, PoxConstants, Txid}; @@ -190,7 +190,7 @@ use stacks::net::db::{LocalPeer, PeerDB}; use stacks::net::dns::{DNSClient, DNSResolver}; use stacks::net::p2p::PeerNetwork; use stacks::net::relay::Relayer; -use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs}; +use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs, MINER_SLOT_COUNT}; use stacks::net::{ Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, }; @@ -2263,10 +2263,11 @@ impl BlockMinerThread { } /// Read any mock signatures from stackerdb and respond to them - pub fn respond_to_mock_signatures(&mut self) -> Result<(), ChainstateError> { + pub fn send_mock_miner_message(&mut self) -> Result<(), ChainstateError> { + let new_burn_block_height = self.burn_block.block_height + 1; let miner_config = self.config.get_miner_config(); - if miner_config.pre_nakamoto_miner_messaging { - debug!("Pre-Nakamoto miner messaging is disabled"); + if !miner_config.pre_nakamoto_miner_messaging { + debug!("Pre-Nakamoto mock miner messaging is disabled"); return Ok(()); } @@ -2274,16 +2275,43 @@ impl BlockMinerThread { let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) .expect("FATAL: could not open sortition DB"); - let target_epoch_id = - SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height + 1)? - .expect("FATAL: no epoch defined") - .epoch_id; + let target_epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), new_burn_block_height)? + .expect("FATAL: no epoch defined") + .epoch_id; if target_epoch_id != StacksEpochId::Epoch25 { - debug!("Mock signing is disabled for non-epoch 2.5 blocks."; + debug!("Mock miner messaging is disabled for non-epoch 2.5 blocks."; "target_epoch_id" => target_epoch_id.to_string() ); return Ok(()); } + let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); + let mut miners_stackerdb = + StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); + let slot_id = MinerSlotID::BlockProposal.to_u8().into(); + if let Ok(messages) = + miners_stackerdb.get_latest_chunks(&[slot_id, slot_id * MINER_SLOT_COUNT]) + { + debug!("Miner got messages: {:?}", messages.len()); + for message in messages { + if let Some(message) = message { + if message.is_empty() { + continue; + } + let Ok(SignerMessage::MockMinerMessage(miner_message)) = + SignerMessage::consensus_deserialize(&mut message.as_slice()) + else { + continue; + }; + if miner_message.peer_info.burn_block_height == new_burn_block_height { + debug!( + "Already sent mock miner message for tenure burn block height {:?}", + self.burn_block.block_height + ); + return Ok(()); + } + } + } + } // Retrieve any MockSignatures from stackerdb let mut mock_signatures = Vec::new(); let reward_cycle = self @@ -2321,13 +2349,6 @@ impl BlockMinerThread { } } } - info!( - "Miner responding to {} mock signatures", - mock_signatures.len() - ); - let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); - let mut miners_stackerdb = - StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); let p2p_net = StacksNode::setup_peer_network( &self.config, @@ -2356,30 +2377,29 @@ impl BlockMinerThread { server_version, }; - info!("Responding to mock signatures for burn block {:?}", &self.burn_block.block_height; + info!("Sending mock miner message in response to mock signatures for burn block {:?}", &self.burn_block.block_height; "stacks_tip_consensus_hash" => ?peer_info.stacks_tip_consensus_hash.clone(), "stacks_tip" => ?peer_info.stacks_tip.clone(), "peer_burn_block_height" => peer_info.burn_block_height, "pox_consensus" => ?peer_info.pox_consensus.clone(), "server_version" => peer_info.server_version.clone(), - "chain_id" => self.config.burnchain.chain_id + "chain_id" => self.config.burnchain.chain_id, + "num_mock_signatures" => mock_signatures.len(), ); let message = MockMinerMessage { peer_info, - tenure_burn_block_height: self.burn_block.block_height, chain_id: self.config.burnchain.chain_id, mock_signatures, + tenure_burn_block_height: new_burn_block_height, }; - let sort_db = SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) - .expect("FATAL: failed to open burnchain DB"); if let Err(e) = SignCoordinator::send_miners_message( &miner_config.mining_key.expect("BUG: no mining key"), - &sort_db, + &burn_db, &self.burn_block, &stackerdbs, SignerMessage::MockMinerMessage(message), - MinerSlotID::MockMinerMessage, + MinerSlotID::BlockProposal, // We are sending a mock miner message NOT a block proposal, but since we do not propose blocks in epoch 2.5, it is fine self.config.is_mainnet(), &mut miners_stackerdb, &self.burn_block.consensus_hash, @@ -3731,8 +3751,8 @@ impl RelayerThread { .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { let result = miner_thread_state.run_tenure(); - if let Err(e) = miner_thread_state.respond_to_mock_signatures() { - warn!("Failed to respond to mock signatures: {}", e); + if let Err(e) = miner_thread_state.send_mock_miner_message() { + warn!("Failed to send mock miner message: {}", e); } result }) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4b3fea46a0..92cd23a3af 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -23,7 +23,7 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use clarity::vm::StacksEpoch; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, + BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use rand::RngCore; @@ -184,7 +184,7 @@ impl SignerTest { ); debug!("Waiting for signer set calculation."); let mut reward_set_calculated = false; - let short_timeout = Duration::from_secs(30); + let short_timeout = Duration::from_secs(60); let now = std::time::Instant::now(); // Make sure the signer set is calculated before continuing or signers may not // recognize that they are registered signers in the subsequent burn block event @@ -2359,6 +2359,124 @@ fn mock_sign_epoch_25() { assert_eq!(old_signatures, new_signatures); } +#[test] +#[ignore] +/// This test checks that Epoch 2.5 miners will issue a MockMinerMessage per burn block they receive +/// including the mock signature from the signers. +fn mock_miner_message_epoch_25() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(5)), + |_| {}, + |node_config| { + let epochs = node_config.burnchain.epochs.as_mut().unwrap(); + for epoch in epochs.iter_mut() { + if epoch.epoch_id == StacksEpochId::Epoch25 { + epoch.end_height = 251; + } + if epoch.epoch_id == StacksEpochId::Epoch30 { + epoch.start_height = 251; + } + } + }, + &[], + ); + + let epochs = signer_test + .running_nodes + .conf + .burnchain + .epochs + .clone() + .unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_start_height = epoch_3.start_height; + + signer_test.boot_to_epoch_25_reward_cycle(); + + info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); + let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); + let main_poll_time = Instant::now(); + let mut mock_miner_message = None; + while signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height() + < epoch_3_start_height + { + let current_burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let mock_poll_time = Instant::now(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); + + while mock_miner_message.is_none() { + std::thread::sleep(Duration::from_millis(100)); + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks + .into_iter() + .filter_map(|chunk| { + if chunk.contract_id != miners_stackerdb_contract { + return None; + } + Some(chunk.modified_slots) + }) + .flatten() + { + if chunk.slot_id == MinerSlotID::BlockProposal.to_u8() as u32 { + if chunk.data.is_empty() { + continue; + } + let SignerMessage::MockMinerMessage(message) = + SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize MockMinerMessage") + else { + continue; + }; + if message.peer_info.burn_block_height == current_burn_block_height { + mock_miner_message = Some(message); + break; + } + } + } + assert!( + mock_poll_time.elapsed() <= Duration::from_secs(15), + "Failed to find mock miner message within timeout" + ); + } + test_observer::clear(); + mock_miner_message = None; + assert!( + main_poll_time.elapsed() <= Duration::from_secs(45), + "Timed out waiting to advance epoch 3.0" + ); + } +} + #[test] #[ignore] /// This test asserts that signer set rollover works as expected. From 4c7598c418c996e2353695ee058ade2c4ff47027 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 14 Aug 2024 15:35:38 -0400 Subject: [PATCH 0764/1400] WIP: Failing to write to miner slot Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 6e17328f6d..9749b2625d 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -152,7 +152,7 @@ use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use libsigner::v0::messages::{ - MessageSlotID, MinerSlotID, MockMinerMessage, MockSignature, PeerInfo, SignerMessage, + MessageSlotID, MinerSlotID, MockMinerMessage, PeerInfo, SignerMessage, }; use libsigner::{SignerSession, StackerDBSession}; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; @@ -2334,17 +2334,13 @@ impl BlockMinerThread { let chunks = stackerdbs.get_latest_chunks(&signers_contract_id, &slot_ids)?; for chunk in chunks { if let Some(chunk) = chunk { - match MockSignature::consensus_deserialize(&mut chunk.as_slice()) { - Ok(mock_signature) => { - if mock_signature.sign_data.event_burn_block_height - == self.burn_block.block_height - { - mock_signatures.push(mock_signature); - } - } - Err(e) => { - warn!("Failed to deserialize mock signature: {:?}", &e); - continue; + if let Ok(SignerMessage::MockSignature(mock_signature)) = + SignerMessage::consensus_deserialize(&mut chunk.as_slice()) + { + if mock_signature.sign_data.event_burn_block_height + == self.burn_block.block_height + { + mock_signatures.push(mock_signature); } } } From edfaa10b524d65f73e9c759d371b4c1d4664bf15 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 14 Aug 2024 15:57:23 -0400 Subject: [PATCH 0765/1400] WIP: No longer failing to write to .miners but failing to find appropro message Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 9749b2625d..0fc40c135a 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -3746,11 +3746,10 @@ impl RelayerThread { .name(format!("miner-block-{}", self.local_peer.data_url)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { - let result = miner_thread_state.run_tenure(); if let Err(e) = miner_thread_state.send_mock_miner_message() { warn!("Failed to send mock miner message: {}", e); } - result + miner_thread_state.run_tenure() }) .map_err(|e| { error!("Relayer: Failed to start tenure thread: {:?}", &e); From c590bcf42039771f0bc42a5b6fa160d4154b576c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 14 Aug 2024 16:16:47 -0400 Subject: [PATCH 0766/1400] WIP: failing at 222 Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 25 +++++++++------------- testnet/stacks-node/src/tests/signer/v0.rs | 1 - 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 0fc40c135a..43eb114414 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2264,7 +2264,6 @@ impl BlockMinerThread { /// Read any mock signatures from stackerdb and respond to them pub fn send_mock_miner_message(&mut self) -> Result<(), ChainstateError> { - let new_burn_block_height = self.burn_block.block_height + 1; let miner_config = self.config.get_miner_config(); if !miner_config.pre_nakamoto_miner_messaging { debug!("Pre-Nakamoto mock miner messaging is disabled"); @@ -2274,13 +2273,15 @@ impl BlockMinerThread { let burn_db_path = self.config.get_burn_db_file_path(); let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) .expect("FATAL: could not open sortition DB"); - - let target_epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), new_burn_block_height)? - .expect("FATAL: no epoch defined") - .epoch_id; - if target_epoch_id != StacksEpochId::Epoch25 { + let p2p_net = StacksNode::setup_peer_network( + &self.config, + &self.config.atlas, + self.burnchain.clone(), + ); + let epoch_id = p2p_net.get_current_epoch().epoch_id; + if epoch_id != StacksEpochId::Epoch25 { debug!("Mock miner messaging is disabled for non-epoch 2.5 blocks."; - "target_epoch_id" => target_epoch_id.to_string() + "epoch_id" => epoch_id.to_string() ); return Ok(()); } @@ -2302,7 +2303,7 @@ impl BlockMinerThread { else { continue; }; - if miner_message.peer_info.burn_block_height == new_burn_block_height { + if miner_message.peer_info.burn_block_height == self.burn_block.block_height { debug!( "Already sent mock miner message for tenure burn block height {:?}", self.burn_block.block_height @@ -2346,12 +2347,6 @@ impl BlockMinerThread { } } - let p2p_net = StacksNode::setup_peer_network( - &self.config, - &self.config.atlas, - self.burnchain.clone(), - ); - let server_version = version_string( "stacks-node", option_env!("STACKS_NODE_VERSION") @@ -2386,7 +2381,7 @@ impl BlockMinerThread { peer_info, chain_id: self.config.burnchain.chain_id, mock_signatures, - tenure_burn_block_height: new_burn_block_height, + tenure_burn_block_height: self.burn_block.block_height, }; if let Err(e) = SignCoordinator::send_miners_message( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 92cd23a3af..9034a8a523 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2468,7 +2468,6 @@ fn mock_miner_message_epoch_25() { "Failed to find mock miner message within timeout" ); } - test_observer::clear(); mock_miner_message = None; assert!( main_poll_time.elapsed() <= Duration::from_secs(45), From d8d333733a5c4bd87a6c48048b813ece74770035 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 31 Jul 2024 15:12:04 -0400 Subject: [PATCH 0767/1400] chore: Add `mock_mining_output_dir` to node config --- testnet/stacks-node/src/config.rs | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 4eef0bbdd0..cae1fda528 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1810,6 +1810,8 @@ pub struct NodeConfig { pub miner: bool, pub stacker: bool, pub mock_mining: bool, + /// Where to output blocks from mock mining + pub mock_mining_output_dir: Option, pub mine_microblocks: bool, pub microblock_frequency: u64, pub max_microblocks: u64, @@ -2102,6 +2104,7 @@ impl Default for NodeConfig { miner: false, stacker: false, mock_mining: false, + mock_mining_output_dir: None, mine_microblocks: true, microblock_frequency: 30_000, max_microblocks: u16::MAX as u64, @@ -2164,7 +2167,7 @@ impl NodeConfig { ) -> Neighbor { Neighbor { addr: NeighborKey { - peer_version: peer_version, + peer_version, network_id: chain_id, addrbytes: PeerAddress::from_socketaddr(&addr), port: addr.port(), @@ -2556,6 +2559,7 @@ pub struct NodeConfigFile { pub miner: Option, pub stacker: Option, pub mock_mining: Option, + pub mock_mining_output_dir: Option, pub mine_microblocks: Option, pub microblock_frequency: Option, pub max_microblocks: Option, @@ -2595,10 +2599,9 @@ impl NodeConfigFile { p2p_address: self.p2p_address.unwrap_or(rpc_bind.clone()), bootstrap_node: vec![], deny_nodes: vec![], - data_url: match self.data_url { - Some(data_url) => data_url, - None => format!("http://{}", rpc_bind), - }, + data_url: self + .data_url + .unwrap_or_else(|| format!("http://{rpc_bind}")), local_peer_seed: match self.local_peer_seed { Some(seed) => hex_bytes(&seed) .map_err(|_e| format!("node.local_peer_seed should be a hex encoded string"))?, @@ -2607,6 +2610,14 @@ impl NodeConfigFile { miner, stacker, mock_mining: self.mock_mining.unwrap_or(default_node_config.mock_mining), + mock_mining_output_dir: self + .mock_mining_output_dir + .map(PathBuf::from) + .map(fs::canonicalize) + .transpose() + .unwrap_or_else(|e| { + panic!("Failed to construct PathBuf from node.mock_mining_output_dir: {e}") + }), mine_microblocks: self .mine_microblocks .unwrap_or(default_node_config.mine_microblocks), From f949691c0c50776208f1440690f91a157044c15a Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 31 Jul 2024 17:12:21 -0400 Subject: [PATCH 0768/1400] feat: Neon mock miner can write blocks to files --- testnet/stacks-node/src/neon_node.rs | 81 ++++++++++++++++++++-------- 1 file changed, 58 insertions(+), 23 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 8c3c4ed179..fd3e240c9a 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -140,12 +140,14 @@ use std::cmp; use std::cmp::Ordering as CmpOrdering; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::io::{Read, Write}; +use std::fs::{self, File}; +use std::io::{BufWriter, Read, Write}; use std::net::SocketAddr; +use std::path::Path; use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; use std::time::Duration; -use std::{fs, mem, thread}; +use std::{mem, thread}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -237,7 +239,7 @@ pub(crate) enum MinerThreadResult { /// Fully-assembled Stacks anchored, block as well as some extra metadata pertaining to how it was /// linked to the burnchain and what view(s) the miner had of the burnchain before and after /// completing the block. -#[derive(Clone)] +#[derive(Clone, Serialize)] pub struct AssembledAnchorBlock { /// Consensus hash of the parent Stacks block parent_consensus_hash: ConsensusHash, @@ -255,6 +257,28 @@ pub struct AssembledAnchorBlock { tenure_begin: u128, } +/// Write any `serde_json` object to a file +/// TODO: Move this somewhere else +pub fn serialize_json_to_file(json: &J, filepath: P) -> Result<(), std::io::Error> +where + J: ?Sized + serde::Serialize, + P: AsRef, +{ + let file = File::create(filepath)?; + let mut writer = BufWriter::new(file); + serde_json::to_writer(&mut writer, json)?; + writer.flush() +} + +impl AssembledAnchorBlock { + pub fn serialize_to_file

(&self, filepath: P) -> Result<(), std::io::Error> + where + P: AsRef, + { + serialize_json_to_file(self, filepath) + } +} + /// Miner chain tip, on top of which to build microblocks #[derive(Debug, Clone, PartialEq)] pub struct MinerTip { @@ -2567,28 +2591,44 @@ impl BlockMinerThread { "attempt" => attempt ); + let NodeConfig { + mock_mining, + mock_mining_output_dir, + .. + } = self.config.get_node_config(false); + let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); + let assembled_block = AssembledAnchorBlock { + parent_consensus_hash: parent_block_info.parent_consensus_hash, + my_burn_hash: cur_burn_chain_tip.burn_header_hash, + my_block_height: cur_burn_chain_tip.block_height, + orig_burn_hash: self.burn_block.burn_header_hash, + anchored_block, + attempt, + tenure_begin, + }; if res.is_none() { self.failed_to_submit_last_attempt = true; - if !self.config.get_node_config(false).mock_mining { + if mock_mining { + debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); + if let Some(dir) = mock_mining_output_dir { + let stacks_block_height = assembled_block.anchored_block.header.total_work.work; + let filename = format!("{stacks_block_height}.json"); + let filepath = dir.join(filename); + assembled_block + .serialize_to_file(&filepath) + .unwrap_or_else(|e| panic!("Failed to write to file '{filepath:?}': {e}")); + } + } else { warn!("Relayer: Failed to submit Bitcoin transaction"); return None; } - debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); } else { self.failed_to_submit_last_attempt = false; } Some(MinerThreadResult::Block( - AssembledAnchorBlock { - parent_consensus_hash: parent_block_info.parent_consensus_hash, - my_burn_hash: cur_burn_chain_tip.burn_header_hash, - my_block_height: cur_burn_chain_tip.block_height, - orig_burn_hash: self.burn_block.burn_header_hash, - anchored_block, - attempt, - tenure_begin, - }, + assembled_block, microblock_private_key, bitcoin_controller.get_ongoing_commit(), )) @@ -3721,11 +3761,9 @@ impl RelayerThread { parent_consensus_hash, parent_block_hash ); - let mut microblock_thread_state = match MicroblockMinerThread::from_relayer_thread(self) { - Some(ts) => ts, - None => { - return false; - } + let Some(mut microblock_thread_state) = MicroblockMinerThread::from_relayer_thread(self) + else { + return false; }; if let Ok(miner_handle) = thread::Builder::new() @@ -3737,10 +3775,7 @@ impl RelayerThread { miner_tip, )) }) - .map_err(|e| { - error!("Relayer: Failed to start tenure thread: {:?}", &e); - e - }) + .inspect_err(|e| error!("Relayer: Failed to start tenure thread: {e:?}")) { // thread started! self.miner_thread = Some(miner_handle); From 7f12563b19214d5589517a7a4592463c01b46807 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 2 Aug 2024 09:37:49 -0400 Subject: [PATCH 0769/1400] feat: `stacks-inspect` reads mock mined blocks from files. No replay yet --- stacks-common/src/util/macros.rs | 22 +++++++ stacks-common/src/util/mod.rs | 26 +++++++++ stackslib/src/main.rs | 86 +++++++++++++++++++++++++++- testnet/stacks-node/src/neon_node.rs | 31 ++-------- 4 files changed, 138 insertions(+), 27 deletions(-) diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index 4e15c5485b..b1b26ee014 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -617,6 +617,28 @@ macro_rules! impl_byte_array_serde { }; } +#[allow(unused_macros)] +#[macro_export] +macro_rules! impl_file_io_serde_json { + ($thing:ident) => { + impl $thing { + pub fn serialize_to_file

(&self, path: P) -> Result<(), std::io::Error> + where + P: AsRef, + { + $crate::util::serialize_json_to_file(self, path) + } + + pub fn deserialize_from_file

(path: P) -> Result + where + P: AsRef, + { + $crate::util::deserialize_json_from_file(path) + } + } + }; +} + // print debug statements while testing #[allow(unused_macros)] #[macro_export] diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index d4dfcda82f..8575fee283 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -28,6 +28,9 @@ pub mod uint; pub mod vrf; use std::collections::HashMap; +use std::fs::File; +use std::io::{BufReader, BufWriter, Write}; +use std::path::Path; use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time}; @@ -120,3 +123,26 @@ pub mod db_common { true } } + +/// Write any `serde_json` object directly to a file +pub fn serialize_json_to_file(json: &J, path: P) -> Result<(), std::io::Error> +where + J: ?Sized + serde::Serialize, + P: AsRef, +{ + let file = File::create(path)?; + let mut writer = BufWriter::new(file); + serde_json::to_writer(&mut writer, json)?; + writer.flush() +} + +/// Read any `serde_json` object directly from a file +pub fn deserialize_json_from_file(path: P) -> Result +where + J: serde::de::DeserializeOwned, + P: AsRef, +{ + let file = File::open(path)?; + let reader = BufReader::new(file); + serde_json::from_reader::<_, J>(reader).map_err(std::io::Error::from) +} diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 8660e0e9a7..97428a2dff 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -26,6 +26,7 @@ extern crate stacks_common; #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; +use regex::Regex; use stacks_common::types::MempoolCollectionBehavior; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; @@ -38,6 +39,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::{File, OpenOptions}; use std::io::prelude::*; use std::io::BufReader; +use std::path::PathBuf; use std::time::Instant; use std::{env, fs, io, process, thread}; @@ -92,7 +94,7 @@ use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; use stacks_common::util::retry::LogReader; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::VRFProof; -use stacks_common::util::{get_epoch_time_ms, log, sleep_ms}; +use stacks_common::util::{deserialize_json_from_file, get_epoch_time_ms, log, sleep_ms}; #[cfg_attr(test, mutants::skip)] fn main() { @@ -1339,6 +1341,88 @@ simulating a miner. ); return; } + if argv[1] == "replay-mock-mining" { + let print_help_and_exit = || { + let n = &argv[0]; + eprintln!("Usage:"); + eprintln!(" {n} "); + process::exit(1); + }; + + // Process CLI args + let dir = argv + .get(2) + .map(PathBuf::from) + .map(fs::canonicalize) + .transpose() + .unwrap_or_else(|e| panic!("Not a valid path: {e}")) + .unwrap_or_else(print_help_and_exit); + + if !dir.is_dir() { + panic!("Not a valid directory: {dir:?}"); + } + + // Read entries in directory + let dir_entries = dir + .read_dir() + .unwrap_or_else(|e| panic!("Failed to read directory: {e}")) + .filter_map(|e| e.ok()); + + // Get filenames, filtering out anything that isn't a regular file + let filenames = dir_entries.filter_map(|e| match e.file_type() { + Ok(t) if t.is_file() => e.file_name().into_string().ok(), + _ => None, + }); + + // Get vec of (block_height, filename), to prepare for sorting + // + // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, + // but that requires reading all files + let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); + let mut indexed_files = filenames + .filter_map(|filename| { + // Use regex to extract block number from filename + let Some(cap) = re.captures(&filename) else { + return None; + }; + let Some(m) = cap.get(0) else { + return None; + }; + let Ok(bh) = m.as_str().parse::() else { + return None; + }; + Some((bh, filename)) + }) + .collect::>(); + + // Sort by block height + indexed_files.sort_by_key(|(bh, _)| *bh); + + if indexed_files.is_empty() { + panic!("No block files found"); + } + + info!( + "Replaying {} blocks starting at {}", + indexed_files.len(), + indexed_files[0].0 + ); + + for (bh, filename) in indexed_files { + let filepath = dir.join(filename); + info!("Replaying block from file"; + "block_height" => bh, + "filepath" => ?filepath + ); + // let block = AssembledAnchorBlock::deserialize_json_from_file(filepath) + // .unwrap_or_else(|e| panic!("Error reading block {block} from file: {e}")); + // debug!("Replaying block from {filepath:?}"; + // "block_height" => bh, + // "block" => %block + // ); + // TODO: Actually replay block + } + } if argv.len() < 4 { eprintln!("Usage: {} blockchain network working_dir", argv[0]); diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index fd3e240c9a..0b48717d25 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -140,14 +140,13 @@ use std::cmp; use std::cmp::Ordering as CmpOrdering; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::fs::{self, File}; -use std::io::{BufWriter, Read, Write}; +use std::io::{Read, Write}; use std::net::SocketAddr; use std::path::Path; use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; use std::time::Duration; -use std::{mem, thread}; +use std::{fs, mem, thread}; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -191,6 +190,7 @@ use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs}; use stacks::net::{ Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, }; +use stacks::util::{deserialize_json_from_file, serialize_json_to_file}; use stacks::util_lib::strings::{UrlString, VecDisplay}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ @@ -239,7 +239,7 @@ pub(crate) enum MinerThreadResult { /// Fully-assembled Stacks anchored, block as well as some extra metadata pertaining to how it was /// linked to the burnchain and what view(s) the miner had of the burnchain before and after /// completing the block. -#[derive(Clone, Serialize)] +#[derive(Clone, Serialize, Deserialize)] pub struct AssembledAnchorBlock { /// Consensus hash of the parent Stacks block parent_consensus_hash: ConsensusHash, @@ -256,28 +256,7 @@ pub struct AssembledAnchorBlock { /// Epoch timestamp in milliseconds when we started producing the block. tenure_begin: u128, } - -/// Write any `serde_json` object to a file -/// TODO: Move this somewhere else -pub fn serialize_json_to_file(json: &J, filepath: P) -> Result<(), std::io::Error> -where - J: ?Sized + serde::Serialize, - P: AsRef, -{ - let file = File::create(filepath)?; - let mut writer = BufWriter::new(file); - serde_json::to_writer(&mut writer, json)?; - writer.flush() -} - -impl AssembledAnchorBlock { - pub fn serialize_to_file

(&self, filepath: P) -> Result<(), std::io::Error> - where - P: AsRef, - { - serialize_json_to_file(self, filepath) - } -} +impl_file_io_serde_json!(AssembledAnchorBlock); /// Miner chain tip, on top of which to build microblocks #[derive(Debug, Clone, PartialEq)] From d4a92153f58be88f4ed770f176a2994429fef397 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 2 Aug 2024 15:40:37 -0400 Subject: [PATCH 0770/1400] chore: Move `replay-mock-mining` to stacks-node binary --- stackslib/src/main.rs | 85 +-------------------- testnet/stacks-node/src/main.rs | 110 +++++++++++++++++++++++---- testnet/stacks-node/src/neon_node.rs | 19 ++--- 3 files changed, 105 insertions(+), 109 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 97428a2dff..90adf0bf1f 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -39,7 +39,6 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::{File, OpenOptions}; use std::io::prelude::*; use std::io::BufReader; -use std::path::PathBuf; use std::time::Instant; use std::{env, fs, io, process, thread}; @@ -94,7 +93,7 @@ use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; use stacks_common::util::retry::LogReader; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::VRFProof; -use stacks_common::util::{deserialize_json_from_file, get_epoch_time_ms, log, sleep_ms}; +use stacks_common::util::{get_epoch_time_ms, log, sleep_ms}; #[cfg_attr(test, mutants::skip)] fn main() { @@ -1341,88 +1340,6 @@ simulating a miner. ); return; } - if argv[1] == "replay-mock-mining" { - let print_help_and_exit = || { - let n = &argv[0]; - eprintln!("Usage:"); - eprintln!(" {n} "); - process::exit(1); - }; - - // Process CLI args - let dir = argv - .get(2) - .map(PathBuf::from) - .map(fs::canonicalize) - .transpose() - .unwrap_or_else(|e| panic!("Not a valid path: {e}")) - .unwrap_or_else(print_help_and_exit); - - if !dir.is_dir() { - panic!("Not a valid directory: {dir:?}"); - } - - // Read entries in directory - let dir_entries = dir - .read_dir() - .unwrap_or_else(|e| panic!("Failed to read directory: {e}")) - .filter_map(|e| e.ok()); - - // Get filenames, filtering out anything that isn't a regular file - let filenames = dir_entries.filter_map(|e| match e.file_type() { - Ok(t) if t.is_file() => e.file_name().into_string().ok(), - _ => None, - }); - - // Get vec of (block_height, filename), to prepare for sorting - // - // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, - // but that requires reading all files - let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); - let mut indexed_files = filenames - .filter_map(|filename| { - // Use regex to extract block number from filename - let Some(cap) = re.captures(&filename) else { - return None; - }; - let Some(m) = cap.get(0) else { - return None; - }; - let Ok(bh) = m.as_str().parse::() else { - return None; - }; - Some((bh, filename)) - }) - .collect::>(); - - // Sort by block height - indexed_files.sort_by_key(|(bh, _)| *bh); - - if indexed_files.is_empty() { - panic!("No block files found"); - } - - info!( - "Replaying {} blocks starting at {}", - indexed_files.len(), - indexed_files[0].0 - ); - - for (bh, filename) in indexed_files { - let filepath = dir.join(filename); - info!("Replaying block from file"; - "block_height" => bh, - "filepath" => ?filepath - ); - // let block = AssembledAnchorBlock::deserialize_json_from_file(filepath) - // .unwrap_or_else(|e| panic!("Error reading block {block} from file: {e}")); - // debug!("Replaying block from {filepath:?}"; - // "block_height" => bh, - // "block" => %block - // ); - // TODO: Actually replay block - } - } if argv.len() < 4 { eprintln!("Usage: {} blockchain network working_dir", argv[0]); diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 41b7426278..e3f191a946 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -10,9 +10,12 @@ extern crate stacks; #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; +use regex::Regex; pub use stacks_common::util; use stacks_common::util::hash::hex_bytes; +use crate::neon_node::AssembledAnchorBlock; + pub mod monitoring; pub mod burnchains; @@ -31,7 +34,8 @@ pub mod syncctl; pub mod tenure; use std::collections::HashMap; -use std::{env, panic, process}; +use std::path::PathBuf; +use std::{env, fs, panic, process}; use backtrace::Backtrace; use pico_args::Arguments; @@ -166,10 +170,8 @@ fn cli_get_miner_spend( return 0.0; }; let Ok(active_miners_and_commits) = - MinerStats::get_active_miners(&sortdb, Some(burn_block_height)).map_err(|e| { - warn!("Failed to get active miners: {:?}", &e); - e - }) + MinerStats::get_active_miners(&sortdb, Some(burn_block_height)) + .inspect_err(|e| warn!("Failed to get active miners: {e:?}")) else { return 0.0; }; @@ -187,10 +189,7 @@ fn cli_get_miner_spend( let Ok(unconfirmed_block_commits) = miner_stats .get_unconfirmed_commits(burn_block_height + 1, &active_miners) - .map_err(|e| { - warn!("Failed to find unconfirmed block-commits: {}", &e); - e - }) + .inspect_err(|e| warn!("Failed to find unconfirmed block-commits: {e}")) else { return 0.0; }; @@ -229,10 +228,7 @@ fn cli_get_miner_spend( &commit_outs, at_burnchain_height, ) - .map_err(|e| { - warn!("Failed to get unconfirmed burn distribution: {:?}", &e); - e - }) + .inspect_err(|e| warn!("Failed to get unconfirmed burn distribution: {e:?}")) else { return 0.0; }; @@ -265,6 +261,82 @@ fn cli_get_miner_spend( spend_amount } +fn cli_replay_mock_mining(config_path: &str, path: &str) { + info!("Loading config at path {config_path}"); + let config = match ConfigFile::from_path(&config_path) { + Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), + Err(e) => { + warn!("Invalid config file: {e}"); + process::exit(1); + } + }; + + // Validate directory path + let dir = PathBuf::from(path); + let dir = fs::canonicalize(dir).unwrap_or_else(|e| panic!("{path} is not a valid path: {e}")); + + if !dir.is_dir() { + panic!("{path} is not a valid directory"); + } + + // Read entries in directory + let dir_entries = dir + .read_dir() + .unwrap_or_else(|e| panic!("Failed to read {path}: {e}")) + .filter_map(|e| e.ok()); + + // Get filenames, filtering out anything that isn't a regular file + let filenames = dir_entries.filter_map(|e| match e.file_type() { + Ok(t) if t.is_file() => e.file_name().into_string().ok(), + _ => None, + }); + + // Get vec of (block_height, filename), to prepare for sorting + // + // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, + // but that requires reading all files + let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); + let mut indexed_files = filenames + .filter_map(|filename| { + // Use regex to extract block number from filename + let Some(cap) = re.captures(&filename) else { + return None; + }; + let Some(m) = cap.get(0) else { + return None; + }; + let Ok(bh) = m.as_str().parse::() else { + return None; + }; + Some((bh, filename)) + }) + .collect::>(); + + // Sort by block height + indexed_files.sort_by_key(|(bh, _)| *bh); + + if indexed_files.is_empty() { + panic!("No block files found"); + } + + info!( + "Replaying {} blocks starting at {}", + indexed_files.len(), + indexed_files[0].0 + ); + + for (bh, filename) in indexed_files { + let filepath = dir.join(filename); + let block = AssembledAnchorBlock::deserialize_from_file(&filepath) + .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); + debug!("Replaying block from {filepath:?}"; + "block_height" => bh, + "block" => ?block + ); + // TODO: Actually replay block + } +} + fn main() { panic::set_hook(Box::new(|panic_info| { error!("Process abort due to thread panic: {}", panic_info); @@ -412,6 +484,13 @@ fn main() { println!("Will spend {}", spend_amount); process::exit(0); } + "replay-mock-mining" => { + let path: String = args.value_from_str("--path").unwrap(); + let config_path: String = args.value_from_str("--config").unwrap(); + args.finish(); + cli_replay_mock_mining(&config_path, &path); + process::exit(0); + } _ => { print_help(); return; @@ -502,6 +581,11 @@ key-for-seed\tOutput the associated secret key for a burnchain signer created wi \t\tCan be passed a config file for the seed via the `--config ` option *or* by supplying the hex seed on \t\tthe command line directly. +replay-mock-mining\tReplay mock mined blocks from

+\t\tArguments: +\t\t --path: path to directory of mock mined blocks +\t\t --config: path to the config file + help\t\tDisplay this help. OPTIONAL ARGUMENTS: diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 0b48717d25..58f3b877a6 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -239,7 +239,7 @@ pub(crate) enum MinerThreadResult { /// Fully-assembled Stacks anchored, block as well as some extra metadata pertaining to how it was /// linked to the burnchain and what view(s) the miner had of the burnchain before and after /// completing the block. -#[derive(Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct AssembledAnchorBlock { /// Consensus hash of the parent Stacks block parent_consensus_hash: ConsensusHash, @@ -3603,22 +3603,17 @@ impl RelayerThread { } } - let mut miner_thread_state = - match self.create_block_miner(registered_key, last_burn_block, issue_timestamp_ms) { - Some(state) => state, - None => { - return false; - } - }; + let Some(mut miner_thread_state) = + self.create_block_miner(registered_key, last_burn_block, issue_timestamp_ms) + else { + return false; + }; if let Ok(miner_handle) = thread::Builder::new() .name(format!("miner-block-{}", self.local_peer.data_url)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || miner_thread_state.run_tenure()) - .map_err(|e| { - error!("Relayer: Failed to start tenure thread: {:?}", &e); - e - }) + .inspect_err(|e| error!("Relayer: Failed to start tenure thread: {e:?}")) { self.miner_thread = Some(miner_handle); } From a6fd22bd44483c4be9e0fcc1d382acab7084d76a Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 6 Aug 2024 09:45:35 -0400 Subject: [PATCH 0771/1400] chore: Move `replay-block` back to stacks-inspect --- stackslib/src/main.rs | 90 ++++++++++++++++++++++++++++ testnet/stacks-node/src/main.rs | 89 +-------------------------- testnet/stacks-node/src/neon_node.rs | 2 - 3 files changed, 91 insertions(+), 90 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 90adf0bf1f..71ec309b1b 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -39,6 +39,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::{File, OpenOptions}; use std::io::prelude::*; use std::io::BufReader; +use std::path::PathBuf; use std::time::Instant; use std::{env, fs, io, process, thread}; @@ -1341,6 +1342,11 @@ simulating a miner. return; } + if argv[1] == "replay-mock-mining" { + replay_mock_mining(argv); + process::exit(0); + } + if argv.len() < 4 { eprintln!("Usage: {} blockchain network working_dir", argv[0]); process::exit(1); @@ -1977,3 +1983,87 @@ fn analyze_sortition_mev(argv: Vec) { process::exit(0); } + +fn replay_mock_mining(argv: Vec) { + let print_help_and_exit = || -> ! { + let n = &argv[0]; + eprintln!("Usage:"); + eprintln!(" {n} "); + process::exit(1); + }; + + // Process CLI args + let chainstate_path = argv + .get(2) + .unwrap_or_else(|| print_help_and_exit()); + + let blocks_path = argv + .get(3) + .map(PathBuf::from) + .map(fs::canonicalize) + .transpose() + .unwrap_or_else(|e| panic!("Not a valid path: {e}")) + .unwrap_or_else(|| print_help_and_exit()); + + // Validate directory path + if !blocks_path.is_dir() { + panic!("{blocks_path:?} is not a valid directory"); + } + + // Read entries in directory + let dir_entries = blocks_path + .read_dir() + .unwrap_or_else(|e| panic!("Failed to read {blocks_path:?}: {e}")) + .filter_map(|e| e.ok()); + + // Get filenames, filtering out anything that isn't a regular file + let filenames = dir_entries.filter_map(|e| match e.file_type() { + Ok(t) if t.is_file() => e.file_name().into_string().ok(), + _ => None, + }); + + // Get vec of (block_height, filename), to prepare for sorting + // + // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, + // but that requires reading all files + let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); + let mut indexed_files = filenames + .filter_map(|filename| { + // Use regex to extract block number from filename + let Some(cap) = re.captures(&filename) else { + return None; + }; + let Some(m) = cap.get(0) else { + return None; + }; + let Ok(bh) = m.as_str().parse::() else { + return None; + }; + Some((bh, filename)) + }) + .collect::>(); + + // Sort by block height + indexed_files.sort_by_key(|(bh, _)| *bh); + + if indexed_files.is_empty() { + panic!("No block files found"); + } + + info!( + "Replaying {} blocks starting at {}", + indexed_files.len(), + indexed_files[0].0 + ); + + for (bh, filename) in indexed_files { + let filepath = blocks_path.join(filename); + // let block = AssembledAnchorBlock::deserialize_from_file(&filepath) + // .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); + debug!("Replaying block from {filepath:?}"; + "block_height" => bh, + "block" => ?block + ); + // TODO: Actually replay block + } +} diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index e3f191a946..fcdc9f5847 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -10,12 +10,9 @@ extern crate stacks; #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; -use regex::Regex; pub use stacks_common::util; use stacks_common::util::hash::hex_bytes; -use crate::neon_node::AssembledAnchorBlock; - pub mod monitoring; pub mod burnchains; @@ -34,8 +31,7 @@ pub mod syncctl; pub mod tenure; use std::collections::HashMap; -use std::path::PathBuf; -use std::{env, fs, panic, process}; +use std::{env, panic, process}; use backtrace::Backtrace; use pico_args::Arguments; @@ -261,82 +257,6 @@ fn cli_get_miner_spend( spend_amount } -fn cli_replay_mock_mining(config_path: &str, path: &str) { - info!("Loading config at path {config_path}"); - let config = match ConfigFile::from_path(&config_path) { - Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), - Err(e) => { - warn!("Invalid config file: {e}"); - process::exit(1); - } - }; - - // Validate directory path - let dir = PathBuf::from(path); - let dir = fs::canonicalize(dir).unwrap_or_else(|e| panic!("{path} is not a valid path: {e}")); - - if !dir.is_dir() { - panic!("{path} is not a valid directory"); - } - - // Read entries in directory - let dir_entries = dir - .read_dir() - .unwrap_or_else(|e| panic!("Failed to read {path}: {e}")) - .filter_map(|e| e.ok()); - - // Get filenames, filtering out anything that isn't a regular file - let filenames = dir_entries.filter_map(|e| match e.file_type() { - Ok(t) if t.is_file() => e.file_name().into_string().ok(), - _ => None, - }); - - // Get vec of (block_height, filename), to prepare for sorting - // - // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, - // but that requires reading all files - let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); - let mut indexed_files = filenames - .filter_map(|filename| { - // Use regex to extract block number from filename - let Some(cap) = re.captures(&filename) else { - return None; - }; - let Some(m) = cap.get(0) else { - return None; - }; - let Ok(bh) = m.as_str().parse::() else { - return None; - }; - Some((bh, filename)) - }) - .collect::>(); - - // Sort by block height - indexed_files.sort_by_key(|(bh, _)| *bh); - - if indexed_files.is_empty() { - panic!("No block files found"); - } - - info!( - "Replaying {} blocks starting at {}", - indexed_files.len(), - indexed_files[0].0 - ); - - for (bh, filename) in indexed_files { - let filepath = dir.join(filename); - let block = AssembledAnchorBlock::deserialize_from_file(&filepath) - .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); - debug!("Replaying block from {filepath:?}"; - "block_height" => bh, - "block" => ?block - ); - // TODO: Actually replay block - } -} - fn main() { panic::set_hook(Box::new(|panic_info| { error!("Process abort due to thread panic: {}", panic_info); @@ -484,13 +404,6 @@ fn main() { println!("Will spend {}", spend_amount); process::exit(0); } - "replay-mock-mining" => { - let path: String = args.value_from_str("--path").unwrap(); - let config_path: String = args.value_from_str("--config").unwrap(); - args.finish(); - cli_replay_mock_mining(&config_path, &path); - process::exit(0); - } _ => { print_help(); return; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 58f3b877a6..f09287196f 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -142,7 +142,6 @@ use std::cmp::Ordering as CmpOrdering; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::io::{Read, Write}; use std::net::SocketAddr; -use std::path::Path; use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; use std::time::Duration; @@ -190,7 +189,6 @@ use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs}; use stacks::net::{ Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, }; -use stacks::util::{deserialize_json_from_file, serialize_json_to_file}; use stacks::util_lib::strings::{UrlString, VecDisplay}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ From f4b89f80805ed4bd39369271d75845efbfa3247d Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 6 Aug 2024 12:38:56 -0400 Subject: [PATCH 0772/1400] refactor: Move `AssembledAnchorBlock` into stackslib --- stackslib/src/chainstate/stacks/miner.rs | 22 +++++++++++++++++++++ stackslib/src/main.rs | 10 ++++------ testnet/stacks-node/src/neon_node.rs | 25 ++---------------------- 3 files changed, 28 insertions(+), 29 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index f2dfdf5dff..c41f8b5055 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -66,6 +66,28 @@ use crate::monitoring::{ use crate::net::relay::Relayer; use crate::net::Error as net_error; +/// Fully-assembled Stacks anchored, block as well as some extra metadata pertaining to how it was +/// linked to the burnchain and what view(s) the miner had of the burnchain before and after +/// completing the block. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AssembledAnchorBlock { + /// Consensus hash of the parent Stacks block + pub parent_consensus_hash: ConsensusHash, + /// Burnchain tip's block hash when we finished mining + pub my_burn_hash: BurnchainHeaderHash, + /// Burnchain tip's block height when we finished mining + pub my_block_height: u64, + /// Burnchain tip's block hash when we started mining (could be different) + pub orig_burn_hash: BurnchainHeaderHash, + /// The block we produced + pub anchored_block: StacksBlock, + /// The attempt count of this block (multiple blocks will be attempted per burnchain block) + pub attempt: u64, + /// Epoch timestamp in milliseconds when we started producing the block. + pub tenure_begin: u128, +} +impl_file_io_serde_json!(AssembledAnchorBlock); + /// System status for mining. /// The miner can be Ready, in which case a miner is allowed to run /// The miner can be Blocked, in which case the miner *should not start* and/or *should terminate* diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 71ec309b1b..54c35eb7ab 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1988,14 +1988,12 @@ fn replay_mock_mining(argv: Vec) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); - eprintln!(" {n} "); + eprintln!(" {n} "); process::exit(1); }; // Process CLI args - let chainstate_path = argv - .get(2) - .unwrap_or_else(|| print_help_and_exit()); + let chainstate_path = argv.get(2).unwrap_or_else(|| print_help_and_exit()); let blocks_path = argv .get(3) @@ -2058,8 +2056,8 @@ fn replay_mock_mining(argv: Vec) { for (bh, filename) in indexed_files { let filepath = blocks_path.join(filename); - // let block = AssembledAnchorBlock::deserialize_from_file(&filepath) - // .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); + let block = AssembledAnchorBlock::deserialize_from_file(&filepath) + .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); debug!("Replaying block from {filepath:?}"; "block_height" => bh, "block" => ?block diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index f09287196f..c6be468a4d 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -167,7 +167,8 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::blocks::StagingBlock; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY}; use stacks::chainstate::stacks::miner::{ - signal_mining_blocked, signal_mining_ready, BlockBuilderSettings, StacksMicroblockBuilder, + signal_mining_blocked, signal_mining_ready, AssembledAnchorBlock, BlockBuilderSettings, + StacksMicroblockBuilder, }; use stacks::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockBuilder, StacksBlockHeader, @@ -234,28 +235,6 @@ pub(crate) enum MinerThreadResult { ), } -/// Fully-assembled Stacks anchored, block as well as some extra metadata pertaining to how it was -/// linked to the burnchain and what view(s) the miner had of the burnchain before and after -/// completing the block. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct AssembledAnchorBlock { - /// Consensus hash of the parent Stacks block - parent_consensus_hash: ConsensusHash, - /// Burnchain tip's block hash when we finished mining - my_burn_hash: BurnchainHeaderHash, - /// Burnchain tip's block height when we finished mining - my_block_height: u64, - /// Burnchain tip's block hash when we started mining (could be different) - orig_burn_hash: BurnchainHeaderHash, - /// The block we produced - anchored_block: StacksBlock, - /// The attempt count of this block (multiple blocks will be attempted per burnchain block) - attempt: u64, - /// Epoch timestamp in milliseconds when we started producing the block. - tenure_begin: u128, -} -impl_file_io_serde_json!(AssembledAnchorBlock); - /// Miner chain tip, on top of which to build microblocks #[derive(Debug, Clone, PartialEq)] pub struct MinerTip { From 792e1c123889f23d42d01324cf0015b2d0d2302f Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 6 Aug 2024 15:21:47 -0400 Subject: [PATCH 0773/1400] refactor: Change args to `extract_connecting_microblocks()` --- stackslib/src/chainstate/stacks/db/blocks.rs | 22 +++++++++----------- stackslib/src/main.rs | 3 ++- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 63c22fafb6..0317db7b2f 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -5997,13 +5997,14 @@ impl StacksChainState { /// the given block. pub fn extract_connecting_microblocks( parent_block_header_info: &StacksHeaderInfo, - next_staging_block: &StagingBlock, + next_block_consensus_hash: &ConsensusHash, + next_block_hash: &BlockHeaderHash, block: &StacksBlock, mut next_microblocks: Vec, ) -> Result, Error> { // NOTE: since we got the microblocks from staging, where their signatures were already // validated, we don't need to validate them again. - let microblock_terminus = match StacksChainState::validate_parent_microblock_stream( + let Some((microblock_terminus, _)) = StacksChainState::validate_parent_microblock_stream( parent_block_header_info .anchored_header .as_stacks_epoch2() @@ -6011,15 +6012,11 @@ impl StacksChainState { &block.header, &next_microblocks, false, - ) { - Some((terminus, _)) => terminus, - None => { - debug!( - "Stopping at block {}/{} -- discontiguous header stream", - next_staging_block.consensus_hash, next_staging_block.anchored_block_hash, - ); - return Ok(vec![]); - } + ) else { + debug!( + "Stopping at block {next_block_consensus_hash}/{next_block_hash} -- discontiguous header stream" + ); + return Ok(vec![]); }; // do not consider trailing microblocks that this anchored block does _not_ confirm @@ -6214,7 +6211,8 @@ impl StacksChainState { // block's parent to this block. let next_microblocks = StacksChainState::extract_connecting_microblocks( &parent_header_info, - &next_staging_block, + &parent_header_info.consensus_hash, + &next_staging_block.anchored_block_hash, &block, next_microblocks, )?; diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 54c35eb7ab..16a1319db2 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1701,7 +1701,8 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { // block's parent to this block. let next_microblocks = StacksChainState::extract_connecting_microblocks( &parent_header_info, - &next_staging_block, + &next_staging_block.consensus_hash, + &next_staging_block.anchored_block_hash, &block, next_microblocks, ) From 91952ac541c1f48ab0df22a14d4c088625a371fe Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 8 Aug 2024 09:38:49 -0400 Subject: [PATCH 0774/1400] feat: Refactor `replay_block()` and use it to validate mock mined blocks --- stackslib/src/chainstate/stacks/block.rs | 8 + stackslib/src/chainstate/stacks/db/blocks.rs | 34 +- stackslib/src/chainstate/stacks/miner.rs | 2 + stackslib/src/main.rs | 405 ++++++++++++------- testnet/stacks-node/src/neon_node.rs | 1 + 5 files changed, 284 insertions(+), 166 deletions(-) diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 6ede2bc8e6..85bfcc5576 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -651,6 +651,14 @@ impl StacksBlock { pub fn has_microblock_parent(&self) -> bool { self.header.has_microblock_parent() } + + /// Returns size in bytes of `StacksMessageCodec` representation + /// Note that this will serialize the block, so don't call if there is a better way to get block size + pub fn block_size(&self) -> Result { + let mut buf = vec![]; + self.consensus_serialize(&mut buf)?; + Ok(buf.len()) + } } impl StacksMessageCodec for StacksMicroblockHeader { diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 0317db7b2f..be05151c12 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -3716,9 +3716,26 @@ impl StacksChainState { blocks_conn: &DBConn, staging_block: &StagingBlock, ) -> Result>, Error> { - if staging_block.parent_microblock_hash == EMPTY_MICROBLOCK_PARENT_HASH - && staging_block.parent_microblock_seq == 0 - { + Self::inner_find_parent_microblock_stream( + blocks_conn, + &staging_block.anchored_block_hash, + &staging_block.parent_anchored_block_hash, + &staging_block.parent_consensus_hash, + &staging_block.parent_microblock_hash, + staging_block.parent_microblock_seq, + ) + } + + /// Allow `find_parent_microblock_stream()` to be called without `StagingBlock` + pub fn inner_find_parent_microblock_stream( + blocks_conn: &DBConn, + anchored_block_hash: &BlockHeaderHash, + parent_anchored_block_hash: &BlockHeaderHash, + parent_consensus_hash: &ConsensusHash, + parent_microblock_hash: &BlockHeaderHash, + parent_microblock_seq: u16, + ) -> Result>, Error> { + if *parent_microblock_hash == EMPTY_MICROBLOCK_PARENT_HASH && parent_microblock_seq == 0 { // no parent microblocks, ever return Ok(Some(vec![])); } @@ -3726,9 +3743,9 @@ impl StacksChainState { // find the microblock stream fork that this block confirms match StacksChainState::load_microblock_stream_fork( blocks_conn, - &staging_block.parent_consensus_hash, - &staging_block.parent_anchored_block_hash, - &staging_block.parent_microblock_hash, + parent_consensus_hash, + parent_anchored_block_hash, + parent_microblock_hash, )? { Some(microblocks) => { return Ok(Some(microblocks)); @@ -3736,10 +3753,7 @@ impl StacksChainState { None => { // parent microblocks haven't arrived yet, or there are none debug!( - "No parent microblock stream for {}: expected a stream with tail {},{}", - staging_block.anchored_block_hash, - staging_block.parent_microblock_hash, - staging_block.parent_microblock_seq + "No parent microblock stream for {anchored_block_hash}: expected a stream with tail {parent_microblock_hash},{parent_microblock_seq}", ); return Ok(None); } diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index c41f8b5055..4cf3e1e65e 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -73,6 +73,8 @@ use crate::net::Error as net_error; pub struct AssembledAnchorBlock { /// Consensus hash of the parent Stacks block pub parent_consensus_hash: ConsensusHash, + /// Consensus hash this Stacks block + pub consensus_hash: ConsensusHash, /// Burnchain tip's block hash when we finished mining pub my_burn_hash: BurnchainHeaderHash, /// Burnchain tip's block height when we finished mining diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 16a1319db2..fbf64655d2 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -26,6 +26,9 @@ extern crate stacks_common; #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; +use blockstack_lib::clarity_vm::clarity::ClarityInstance; +use clarity::types::chainstate::SortitionId; +use db::ChainstateTx; use regex::Regex; use stacks_common::types::MempoolCollectionBehavior; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] @@ -52,7 +55,7 @@ use blockstack_lib::burnchains::{ Address, Burnchain, PoxConstants, Txid, BLOCKSTACK_MAGIC_MAINNET, }; use blockstack_lib::chainstate::burn::db::sortdb::{ - get_block_commit_by_txid, SortitionDB, SortitionHandle, + get_block_commit_by_txid, SortitionDB, SortitionHandle, SortitionHandleContext, }; use blockstack_lib::chainstate::burn::operations::BlockstackOperationType; use blockstack_lib::chainstate::burn::{BlockSnapshot, ConsensusHash}; @@ -77,7 +80,7 @@ use blockstack_lib::net::db::LocalPeer; use blockstack_lib::net::p2p::PeerNetwork; use blockstack_lib::net::relay::Relayer; use blockstack_lib::net::StacksMessage; -use blockstack_lib::util_lib::db::sqlite_open; +use blockstack_lib::util_lib::db::{sqlite_open, IndexDBTx}; use blockstack_lib::util_lib::strings::UrlString; use blockstack_lib::{clarity_cli, util_lib}; use libstackerdb::StackerDBChunkData; @@ -882,20 +885,20 @@ simulating a miner. let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); - eprintln!(" {n} "); - eprintln!(" {n} prefix "); - eprintln!(" {n} index-range "); - eprintln!(" {n} range "); - eprintln!(" {n} "); + eprintln!(" {n} "); + eprintln!(" {n} prefix "); + eprintln!(" {n} index-range "); + eprintln!(" {n} range "); + eprintln!(" {n} "); process::exit(1); }; if argv.len() < 2 { print_help_and_exit(); } let start = Instant::now(); - let stacks_path = &argv[2]; + let db_path = &argv[2]; let mode = argv.get(3).map(String::as_str); - let staging_blocks_db_path = format!("{stacks_path}/mainnet/chainstate/vm/index.sqlite"); + let staging_blocks_db_path = format!("{db_path}/mainnet/chainstate/vm/index.sqlite"); let conn = Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY) .unwrap(); @@ -913,7 +916,7 @@ simulating a miner. let arg4 = argv[4] .parse::() .expect(" not a valid u64"); - let arg5 = argv[5].parse::().expect(" not a valid u64"); + let arg5 = argv[5].parse::().expect(" not a valid u64"); let start = arg4.saturating_sub(1); let blocks = arg5.saturating_sub(arg4); format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}") @@ -922,7 +925,7 @@ simulating a miner. let start = argv[4] .parse::() .expect(" not a valid u64"); - let end = argv[5].parse::().expect(" not a valid u64"); + let end = argv[5].parse::().expect(" not a valid u64"); let blocks = end.saturating_sub(start); format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") } @@ -949,7 +952,7 @@ simulating a miner. if i % 100 == 0 { println!("Checked {i}..."); } - replay_block(stacks_path, index_block_hash); + replay_staging_block(db_path, index_block_hash); } println!("Finished. run_time_seconds = {}", start.elapsed().as_secs()); process::exit(0); @@ -1590,11 +1593,94 @@ simulating a miner. process::exit(0); } -fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { - let index_block_hash = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); - let chain_state_path = format!("{stacks_path}/mainnet/chainstate/"); - let sort_db_path = format!("{stacks_path}/mainnet/burnchain/sortition"); - let burn_db_path = format!("{stacks_path}/mainnet/burnchain/burnchain.sqlite"); +fn replay_mock_mining(argv: Vec) { + let print_help_and_exit = || -> ! { + let n = &argv[0]; + eprintln!("Usage:"); + eprintln!(" {n} "); + process::exit(1); + }; + + // Process CLI args + let db_path = argv.get(2).unwrap_or_else(|| print_help_and_exit()); + + let blocks_path = argv + .get(3) + .map(PathBuf::from) + .map(fs::canonicalize) + .transpose() + .unwrap_or_else(|e| panic!("Not a valid path: {e}")) + .unwrap_or_else(|| print_help_and_exit()); + + // Validate directory path + if !blocks_path.is_dir() { + panic!("{blocks_path:?} is not a valid directory"); + } + + // Read entries in directory + let dir_entries = blocks_path + .read_dir() + .unwrap_or_else(|e| panic!("Failed to read {blocks_path:?}: {e}")) + .filter_map(|e| e.ok()); + + // Get filenames, filtering out anything that isn't a regular file + let filenames = dir_entries.filter_map(|e| match e.file_type() { + Ok(t) if t.is_file() => e.file_name().into_string().ok(), + _ => None, + }); + + // Get vec of (block_height, filename), to prepare for sorting + // + // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, + // but that requires reading all files + let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); + let mut indexed_files = filenames + .filter_map(|filename| { + // Use regex to extract block number from filename + let Some(cap) = re.captures(&filename) else { + return None; + }; + let Some(m) = cap.get(0) else { + return None; + }; + let Ok(bh) = m.as_str().parse::() else { + return None; + }; + Some((bh, filename)) + }) + .collect::>(); + + // Sort by block height + indexed_files.sort_by_key(|(bh, _)| *bh); + + if indexed_files.is_empty() { + panic!("No block files found"); + } + + info!( + "Replaying {} blocks starting at {}", + indexed_files.len(), + indexed_files[0].0 + ); + + for (bh, filename) in indexed_files { + let filepath = blocks_path.join(filename); + let block = AssembledAnchorBlock::deserialize_from_file(&filepath) + .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); + debug!("Replaying block from {filepath:?}"; + "block_height" => bh, + "block" => ?block + ); + replay_mock_mined_block(&db_path, block); + } +} + +/// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate +fn replay_staging_block(db_path: &str, index_block_hash_hex: &str) { + let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); + let chain_state_path = format!("{db_path}/mainnet/chainstate/"); + let sort_db_path = format!("{db_path}/mainnet/burnchain/sortition"); + let burn_db_path = format!("{db_path}/mainnet/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); let (mut chainstate, _) = @@ -1611,14 +1697,14 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { true, ) .unwrap(); - let mut sort_tx = sortdb.tx_begin_at_tip(); + let sort_tx = sortdb.tx_begin_at_tip(); let blocks_path = chainstate.blocks_path.clone(); let (mut chainstate_tx, clarity_instance) = chainstate .chainstate_tx_begin() .expect("Failed to start chainstate tx"); let mut next_staging_block = - StacksChainState::load_staging_block_info(&chainstate_tx.tx, &index_block_hash) + StacksChainState::load_staging_block_info(&chainstate_tx.tx, &block_id) .expect("Failed to load staging block data") .expect("No such index block hash in block database"); @@ -1630,21 +1716,139 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { .unwrap() .unwrap_or_default(); - let Some(next_microblocks) = - StacksChainState::find_parent_microblock_stream(&chainstate_tx.tx, &next_staging_block) - .unwrap() + let Some(parent_header_info) = + StacksChainState::get_parent_header_info(&mut chainstate_tx, &next_staging_block).unwrap() else { - println!("No microblock stream found for {index_block_hash_hex}"); + println!("Failed to load parent head info for block: {index_block_hash_hex}"); + return; + }; + + let block = + StacksChainState::extract_stacks_block(&next_staging_block).expect("Failed to get block"); + let block_size = next_staging_block.block_data.len() as u64; + + replay_block( + sort_tx, + chainstate_tx, + clarity_instance, + &burnchain_blocks_db, + &parent_header_info, + &next_staging_block.parent_microblock_hash, + next_staging_block.parent_microblock_seq, + &block_id, + &block, + block_size, + &next_staging_block.consensus_hash, + &next_staging_block.anchored_block_hash, + next_staging_block.commit_burn, + next_staging_block.sortition_burn, + ); +} + +/// Process a mock mined block and call `replay_block()` to validate +fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock) { + let chain_state_path = format!("{db_path}/mainnet/chainstate/"); + let sort_db_path = format!("{db_path}/mainnet/burnchain/sortition"); + let burn_db_path = format!("{db_path}/mainnet/burnchain/burnchain.sqlite"); + let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); + + let (mut chainstate, _) = + StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); + + let mut sortdb = SortitionDB::connect( + &sort_db_path, + BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, + &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), + BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), + STACKS_EPOCHS_MAINNET.as_ref(), + PoxConstants::mainnet_default(), + None, + true, + ) + .unwrap(); + let sort_tx = sortdb.tx_begin_at_tip(); + + let (mut chainstate_tx, clarity_instance) = chainstate + .chainstate_tx_begin() + .expect("Failed to start chainstate tx"); + + let block_consensus_hash = &block.consensus_hash; + let block_hash = block.anchored_block.block_hash(); + let block_id = StacksBlockId::new(block_consensus_hash, &block_hash); + let block_size = block + .anchored_block + .block_size() + .map(u64::try_from) + .unwrap_or_else(|e| panic!("Error serializing block {block_hash}: {e}")) + .expect("u64 overflow"); + + let Some(parent_header_info) = StacksChainState::get_anchored_block_header_info( + &mut chainstate_tx, + &block.parent_consensus_hash, + &block.anchored_block.header.parent_block, + ) + .unwrap() else { + println!("Failed to load parent head info for block: {block_hash}"); + return; + }; + + replay_block( + sort_tx, + chainstate_tx, + clarity_instance, + &burnchain_blocks_db, + &parent_header_info, + &block.anchored_block.header.parent_microblock, + block.anchored_block.header.parent_microblock_sequence, + &block_id, + &block.anchored_block, + block_size, + block_consensus_hash, + &block_hash, + // I think the burn is used for miner rewards but not necessary for validation + 0, + 0, + ); +} + +/// Validate a block against chainstate +fn replay_block( + mut sort_tx: IndexDBTx, + mut chainstate_tx: ChainstateTx, + clarity_instance: &mut ClarityInstance, + burnchain_blocks_db: &BurnchainDB, + parent_header_info: &StacksHeaderInfo, + parent_microblock_hash: &BlockHeaderHash, + parent_microblock_seq: u16, + block_id: &StacksBlockId, + block: &StacksBlock, + block_size: u64, + block_consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + block_commit_burn: u64, + block_sortition_burn: u64, +) { + let parent_block_header = match &parent_header_info.anchored_header { + StacksBlockHeaderTypes::Epoch2(bh) => bh, + StacksBlockHeaderTypes::Nakamoto(_) => panic!("Nakamoto blocks not supported yet"), + }; + let parent_block_hash = parent_block_header.block_hash(); + + let Some(next_microblocks) = StacksChainState::inner_find_parent_microblock_stream( + &chainstate_tx.tx, + &block_hash, + &parent_block_hash, + &parent_header_info.consensus_hash, + parent_microblock_hash, + parent_microblock_seq, + ) + .unwrap() else { + println!("No microblock stream found for {block_id}"); return; }; let (burn_header_hash, burn_header_height, burn_header_timestamp, _winning_block_txid) = - match SortitionDB::get_block_snapshot_consensus( - &sort_tx, - &next_staging_block.consensus_hash, - ) - .unwrap() - { + match SortitionDB::get_block_snapshot_consensus(&sort_tx, &block_consensus_hash).unwrap() { Some(sn) => ( sn.burn_header_hash, sn.block_height as u32, @@ -1653,42 +1857,19 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { ), None => { // shouldn't happen - panic!( - "CORRUPTION: staging block {}/{} does not correspond to a burn block", - &next_staging_block.consensus_hash, &next_staging_block.anchored_block_hash - ); + panic!("CORRUPTION: staging block {block_consensus_hash}/{block_hash} does not correspond to a burn block"); } }; info!( "Process block {}/{} = {} in burn block {}, parent microblock {}", - next_staging_block.consensus_hash, - next_staging_block.anchored_block_hash, - &index_block_hash, - &burn_header_hash, - &next_staging_block.parent_microblock_hash, + block_consensus_hash, block_hash, &block_id, &burn_header_hash, parent_microblock_hash, ); - let Some(parent_header_info) = - StacksChainState::get_parent_header_info(&mut chainstate_tx, &next_staging_block).unwrap() - else { - println!("Failed to load parent head info for block: {index_block_hash_hex}"); - return; - }; - - let block = - StacksChainState::extract_stacks_block(&next_staging_block).expect("Failed to get block"); - let block_size = next_staging_block.block_data.len() as u64; - - let parent_block_header = match &parent_header_info.anchored_header { - StacksBlockHeaderTypes::Epoch2(bh) => bh, - StacksBlockHeaderTypes::Nakamoto(_) => panic!("Nakamoto blocks not supported yet"), - }; - if !StacksChainState::check_block_attachment(&parent_block_header, &block.header) { let msg = format!( "Invalid stacks block {}/{} -- does not attach to parent {}/{}", - &next_staging_block.consensus_hash, + &block_consensus_hash, block.block_hash(), parent_block_header.block_hash(), &parent_header_info.consensus_hash @@ -1701,9 +1882,9 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { // block's parent to this block. let next_microblocks = StacksChainState::extract_connecting_microblocks( &parent_header_info, - &next_staging_block.consensus_hash, - &next_staging_block.anchored_block_hash, - &block, + &block_consensus_hash, + &block_hash, + block, next_microblocks, ) .unwrap(); @@ -1717,20 +1898,14 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { ) } }; - assert_eq!( - next_staging_block.parent_microblock_hash, - last_microblock_hash - ); - assert_eq!( - next_staging_block.parent_microblock_seq, - last_microblock_seq - ); + assert_eq!(*parent_microblock_hash, last_microblock_hash); + assert_eq!(parent_microblock_seq, last_microblock_seq); let block_am = StacksChainState::find_stacks_tip_affirmation_map( - &burnchain_blocks_db, + burnchain_blocks_db, sort_tx.tx(), - &next_staging_block.consensus_hash, - &next_staging_block.anchored_block_hash, + block_consensus_hash, + block_hash, ) .unwrap(); @@ -1742,23 +1917,23 @@ fn replay_block(stacks_path: &str, index_block_hash_hex: &str) { &mut sort_tx, &pox_constants, &parent_header_info, - &next_staging_block.consensus_hash, + block_consensus_hash, &burn_header_hash, burn_header_height, burn_header_timestamp, &block, block_size, &next_microblocks, - next_staging_block.commit_burn, - next_staging_block.sortition_burn, + block_commit_burn, + block_sortition_burn, block_am.weight(), true, ) { Ok((_receipt, _, _)) => { - info!("Block processed successfully! block = {index_block_hash}"); + info!("Block processed successfully! block = {block_id}"); } Err(e) => { - println!("Failed processing block! block = {index_block_hash}, error = {e:?}"); + println!("Failed processing block! block = {block_id}, error = {e:?}"); process::exit(1); } }; @@ -1984,85 +2159,3 @@ fn analyze_sortition_mev(argv: Vec) { process::exit(0); } - -fn replay_mock_mining(argv: Vec) { - let print_help_and_exit = || -> ! { - let n = &argv[0]; - eprintln!("Usage:"); - eprintln!(" {n} "); - process::exit(1); - }; - - // Process CLI args - let chainstate_path = argv.get(2).unwrap_or_else(|| print_help_and_exit()); - - let blocks_path = argv - .get(3) - .map(PathBuf::from) - .map(fs::canonicalize) - .transpose() - .unwrap_or_else(|e| panic!("Not a valid path: {e}")) - .unwrap_or_else(|| print_help_and_exit()); - - // Validate directory path - if !blocks_path.is_dir() { - panic!("{blocks_path:?} is not a valid directory"); - } - - // Read entries in directory - let dir_entries = blocks_path - .read_dir() - .unwrap_or_else(|e| panic!("Failed to read {blocks_path:?}: {e}")) - .filter_map(|e| e.ok()); - - // Get filenames, filtering out anything that isn't a regular file - let filenames = dir_entries.filter_map(|e| match e.file_type() { - Ok(t) if t.is_file() => e.file_name().into_string().ok(), - _ => None, - }); - - // Get vec of (block_height, filename), to prepare for sorting - // - // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, - // but that requires reading all files - let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); - let mut indexed_files = filenames - .filter_map(|filename| { - // Use regex to extract block number from filename - let Some(cap) = re.captures(&filename) else { - return None; - }; - let Some(m) = cap.get(0) else { - return None; - }; - let Ok(bh) = m.as_str().parse::() else { - return None; - }; - Some((bh, filename)) - }) - .collect::>(); - - // Sort by block height - indexed_files.sort_by_key(|(bh, _)| *bh); - - if indexed_files.is_empty() { - panic!("No block files found"); - } - - info!( - "Replaying {} blocks starting at {}", - indexed_files.len(), - indexed_files[0].0 - ); - - for (bh, filename) in indexed_files { - let filepath = blocks_path.join(filename); - let block = AssembledAnchorBlock::deserialize_from_file(&filepath) - .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); - debug!("Replaying block from {filepath:?}"; - "block_height" => bh, - "block" => ?block - ); - // TODO: Actually replay block - } -} diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index c6be468a4d..fa9cc4ad35 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2556,6 +2556,7 @@ impl BlockMinerThread { let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); let assembled_block = AssembledAnchorBlock { parent_consensus_hash: parent_block_info.parent_consensus_hash, + consensus_hash: cur_burn_chain_tip.consensus_hash, my_burn_hash: cur_burn_chain_tip.burn_header_hash, my_block_height: cur_burn_chain_tip.block_height, orig_burn_hash: self.burn_block.burn_header_hash, From 29ee7ed425b27bcf076625b25129ef88a596a7d9 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 8 Aug 2024 16:39:15 -0400 Subject: [PATCH 0775/1400] test: Add partial integration test for mock miner replay --- .github/workflows/bitcoin-tests.yml | 1 + stacks-common/src/types/chainstate.rs | 2 +- stackslib/src/chainstate/stacks/miner.rs | 4 +- testnet/stacks-node/src/neon_node.rs | 86 +++++---- .../src/tests/neon_integrations.rs | 179 +++++++++++++++++- 5 files changed, 229 insertions(+), 43 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index e14934558a..3fde40ae47 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -72,6 +72,7 @@ jobs: - tests::neon_integrations::confirm_unparsed_ongoing_ops - tests::neon_integrations::min_txs - tests::neon_integrations::vote_for_aggregate_key_burn_op_test + - tests::neon_integrations::mock_miner_replay - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 47d6c3c499..c5208d02f9 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -30,11 +30,11 @@ impl_byte_array_serde!(TrieHash); pub const TRIEHASH_ENCODED_SIZE: usize = 32; -#[derive(Serialize, Deserialize)] pub struct BurnchainHeaderHash(pub [u8; 32]); impl_array_newtype!(BurnchainHeaderHash, u8, 32); impl_array_hexstring_fmt!(BurnchainHeaderHash); impl_byte_array_newtype!(BurnchainHeaderHash, u8, 32); +impl_byte_array_serde!(BurnchainHeaderHash); pub struct BlockHeaderHash(pub [u8; 32]); impl_array_newtype!(BlockHeaderHash, u8, 32); diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 4cf3e1e65e..f0e4c96307 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -76,9 +76,9 @@ pub struct AssembledAnchorBlock { /// Consensus hash this Stacks block pub consensus_hash: ConsensusHash, /// Burnchain tip's block hash when we finished mining - pub my_burn_hash: BurnchainHeaderHash, + pub burn_hash: BurnchainHeaderHash, /// Burnchain tip's block height when we finished mining - pub my_block_height: u64, + pub burn_block_height: u64, /// Burnchain tip's block hash when we started mining (could be different) pub orig_burn_hash: BurnchainHeaderHash, /// The block we produced diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index fa9cc4ad35..afef51f47c 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -140,7 +140,7 @@ use std::cmp; use std::cmp::Ordering as CmpOrdering; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::io::{Read, Write}; +use std::io::{ErrorKind, Read, Write}; use std::net::SocketAddr; use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; @@ -1123,7 +1123,7 @@ impl BlockMinerThread { ) -> Vec<&AssembledAnchorBlock> { let mut ret = vec![]; for (_, (assembled_block, _)) in last_mined_blocks.iter() { - if assembled_block.my_block_height >= burn_height { + if assembled_block.burn_block_height >= burn_height { ret.push(assembled_block); } } @@ -1633,7 +1633,7 @@ impl BlockMinerThread { &prev_block.anchored_block.block_hash(), &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, - &prev_block.my_burn_hash, + &prev_block.burn_hash, &prev_block.anchored_block.txs.len() ); max_txs = cmp::max(max_txs, prev_block.anchored_block.txs.len()); @@ -1645,7 +1645,7 @@ impl BlockMinerThread { continue; } if prev_block.parent_consensus_hash == *parent_consensus_hash - && prev_block.my_burn_hash == self.burn_block.burn_header_hash + && prev_block.burn_hash == self.burn_block.burn_header_hash && prev_block.anchored_block.header.parent_block == stacks_parent_header.anchored_header.block_hash() { @@ -1677,7 +1677,7 @@ impl BlockMinerThread { // already have. info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no new microblocks ({} <= {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); return None; } @@ -1688,7 +1688,7 @@ impl BlockMinerThread { // fee minus the old BTC fee info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, but there are new microblocks ({} > {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); best_attempt = cmp::max(best_attempt, prev_block.attempt); } @@ -1697,20 +1697,20 @@ impl BlockMinerThread { // no microblock stream to confirm, and the stacks tip hasn't changed info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height); return None; } } } else { - if self.burn_block.burn_header_hash == prev_block.my_burn_hash { + if self.burn_block.burn_header_hash == prev_block.burn_hash { // only try and re-mine if there was no sortition since the last chain tip info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", - parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.my_burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); + parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); best_attempt = cmp::max(best_attempt, prev_block.attempt); } else { info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", - &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.my_burn_hash); + &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); } } } @@ -2554,34 +2554,48 @@ impl BlockMinerThread { } = self.config.get_node_config(false); let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); + if res.is_none() { + self.failed_to_submit_last_attempt = true; + if !mock_mining { + warn!("Relayer: Failed to submit Bitcoin transaction"); + return None; + } + debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); + } else { + self.failed_to_submit_last_attempt = false; + } + let assembled_block = AssembledAnchorBlock { parent_consensus_hash: parent_block_info.parent_consensus_hash, consensus_hash: cur_burn_chain_tip.consensus_hash, - my_burn_hash: cur_burn_chain_tip.burn_header_hash, - my_block_height: cur_burn_chain_tip.block_height, + burn_hash: cur_burn_chain_tip.burn_header_hash, + burn_block_height: cur_burn_chain_tip.block_height, orig_burn_hash: self.burn_block.burn_header_hash, anchored_block, attempt, tenure_begin, }; - if res.is_none() { - self.failed_to_submit_last_attempt = true; - if mock_mining { - debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); - if let Some(dir) = mock_mining_output_dir { - let stacks_block_height = assembled_block.anchored_block.header.total_work.work; - let filename = format!("{stacks_block_height}.json"); - let filepath = dir.join(filename); - assembled_block - .serialize_to_file(&filepath) - .unwrap_or_else(|e| panic!("Failed to write to file '{filepath:?}': {e}")); - } - } else { - warn!("Relayer: Failed to submit Bitcoin transaction"); - return None; + + if mock_mining { + let stacks_block_height = assembled_block.anchored_block.header.total_work.work; + info!("Mock mined Stacks block {stacks_block_height}"); + if let Some(dir) = mock_mining_output_dir { + info!("Writing mock mined Stacks block {stacks_block_height} to file"); + fs::create_dir_all(&dir).unwrap_or_else(|e| match e.kind() { + ErrorKind::AlreadyExists => { /* This is fine */ } + _ => error!("Failed to create directory '{dir:?}': {e}"), + }); + let filename = format!("{stacks_block_height}.json"); + let filepath = dir.join(filename); + assembled_block + .serialize_to_file(&filepath) + .unwrap_or_else(|e| match e.kind() { + ErrorKind::AlreadyExists => { + error!("Failed to overwrite file '{filepath:?}'") + } + _ => error!("Failed to write to file '{filepath:?}': {e}"), + }); } - } else { - self.failed_to_submit_last_attempt = false; } Some(MinerThreadResult::Block( @@ -3010,7 +3024,7 @@ impl RelayerThread { let AssembledAnchorBlock { parent_consensus_hash, anchored_block: mined_block, - my_burn_hash: mined_burn_hash, + burn_hash: mined_burn_hash, attempt: _, .. } = last_mined_block_data; @@ -3423,16 +3437,16 @@ impl RelayerThread { fn clear_stale_mined_blocks(burn_height: u64, last_mined_blocks: MinedBlocks) -> MinedBlocks { let mut ret = HashMap::new(); for (stacks_bhh, (assembled_block, microblock_privkey)) in last_mined_blocks.into_iter() { - if assembled_block.my_block_height < burn_height { + if assembled_block.burn_block_height < burn_height { debug!( "Stale mined block: {} (as of {},{})", - &stacks_bhh, &assembled_block.my_burn_hash, assembled_block.my_block_height + &stacks_bhh, &assembled_block.burn_hash, assembled_block.burn_block_height ); continue; } debug!( "Mined block in-flight: {} (as of {},{})", - &stacks_bhh, &assembled_block.my_burn_hash, assembled_block.my_block_height + &stacks_bhh, &assembled_block.burn_hash, assembled_block.burn_block_height ); ret.insert(stacks_bhh, (assembled_block, microblock_privkey)); } @@ -3760,7 +3774,7 @@ impl RelayerThread { ) => { // finished mining a block if BlockMinerThread::find_inflight_mined_blocks( - last_mined_block.my_block_height, + last_mined_block.burn_block_height, &self.last_mined_blocks, ) .len() @@ -3769,7 +3783,7 @@ impl RelayerThread { // first time we've mined a block in this burnchain block debug!( "Bump block processed for burnchain block {}", - &last_mined_block.my_block_height + &last_mined_block.burn_block_height ); self.globals.counters.bump_blocks_processed(); } @@ -3779,7 +3793,7 @@ impl RelayerThread { &last_mined_block.anchored_block.block_hash() ); - let bhh = last_mined_block.my_burn_hash.clone(); + let bhh = last_mined_block.burn_hash.clone(); let orig_bhh = last_mined_block.orig_burn_hash.clone(); let tenure_begin = last_mined_block.tenure_begin; diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index ac6a3ea978..4474a9e991 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1,5 +1,5 @@ use std::collections::{HashMap, HashSet}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{mpsc, Arc}; use std::time::{Duration, Instant}; @@ -12389,6 +12389,7 @@ fn next_block_and_wait_all( btc_controller: &mut BitcoinRegtestController, miner_blocks_processed: &Arc, follower_blocks_processed: &[&Arc], + timeout: Option, ) -> bool { let followers_current: Vec<_> = follower_blocks_processed .iter() @@ -12400,17 +12401,26 @@ fn next_block_and_wait_all( } // wait for followers to catch up + let timer = Instant::now(); loop { let finished = follower_blocks_processed .iter() .zip(followers_current.iter()) - .map(|(blocks_processed, current)| blocks_processed.load(Ordering::SeqCst) <= *current) - .fold(true, |acc, loaded| acc && loaded); + .map(|(blocks_processed, start_count)| { + blocks_processed.load(Ordering::SeqCst) > *start_count + }) + .all(|b| b); if finished { break; } + if let Some(t) = timeout { + if timer.elapsed() > t { + panic!("next_block_and_wait_all() timed out after {t:?}") + } + } + thread::sleep(Duration::from_millis(100)); } @@ -12425,6 +12435,7 @@ fn bitcoin_reorg_flap_with_follower() { } let (conf, _miner_account) = neon_integration_test_conf(); + let timeout = None; let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -12485,13 +12496,19 @@ fn bitcoin_reorg_flap_with_follower() { eprintln!("Follower bootup complete!"); // first block wakes up the run loop - next_block_and_wait_all(&mut btc_regtest_controller, &miner_blocks_processed, &[]); + next_block_and_wait_all( + &mut btc_regtest_controller, + &miner_blocks_processed, + &[], + timeout, + ); // first block will hold our VRF registration next_block_and_wait_all( &mut btc_regtest_controller, &miner_blocks_processed, &[&follower_blocks_processed], + timeout, ); let mut miner_sort_height = miner_channel.get_sortitions_processed(); @@ -12506,6 +12523,7 @@ fn bitcoin_reorg_flap_with_follower() { &mut btc_regtest_controller, &miner_blocks_processed, &[&follower_blocks_processed], + timeout, ); miner_sort_height = miner_channel.get_sortitions_processed(); follower_sort_height = miner_channel.get_sortitions_processed(); @@ -12583,3 +12601,156 @@ fn bitcoin_reorg_flap_with_follower() { miner_channel.stop_chains_coordinator(); follower_channel.stop_chains_coordinator(); } + +/// Tests the following: +/// - Mock miner output to file +/// - Test replay of mock mined blocks using `stacks-inspect replay-mock-mining`` +#[test] +#[ignore] +fn mock_miner_replay() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let timeout = Some(Duration::from_secs(30)); + // Had to add this so that mock miner makes an attempt on EVERY block + let block_gap = Duration::from_secs(1); + + let test_dir = PathBuf::from("/tmp/stacks-integration-test-mock_miner_replay"); + _ = fs::remove_dir_all(&test_dir); + + let (conf, _miner_account) = neon_integration_test_conf(); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut miner_run_loop = neon::RunLoop::new(conf.clone()); + let miner_blocks_processed = miner_run_loop.get_blocks_processed_arc(); + let miner_channel = miner_run_loop.get_coordinator_channel().unwrap(); + + let mut follower_conf = conf.clone(); + follower_conf.events_observers.clear(); + follower_conf.node.mock_mining = true; + follower_conf.node.mock_mining_output_dir = Some(test_dir.join("mock-miner-output")); + follower_conf.node.working_dir = format!("{}-follower", &conf.node.working_dir); + follower_conf.node.seed = vec![0x01; 32]; + follower_conf.node.local_peer_seed = vec![0x02; 32]; + + let mut rng = rand::thread_rng(); + + let (rpc_port, p2p_port) = loop { + let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if a != b { + break (a, b); + } + }; + + let localhost = "127.0.0.1"; + follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); + follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); + follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); + follower_conf.node.p2p_address = format!("{localhost}:{p2p_port}"); + + thread::spawn(move || miner_run_loop.start(None, 0)); + wait_for_runloop(&miner_blocks_processed); + + // figure out the started node's port + let node_info = get_chain_info(&conf); + follower_conf.node.add_bootstrap_node( + &format!( + "{}@{}", + &node_info.node_public_key.unwrap(), + conf.node.p2p_bind + ), + CHAIN_ID_TESTNET, + PEER_VERSION_TESTNET, + ); + + let mut follower_run_loop = neon::RunLoop::new(follower_conf.clone()); + let follower_blocks_processed = follower_run_loop.get_blocks_processed_arc(); + let follower_channel = follower_run_loop.get_coordinator_channel().unwrap(); + + let miner_blocks_processed_start = miner_channel.get_stacks_blocks_processed(); + let follower_blocks_processed_start = follower_channel.get_stacks_blocks_processed(); + + thread::spawn(move || follower_run_loop.start(None, 0)); + wait_for_runloop(&follower_blocks_processed); + + eprintln!("Follower bootup complete!"); + + // first block wakes up the run loop + next_block_and_wait_all( + &mut btc_regtest_controller, + &miner_blocks_processed, + &[], + timeout, + ); + + thread::sleep(block_gap); + + // first block will hold our VRF registration + next_block_and_wait_all( + &mut btc_regtest_controller, + &miner_blocks_processed, + &[&follower_blocks_processed], + timeout, + ); + + thread::sleep(block_gap); + + // Third block will be the first mined Stacks block. + next_block_and_wait_all( + &mut btc_regtest_controller, + &miner_blocks_processed, + &[&follower_blocks_processed], + timeout, + ); + + thread::sleep(block_gap); + + // ---------- Setup finished, start test ---------- + + // Mine some blocks for mock miner output + for _ in 0..10 { + next_block_and_wait_all( + &mut btc_regtest_controller, + &miner_blocks_processed, + &[&follower_blocks_processed], + timeout, + ); + thread::sleep(block_gap); + } + + let miner_blocks_processed_end = miner_channel.get_stacks_blocks_processed(); + let follower_blocks_processed_end = follower_channel.get_stacks_blocks_processed(); + + let blocks_dir = follower_conf.node.mock_mining_output_dir.clone().unwrap(); + let file_count = follower_conf + .node + .mock_mining_output_dir + .unwrap() + .read_dir() + .unwrap_or_else(|e| panic!("Failed to read directory: {e}")) + .count(); + + // Check that expected output files exist + assert!(test_dir.is_dir()); + assert!(blocks_dir.is_dir()); + assert_eq!(file_count, 12); + assert_eq!(miner_blocks_processed_end, follower_blocks_processed_end); + + // ---------- Test finished, clean up ---------- + + btcd_controller.stop_bitcoind().unwrap(); + miner_channel.stop_chains_coordinator(); + follower_channel.stop_chains_coordinator(); +} From 674cb5f3aa7c9d544da43f41528a6d2bf94ba94f Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 12 Aug 2024 23:06:56 -0400 Subject: [PATCH 0776/1400] refactor: Move parts of stackslib CLI out of main.rs for integration testing --- stackslib/src/cli.rs | 471 ++++++++++++++++++ stackslib/src/lib.rs | 9 +- stackslib/src/main.rs | 466 +---------------- .../src/tests/neon_integrations.rs | 22 +- 4 files changed, 508 insertions(+), 460 deletions(-) create mode 100644 stackslib/src/cli.rs diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs new file mode 100644 index 0000000000..27efb44bb9 --- /dev/null +++ b/stackslib/src/cli.rs @@ -0,0 +1,471 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Subcommands used by `stacks-inspect` binary + +use std::path::PathBuf; +use std::time::Instant; +use std::{env, fs, io, process, thread}; + +use clarity::types::chainstate::SortitionId; +use db::ChainstateTx; +use regex::Regex; +use rusqlite::{Connection, OpenFlags}; +use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; +use stacks_common::types::sqlite::NO_PARAMS; + +use crate::burnchains::db::BurnchainDB; +use crate::burnchains::PoxConstants; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle, SortitionHandleContext}; +use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use crate::chainstate::stacks::db::blocks::StagingBlock; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; +use crate::chainstate::stacks::miner::*; +use crate::chainstate::stacks::*; +use crate::clarity_vm::clarity::ClarityInstance; +use crate::core::*; +use crate::util_lib::db::IndexDBTx; + +/// Replay blocks from chainstate database +/// Takes args in CLI format: ` [args...]` +/// Terminates on error using `process::exit()` +pub fn command_replay_block(argv: &[String]) { + let print_help_and_exit = || -> ! { + let n = &argv[0]; + eprintln!("Usage:"); + eprintln!(" {n} "); + eprintln!(" {n} prefix "); + eprintln!(" {n} index-range "); + eprintln!(" {n} range "); + eprintln!(" {n} "); + process::exit(1); + }; + let start = Instant::now(); + let db_path = argv.get(1).unwrap_or_else(|| print_help_and_exit()); + let mode = argv.get(2).map(String::as_str); + let staging_blocks_db_path = format!("{db_path}/chainstate/vm/index.sqlite"); + let conn = + Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY) + .unwrap(); + + let query = match mode { + Some("prefix") => format!( + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", + argv[3] + ), + Some("first") => format!( + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", + argv[3] + ), + Some("range") => { + let arg4 = argv[3] + .parse::() + .expect(" not a valid u64"); + let arg5 = argv[4].parse::().expect(" not a valid u64"); + let start = arg4.saturating_sub(1); + let blocks = arg5.saturating_sub(arg4); + format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}") + } + Some("index-range") => { + let start = argv[3] + .parse::() + .expect(" not a valid u64"); + let end = argv[4].parse::().expect(" not a valid u64"); + let blocks = end.saturating_sub(start); + format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") + } + Some("last") => format!( + "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", + argv[3] + ), + Some(_) => print_help_and_exit(), + // Default to ALL blocks + None => "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0".into(), + }; + + let mut stmt = conn.prepare(&query).unwrap(); + let mut hashes_set = stmt.query(NO_PARAMS).unwrap(); + + let mut index_block_hashes: Vec = vec![]; + while let Ok(Some(row)) = hashes_set.next() { + index_block_hashes.push(row.get(0).unwrap()); + } + + let total = index_block_hashes.len(); + println!("Will check {total} blocks"); + for (i, index_block_hash) in index_block_hashes.iter().enumerate() { + if i % 100 == 0 { + println!("Checked {i}..."); + } + replay_staging_block(db_path, index_block_hash); + } + println!("Finished. run_time_seconds = {}", start.elapsed().as_secs()); +} + +/// Replay mock mined blocks from JSON files +/// Takes args in CLI format: ` [args...]` +/// Terminates on error using `process::exit()` +pub fn command_replay_mock_mining(argv: &[String]) { + let print_help_and_exit = || -> ! { + let n = &argv[0]; + eprintln!("Usage:"); + eprintln!(" {n} "); + process::exit(1); + }; + + // Process CLI args + let db_path = argv.get(1).unwrap_or_else(|| print_help_and_exit()); + + let blocks_path = argv + .get(2) + .map(PathBuf::from) + .map(fs::canonicalize) + .transpose() + .unwrap_or_else(|e| panic!("Not a valid path: {e}")) + .unwrap_or_else(|| print_help_and_exit()); + + // Validate directory path + if !blocks_path.is_dir() { + panic!("{blocks_path:?} is not a valid directory"); + } + + // Read entries in directory + let dir_entries = blocks_path + .read_dir() + .unwrap_or_else(|e| panic!("Failed to read {blocks_path:?}: {e}")) + .filter_map(|e| e.ok()); + + // Get filenames, filtering out anything that isn't a regular file + let filenames = dir_entries.filter_map(|e| match e.file_type() { + Ok(t) if t.is_file() => e.file_name().into_string().ok(), + _ => None, + }); + + // Get vec of (block_height, filename), to prepare for sorting + // + // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, + // but that requires reading all files + let re = Regex::new(r"^([0-9]+)\.json$").unwrap(); + let mut indexed_files = filenames + .filter_map(|filename| { + // Use regex to extract block number from filename + let Some(cap) = re.captures(&filename) else { + debug!("Regex capture failed on {filename}"); + return None; + }; + // cap.get(0) return entire filename + // cap.get(1) return block number + let i = 1; + let Some(m) = cap.get(i) else { + debug!("cap.get({i}) failed on {filename} match"); + return None; + }; + let Ok(bh) = m.as_str().parse::() else { + debug!("parse::() failed on '{}'", m.as_str()); + return None; + }; + Some((bh, filename)) + }) + .collect::>(); + + // Sort by block height + indexed_files.sort_by_key(|(bh, _)| *bh); + + if indexed_files.is_empty() { + panic!("No block files found in {blocks_path:?}"); + } + + info!( + "Replaying {} blocks starting at {}", + indexed_files.len(), + indexed_files[0].0 + ); + + for (bh, filename) in indexed_files { + let filepath = blocks_path.join(filename); + let block = AssembledAnchorBlock::deserialize_from_file(&filepath) + .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); + info!("Replaying block from {filepath:?}"; + "block_height" => bh, + "block" => ?block + ); + replay_mock_mined_block(&db_path, block); + } +} + +/// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate +fn replay_staging_block(db_path: &str, index_block_hash_hex: &str) { + let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); + let chain_state_path = format!("{db_path}/chainstate/"); + let sort_db_path = format!("{db_path}/burnchain/sortition"); + let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); + let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); + + let (mut chainstate, _) = + StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); + + let mut sortdb = SortitionDB::connect( + &sort_db_path, + BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, + &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), + BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), + STACKS_EPOCHS_MAINNET.as_ref(), + PoxConstants::mainnet_default(), + None, + true, + ) + .unwrap(); + let sort_tx = sortdb.tx_begin_at_tip(); + + let blocks_path = chainstate.blocks_path.clone(); + let (mut chainstate_tx, clarity_instance) = chainstate + .chainstate_tx_begin() + .expect("Failed to start chainstate tx"); + let mut next_staging_block = + StacksChainState::load_staging_block_info(&chainstate_tx.tx, &block_id) + .expect("Failed to load staging block data") + .expect("No such index block hash in block database"); + + next_staging_block.block_data = StacksChainState::load_block_bytes( + &blocks_path, + &next_staging_block.consensus_hash, + &next_staging_block.anchored_block_hash, + ) + .unwrap() + .unwrap_or_default(); + + let Some(parent_header_info) = + StacksChainState::get_parent_header_info(&mut chainstate_tx, &next_staging_block).unwrap() + else { + println!("Failed to load parent head info for block: {index_block_hash_hex}"); + return; + }; + + let block = + StacksChainState::extract_stacks_block(&next_staging_block).expect("Failed to get block"); + let block_size = next_staging_block.block_data.len() as u64; + + replay_block( + sort_tx, + chainstate_tx, + clarity_instance, + &burnchain_blocks_db, + &parent_header_info, + &next_staging_block.parent_microblock_hash, + next_staging_block.parent_microblock_seq, + &block_id, + &block, + block_size, + &next_staging_block.consensus_hash, + &next_staging_block.anchored_block_hash, + next_staging_block.commit_burn, + next_staging_block.sortition_burn, + ); +} + +/// Process a mock mined block and call `replay_block()` to validate +fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock) { + let chain_state_path = format!("{db_path}/chainstate/"); + let sort_db_path = format!("{db_path}/burnchain/sortition"); + let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); + let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); + + let (mut chainstate, _) = + StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); + + let mut sortdb = SortitionDB::connect( + &sort_db_path, + BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, + &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), + BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), + STACKS_EPOCHS_MAINNET.as_ref(), + PoxConstants::mainnet_default(), + None, + true, + ) + .unwrap(); + let sort_tx = sortdb.tx_begin_at_tip(); + + let (mut chainstate_tx, clarity_instance) = chainstate + .chainstate_tx_begin() + .expect("Failed to start chainstate tx"); + + let block_consensus_hash = &block.consensus_hash; + let block_hash = block.anchored_block.block_hash(); + let block_id = StacksBlockId::new(block_consensus_hash, &block_hash); + let block_size = block + .anchored_block + .block_size() + .map(u64::try_from) + .unwrap_or_else(|e| panic!("Error serializing block {block_hash}: {e}")) + .expect("u64 overflow"); + + let Some(parent_header_info) = StacksChainState::get_anchored_block_header_info( + &mut chainstate_tx, + &block.parent_consensus_hash, + &block.anchored_block.header.parent_block, + ) + .unwrap() else { + println!("Failed to load parent head info for block: {block_hash}"); + return; + }; + + replay_block( + sort_tx, + chainstate_tx, + clarity_instance, + &burnchain_blocks_db, + &parent_header_info, + &block.anchored_block.header.parent_microblock, + block.anchored_block.header.parent_microblock_sequence, + &block_id, + &block.anchored_block, + block_size, + block_consensus_hash, + &block_hash, + // I think the burn is used for miner rewards but not necessary for validation + 0, + 0, + ); +} + +/// Validate a block against chainstate +fn replay_block( + mut sort_tx: IndexDBTx, + mut chainstate_tx: ChainstateTx, + clarity_instance: &mut ClarityInstance, + burnchain_blocks_db: &BurnchainDB, + parent_header_info: &StacksHeaderInfo, + parent_microblock_hash: &BlockHeaderHash, + parent_microblock_seq: u16, + block_id: &StacksBlockId, + block: &StacksBlock, + block_size: u64, + block_consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + block_commit_burn: u64, + block_sortition_burn: u64, +) { + let parent_block_header = match &parent_header_info.anchored_header { + StacksBlockHeaderTypes::Epoch2(bh) => bh, + StacksBlockHeaderTypes::Nakamoto(_) => panic!("Nakamoto blocks not supported yet"), + }; + let parent_block_hash = parent_block_header.block_hash(); + + let Some(next_microblocks) = StacksChainState::inner_find_parent_microblock_stream( + &chainstate_tx.tx, + &block_hash, + &parent_block_hash, + &parent_header_info.consensus_hash, + parent_microblock_hash, + parent_microblock_seq, + ) + .unwrap() else { + println!("No microblock stream found for {block_id}"); + return; + }; + + let (burn_header_hash, burn_header_height, burn_header_timestamp, _winning_block_txid) = + match SortitionDB::get_block_snapshot_consensus(&sort_tx, &block_consensus_hash).unwrap() { + Some(sn) => ( + sn.burn_header_hash, + sn.block_height as u32, + sn.burn_header_timestamp, + sn.winning_block_txid, + ), + None => { + // shouldn't happen + panic!("CORRUPTION: staging block {block_consensus_hash}/{block_hash} does not correspond to a burn block"); + } + }; + + info!( + "Process block {}/{} = {} in burn block {}, parent microblock {}", + block_consensus_hash, block_hash, &block_id, &burn_header_hash, parent_microblock_hash, + ); + + if !StacksChainState::check_block_attachment(&parent_block_header, &block.header) { + let msg = format!( + "Invalid stacks block {}/{} -- does not attach to parent {}/{}", + &block_consensus_hash, + block.block_hash(), + parent_block_header.block_hash(), + &parent_header_info.consensus_hash + ); + println!("{msg}"); + return; + } + + // validation check -- validate parent microblocks and find the ones that connect the + // block's parent to this block. + let next_microblocks = StacksChainState::extract_connecting_microblocks( + &parent_header_info, + &block_consensus_hash, + &block_hash, + block, + next_microblocks, + ) + .unwrap(); + let (last_microblock_hash, last_microblock_seq) = match next_microblocks.len() { + 0 => (EMPTY_MICROBLOCK_PARENT_HASH.clone(), 0), + _ => { + let l = next_microblocks.len(); + ( + next_microblocks[l - 1].block_hash(), + next_microblocks[l - 1].header.sequence, + ) + } + }; + assert_eq!(*parent_microblock_hash, last_microblock_hash); + assert_eq!(parent_microblock_seq, last_microblock_seq); + + let block_am = StacksChainState::find_stacks_tip_affirmation_map( + burnchain_blocks_db, + sort_tx.tx(), + block_consensus_hash, + block_hash, + ) + .unwrap(); + + let pox_constants = sort_tx.context.pox_constants.clone(); + + match StacksChainState::append_block( + &mut chainstate_tx, + clarity_instance, + &mut sort_tx, + &pox_constants, + &parent_header_info, + block_consensus_hash, + &burn_header_hash, + burn_header_height, + burn_header_timestamp, + &block, + block_size, + &next_microblocks, + block_commit_burn, + block_sortition_burn, + block_am.weight(), + true, + ) { + Ok((_receipt, _, _)) => { + info!("Block processed successfully! block = {block_id}"); + } + Err(e) => { + println!("Failed processing block! block = {block_id}, error = {e:?}"); + process::exit(1); + } + }; +} diff --git a/stackslib/src/lib.rs b/stackslib/src/lib.rs index bd634cef64..31f97628a6 100644 --- a/stackslib/src/lib.rs +++ b/stackslib/src/lib.rs @@ -59,18 +59,15 @@ pub extern crate libstackerdb; pub mod chainstate; pub mod burnchains; - +pub mod clarity_cli; /// A high level library for interacting with the Clarity vm pub mod clarity_vm; +pub mod cli; pub mod core; +pub mod cost_estimates; pub mod deps; - pub mod monitoring; -pub mod cost_estimates; - -pub mod clarity_cli; - // set via _compile-time_ envars const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH"); const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT"); diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index fbf64655d2..479bfaa9b7 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused_imports)] #![allow(dead_code)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] @@ -23,14 +22,9 @@ #[macro_use] extern crate stacks_common; -#[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] +#[macro_use(slog_debug, slog_info, slog_warn)] extern crate slog; -use blockstack_lib::clarity_vm::clarity::ClarityInstance; -use clarity::types::chainstate::SortitionId; -use db::ChainstateTx; -use regex::Regex; -use stacks_common::types::MempoolCollectionBehavior; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; @@ -39,23 +33,16 @@ use tikv_jemallocator::Jemalloc; static GLOBAL: Jemalloc = Jemalloc; use std::collections::{BTreeMap, HashMap, HashSet}; -use std::fs::{File, OpenOptions}; +use std::fs::File; use std::io::prelude::*; use std::io::BufReader; -use std::path::PathBuf; -use std::time::Instant; use std::{env, fs, io, process, thread}; -use blockstack_lib::burnchains::bitcoin::indexer::{ - BitcoinIndexer, BitcoinIndexerConfig, BitcoinIndexerRuntime, -}; use blockstack_lib::burnchains::bitcoin::{spv, BitcoinNetworkType}; use blockstack_lib::burnchains::db::{BurnchainBlockData, BurnchainDB}; -use blockstack_lib::burnchains::{ - Address, Burnchain, PoxConstants, Txid, BLOCKSTACK_MAGIC_MAINNET, -}; +use blockstack_lib::burnchains::{Address, Burnchain, PoxConstants}; use blockstack_lib::chainstate::burn::db::sortdb::{ - get_block_commit_by_txid, SortitionDB, SortitionHandle, SortitionHandleContext, + get_block_commit_by_txid, SortitionDB, SortitionHandle, }; use blockstack_lib::chainstate::burn::operations::BlockstackOperationType; use blockstack_lib::chainstate::burn::{BlockSnapshot, ConsensusHash}; @@ -63,7 +50,7 @@ use blockstack_lib::chainstate::coordinator::{get_reward_cycle_info, OnChainRewa use blockstack_lib::chainstate::nakamoto::NakamotoChainState; use blockstack_lib::chainstate::stacks::db::blocks::{DummyEventDispatcher, StagingBlock}; use blockstack_lib::chainstate::stacks::db::{ - ChainStateBootData, StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo, + ChainStateBootData, StacksBlockHeaderTypes, StacksChainState, }; use blockstack_lib::chainstate::stacks::index::marf::{MARFOpenOpts, MarfConnection, MARF}; use blockstack_lib::chainstate::stacks::index::ClarityMarfTrieId; @@ -72,7 +59,6 @@ use blockstack_lib::chainstate::stacks::{StacksBlockHeader, *}; use blockstack_lib::clarity::vm::costs::ExecutionCost; use blockstack_lib::clarity::vm::types::StacksAddressExtensions; use blockstack_lib::clarity::vm::ClarityVersion; -use blockstack_lib::clarity_cli::vm_execute; use blockstack_lib::core::{MemPoolDB, *}; use blockstack_lib::cost_estimates::metrics::UnitMetric; use blockstack_lib::cost_estimates::UnitEstimator; @@ -80,24 +66,24 @@ use blockstack_lib::net::db::LocalPeer; use blockstack_lib::net::p2p::PeerNetwork; use blockstack_lib::net::relay::Relayer; use blockstack_lib::net::StacksMessage; -use blockstack_lib::util_lib::db::{sqlite_open, IndexDBTx}; +use blockstack_lib::util_lib::db::sqlite_open; use blockstack_lib::util_lib::strings::UrlString; -use blockstack_lib::{clarity_cli, util_lib}; +use blockstack_lib::{clarity_cli, cli}; use libstackerdb::StackerDBChunkData; -use rusqlite::types::ToSql; use rusqlite::{params, Connection, Error as SqliteError, OpenFlags}; use serde_json::{json, Value}; use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, PoxId, StacksAddress, StacksBlockId, + BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, }; use stacks_common::types::net::PeerAddress; use stacks_common::types::sqlite::NO_PARAMS; +use stacks_common::types::MempoolCollectionBehavior; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; use stacks_common::util::retry::LogReader; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::VRFProof; -use stacks_common::util::{get_epoch_time_ms, log, sleep_ms}; +use stacks_common::util::{get_epoch_time_ms, sleep_ms}; #[cfg_attr(test, mutants::skip)] fn main() { @@ -881,83 +867,6 @@ simulating a miner. return; } - if argv[1] == "replay-block" { - let print_help_and_exit = || -> ! { - let n = &argv[0]; - eprintln!("Usage:"); - eprintln!(" {n} "); - eprintln!(" {n} prefix "); - eprintln!(" {n} index-range "); - eprintln!(" {n} range "); - eprintln!(" {n} "); - process::exit(1); - }; - if argv.len() < 2 { - print_help_and_exit(); - } - let start = Instant::now(); - let db_path = &argv[2]; - let mode = argv.get(3).map(String::as_str); - let staging_blocks_db_path = format!("{db_path}/mainnet/chainstate/vm/index.sqlite"); - let conn = - Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY) - .unwrap(); - - let query = match mode { - Some("prefix") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", - argv[4] - ), - Some("first") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", - argv[4] - ), - Some("range") => { - let arg4 = argv[4] - .parse::() - .expect(" not a valid u64"); - let arg5 = argv[5].parse::().expect(" not a valid u64"); - let start = arg4.saturating_sub(1); - let blocks = arg5.saturating_sub(arg4); - format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}") - } - Some("index-range") => { - let start = argv[4] - .parse::() - .expect(" not a valid u64"); - let end = argv[5].parse::().expect(" not a valid u64"); - let blocks = end.saturating_sub(start); - format!("SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") - } - Some("last") => format!( - "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", - argv[4] - ), - Some(_) => print_help_and_exit(), - // Default to ALL blocks - None => "SELECT index_block_hash FROM staging_blocks WHERE orphaned = 0".into(), - }; - - let mut stmt = conn.prepare(&query).unwrap(); - let mut hashes_set = stmt.query(NO_PARAMS).unwrap(); - - let mut index_block_hashes: Vec = vec![]; - while let Ok(Some(row)) = hashes_set.next() { - index_block_hashes.push(row.get(0).unwrap()); - } - - let total = index_block_hashes.len(); - println!("Will check {total} blocks"); - for (i, index_block_hash) in index_block_hashes.iter().enumerate() { - if i % 100 == 0 { - println!("Checked {i}..."); - } - replay_staging_block(db_path, index_block_hash); - } - println!("Finished. run_time_seconds = {}", start.elapsed().as_secs()); - process::exit(0); - } - if argv[1] == "deserialize-db" { if argv.len() < 4 { eprintln!("Usage: {} clarity_sqlite_db [byte-prefix]", &argv[0]); @@ -1345,8 +1254,13 @@ simulating a miner. return; } + if argv[1] == "replay-block" { + cli::command_replay_block(&argv[1..]); + process::exit(0); + } + if argv[1] == "replay-mock-mining" { - replay_mock_mining(argv); + cli::command_replay_mock_mining(&argv[1..]); process::exit(0); } @@ -1357,7 +1271,7 @@ simulating a miner. } #[cfg_attr(test, mutants::skip)] -fn tip_mine() { +pub fn tip_mine() { let argv: Vec = env::args().collect(); if argv.len() < 6 { eprintln!( @@ -1593,352 +1507,6 @@ simulating a miner. process::exit(0); } -fn replay_mock_mining(argv: Vec) { - let print_help_and_exit = || -> ! { - let n = &argv[0]; - eprintln!("Usage:"); - eprintln!(" {n} "); - process::exit(1); - }; - - // Process CLI args - let db_path = argv.get(2).unwrap_or_else(|| print_help_and_exit()); - - let blocks_path = argv - .get(3) - .map(PathBuf::from) - .map(fs::canonicalize) - .transpose() - .unwrap_or_else(|e| panic!("Not a valid path: {e}")) - .unwrap_or_else(|| print_help_and_exit()); - - // Validate directory path - if !blocks_path.is_dir() { - panic!("{blocks_path:?} is not a valid directory"); - } - - // Read entries in directory - let dir_entries = blocks_path - .read_dir() - .unwrap_or_else(|e| panic!("Failed to read {blocks_path:?}: {e}")) - .filter_map(|e| e.ok()); - - // Get filenames, filtering out anything that isn't a regular file - let filenames = dir_entries.filter_map(|e| match e.file_type() { - Ok(t) if t.is_file() => e.file_name().into_string().ok(), - _ => None, - }); - - // Get vec of (block_height, filename), to prepare for sorting - // - // NOTE: Trusting the filename is not ideal. We could sort on data read from the file, - // but that requires reading all files - let re = Regex::new(r"^([0-9]+\.json)$").unwrap(); - let mut indexed_files = filenames - .filter_map(|filename| { - // Use regex to extract block number from filename - let Some(cap) = re.captures(&filename) else { - return None; - }; - let Some(m) = cap.get(0) else { - return None; - }; - let Ok(bh) = m.as_str().parse::() else { - return None; - }; - Some((bh, filename)) - }) - .collect::>(); - - // Sort by block height - indexed_files.sort_by_key(|(bh, _)| *bh); - - if indexed_files.is_empty() { - panic!("No block files found"); - } - - info!( - "Replaying {} blocks starting at {}", - indexed_files.len(), - indexed_files[0].0 - ); - - for (bh, filename) in indexed_files { - let filepath = blocks_path.join(filename); - let block = AssembledAnchorBlock::deserialize_from_file(&filepath) - .unwrap_or_else(|e| panic!("Error reading block {bh} from file: {e}")); - debug!("Replaying block from {filepath:?}"; - "block_height" => bh, - "block" => ?block - ); - replay_mock_mined_block(&db_path, block); - } -} - -/// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate -fn replay_staging_block(db_path: &str, index_block_hash_hex: &str) { - let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); - let chain_state_path = format!("{db_path}/mainnet/chainstate/"); - let sort_db_path = format!("{db_path}/mainnet/burnchain/sortition"); - let burn_db_path = format!("{db_path}/mainnet/burnchain/burnchain.sqlite"); - let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); - - let (mut chainstate, _) = - StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); - - let mut sortdb = SortitionDB::connect( - &sort_db_path, - BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, - &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), - BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), - STACKS_EPOCHS_MAINNET.as_ref(), - PoxConstants::mainnet_default(), - None, - true, - ) - .unwrap(); - let sort_tx = sortdb.tx_begin_at_tip(); - - let blocks_path = chainstate.blocks_path.clone(); - let (mut chainstate_tx, clarity_instance) = chainstate - .chainstate_tx_begin() - .expect("Failed to start chainstate tx"); - let mut next_staging_block = - StacksChainState::load_staging_block_info(&chainstate_tx.tx, &block_id) - .expect("Failed to load staging block data") - .expect("No such index block hash in block database"); - - next_staging_block.block_data = StacksChainState::load_block_bytes( - &blocks_path, - &next_staging_block.consensus_hash, - &next_staging_block.anchored_block_hash, - ) - .unwrap() - .unwrap_or_default(); - - let Some(parent_header_info) = - StacksChainState::get_parent_header_info(&mut chainstate_tx, &next_staging_block).unwrap() - else { - println!("Failed to load parent head info for block: {index_block_hash_hex}"); - return; - }; - - let block = - StacksChainState::extract_stacks_block(&next_staging_block).expect("Failed to get block"); - let block_size = next_staging_block.block_data.len() as u64; - - replay_block( - sort_tx, - chainstate_tx, - clarity_instance, - &burnchain_blocks_db, - &parent_header_info, - &next_staging_block.parent_microblock_hash, - next_staging_block.parent_microblock_seq, - &block_id, - &block, - block_size, - &next_staging_block.consensus_hash, - &next_staging_block.anchored_block_hash, - next_staging_block.commit_burn, - next_staging_block.sortition_burn, - ); -} - -/// Process a mock mined block and call `replay_block()` to validate -fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock) { - let chain_state_path = format!("{db_path}/mainnet/chainstate/"); - let sort_db_path = format!("{db_path}/mainnet/burnchain/sortition"); - let burn_db_path = format!("{db_path}/mainnet/burnchain/burnchain.sqlite"); - let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); - - let (mut chainstate, _) = - StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); - - let mut sortdb = SortitionDB::connect( - &sort_db_path, - BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, - &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), - BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), - STACKS_EPOCHS_MAINNET.as_ref(), - PoxConstants::mainnet_default(), - None, - true, - ) - .unwrap(); - let sort_tx = sortdb.tx_begin_at_tip(); - - let (mut chainstate_tx, clarity_instance) = chainstate - .chainstate_tx_begin() - .expect("Failed to start chainstate tx"); - - let block_consensus_hash = &block.consensus_hash; - let block_hash = block.anchored_block.block_hash(); - let block_id = StacksBlockId::new(block_consensus_hash, &block_hash); - let block_size = block - .anchored_block - .block_size() - .map(u64::try_from) - .unwrap_or_else(|e| panic!("Error serializing block {block_hash}: {e}")) - .expect("u64 overflow"); - - let Some(parent_header_info) = StacksChainState::get_anchored_block_header_info( - &mut chainstate_tx, - &block.parent_consensus_hash, - &block.anchored_block.header.parent_block, - ) - .unwrap() else { - println!("Failed to load parent head info for block: {block_hash}"); - return; - }; - - replay_block( - sort_tx, - chainstate_tx, - clarity_instance, - &burnchain_blocks_db, - &parent_header_info, - &block.anchored_block.header.parent_microblock, - block.anchored_block.header.parent_microblock_sequence, - &block_id, - &block.anchored_block, - block_size, - block_consensus_hash, - &block_hash, - // I think the burn is used for miner rewards but not necessary for validation - 0, - 0, - ); -} - -/// Validate a block against chainstate -fn replay_block( - mut sort_tx: IndexDBTx, - mut chainstate_tx: ChainstateTx, - clarity_instance: &mut ClarityInstance, - burnchain_blocks_db: &BurnchainDB, - parent_header_info: &StacksHeaderInfo, - parent_microblock_hash: &BlockHeaderHash, - parent_microblock_seq: u16, - block_id: &StacksBlockId, - block: &StacksBlock, - block_size: u64, - block_consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - block_commit_burn: u64, - block_sortition_burn: u64, -) { - let parent_block_header = match &parent_header_info.anchored_header { - StacksBlockHeaderTypes::Epoch2(bh) => bh, - StacksBlockHeaderTypes::Nakamoto(_) => panic!("Nakamoto blocks not supported yet"), - }; - let parent_block_hash = parent_block_header.block_hash(); - - let Some(next_microblocks) = StacksChainState::inner_find_parent_microblock_stream( - &chainstate_tx.tx, - &block_hash, - &parent_block_hash, - &parent_header_info.consensus_hash, - parent_microblock_hash, - parent_microblock_seq, - ) - .unwrap() else { - println!("No microblock stream found for {block_id}"); - return; - }; - - let (burn_header_hash, burn_header_height, burn_header_timestamp, _winning_block_txid) = - match SortitionDB::get_block_snapshot_consensus(&sort_tx, &block_consensus_hash).unwrap() { - Some(sn) => ( - sn.burn_header_hash, - sn.block_height as u32, - sn.burn_header_timestamp, - sn.winning_block_txid, - ), - None => { - // shouldn't happen - panic!("CORRUPTION: staging block {block_consensus_hash}/{block_hash} does not correspond to a burn block"); - } - }; - - info!( - "Process block {}/{} = {} in burn block {}, parent microblock {}", - block_consensus_hash, block_hash, &block_id, &burn_header_hash, parent_microblock_hash, - ); - - if !StacksChainState::check_block_attachment(&parent_block_header, &block.header) { - let msg = format!( - "Invalid stacks block {}/{} -- does not attach to parent {}/{}", - &block_consensus_hash, - block.block_hash(), - parent_block_header.block_hash(), - &parent_header_info.consensus_hash - ); - println!("{msg}"); - return; - } - - // validation check -- validate parent microblocks and find the ones that connect the - // block's parent to this block. - let next_microblocks = StacksChainState::extract_connecting_microblocks( - &parent_header_info, - &block_consensus_hash, - &block_hash, - block, - next_microblocks, - ) - .unwrap(); - let (last_microblock_hash, last_microblock_seq) = match next_microblocks.len() { - 0 => (EMPTY_MICROBLOCK_PARENT_HASH.clone(), 0), - _ => { - let l = next_microblocks.len(); - ( - next_microblocks[l - 1].block_hash(), - next_microblocks[l - 1].header.sequence, - ) - } - }; - assert_eq!(*parent_microblock_hash, last_microblock_hash); - assert_eq!(parent_microblock_seq, last_microblock_seq); - - let block_am = StacksChainState::find_stacks_tip_affirmation_map( - burnchain_blocks_db, - sort_tx.tx(), - block_consensus_hash, - block_hash, - ) - .unwrap(); - - let pox_constants = sort_tx.context.pox_constants.clone(); - - match StacksChainState::append_block( - &mut chainstate_tx, - clarity_instance, - &mut sort_tx, - &pox_constants, - &parent_header_info, - block_consensus_hash, - &burn_header_hash, - burn_header_height, - burn_header_timestamp, - &block, - block_size, - &next_microblocks, - block_commit_burn, - block_sortition_burn, - block_am.weight(), - true, - ) { - Ok((_receipt, _, _)) => { - info!("Block processed successfully! block = {block_id}"); - } - Err(e) => { - println!("Failed processing block! block = {block_id}, error = {e:?}"); - process::exit(1); - } - }; -} - /// Perform an analysis of the anti-MEV algorithm in epoch 3.0, vis-a-vis the status quo. /// Results are printed to stdout. /// Exits with 0 on success, and 1 on failure. diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 4474a9e991..6dbc1fee22 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -38,6 +38,7 @@ use stacks::chainstate::stacks::{ StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; +use stacks::cli; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ @@ -12630,7 +12631,7 @@ fn mock_miner_replay() { btc_regtest_controller.bootstrap_chain(201); - eprintln!("Chain bootstrapped..."); + info!("Chain bootstrapped..."); let mut miner_run_loop = neon::RunLoop::new(conf.clone()); let miner_blocks_processed = miner_run_loop.get_blocks_processed_arc(); @@ -12679,13 +12680,10 @@ fn mock_miner_replay() { let follower_blocks_processed = follower_run_loop.get_blocks_processed_arc(); let follower_channel = follower_run_loop.get_coordinator_channel().unwrap(); - let miner_blocks_processed_start = miner_channel.get_stacks_blocks_processed(); - let follower_blocks_processed_start = follower_channel.get_stacks_blocks_processed(); - thread::spawn(move || follower_run_loop.start(None, 0)); wait_for_runloop(&follower_blocks_processed); - eprintln!("Follower bootup complete!"); + info!("Follower bootup complete!"); // first block wakes up the run loop next_block_and_wait_all( @@ -12719,6 +12717,9 @@ fn mock_miner_replay() { // ---------- Setup finished, start test ---------- + // PART 1 + // Run mock miner configured to output to files + // Mine some blocks for mock miner output for _ in 0..10 { next_block_and_wait_all( @@ -12730,6 +12731,8 @@ fn mock_miner_replay() { thread::sleep(block_gap); } + info!("Mock minining finished"); + let miner_blocks_processed_end = miner_channel.get_stacks_blocks_processed(); let follower_blocks_processed_end = follower_channel.get_stacks_blocks_processed(); @@ -12748,6 +12751,15 @@ fn mock_miner_replay() { assert_eq!(file_count, 12); assert_eq!(miner_blocks_processed_end, follower_blocks_processed_end); + // PART 2 + // Run `mock_miner_replay()` + let blocks_dir = blocks_dir.into_os_string().into_string().unwrap(); + let db_path = format!("{}/neon", conf.node.working_dir); + let args: Vec = vec!["replay-mock-mining".into(), db_path, blocks_dir]; + + info!("Replaying mock mined blocks..."); + cli::command_replay_mock_mining(&args); + // ---------- Test finished, clean up ---------- btcd_controller.stop_bitcoind().unwrap(); From e5ad64ff538535b2685e8bcecc7d340094a3e454 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 14 Aug 2024 12:14:17 -0400 Subject: [PATCH 0777/1400] chore: Allow optional config to `command_replay_mock_mining()` --- stackslib/src/cli.rs | 90 ++++++++++++++----- stackslib/src/main.rs | 4 +- .../src/tests/neon_integrations.rs | 26 +++++- 3 files changed, 95 insertions(+), 25 deletions(-) diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 27efb44bb9..493ab18de5 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -16,6 +16,7 @@ //! Subcommands used by `stacks-inspect` binary +use std::cell::LazyCell; use std::path::PathBuf; use std::time::Instant; use std::{env, fs, io, process, thread}; @@ -39,10 +40,40 @@ use crate::clarity_vm::clarity::ClarityInstance; use crate::core::*; use crate::util_lib::db::IndexDBTx; +/// Can be used with CLI commands to support non-mainnet chainstate +/// Allows integration testing of these functions +pub struct StacksChainConfig { + pub chain_id: u32, + pub first_block_height: u64, + pub first_burn_header_hash: BurnchainHeaderHash, + pub first_burn_header_timestamp: u64, + pub pox_constants: PoxConstants, + pub epochs: Vec, +} + +impl StacksChainConfig { + pub fn default_mainnet() -> Self { + Self { + chain_id: CHAIN_ID_MAINNET, + first_block_height: BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, + first_burn_header_hash: BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH) + .unwrap(), + first_burn_header_timestamp: BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), + pox_constants: PoxConstants::mainnet_default(), + epochs: STACKS_EPOCHS_MAINNET.to_vec(), + } + } +} + +const STACKS_CHAIN_CONFIG_DEFAULT_MAINNET: LazyCell = + LazyCell::new(StacksChainConfig::default_mainnet); + /// Replay blocks from chainstate database -/// Takes args in CLI format: ` [args...]` /// Terminates on error using `process::exit()` -pub fn command_replay_block(argv: &[String]) { +/// +/// Arguments: +/// - `argv`: Args in CLI format: ` [args...]` +pub fn command_replay_block(argv: &[String], conf: Option<&StacksChainConfig>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -110,15 +141,18 @@ pub fn command_replay_block(argv: &[String]) { if i % 100 == 0 { println!("Checked {i}..."); } - replay_staging_block(db_path, index_block_hash); + replay_staging_block(db_path, index_block_hash, conf); } println!("Finished. run_time_seconds = {}", start.elapsed().as_secs()); } /// Replay mock mined blocks from JSON files -/// Takes args in CLI format: ` [args...]` /// Terminates on error using `process::exit()` -pub fn command_replay_mock_mining(argv: &[String]) { +/// +/// Arguments: +/// - `argv`: Args in CLI format: ` [args...]` +/// - `conf`: Optional config for running on non-mainnet chainstate +pub fn command_replay_mock_mining(argv: &[String], conf: Option<&StacksChainConfig>) { let print_help_and_exit = || -> ! { let n = &argv[0]; eprintln!("Usage:"); @@ -202,28 +236,36 @@ pub fn command_replay_mock_mining(argv: &[String]) { "block_height" => bh, "block" => ?block ); - replay_mock_mined_block(&db_path, block); + replay_mock_mined_block(&db_path, block, conf); } } /// Fetch and process a `StagingBlock` from database and call `replay_block()` to validate -fn replay_staging_block(db_path: &str, index_block_hash_hex: &str) { +fn replay_staging_block( + db_path: &str, + index_block_hash_hex: &str, + conf: Option<&StacksChainConfig>, +) { let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); + let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; + let conf = conf.unwrap_or(&default_conf); + + let mainnet = conf.chain_id == CHAIN_ID_MAINNET; let (mut chainstate, _) = - StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); + StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); let mut sortdb = SortitionDB::connect( &sort_db_path, - BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, - &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), - BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), - STACKS_EPOCHS_MAINNET.as_ref(), - PoxConstants::mainnet_default(), + conf.first_block_height, + &conf.first_burn_header_hash, + conf.first_burn_header_timestamp, + &conf.epochs, + conf.pox_constants.clone(), None, true, ) @@ -277,22 +319,30 @@ fn replay_staging_block(db_path: &str, index_block_hash_hex: &str) { } /// Process a mock mined block and call `replay_block()` to validate -fn replay_mock_mined_block(db_path: &str, block: AssembledAnchorBlock) { +fn replay_mock_mined_block( + db_path: &str, + block: AssembledAnchorBlock, + conf: Option<&StacksChainConfig>, +) { let chain_state_path = format!("{db_path}/chainstate/"); let sort_db_path = format!("{db_path}/burnchain/sortition"); let burn_db_path = format!("{db_path}/burnchain/burnchain.sqlite"); let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap(); + let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; + let conf = conf.unwrap_or(&default_conf); + + let mainnet = conf.chain_id == CHAIN_ID_MAINNET; let (mut chainstate, _) = - StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); + StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); let mut sortdb = SortitionDB::connect( &sort_db_path, - BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, - &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), - BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), - STACKS_EPOCHS_MAINNET.as_ref(), - PoxConstants::mainnet_default(), + conf.first_block_height, + &conf.first_burn_header_hash, + conf.first_burn_header_timestamp, + &conf.epochs, + conf.pox_constants.clone(), None, true, ) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 479bfaa9b7..e22e903e78 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1255,12 +1255,12 @@ simulating a miner. } if argv[1] == "replay-block" { - cli::command_replay_block(&argv[1..]); + cli::command_replay_block(&argv[1..], None); process::exit(0); } if argv[1] == "replay-mock-mining" { - cli::command_replay_mock_mining(&argv[1..]); + cli::command_replay_mock_mining(&argv[1..], None); process::exit(0); } diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 6dbc1fee22..64b1ca70da 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -5,6 +5,7 @@ use std::sync::{mpsc, Arc}; use std::time::{Duration, Instant}; use std::{cmp, env, fs, io, thread}; +use clarity::consts::BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -38,7 +39,7 @@ use stacks::chainstate::stacks::{ StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; -use stacks::cli; +use stacks::cli::{self, StacksChainConfig}; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ @@ -12627,7 +12628,13 @@ fn mock_miner_replay() { .start_bitcoind() .expect("Failed starting bitcoind"); - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); btc_regtest_controller.bootstrap_chain(201); @@ -12756,9 +12763,22 @@ fn mock_miner_replay() { let blocks_dir = blocks_dir.into_os_string().into_string().unwrap(); let db_path = format!("{}/neon", conf.node.working_dir); let args: Vec = vec!["replay-mock-mining".into(), db_path, blocks_dir]; + let SortitionDB { + first_block_height, + first_burn_header_hash, + .. + } = *btc_regtest_controller.sortdb_mut(); + let replay_config = StacksChainConfig { + chain_id: conf.burnchain.chain_id, + first_block_height, + first_burn_header_hash, + first_burn_header_timestamp: BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), + pox_constants: burnchain_config.pox_constants, + epochs: conf.burnchain.epochs.expect("Missing `epochs` in config"), + }; info!("Replaying mock mined blocks..."); - cli::command_replay_mock_mining(&args); + cli::command_replay_mock_mining(&args, Some(&replay_config)); // ---------- Test finished, clean up ---------- From 1d836175730eff88eaec9440c608267eb6303494 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 5 Aug 2024 14:02:14 -0500 Subject: [PATCH 0778/1400] fix: consistent treatment of mod 0 blocks in nakamoto --- stacks-common/src/types/mod.rs | 1 + stackslib/src/burnchains/burnchain.rs | 74 ++++------ stackslib/src/burnchains/mod.rs | 50 ++++++- stackslib/src/chainstate/burn/db/sortdb.rs | 79 ++-------- stackslib/src/chainstate/coordinator/mod.rs | 13 +- stackslib/src/chainstate/coordinator/tests.rs | 2 +- .../chainstate/nakamoto/coordinator/mod.rs | 135 ++++++++++-------- .../chainstate/nakamoto/coordinator/tests.rs | 9 +- .../src/chainstate/nakamoto/tests/node.rs | 2 +- .../chainstate/stacks/boot/contract_tests.rs | 2 +- stackslib/src/chainstate/stacks/db/blocks.rs | 2 +- stackslib/src/chainstate/stacks/db/mod.rs | 2 +- stackslib/src/core/tests/mod.rs | 5 +- stackslib/src/net/api/getstackers.rs | 3 +- stackslib/src/net/p2p.rs | 2 +- stackslib/src/net/relay.rs | 5 +- stackslib/src/net/tests/download/nakamoto.rs | 5 +- stackslib/src/net/tests/relay/nakamoto.rs | 15 +- stackslib/src/net/unsolicited.rs | 9 +- testnet/stacks-node/src/neon_node.rs | 6 +- testnet/stacks-node/src/tests/epoch_21.rs | 7 +- .../src/tests/nakamoto_integrations.rs | 2 +- 22 files changed, 209 insertions(+), 221 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index c995359459..23f2b006db 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -82,6 +82,7 @@ pub enum StacksEpochId { Epoch30 = 0x03000, } +#[derive(Debug)] pub enum MempoolCollectionBehavior { ByStacksHeight, ByReceiveTime, diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 4002c253ae..a5ecaa0458 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -549,47 +549,43 @@ impl Burnchain { .expect("Overflowed u64 in calculating expected sunset_burn") } + /// Is this the first block to receive rewards in its cycle? + /// This is the mod 1 block. Note: in nakamoto, the signer set for cycle N signs + /// the mod 0 block. pub fn is_reward_cycle_start(&self, burn_height: u64) -> bool { self.pox_constants .is_reward_cycle_start(self.first_block_height, burn_height) } + /// Is this the first block to be signed by the signer set in cycle N? + /// This is the mod 0 block. + pub fn is_naka_signing_cycle_start(&self, burn_height: u64) -> bool { + self.pox_constants + .is_naka_signing_cycle_start(self.first_block_height, burn_height) + } + + /// return the first burn block which receives reward in `reward_cycle`. + /// this is the modulo 1 block pub fn reward_cycle_to_block_height(&self, reward_cycle: u64) -> u64 { self.pox_constants .reward_cycle_to_block_height(self.first_block_height, reward_cycle) } - /// Compute the reward cycle ID of the PoX reward set which is active as of this burn_height. - /// The reward set is calculated at reward cycle index 1, so if this block height is at or after - /// reward cycle index 1, then this behaves like `block_height_to_reward_cycle()`. However, - /// if it's reward cycle index is 0, then it belongs to the previous reward cycle. - pub fn pox_reward_cycle(&self, block_height: u64) -> Option { - let cycle = self.block_height_to_reward_cycle(block_height)?; - let effective_height = block_height.checked_sub(self.first_block_height)?; - if effective_height % u64::from(self.pox_constants.reward_cycle_length) == 0 { - Some(cycle.saturating_sub(1)) - } else { - Some(cycle) - } + /// the first burn block that must be *signed* by the signer set of `reward_cycle`. + /// this is the modulo 0 block + pub fn nakamoto_first_block_of_cycle(&self, reward_cycle: u64) -> u64 { + self.pox_constants + .nakamoto_first_block_of_cycle(self.first_block_height, reward_cycle) } + /// What is the reward cycle for this block height? + /// This considers the modulo 0 block to be in reward cycle `n`, even though + /// rewards for cycle `n` do not begin until modulo 1. pub fn block_height_to_reward_cycle(&self, block_height: u64) -> Option { self.pox_constants .block_height_to_reward_cycle(self.first_block_height, block_height) } - pub fn static_block_height_to_reward_cycle( - block_height: u64, - first_block_height: u64, - reward_cycle_length: u64, - ) -> Option { - PoxConstants::static_block_height_to_reward_cycle( - block_height, - first_block_height, - reward_cycle_length, - ) - } - /// Is this block either the first block in a reward cycle or /// right before the reward phase starts? This is the mod 0 or mod 1 /// block. Reward cycle start events (like auto-unlocks) process *after* @@ -607,27 +603,19 @@ impl Burnchain { (effective_height % reward_cycle_length) <= 1 } - pub fn static_is_in_prepare_phase( - first_block_height: u64, - reward_cycle_length: u64, - prepare_length: u64, - block_height: u64, - ) -> bool { - PoxConstants::static_is_in_prepare_phase( - first_block_height, - reward_cycle_length, - prepare_length, - block_height, - ) + /// Does this block include reward slots? + /// This is either in the last prepare_phase_length blocks of the cycle + /// or the modulo 0 block + pub fn is_in_prepare_phase(&self, block_height: u64) -> bool { + self.pox_constants + .is_in_prepare_phase(self.first_block_height, block_height) } - pub fn is_in_prepare_phase(&self, block_height: u64) -> bool { - Self::static_is_in_prepare_phase( - self.first_block_height, - self.pox_constants.reward_cycle_length as u64, - self.pox_constants.prepare_length.into(), - block_height, - ) + /// The prepare phase is the last prepare_phase_length blocks of the cycle + /// This cannot include the 0 block for nakamoto + pub fn is_in_naka_prepare_phase(&self, block_height: u64) -> bool { + self.pox_constants + .is_in_naka_prepare_phase(self.first_block_height, block_height) } pub fn regtest(working_dir: &str) -> Burnchain { diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 30cd9f81ee..07a2f73c10 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -517,7 +517,7 @@ impl PoxConstants { } } - /// What's the first block in the prepare phase + /// The first block of the prepare phase during `reward_cycle`. This is the prepare phase _for the next cycle_. pub fn prepare_phase_start(&self, first_block_height: u64, reward_cycle: u64) -> u64 { let reward_cycle_start = self.reward_cycle_to_block_height(first_block_height, reward_cycle); @@ -526,18 +526,37 @@ impl PoxConstants { prepare_phase_start } + /// Is this the first block to receive rewards in its cycle? + /// This is the mod 1 block. Note: in nakamoto, the signer set for cycle N signs + /// the mod 0 block. pub fn is_reward_cycle_start(&self, first_block_height: u64, burn_height: u64) -> bool { let effective_height = burn_height - first_block_height; // first block of the new reward cycle (effective_height % u64::from(self.reward_cycle_length)) == 1 } + /// Is this the first block to be signed by the signer set in cycle N? + /// This is the mod 0 block. + pub fn is_naka_signing_cycle_start(&self, first_block_height: u64, burn_height: u64) -> bool { + let effective_height = burn_height - first_block_height; + // first block of the new reward cycle + (effective_height % u64::from(self.reward_cycle_length)) == 0 + } + + /// return the first burn block which receives reward in `reward_cycle`. + /// this is the modulo 1 block pub fn reward_cycle_to_block_height(&self, first_block_height: u64, reward_cycle: u64) -> u64 { // NOTE: the `+ 1` is because the height of the first block of a reward cycle is mod 1, not // mod 0. first_block_height + reward_cycle * u64::from(self.reward_cycle_length) + 1 } + /// the first burn block that must be *signed* by the signer set of `reward_cycle`. + /// this is the modulo 0 block + pub fn nakamoto_first_block_of_cycle(&self, first_block_height: u64, reward_cycle: u64) -> u64 { + first_block_height + reward_cycle * u64::from(self.reward_cycle_length) + } + pub fn reward_cycle_index(&self, first_block_height: u64, burn_height: u64) -> Option { let effective_height = burn_height.checked_sub(first_block_height)?; Some(effective_height % u64::from(self.reward_cycle_length)) @@ -609,6 +628,35 @@ impl PoxConstants { } } + /// The prepare phase is the last prepare_phase_length blocks of the cycle + /// This cannot include the 0 block for nakamoto + pub fn is_in_naka_prepare_phase(&self, first_block_height: u64, block_height: u64) -> bool { + Self::static_is_in_naka_prepare_phase( + first_block_height, + u64::from(self.reward_cycle_length), + u64::from(self.prepare_length), + block_height, + ) + } + + /// The prepare phase is the last prepare_phase_length blocks of the cycle + /// This cannot include the 0 block for nakamoto + pub fn static_is_in_naka_prepare_phase( + first_block_height: u64, + reward_cycle_length: u64, + prepare_length: u64, + block_height: u64, + ) -> bool { + if block_height <= first_block_height { + // not a reward cycle start if we're the first block after genesis. + false + } else { + let effective_height = block_height - first_block_height; + let reward_index = effective_height % reward_cycle_length; + reward_index > u64::from(reward_cycle_length - prepare_length) + } + } + /// Returns the active reward cycle at the given burn block height /// * `first_block_ht` - the first burn block height that the Stacks network monitored /// * `reward_cycle_len` - the length of each reward cycle in the network. diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 15a3bf5641..909ea46b9f 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3584,42 +3584,6 @@ impl SortitionDB { Ok(()) } - /// Get the prepare phase end sortition ID of a reward cycle. This is the last prepare - /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned - /// sortition will be in the preceding reward cycle) - /// Wrapper around SortitionDBConn::get_prepare_phase_end_sortition_id_for_reward_ccyle() - pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( - &self, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result { - self.index_conn() - .get_prepare_phase_end_sortition_id_for_reward_cycle( - &self.pox_constants, - self.first_block_height, - tip, - reward_cycle_id, - ) - } - - /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare - /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned - /// sortition will be in the preceding reward cycle) - /// Wrapper around SortitionDBConn::get_prepare_phase_start_sortition_id_for_reward_cycle(). - pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( - &self, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result { - self.index_conn() - .get_prepare_phase_start_sortition_id_for_reward_cycle( - &self.pox_constants, - self.first_block_height, - tip, - reward_cycle_id, - ) - } - /// Figure out the reward cycle for `tip` and lookup the preprocessed /// reward set (if it exists) for the active reward cycle during `tip`. /// Returns the reward cycle info on success. @@ -3934,33 +3898,6 @@ impl<'a> SortitionDBConn<'a> { .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) } - /// Get the prepare phase end sortition ID of a reward cycle. This is the last prepare - /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned - /// sortition will be in the preceding reward cycle) - pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( - &self, - pox_constants: &PoxConstants, - first_block_height: u64, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result { - let prepare_phase_end = pox_constants - .reward_cycle_to_block_height(first_block_height, reward_cycle_id) - .saturating_sub(1); - - let last_sortition = - get_ancestor_sort_id(self, prepare_phase_end, tip)?.ok_or_else(|| { - error!( - "Could not find prepare phase end ancestor while fetching reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_end_height" => prepare_phase_end - ); - db_error::NotFoundError - })?; - Ok(last_sortition) - } - /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned /// sortition will be in the preceding reward cycle) @@ -3971,9 +3908,11 @@ impl<'a> SortitionDBConn<'a> { tip: &SortitionId, reward_cycle_id: u64, ) -> Result { - let prepare_phase_start = pox_constants - .reward_cycle_to_block_height(first_block_height, reward_cycle_id) - .saturating_sub(pox_constants.prepare_length.into()); + let reward_cycle_of_prepare_phase = reward_cycle_id.checked_sub(1).ok_or_else(|| db_error::Other("No prepare phase exists for cycle 0".into()))?; + let prepare_phase_start = pox_constants.prepare_phase_start( + first_block_height, + reward_cycle_of_prepare_phase, + ); let first_sortition = get_ancestor_sort_id(self, prepare_phase_start, tip)?.ok_or_else(|| { @@ -5945,10 +5884,10 @@ impl<'a> SortitionHandleTx<'a> { /// Get the expected number of PoX payouts per output fn get_num_pox_payouts(&self, burn_block_height: u64) -> usize { - let op_num_outputs = if Burnchain::static_is_in_prepare_phase( + let op_num_outputs = if PoxConstants::static_is_in_prepare_phase( self.context.first_block_height, - self.context.pox_constants.reward_cycle_length as u64, - self.context.pox_constants.prepare_length.into(), + u64::from(self.context.pox_constants.reward_cycle_length), + u64::from(self.context.pox_constants.prepare_length), burn_block_height, ) { 1 @@ -6173,7 +6112,7 @@ impl<'a> SortitionHandleTx<'a> { } // if there are qualifying auto-unlocks, record them if !reward_set.start_cycle_state.is_empty() { - let cycle_number = Burnchain::static_block_height_to_reward_cycle( + let cycle_number = PoxConstants::static_block_height_to_reward_cycle( snapshot.block_height, self.context.first_block_height, self.context.pox_constants.reward_cycle_length.into(), diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index d3b6fd5f3e..60d8699686 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -297,8 +297,8 @@ pub trait RewardSetProvider { fn get_reward_set_nakamoto( &self, - cycle_start_burn_height: u64, chainstate: &mut StacksChainState, + cycle: u64, burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, @@ -372,20 +372,13 @@ impl<'a, T: BlockEventDispatcher> RewardSetProvider for OnChainRewardSetProvider fn get_reward_set_nakamoto( &self, - cycle_start_burn_height: u64, chainstate: &mut StacksChainState, + reward_cycle: u64, burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result { - self.read_reward_set_nakamoto( - cycle_start_burn_height, - chainstate, - burnchain, - sortdb, - block_id, - false, - ) + self.read_reward_set_nakamoto(chainstate, reward_cycle, burnchain, sortdb, block_id, false) } } diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 7bd06aaaea..81167c6462 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -520,8 +520,8 @@ impl RewardSetProvider for StubbedRewardSetProvider { fn get_reward_set_nakamoto( &self, - cycle_start_burn_height: u64, chainstate: &mut StacksChainState, + cycle: u64, burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 15cc7f0852..31549d22b0 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -29,7 +29,7 @@ use stacks_common::types::chainstate::{ use stacks_common::types::{StacksEpoch, StacksEpochId}; use crate::burnchains::db::{BurnchainBlockData, BurnchainDB, BurnchainHeaderReader}; -use crate::burnchains::{Burnchain, BurnchainBlockHeader}; +use crate::burnchains::{self, burnchain, Burnchain, BurnchainBlockHeader}; use crate::chainstate::burn::db::sortdb::{ get_ancestor_sort_id, SortitionDB, SortitionHandle, SortitionHandleConn, }; @@ -88,16 +88,13 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { /// RPC endpoints to expose this without flooding loggers. pub fn read_reward_set_nakamoto( &self, - cycle_start_burn_height: u64, chainstate: &mut StacksChainState, + cycle: u64, burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, debug_log: bool, ) -> Result { - let cycle = burnchain - .block_height_to_reward_cycle(cycle_start_burn_height) - .expect("FATAL: no reward cycle for burn height"); self.read_reward_set_nakamoto_of_cycle(cycle, chainstate, sortdb, block_id, debug_log) } @@ -280,9 +277,10 @@ fn find_prepare_phase_sortitions( } /// Try to get the reward cycle information for a Nakamoto reward cycle, identified by the -/// burn_height. The reward cycle info returned will be from the reward cycle that is active as of -/// `burn_height`. `sortition_tip` can be any sortition ID that's at a higher height than -/// `burn_height`. +/// `reward_cycle` number. +/// +/// `sortition_tip` can be any sortition ID that's at a higher height than +/// `reward_cycle`'s start height (the 0 block). /// /// In Nakamoto, the PoX anchor block for reward cycle _R_ is the _first_ Stacks block mined in the /// _last_ tenure of _R - 1_'s reward phase (i.e. which takes place toward the end of reward cycle). @@ -297,14 +295,16 @@ fn find_prepare_phase_sortitions( /// Returns Ok(None) if we're still waiting for the PoX anchor block sortition /// Returns Err(Error::NotInPreparePhase) if `burn_height` is not in the prepare phase pub fn get_nakamoto_reward_cycle_info( - burn_height: u64, sortition_tip: &SortitionId, + reward_cycle: u64, burnchain: &Burnchain, chain_state: &mut StacksChainState, stacks_tip: &StacksBlockId, sort_db: &mut SortitionDB, provider: &U, ) -> Result, Error> { + let burn_height = burnchain.nakamoto_first_block_of_cycle(reward_cycle); + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", burn_height)) .epoch_id; @@ -314,14 +314,8 @@ pub fn get_nakamoto_reward_cycle_info( "FATAL: called a nakamoto function outside of epoch 3" ); - // calculating the reward set for the current reward cycle - let reward_cycle = burnchain - .pox_reward_cycle(burn_height) - .expect("FATAL: no reward cycle for burn height"); - debug!("Processing reward set for Nakamoto reward cycle"; "stacks_tip" => %stacks_tip, - "burn_height" => burn_height, "reward_cycle" => reward_cycle, "reward_cycle_length" => burnchain.pox_constants.reward_cycle_length, "prepare_phase_length" => burnchain.pox_constants.prepare_length); @@ -376,33 +370,23 @@ pub fn load_nakamoto_reward_set( sort_db: &SortitionDB, provider: &U, ) -> Result, Error> { - let prepare_end_height = burnchain - .reward_cycle_to_block_height(reward_cycle) - .saturating_sub(1); + let cycle_start_height = burnchain.nakamoto_first_block_of_cycle(reward_cycle); - let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), prepare_end_height)? + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), cycle_start_height)? .unwrap_or_else(|| { panic!( "FATAL: no epoch defined for burn height {}", - prepare_end_height + cycle_start_height ) }); - let Some(prepare_end_sortition_id) = - get_ancestor_sort_id(&sort_db.index_conn(), prepare_end_height, sortition_tip)? - else { - // reward cycle is too far in the future - warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, prepare_end_height, sortition_tip); - return Ok(None); - }; - // Find the first Stacks block in this reward cycle's preceding prepare phase. // This block will have invoked `.signers.stackerdb-set-signer-slots()` with the reward set. // Note that we may not have processed it yet. But, if we do find it, then it's // unique (and since Nakamoto Stacks blocks are processed in order, the anchor block // cannot change later). let first_epoch30_reward_cycle = burnchain - .pox_reward_cycle(epoch_at_height.start_height) + .block_height_to_reward_cycle(epoch_at_height.start_height) .expect("FATAL: no reward cycle for epoch 3.0 start height"); if !epoch_at_height @@ -412,6 +396,14 @@ pub fn load_nakamoto_reward_set( // in epoch 2.5, and in the first reward cycle of epoch 3.0, the reward set can *only* be found in the sortition DB. // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward // cycle info in the nakamoto chain state. + let Some(prepare_end_sortition_id) = + get_ancestor_sort_id(&sort_db.index_conn(), cycle_start_height, sortition_tip)? + else { + // reward cycle is too far in the future + warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, cycle_start_height, sortition_tip); + return Ok(None); + }; + if let Ok(persisted_reward_cycle_info) = sort_db.get_preprocessed_reward_set_of(&prepare_end_sortition_id) { @@ -475,8 +467,18 @@ pub fn load_nakamoto_reward_set( } // find the reward cycle's prepare-phase sortitions (in the preceding reward cycle) + let Some(prior_cycle_end) = get_ancestor_sort_id( + &sort_db.index_conn(), + cycle_start_height.saturating_sub(1), + sortition_tip, + )? + else { + // reward cycle is too far in the future + warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, cycle_start_height.saturating_sub(1), sortition_tip); + return Ok(None); + }; let prepare_phase_sortitions = - find_prepare_phase_sortitions(sort_db, burnchain, &prepare_end_sortition_id)?; + find_prepare_phase_sortitions(sort_db, burnchain, &prior_cycle_end)?; // iterate over the prepare_phase_sortitions, finding the first such sortition // with a processed stacks block @@ -505,7 +507,7 @@ pub fn load_nakamoto_reward_set( Err(e) => return Some(Err(e)), Ok(None) => { // no header for this snapshot (possibly invalid) - debug!("Failed to find block by consensus hash"; "consensus_hash" => %sn.consensus_hash); + info!("Failed to find block by consensus hash"; "consensus_hash" => %sn.consensus_hash); return None } } @@ -542,12 +544,12 @@ pub fn load_nakamoto_reward_set( "block_hash" => %stacks_block_hash, "consensus_hash" => %anchor_block_sn.consensus_hash, "txid" => %txid, - "prepare_end_height" => %prepare_end_height, + "cycle_start_height" => %cycle_start_height, "burnchain_height" => %anchor_block_sn.block_height); let reward_set = provider.get_reward_set_nakamoto( - prepare_end_height, chain_state, + reward_cycle, burnchain, sort_db, &block_id, @@ -581,26 +583,28 @@ pub fn get_nakamoto_next_recipients( stacks_tip: &StacksBlockId, burnchain: &Burnchain, ) -> Result, Error> { - let reward_cycle_info = - if burnchain.is_reward_cycle_start(sortition_tip.block_height.saturating_add(1)) { - let Some((reward_set, _)) = load_nakamoto_reward_set( - burnchain - .pox_reward_cycle(sortition_tip.block_height.saturating_add(1)) - .expect("Sortition block height has no reward cycle"), - &sortition_tip.sortition_id, - burnchain, - chain_state, - stacks_tip, - sort_db, - &OnChainRewardSetProvider::new(), - )? - else { - return Ok(None); - }; - Some(reward_set) - } else { - None + let next_burn_height = sortition_tip.block_height.saturating_add(1); + let Some(reward_cycle) = burnchain.block_height_to_reward_cycle(next_burn_height) else { + error!("CORRUPTION: evaluating burn block height before starting burn height"); + return Err(Error::BurnchainError(burnchains::Error::NoStacksEpoch)); + }; + let reward_cycle_info = if burnchain.is_reward_cycle_start(next_burn_height) { + let Some((reward_set, _)) = load_nakamoto_reward_set( + reward_cycle, + &sortition_tip.sortition_id, + burnchain, + chain_state, + stacks_tip, + sort_db, + &OnChainRewardSetProvider::new(), + )? + else { + return Ok(None); }; + Some(reward_set) + } else { + None + }; sort_db .get_next_block_recipients(burnchain, sortition_tip, reward_cycle_info.as_ref()) .map_err(Error::from) @@ -670,7 +674,7 @@ impl< // only proceed if we have processed the _anchor block_ for this reward cycle. let Some((rc_info, _)) = load_nakamoto_reward_set( self.burnchain - .pox_reward_cycle(canonical_sn.block_height) + .block_height_to_reward_cycle(canonical_sn.block_height) .expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, @@ -906,7 +910,11 @@ impl< }); // are we in the prepare phase? - if !self.burnchain.is_in_prepare_phase(stacks_sn.block_height) { + // TODO: this should *not* include the 0 block! + if !self + .burnchain + .is_in_naka_prepare_phase(stacks_sn.block_height) + { // next ready stacks block continue; } @@ -930,7 +938,7 @@ impl< // cycle data let Some((rc_info, _)) = load_nakamoto_reward_set( self.burnchain - .pox_reward_cycle(canonical_sn.block_height) + .block_height_to_reward_cycle(canonical_sn.block_height) .expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, @@ -966,8 +974,8 @@ impl< /// Given a burnchain header, find the PoX reward cycle info fn get_nakamoto_reward_cycle_info( &mut self, - block_height: u64, stacks_tip: &StacksBlockId, + reward_cycle: u64, ) -> Result, Error> { let sortition_tip_id = self .canonical_sortition_tip @@ -975,8 +983,8 @@ impl< .expect("FATAL: Processing anchor block, but no known sortition tip"); get_nakamoto_reward_cycle_info( - block_height, sortition_tip_id, + reward_cycle, &self.burnchain, &mut self.chain_state_db, stacks_tip, @@ -1117,10 +1125,15 @@ impl< return Ok(false); }; - let reward_cycle_info = self.get_nakamoto_reward_cycle_info( - header.block_height, - &local_best_nakamoto_tip, - )?; + let Some(reward_cycle) = self + .burnchain + .block_height_to_reward_cycle(header.block_height) + else { + error!("CORRUPTION: Evaluating burn block before start burn height"; "burn_height" => header.block_height); + return Ok(false); + }; + let reward_cycle_info = + self.get_nakamoto_reward_cycle_info(&local_best_nakamoto_tip, reward_cycle)?; if let Some(rc_info) = reward_cycle_info.as_ref() { // in nakamoto, if we have any reward cycle info at all, it will be known. // otherwise, we may have to process some more Stacks blocks diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 569114aa12..1b971869bc 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -598,7 +598,7 @@ impl<'a> TestPeer<'a> { info!( "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", pox_constants.is_in_prepare_phase(first_burn_height, burn_height), - pox_constants.is_reward_cycle_start(first_burn_height, burn_height) + pox_constants.is_naka_signing_cycle_start(first_burn_height, burn_height) ); let vrf_proof = self.make_nakamoto_vrf_proof(miner_key); @@ -761,6 +761,9 @@ fn pox_treatment() { peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); blocks.push(block); + // note: we use `is_reward_cycle_start` here rather than naka_reward_cycle_start + // because in this test, we're interested in getting to the reward blocks, + // not validating the signer set. the reward blocks only begin at modulo 1 if pox_constants.is_reward_cycle_start(first_burn_height, burn_height + 1) { break; } @@ -1571,7 +1574,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a if peer .config .burnchain - .is_reward_cycle_start(tip.block_height) + .is_naka_signing_cycle_start(tip.block_height) { rc_blocks.push(all_blocks.clone()); rc_burn_ops.push(all_burn_ops.clone()); @@ -2316,7 +2319,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe if peer .config .burnchain - .is_reward_cycle_start(tip.block_height) + .is_naka_signing_cycle_start(tip.block_height) { rc_blocks.push(all_blocks.clone()); rc_burn_ops.push(all_burn_ops.clone()); diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index d3f190de1f..bd12072a01 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -737,7 +737,7 @@ impl TestStacksNode { let reward_set = load_nakamoto_reward_set( miner .burnchain - .pox_reward_cycle(sort_tip_sn.block_height) + .block_height_to_reward_cycle(sort_tip_sn.block_height) .expect("FATAL: no reward cycle for sortition"), &sort_tip_sn.sortition_id, &miner.burnchain, diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 1a47613c89..04b74ba2e9 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -490,7 +490,7 @@ impl BurnStateDB for TestSimBurnStateDB { let first_block = self.get_burn_start_height(); let prepare_len = self.get_pox_prepare_length(); let rc_len = self.get_pox_reward_cycle_length(); - if Burnchain::static_is_in_prepare_phase( + if PoxConstants::static_is_in_prepare_phase( first_block.into(), rc_len.into(), prepare_len.into(), diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 63c22fafb6..ba95d77ead 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -4952,7 +4952,7 @@ impl StacksChainState { chain_tip_burn_header_height: u32, parent_sortition_id: &SortitionId, ) -> Result, Error> { - let pox_reward_cycle = Burnchain::static_block_height_to_reward_cycle( + let pox_reward_cycle = PoxConstants::static_block_height_to_reward_cycle( burn_tip_height, burn_dbconn.get_burn_start_height().into(), burn_dbconn.get_pox_reward_cycle_length().into(), diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 356b117b8b..d28faed4f2 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2749,7 +2749,7 @@ pub mod test { } pub fn chainstate_path(test_name: &str) -> String { - format!("/tmp/blockstack-test-chainstate-{}", test_name) + format!("/tmp/stacks-node-tests/cs-{}", test_name) } #[test] diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 158feeeba5..72b29cc097 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -1392,8 +1392,9 @@ fn mempool_do_not_replace_tx() { #[case(MempoolCollectionBehavior::ByStacksHeight)] #[case(MempoolCollectionBehavior::ByReceiveTime)] fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) { - let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); - let chainstate_path = chainstate_path(function_name!()); + let path_name = format!("{}::{:?}", function_name!(), behavior); + let mut chainstate = instantiate_chainstate(false, 0x80000000, &path_name); + let chainstate_path = chainstate_path(&path_name); let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); let mut txs = codec_all_transactions( diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 4fd4234070..0b494d19a0 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -92,7 +92,6 @@ impl GetStackersResponse { cycle_number: u64, ) -> Result { let cycle_start_height = burnchain.reward_cycle_to_block_height(cycle_number); - let pox_contract_name = burnchain .pox_constants .active_pox_contract(cycle_start_height); @@ -107,7 +106,7 @@ impl GetStackersResponse { let provider = OnChainRewardSetProvider::new(); let stacker_set = provider - .read_reward_set_nakamoto(cycle_start_height, chainstate, burnchain, sortdb, tip, true) + .read_reward_set_nakamoto(chainstate, cycle_number, burnchain, sortdb, tip, true) .map_err(GetStackersErrors::NotAvailableYet)?; Ok(Self { stacker_set }) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 861a6e6cfa..26605c7e84 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -4292,7 +4292,7 @@ impl PeerNetwork { let ih = sortdb.index_handle(&tip_sn.sortition_id); for rc in [cur_rc, prev_rc, prev_prev_rc] { - let rc_start_height = self.burnchain.reward_cycle_to_block_height(rc); + let rc_start_height = self.burnchain.nakamoto_first_block_of_cycle(rc); let Some(ancestor_sort_id) = get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? else { diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 32dc7d065a..089f71fdaf 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -599,7 +599,7 @@ impl Relayer { // is the block signed by the active reward set? let sn_rc = burnchain - .pox_reward_cycle(sn.block_height) + .block_height_to_reward_cycle(sn.block_height) .expect("FATAL: sortition has no reward cycle"); let reward_cycle_info = if let Some(rc_info) = loaded_reward_sets.get(&sn_rc) { rc_info @@ -840,6 +840,7 @@ impl Relayer { // NOTE: it's `+ 1` because the first Nakamoto block is built atop the last epoch 2.x // tenure, right after the last 2.x sortition + // TODO: is this true? let epoch_id = SortitionDB::get_stacks_epoch(sort_handle, block_sn.block_height + 1)? .expect("FATAL: no epoch defined") .epoch_id; @@ -885,7 +886,7 @@ impl Relayer { let reward_info = match load_nakamoto_reward_set( burnchain - .pox_reward_cycle(block_sn.block_height) + .block_height_to_reward_cycle(block_sn.block_height) .expect("FATAL: block snapshot has no reward cycle"), &tip, burnchain, diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 9de9fb087b..dd0e9e60c4 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -2101,7 +2101,10 @@ fn test_nakamoto_download_run_2_peers() { .get_nakamoto_tip_block_id() .unwrap() .unwrap(); - assert_eq!(tip.block_height, 81); + assert_eq!( + tip.block_height, + 41 + bitvecs.iter().map(|x| x.len() as u64).sum::() + ); // make a neighbor from this peer let boot_observer = TestEventObserver::new(); diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 4df3171474..a0aae1c035 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -618,7 +618,7 @@ fn test_no_buffer_ready_nakamoto_blocks() { follower .network .burnchain - .pox_reward_cycle(block_sn.block_height) + .block_height_to_reward_cycle(block_sn.block_height) .unwrap() ), true @@ -642,7 +642,7 @@ fn test_no_buffer_ready_nakamoto_blocks() { follower .network .burnchain - .pox_reward_cycle( + .block_height_to_reward_cycle( follower.network.burnchain_tip.block_height ) .unwrap() @@ -670,7 +670,7 @@ fn test_no_buffer_ready_nakamoto_blocks() { follower .network .burnchain - .pox_reward_cycle(ancestor_sn.block_height) + .block_height_to_reward_cycle(ancestor_sn.block_height) .unwrap() ), true @@ -816,9 +816,12 @@ fn test_buffer_nonready_nakamoto_blocks() { let mut all_blocks = vec![]; thread::scope(|s| { - s.spawn(|| { - SeedNode::main(peer, rc_len, seed_comms); - }); + thread::Builder::new() + .name("seed".into()) + .spawn_scoped(s, || { + SeedNode::main(peer, rc_len, seed_comms); + }) + .unwrap(); let mut seed_exited = false; let mut exited_peer = None; diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 5aeadc3dfd..f9ab5de87e 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -715,10 +715,11 @@ impl PeerNetwork { ) -> bool { let Some(rc_data) = self.current_reward_sets.get(&reward_cycle) else { info!( - "{:?}: Failed to validate Nakamoto block {}/{}: no reward set", + "{:?}: Failed to validate Nakamoto block {}/{}: no reward set for cycle {}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, - &nakamoto_block.header.block_hash() + &nakamoto_block.header.block_hash(), + reward_cycle, ); return false; }; @@ -733,7 +734,7 @@ impl PeerNetwork { if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { info!( - "{:?}: signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), reward_cycle, &e + "{:?}: signature verification failure for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), reward_cycle, &e ); return false; } @@ -788,7 +789,7 @@ impl PeerNetwork { let reward_set_sn_rc = self .burnchain - .pox_reward_cycle(reward_set_sn.block_height) + .block_height_to_reward_cycle(reward_set_sn.block_height) .expect("FATAL: sortition has no reward cycle"); return (Some(reward_set_sn_rc), can_process); diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 8c3c4ed179..f63227441a 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -818,7 +818,7 @@ impl MicroblockMinerThread { &mined_microblock.block_hash() ); - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] { use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { @@ -1773,7 +1773,7 @@ impl BlockMinerThread { /// /// In testing, we ignore the parent stacks block hash because we don't have an easy way to /// reproduce it in integration tests. - #[cfg(not(any(test, feature = "testing")))] + #[cfg(not(test))] fn make_microblock_private_key( &mut self, parent_stacks_hash: &StacksBlockId, @@ -1786,7 +1786,7 @@ impl BlockMinerThread { /// Get the microblock private key we'll be using for this tenure, should we win. /// Return the private key on success - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn make_microblock_private_key( &mut self, _parent_stacks_hash: &StacksBlockId, diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 6696467930..bb168b28b9 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1296,12 +1296,7 @@ fn transition_adds_get_pox_addr_recipients() { // NOTE: there's an even number of payouts here, so this works eprintln!("payout at {} = {}", burn_block_height, &payout); - if Burnchain::static_is_in_prepare_phase( - 0, - pox_constants.reward_cycle_length as u64, - pox_constants.prepare_length.into(), - burn_block_height, - ) { + if pox_constants.is_in_prepare_phase(0, burn_block_height) { // in prepare phase eprintln!("{} in prepare phase", burn_block_height); assert_eq!(payout, conf.burnchain.burn_fee_cap as u128); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f3cf76af04..3c36565cc8 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -510,7 +510,7 @@ pub fn read_and_sign_block_proposal( let reward_set = load_nakamoto_reward_set( burnchain - .pox_reward_cycle(tip.block_height.saturating_add(1)) + .block_height_to_reward_cycle(tip.block_height) .unwrap(), &tip.sortition_id, &burnchain, From 94a0039b9541eecd0df2cda6ef839fed3aff4ff8 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 13 Aug 2024 13:02:28 -0700 Subject: [PATCH 0779/1400] feat: update prepare_pox_4_test to optionally run nakamoto --- .../src/chainstate/stacks/boot/pox_4_tests.rs | 408 ++++++++++++++---- .../chainstate/stacks/boot/signers_tests.rs | 3 +- stackslib/src/net/tests/mod.rs | 27 +- 3 files changed, 352 insertions(+), 86 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index ac59772f32..db890316ec 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -49,7 +49,7 @@ use wsts::curve::point::{Compressed, Point}; use super::test::*; use super::RawRewardSetEntry; use crate::burnchains::{Burnchain, PoxConstants}; -use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::tests::pox_addr_from; @@ -81,6 +81,7 @@ use crate::clarity_vm::database::marf::{MarfedKV, WritableMarfStore}; use crate::clarity_vm::database::HeadersDBConn; use crate::core::*; use crate::net::test::{TestEventObserver, TestEventObserverBlock, TestPeer, TestPeerConfig}; +use crate::net::tests::NakamotoBootPlan; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::{DBConn, FromRow}; use crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; @@ -138,7 +139,7 @@ fn make_simple_pox_4_lock( ) } -pub fn make_test_epochs_pox() -> (Vec, PoxConstants) { +pub fn make_test_epochs_pox(use_nakamoto: bool) -> (Vec, PoxConstants) { let EMPTY_SORTITIONS = 25; let EPOCH_2_1_HEIGHT = EMPTY_SORTITIONS + 11; // 36 let EPOCH_2_2_HEIGHT = EPOCH_2_1_HEIGHT + 14; // 50 @@ -147,8 +148,9 @@ pub fn make_test_epochs_pox() -> (Vec, PoxConstants) { // this means that cycle 11 should also be treated like a "burn" let EPOCH_2_4_HEIGHT = EPOCH_2_3_HEIGHT + 4; // 56 let EPOCH_2_5_HEIGHT = EPOCH_2_4_HEIGHT + 44; // 100 + let EPOCH_3_0_HEIGHT = EPOCH_2_5_HEIGHT + 23; // 123 - let epochs = vec![ + let mut epochs = vec![ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -201,12 +203,28 @@ pub fn make_test_epochs_pox() -> (Vec, PoxConstants) { StacksEpoch { epoch_id: StacksEpochId::Epoch25, start_height: EPOCH_2_5_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: { + if use_nakamoto { + EPOCH_3_0_HEIGHT + } else { + STACKS_EPOCH_MAX + } + }, block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_5, }, ]; + if use_nakamoto { + epochs.push(StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: EPOCH_3_0_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_3_0, + }); + } + let mut pox_constants = PoxConstants::mainnet_default(); pox_constants.reward_cycle_length = 5; pox_constants.prepare_length = 2; @@ -230,7 +248,7 @@ fn pox_extend_transition() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -890,7 +908,7 @@ fn pox_lock_unlock() { // Config for this test // We are going to try locking for 2 reward cycles (10 blocks) let lock_period = 2; - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -1064,7 +1082,7 @@ fn pox_3_defunct() { // Config for this test // We are going to try locking for 2 reward cycles (10 blocks) let lock_period = 2; - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -1199,7 +1217,7 @@ fn pox_3_unlocks() { // Config for this test // We are going to try locking for 4 reward cycles (20 blocks) let lock_period = 4; - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -1350,7 +1368,7 @@ fn pox_3_unlocks() { #[test] fn pox_4_check_cycle_id_range_in_print_events_pool() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -1739,7 +1757,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { #[test] fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2166,7 +2184,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { #[test] fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2398,7 +2416,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() #[test] fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2519,7 +2537,7 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { #[test] fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2638,7 +2656,7 @@ fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { #[test] fn pox_4_delegate_stack_increase_events() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2744,7 +2762,7 @@ fn pox_4_delegate_stack_increase_events() { #[test] fn pox_4_revoke_delegate_stx_events() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2982,7 +3000,7 @@ fn verify_signer_key_sig( #[test] fn verify_signer_key_signatures() { - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -3278,8 +3296,8 @@ fn verify_signer_key_signatures() { fn stack_stx_verify_signer_sig() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -3598,8 +3616,8 @@ fn stack_stx_verify_signer_sig() { fn stack_extend_verify_sig() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -3851,8 +3869,8 @@ fn stack_extend_verify_sig() { fn stack_agg_commit_verify_sig() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -4662,8 +4680,8 @@ fn stack_agg_increase() { fn stack_increase_verify_signer_key() { let lock_period = 1; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -4950,8 +4968,8 @@ fn stack_increase_verify_signer_key() { fn stack_increase_different_signer_keys() { let lock_period = 1; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -5133,11 +5151,22 @@ fn balances_from_keys( .collect() } -#[test] -fn stack_stx_signer_key() { +#[rstest] +#[case(true)] +#[case(false)] +fn stack_stx_signer_key(#[case] use_nakamoto: bool) { let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); + + info!("--- starting stack-stx test ---"); let stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5147,6 +5176,7 @@ fn stack_stx_signer_key() { let signer_key_val = Value::buff_from(signer_public_key.to_bytes_compressed()).unwrap(); let reward_cycle = get_current_reward_cycle(&peer, &burnchain); + info!("Reward cycle: {reward_cycle}"); // (define-public (stack-stx (amount-ustx uint) // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) @@ -5181,7 +5211,15 @@ fn stack_stx_signer_key() { ], )]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + // let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs( + &mut peer, + &txs, + &mut coinbase_nonce, + &mut test_signers, + use_nakamoto, + ); + // peer.make_nakamoto_tenure(tenure_change, coinbase, signers, block_builder) let stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -5209,13 +5247,20 @@ fn stack_stx_signer_key() { .block_height_to_reward_cycle(block_height) .unwrap(); let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!( - PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap(), - reward_entry.reward_address - ); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| { + entry.reward_address == PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap() + }) + .expect("No reward entry found"); assert_eq!( &reward_entry.signer.unwrap(), &signer_public_key.to_bytes_compressed().as_slice(), @@ -5226,8 +5271,8 @@ fn stack_stx_signer_key() { /// Test `stack-stx` using signer key authorization fn stack_stx_signer_auth() { let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5334,8 +5379,8 @@ fn stack_stx_signer_auth() { fn stack_agg_commit_signer_auth() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -5455,8 +5500,8 @@ fn stack_agg_commit_signer_auth() { fn stack_extend_signer_auth() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -5561,8 +5606,8 @@ fn stack_extend_signer_auth() { fn test_set_signer_key_auth() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -5768,8 +5813,8 @@ fn test_set_signer_key_auth() { #[test] fn stack_extend_signer_key() { let lock_period = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), None); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), None, false); let mut stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5880,8 +5925,8 @@ fn stack_extend_signer_key() { #[test] fn delegate_stack_stx_signer_key() { let lock_period = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), None); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), None, false); let stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5997,8 +6042,8 @@ fn delegate_stack_stx_signer_key() { #[test] fn delegate_stack_stx_extend_signer_key() { let lock_period: u128 = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), None); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), None, false); let alice_nonce = 0; let alice_stacker_key = &keys[0]; @@ -6189,8 +6234,8 @@ fn delegate_stack_stx_extend_signer_key() { fn stack_increase() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut alice_nonce = 0; let alice_stacking_private_key = &keys[0]; @@ -6332,8 +6377,8 @@ fn stack_increase() { fn delegate_stack_increase() { let lock_period: u128 = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let alice_nonce = 0; let alice_key = &keys[0]; @@ -8066,8 +8111,8 @@ fn test_scenario_four() { fn delegate_stack_increase_err() { let lock_period: u128 = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let alice_nonce = 0; let alice_key = &keys[0]; @@ -8358,6 +8403,7 @@ pub fn get_stacking_minimum(peer: &mut TestPeer, latest_block: &StacksBlockId) - pub fn prepare_pox4_test<'a>( test_name: &str, observer: Option<&'a TestEventObserver>, + use_nakamoto: bool, ) -> ( Burnchain, TestPeer<'a>, @@ -8365,8 +8411,9 @@ pub fn prepare_pox4_test<'a>( StacksBlockId, u64, usize, + TestSigners, ) { - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(use_nakamoto); let mut burnchain = Burnchain::default_unittest( 0, @@ -8377,33 +8424,228 @@ pub fn prepare_pox4_test<'a>( let (mut peer, keys) = instantiate_pox_peer_with_epoch(&burnchain, test_name, Some(epochs.clone()), observer); - assert_eq!(burnchain.pox_constants.reward_slots(), 6); - let mut coinbase_nonce = 0; + if use_nakamoto { + let test_key = keys[3].clone(); + let test_keys = vec![test_key.clone()]; + + let private_key = StacksPrivateKey::from_seed(&[2]); + let test_signers = TestSigners::new(test_keys.clone()); + let test_stackers = test_keys + .iter() + .map(|key| TestStacker { + signer_private_key: key.clone(), + stacker_private_key: key.clone(), + // amount: u64::MAX as u128 - 10000, + // amount: 2048000 * POX_THRESHOLD_STEPS_USTX * 2, + amount: 1024 * POX_THRESHOLD_STEPS_USTX, + pox_addr: Some(pox_addr_from(&key)), + max_amount: None, + }) + .collect::>(); + let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 41; + let mut boot_plan = NakamotoBootPlan::new(test_name) + .with_test_stackers(test_stackers) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key); + boot_plan.add_default_balance = false; + let addrs: Vec = keys.iter().map(|pk| key_to_stacks_addr(pk)).collect(); + + let balances: Vec<(PrincipalData, u64)> = addrs + .clone() + .into_iter() + .map(|addr| (addr.into(), (1024 * POX_THRESHOLD_STEPS_USTX) as u64)) + .collect(); + boot_plan.initial_balances = balances; + boot_plan.pox_constants = pox_constants.clone(); + burnchain.pox_constants = pox_constants.clone(); + + info!("---- Booting into Nakamoto Peer ----"); + let peer = boot_plan.boot_into_nakamoto_peer(vec![], observer); + // let mut blocks = vec![]; + let sort_db = peer.sortdb.as_ref().unwrap(); + let latest_block = sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + let coinbase_nonce = 0; - // Advance into pox4 - let target_height = burnchain.pox_constants.pox_4_activation_height; - let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { - assert_latest_was_burn(&mut peer); - } - } + let block_height = get_tip(peer.sortdb.as_ref()).block_height; - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + info!("Block height: {}", block_height); - info!("Block height: {}", block_height); + ( + burnchain, + peer, + keys, + latest_block, + block_height, + coinbase_nonce, + test_signers, + ) + } else { + // assert_eq!(burnchain.pox_constants.reward_slots(), 6); - ( - burnchain, - peer, - keys, - latest_block, - block_height, - coinbase_nonce, - ) + // Advance into pox4 + let target_height = burnchain.pox_constants.pox_4_activation_height; + let mut coinbase_nonce = 0; + let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // if we reach epoch 2.1, perform the check + if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + assert_latest_was_burn(&mut peer); + } + } + let block_height = get_tip(peer.sortdb.as_ref()).block_height; + ( + burnchain, + peer, + keys, + latest_block, + block_height, + coinbase_nonce, + TestSigners::new(vec![]), + ) + // let target_epoch = if use_nakamoto { + // StacksEpochId::Epoch30 + // } else { + // StacksEpochId::Epoch25 + // }; + // let height_25 = epochs + // .iter() + // .find(|e| e.epoch_id == StacksEpochId::Epoch25) + // .unwrap() + // .start_height; + // // let height_25 = epochs.iter().find(|e| e.epoch_id == StacksEpochId::Epoch25).unwrap().start_height; + // let target_height = epochs + // .iter() + // .find(|e| e.epoch_id == target_epoch) + // .unwrap() + // .start_height; + // let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // if use_nakamoto { + // // Go to 2.5, stack, then 3.0 + // while get_tip(peer.sortdb.as_ref()).block_height < height_25 { + // latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // } + + // let tip = get_tip(peer.sortdb.as_ref()); + // let reward_cycle = peer.get_reward_cycle() as u128; + + // let min_ustx = with_sortdb(&mut peer, |chainstate, sortdb| { + // chainstate.get_stacking_minimum(sortdb, &latest_block) + // }) + // .unwrap(); + // info!("Building stacking txs"); + // // Make all the test Stackers stack + // let stack_txs: Vec<_> = test_stackers + // .clone() + // .iter() + // .map(|test_stacker| { + // info!( + // "Making PoX-4 lockup for {}; {}", + // test_stacker.amount, + // test_stacker.amount > min_ustx + // ); + // let pox_addr = test_stacker.pox_addr.clone().unwrap(); + // let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); + // let signature = make_signer_key_signature( + // &pox_addr, + // &test_stacker.signer_private_key, + // reward_cycle.into(), + // &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, + // 12_u128, + // max_amount, + // 1, + // ); + // make_pox_4_lockup( + // &test_stacker.stacker_private_key, + // 0, + // test_stacker.amount, + // &pox_addr, + // 12, + // &StacksPublicKey::from_private(&test_stacker.signer_private_key), + // tip.block_height, + // Some(signature), + // max_amount, + // 1, + // ) + // }) + // .collect(); + // latest_block = peer.tenure_with_txs(&stack_txs, &mut coinbase_nonce); + // } + // while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + // latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // // if we reach epoch 2.1, perform the check + // // if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + // // assert_latest_was_burn(&mut peer); + // // } + // } + } +} + +pub fn tenure_with_txs( + peer: &mut TestPeer, + txs: &[StacksTransaction], + coinbase_nonce: &mut usize, + test_signers: &mut TestSigners, + use_nakamoto: bool, +) -> StacksBlockId { + if use_nakamoto { + let (burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + let blocks_and_sizes = peer.make_nakamoto_tenure( + tenure_change_tx, + coinbase_tx, + test_signers, + |_miner, _chainstate, _sort_dbconn, _blocks| { + info!("Building nakamoto block. Blocks len {}", _blocks.len()); + if _blocks.len() == 0 { + txs.to_vec() + } else { + vec![] + } + }, + ); + let blocks: Vec<_> = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + // let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + // .unwrap() + // .unwrap(); + let latest_block = sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + // let tip = StacksBlockId:: + latest_block + } else { + peer.tenure_with_txs(txs, coinbase_nonce) + } } + pub fn get_last_block_sender_transactions( observer: &TestEventObserver, address: StacksAddress, @@ -8434,7 +8676,7 @@ fn missed_slots_no_unlock() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let (epochs, mut pox_constants) = make_test_epochs_pox(); + let (epochs, mut pox_constants) = make_test_epochs_pox(false); pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; let mut burnchain = Burnchain::default_unittest( @@ -8685,7 +8927,7 @@ fn no_lockups_2_5() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let (epochs, mut pox_constants) = make_test_epochs_pox(); + let (epochs, mut pox_constants) = make_test_epochs_pox(false); pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; let mut burnchain = Burnchain::default_unittest( diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index 37b2e016b7..bf3b5f312c 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -171,7 +171,8 @@ fn make_signer_sanity_panic_1() { #[test] fn signers_get_config() { - let (burnchain, mut peer, keys, latest_block, ..) = prepare_pox4_test(function_name!(), None); + let (burnchain, mut peer, keys, latest_block, ..) = + prepare_pox4_test(function_name!(), None, false); assert_eq!( readonly_call( diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 05477bb08c..462a9fcb97 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -89,6 +89,8 @@ pub struct NakamotoBootPlan { pub test_signers: TestSigners, pub observer: Option, pub num_peers: usize, + /// Whether to add an initial balance for `private_key`'s account + pub add_default_balance: bool, } impl NakamotoBootPlan { @@ -103,6 +105,7 @@ impl NakamotoBootPlan { test_signers, observer: Some(TestEventObserver::new()), num_peers: 0, + add_default_balance: true, } } @@ -347,8 +350,12 @@ impl NakamotoBootPlan { + 1) .into(), )); - peer_config.initial_balances = - vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config.initial_balances = vec![]; + if self.add_default_balance { + peer_config + .initial_balances + .push((addr.to_account_principal(), 1_000_000_000_000_000_000)); + } peer_config .initial_balances .append(&mut self.initial_balances.clone()); @@ -467,6 +474,17 @@ impl NakamotoBootPlan { .block_height_to_reward_cycle(sortition_height.into()) .unwrap(); + let sortdb = peer.sortdb(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip_index_block = tip.get_canonical_stacks_block_id(); + + let min_ustx = with_sortdb(peer, |chainstate, sortdb| { + chainstate.get_stacking_minimum(sortdb, &tip_index_block) + }) + .unwrap(); + + info!("Minimum USTX for stacking: {}", min_ustx); + // Make all the test Stackers stack let stack_txs: Vec<_> = peer .config @@ -475,6 +493,11 @@ impl NakamotoBootPlan { .unwrap_or(vec![]) .iter() .map(|test_stacker| { + info!( + "Making PoX-4 lockup for {}; {}", + test_stacker.amount, + test_stacker.amount > min_ustx + ); let pox_addr = test_stacker .pox_addr .clone() From de2e88e38e15a4715ef41eae3b6a030a800756d5 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 13 Aug 2024 13:04:38 -0700 Subject: [PATCH 0780/1400] fix: remove comments & debugging log --- .../src/chainstate/stacks/boot/pox_4_tests.rs | 80 ------------------- stackslib/src/net/tests/mod.rs | 16 ---- 2 files changed, 96 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index db890316ec..779da57e3c 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -8435,8 +8435,6 @@ pub fn prepare_pox4_test<'a>( .map(|key| TestStacker { signer_private_key: key.clone(), stacker_private_key: key.clone(), - // amount: u64::MAX as u128 - 10000, - // amount: 2048000 * POX_THRESHOLD_STEPS_USTX * 2, amount: 1024 * POX_THRESHOLD_STEPS_USTX, pox_addr: Some(pox_addr_from(&key)), max_amount: None, @@ -8466,7 +8464,6 @@ pub fn prepare_pox4_test<'a>( info!("---- Booting into Nakamoto Peer ----"); let peer = boot_plan.boot_into_nakamoto_peer(vec![], observer); - // let mut blocks = vec![]; let sort_db = peer.sortdb.as_ref().unwrap(); let latest_block = sort_db .index_handle_at_tip() @@ -8489,8 +8486,6 @@ pub fn prepare_pox4_test<'a>( test_signers, ) } else { - // assert_eq!(burnchain.pox_constants.reward_slots(), 6); - // Advance into pox4 let target_height = burnchain.pox_constants.pox_4_activation_height; let mut coinbase_nonce = 0; @@ -8512,81 +8507,6 @@ pub fn prepare_pox4_test<'a>( coinbase_nonce, TestSigners::new(vec![]), ) - // let target_epoch = if use_nakamoto { - // StacksEpochId::Epoch30 - // } else { - // StacksEpochId::Epoch25 - // }; - // let height_25 = epochs - // .iter() - // .find(|e| e.epoch_id == StacksEpochId::Epoch25) - // .unwrap() - // .start_height; - // // let height_25 = epochs.iter().find(|e| e.epoch_id == StacksEpochId::Epoch25).unwrap().start_height; - // let target_height = epochs - // .iter() - // .find(|e| e.epoch_id == target_epoch) - // .unwrap() - // .start_height; - // let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - // if use_nakamoto { - // // Go to 2.5, stack, then 3.0 - // while get_tip(peer.sortdb.as_ref()).block_height < height_25 { - // latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - // } - - // let tip = get_tip(peer.sortdb.as_ref()); - // let reward_cycle = peer.get_reward_cycle() as u128; - - // let min_ustx = with_sortdb(&mut peer, |chainstate, sortdb| { - // chainstate.get_stacking_minimum(sortdb, &latest_block) - // }) - // .unwrap(); - // info!("Building stacking txs"); - // // Make all the test Stackers stack - // let stack_txs: Vec<_> = test_stackers - // .clone() - // .iter() - // .map(|test_stacker| { - // info!( - // "Making PoX-4 lockup for {}; {}", - // test_stacker.amount, - // test_stacker.amount > min_ustx - // ); - // let pox_addr = test_stacker.pox_addr.clone().unwrap(); - // let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); - // let signature = make_signer_key_signature( - // &pox_addr, - // &test_stacker.signer_private_key, - // reward_cycle.into(), - // &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, - // 12_u128, - // max_amount, - // 1, - // ); - // make_pox_4_lockup( - // &test_stacker.stacker_private_key, - // 0, - // test_stacker.amount, - // &pox_addr, - // 12, - // &StacksPublicKey::from_private(&test_stacker.signer_private_key), - // tip.block_height, - // Some(signature), - // max_amount, - // 1, - // ) - // }) - // .collect(); - // latest_block = peer.tenure_with_txs(&stack_txs, &mut coinbase_nonce); - // } - // while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { - // latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - // // if we reach epoch 2.1, perform the check - // // if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { - // // assert_latest_was_burn(&mut peer); - // // } - // } } } diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 462a9fcb97..de07c60140 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -474,17 +474,6 @@ impl NakamotoBootPlan { .block_height_to_reward_cycle(sortition_height.into()) .unwrap(); - let sortdb = peer.sortdb(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - let tip_index_block = tip.get_canonical_stacks_block_id(); - - let min_ustx = with_sortdb(peer, |chainstate, sortdb| { - chainstate.get_stacking_minimum(sortdb, &tip_index_block) - }) - .unwrap(); - - info!("Minimum USTX for stacking: {}", min_ustx); - // Make all the test Stackers stack let stack_txs: Vec<_> = peer .config @@ -493,11 +482,6 @@ impl NakamotoBootPlan { .unwrap_or(vec![]) .iter() .map(|test_stacker| { - info!( - "Making PoX-4 lockup for {}; {}", - test_stacker.amount, - test_stacker.amount > min_ustx - ); let pox_addr = test_stacker .pox_addr .clone() From 0fa2d24c1672fb4fb546817acd3a9043434ff83f Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 13 Aug 2024 16:43:39 -0700 Subject: [PATCH 0781/1400] feat: update majority of pox4 tests to use nakamoto case --- .../src/chainstate/stacks/boot/pox_4_tests.rs | 395 ++++++++++++------ 1 file changed, 271 insertions(+), 124 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 779da57e3c..362ac3670a 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -97,6 +97,14 @@ pub fn get_tip(sortdb: Option<&SortitionDB>) -> BlockSnapshot { SortitionDB::get_canonical_burn_chain_tip(&sortdb.unwrap().conn()).unwrap() } +/// Helper rstest template for running tests in both 2.5 +/// and 3.0 epochs. +#[template] +#[rstest] +#[case::epoch_30(true)] +#[case::epoch_25(false)] +fn nakamoto_cases(#[case] use_nakamoto: bool) {} + fn make_simple_pox_4_lock( key: &StacksPrivateKey, peer: &mut TestPeer, @@ -4676,12 +4684,12 @@ fn stack_agg_increase() { assert_eq!(bob_aggregate_commit_reward_index, &Value::UInt(1)); } -#[test] -fn stack_increase_verify_signer_key() { +#[apply(nakamoto_cases)] +fn stack_increase_verify_signer_key(use_nakamoto: bool) { let lock_period = 1; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -4919,7 +4927,8 @@ fn stack_increase_verify_signer_key() { 1, ); - let latest_block = peer.tenure_with_txs( + let latest_block = tenure_with_txs( + &mut peer, &[ stack_tx, invalid_cycle_tx, @@ -4933,6 +4942,7 @@ fn stack_increase_verify_signer_key() { stack_increase, ], &mut coinbase_nonce, + &mut test_signers, ); let txs = get_last_block_sender_transactions(&observer, stacker_addr); @@ -4961,15 +4971,15 @@ fn stack_increase_verify_signer_key() { .expect("Expected ok result from tx"); } -#[test] +#[apply(nakamoto_cases)] /// Verify that when calling `stack-increase`, the function /// fails if the signer key for each cycle being updated is not the same /// as the provided `signer-key` argument -fn stack_increase_different_signer_keys() { +fn stack_increase_different_signer_keys(use_nakamoto: bool) { let lock_period = 1; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -5054,8 +5064,12 @@ fn stack_increase_different_signer_keys() { 1, ); - let latest_block = - peer.tenure_with_txs(&[stack_tx, extend_tx, stack_increase], &mut coinbase_nonce); + let latest_block = tenure_with_txs( + &mut peer, + &[stack_tx, extend_tx, stack_increase], + &mut coinbase_nonce, + &mut test_signers, + ); let txs = get_last_block_sender_transactions(&observer, stacker_addr.clone()); @@ -5151,10 +5165,8 @@ fn balances_from_keys( .collect() } -#[rstest] -#[case(true)] -#[case(false)] -fn stack_stx_signer_key(#[case] use_nakamoto: bool) { +#[apply(nakamoto_cases)] +fn stack_stx_signer_key(use_nakamoto: bool) { let observer = TestEventObserver::new(); let ( burnchain, @@ -5212,13 +5224,7 @@ fn stack_stx_signer_key(#[case] use_nakamoto: bool) { )]; // let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); - let latest_block = tenure_with_txs( - &mut peer, - &txs, - &mut coinbase_nonce, - &mut test_signers, - use_nakamoto, - ); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); // peer.make_nakamoto_tenure(tenure_change, coinbase, signers, block_builder) let stacking_state = get_stacking_state_pox_4( &mut peer, @@ -5267,12 +5273,19 @@ fn stack_stx_signer_key(#[case] use_nakamoto: bool) { ); } -#[test] +#[apply(nakamoto_cases)] /// Test `stack-stx` using signer key authorization -fn stack_stx_signer_auth() { +fn stack_stx_signer_auth(use_nakamoto: bool) { let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5336,7 +5349,7 @@ fn stack_stx_signer_auth() { let txs = vec![failed_stack_tx, enable_auth_tx, valid_stack_tx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -5374,13 +5387,13 @@ fn stack_stx_signer_auth() { assert_eq!(enable_tx_result, Value::okay_true()); } -#[test] +#[apply(nakamoto_cases)] /// Test `stack-aggregation-commit` using signer key authorization -fn stack_agg_commit_signer_auth() { +fn stack_agg_commit_signer_auth(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -5479,7 +5492,7 @@ fn stack_agg_commit_signer_auth() { valid_agg_tx, ]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegate_txs = get_last_block_sender_transactions(&observer, delegate_addr); @@ -5494,14 +5507,14 @@ fn stack_agg_commit_signer_auth() { .expect("Expected ok result from stack-agg-commit tx"); } -#[test] +#[apply(nakamoto_cases)] /// Test `stack-extend` using signer key authorization /// instead of signatures -fn stack_extend_signer_auth() { +fn stack_extend_signer_auth(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -5585,7 +5598,7 @@ fn stack_extend_signer_auth() { let txs = vec![stack_tx, invalid_cycle_tx, enable_auth_tx, valid_tx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacker_txs = get_last_block_sender_transactions(&observer, stacker_addr); @@ -5601,13 +5614,13 @@ fn stack_extend_signer_auth() { .expect("Expected ok result from stack-extend tx"); } -#[test] +#[apply(nakamoto_cases)] /// Test `set-signer-key-authorization` function -fn test_set_signer_key_auth() { +fn test_set_signer_key_auth(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -5684,7 +5697,8 @@ fn test_set_signer_key_auth() { 1, ); - let latest_block = peer.tenure_with_txs( + let latest_block = tenure_with_txs( + &mut peer, &[ invalid_enable_tx, invalid_tx_period, @@ -5692,6 +5706,7 @@ fn test_set_signer_key_auth() { disable_auth_tx, ], &mut coinbase_nonce, + &mut test_signers, ); let alice_txs = get_last_block_sender_transactions(&observer, alice_addr); @@ -5761,7 +5776,12 @@ fn test_set_signer_key_auth() { 1, ); - let latest_block = peer.tenure_with_txs(&[enable_auth_tx], &mut coinbase_nonce); + let latest_block = tenure_with_txs( + &mut peer, + &[enable_auth_tx], + &mut coinbase_nonce, + &mut test_signers, + ); let signer_key_enabled = get_signer_key_authorization_pox_4( &mut peer, @@ -5793,7 +5813,12 @@ fn test_set_signer_key_auth() { 1, ); - let latest_block = peer.tenure_with_txs(&[disable_auth_tx], &mut coinbase_nonce); + let latest_block = tenure_with_txs( + &mut peer, + &[disable_auth_tx], + &mut coinbase_nonce, + &mut test_signers, + ); let signer_key_enabled = get_signer_key_authorization_pox_4( &mut peer, @@ -5810,11 +5835,18 @@ fn test_set_signer_key_auth() { assert_eq!(signer_key_enabled.unwrap(), false); } -#[test] -fn stack_extend_signer_key() { +#[apply(nakamoto_cases)] +fn stack_extend_signer_key(use_nakamoto: bool) { let lock_period = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), None, false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), None, use_nakamoto); let mut stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5864,7 +5896,7 @@ fn stack_extend_signer_key() { stacker_nonce += 1; - let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let mut latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let signature = make_signer_key_signature( &pox_addr, @@ -5887,7 +5919,12 @@ fn stack_extend_signer_key() { 1, )]; - latest_block = peer.tenure_with_txs(&update_txs, &mut coinbase_nonce); + latest_block = tenure_with_txs( + &mut peer, + &update_txs, + &mut coinbase_nonce, + &mut test_signers, + ); let new_stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -5900,21 +5937,35 @@ fn stack_extend_signer_key() { let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); let extend_cycle_ht = burnchain.reward_cycle_to_block_height(extend_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!( - PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap(), - reward_entry.reward_address - ); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!(&reward_entry.signer.unwrap(), signer_bytes.as_slice(),); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, extend_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, extend_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!( - PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap(), - reward_entry.reward_address + &reward_entry.signer.unwrap(), + signer_extend_bytes.as_slice(), ); assert_eq!( &reward_entry.signer.unwrap(), @@ -5922,11 +5973,18 @@ fn stack_extend_signer_key() { ); } -#[test] -fn delegate_stack_stx_signer_key() { +#[apply(nakamoto_cases)] +fn delegate_stack_stx_signer_key(use_nakamoto: bool) { let lock_period = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), None, false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), None, use_nakamoto); let stacker_nonce = 0; let stacker_key = &keys[0]; @@ -6000,7 +6058,7 @@ fn delegate_stack_stx_signer_key() { ), ]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegation_state = get_delegation_state_pox_4( &mut peer, @@ -6019,13 +6077,18 @@ fn delegate_stack_stx_signer_key() { .expect_tuple(); let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!( - PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap(), - reward_entry.reward_address - ); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!( &reward_entry.signer.unwrap(), signer_key.to_bytes_compressed().as_slice() @@ -6039,11 +6102,18 @@ fn delegate_stack_stx_signer_key() { // // This test asserts that the signing key in Alice's stacking state // is equal to Bob's 'new' signer key. -#[test] -fn delegate_stack_stx_extend_signer_key() { +#[apply(nakamoto_cases)] +fn delegate_stack_stx_extend_signer_key(use_nakamoto: bool) { let lock_period: u128 = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), None, false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), None, use_nakamoto); let alice_nonce = 0; let alice_stacker_key = &keys[0]; @@ -6094,7 +6164,7 @@ fn delegate_stack_stx_extend_signer_key() { // Both are pox_4 helpers found in mod.rs let txs = vec![delegate_stx, delegate_stack_stx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegation_state = get_delegation_state_pox_4( &mut peer, @@ -6199,7 +6269,7 @@ fn delegate_stack_stx_extend_signer_key() { // Next tx arr calls a delegate_stack_extend pox_4 helper found in mod.rs let txs = vec![delegate_stack_extend, agg_tx_0, agg_tx_1]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let new_stacking_state = get_stacking_state_pox_4(&mut peer, &latest_block, &alice_principal) .unwrap() .expect_tuple(); @@ -6207,16 +6277,32 @@ fn delegate_stack_stx_extend_signer_key() { let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); let extend_cycle_ht = burnchain.reward_cycle_to_block_height(extend_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!(pox_addr, reward_entry.reward_address); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!(&reward_entry.signer.unwrap(), signer_bytes.as_slice(),); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, extend_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!(pox_addr, reward_entry.reward_address); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, extend_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!( &reward_entry.signer.unwrap(), signer_extend_bytes.as_slice(), @@ -6230,12 +6316,19 @@ fn delegate_stack_stx_extend_signer_key() { // // This test asserts that Alice's total-locked is equal to // twice the stacking minimum after calling stack-increase. -#[test] -fn stack_increase() { +#[apply(nakamoto_cases)] +fn stack_increase(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut alice_nonce = 0; let alice_stacking_private_key = &keys[0]; @@ -6243,6 +6336,7 @@ fn stack_increase() { let signing_sk = StacksPrivateKey::from_seed(&[1]); let signing_pk = StacksPublicKey::from_private(&signing_sk); let signing_bytes = signing_pk.to_bytes_compressed(); + let alice_balance = get_balance(&mut peer, &alice_address.into()); let min_ustx = get_stacking_minimum(&mut peer, &latest_block); let pox_addr = PoxAddress::from_legacy( @@ -6278,7 +6372,7 @@ fn stack_increase() { // Initial tx arr includes a stack_stx pox_4 helper found in mod.rs let txs = vec![stack_stx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -6310,7 +6404,7 @@ fn stack_increase() { ); // Next tx arr includes a stack_increase pox_4 helper found in mod.rs let txs = vec![stack_increase]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacker_transactions = get_last_block_sender_transactions(&observer, alice_address); let actual_result = stacker_transactions.first().cloned().unwrap().result; @@ -6342,12 +6436,29 @@ fn stack_increase() { ("auth-id", Value::UInt(1)), ]); + let alice_expected_balance = alice_balance - min_ustx; + + // Compute the expected unlock height because the 3.0 and 2.5 cases + // have different PoX constants + let cycle_len = burnchain.pox_constants.reward_cycle_length as u128; + let unlock_cycle = burnchain + .pox_constants + .block_height_to_reward_cycle( + 0, + ((block_height as u128) + ((lock_period + 1) * cycle_len)) + .try_into() + .unwrap(), + ) + .unwrap(); + let expected_unlock_height = + unlock_cycle * (burnchain.pox_constants.reward_cycle_length as u64); + let common_data = PoxPrintFields { op_name: "stack-increase".to_string(), stacker: Value::Principal(PrincipalData::from(alice_address.clone())), - balance: Value::UInt(10234866375000), - locked: Value::UInt(5133625000), - burnchain_unlock_height: Value::UInt(125), + balance: Value::UInt(alice_expected_balance), + locked: Value::UInt(min_ustx), + burnchain_unlock_height: Value::UInt(expected_unlock_height as u128), }; check_pox_print_event(&increase_event, common_data, increase_op_data); @@ -6360,10 +6471,18 @@ fn stack_increase() { .block_height_to_reward_cycle(block_height) .unwrap(); let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!(pox_addr, reward_entry.reward_address); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!(&reward_entry.signer.unwrap(), &signing_bytes.as_slice()); } @@ -6373,12 +6492,19 @@ fn stack_increase() { // // This test asserts that Alice's total-locked is equal to // twice the stacking minimum after calling delegate-stack-increase. -#[test] -fn delegate_stack_increase() { +#[apply(nakamoto_cases)] +fn delegate_stack_increase(use_nakamoto: bool) { let lock_period: u128 = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let alice_nonce = 0; let alice_key = &keys[0]; @@ -6425,7 +6551,7 @@ fn delegate_stack_increase() { // Initial tx arr includes a delegate_stx & delegate_stack_stx pox_4 helper found in mod.rs let txs = vec![delegate_stx, delegate_stack_stx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); bob_nonce += 1; @@ -6464,7 +6590,7 @@ fn delegate_stack_increase() { // Next tx arr includes a delegate_increase pox_4 helper found in mod.rs let txs = vec![delegate_increase, agg_tx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegate_transactions = get_last_block_sender_transactions(&observer, key_to_stacks_addr(bob_delegate_key)); @@ -6489,9 +6615,18 @@ fn delegate_stack_increase() { // test that the reward set contains the increased amount and the expected key let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!(pox_addr, reward_entry.reward_address); assert_eq!(min_ustx * 2, reward_entry.amount_stacked); assert_eq!(&reward_entry.signer.unwrap(), signer_pk_bytes.as_slice()); @@ -8107,12 +8242,19 @@ fn test_scenario_four() { // In this test case, Alice delegates twice the stacking minimum to Bob. // Bob stacks Alice's funds, and then immediately tries to stacks-aggregation-increase. // This should return a clarity user error. -#[test] -fn delegate_stack_increase_err() { +#[apply(nakamoto_cases)] +fn delegate_stack_increase_err(use_nakamoto: bool) { let lock_period: u128 = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let alice_nonce = 0; let alice_key = &keys[0]; @@ -8158,7 +8300,7 @@ fn delegate_stack_increase_err() { let txs = vec![delegate_stx, delegate_stack_stx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); bob_nonce += 1; @@ -8172,13 +8314,15 @@ fn delegate_stack_increase_err() { 1, ); + let slot_idx = if use_nakamoto { 1 } else { 0 }; + // Bob's Aggregate Increase let bobs_aggregate_increase = make_pox_4_aggregation_increase( &bob_delegate_key, bob_nonce, &pox_addr, next_reward_cycle.into(), - 0, + slot_idx, Some(signature), &signer_pk, u128::MAX, @@ -8187,7 +8331,7 @@ fn delegate_stack_increase_err() { let txs = vec![bobs_aggregate_increase]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegate_transactions = get_last_block_sender_transactions(&observer, key_to_stacks_addr(bob_delegate_key)); @@ -8202,7 +8346,11 @@ fn delegate_stack_increase_err() { // test that the reward set is empty let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert!(reward_set.is_empty()); + if use_nakamoto { + assert_eq!(reward_set.len(), 1); + } else { + assert!(reward_set.is_empty()); + } } pub fn get_stacking_state_pox_4( @@ -8411,7 +8559,7 @@ pub fn prepare_pox4_test<'a>( StacksBlockId, u64, usize, - TestSigners, + Option, ) { let (epochs, pox_constants) = make_test_epochs_pox(use_nakamoto); @@ -8483,7 +8631,7 @@ pub fn prepare_pox4_test<'a>( latest_block, block_height, coinbase_nonce, - test_signers, + Some(test_signers), ) } else { // Advance into pox4 @@ -8505,7 +8653,7 @@ pub fn prepare_pox4_test<'a>( latest_block, block_height, coinbase_nonce, - TestSigners::new(vec![]), + None, ) } } @@ -8514,10 +8662,9 @@ pub fn tenure_with_txs( peer: &mut TestPeer, txs: &[StacksTransaction], coinbase_nonce: &mut usize, - test_signers: &mut TestSigners, - use_nakamoto: bool, + test_signers: &mut Option, ) -> StacksBlockId { - if use_nakamoto { + if let Some(test_signers) = test_signers { let (burn_ops, mut tenure_change, miner_key) = peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); From f746932617340c1c1b57a9a6e46864326329f128 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 14 Aug 2024 16:58:21 -0700 Subject: [PATCH 0782/1400] wip: updating pox4_scenario_tests --- stackslib/src/chainstate/nakamoto/mod.rs | 6 + .../src/chainstate/nakamoto/test_signers.rs | 6 + .../src/chainstate/stacks/boot/pox_4_tests.rs | 430 ++++++++++++++---- 3 files changed, 355 insertions(+), 87 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d059a96cb6..a9065a27fc 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -835,6 +835,12 @@ impl NakamotoBlockHeader { public_key_bytes.copy_from_slice(&public_key.to_bytes_compressed()[..]); let (signer, signer_index) = signers_by_pk.get(&public_key_bytes).ok_or_else(|| { + warn!( + "Found an invalid public key. Reward set has {} signers. Chain length {}. Signatures length {}", + signers.len(), + self.chain_length, + self.signer_signature.len(), + ); ChainstateError::InvalidStacksBlock(format!( "Public key {} not found in the reward set", public_key.to_hex() diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 13d7f2ff1e..4ab7613751 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -324,6 +324,12 @@ impl TestSigners { .map(|s| s.signing_key.to_vec()) .collect::>(); + info!( + "TestSigners: Signing Nakamoto block. TestSigners has {} signers. Reward set has {} signers.", + test_signers_by_pk.len(), + reward_set_keys.len(), + ); + let mut signatures = Vec::with_capacity(reward_set_keys.len()); let mut missing_keys = 0; diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 362ac3670a..f879479d20 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -4244,12 +4244,24 @@ fn advance_to_block_height( txs: &[StacksTransaction], peer_nonce: &mut usize, target_height: u64, -) -> (StacksBlockId, TestEventObserverBlock) { + test_signers: &mut Option, +) -> ( + StacksBlockId, + TestEventObserverBlock, + Vec, +) { let mut tx_block = None; let mut latest_block = None; let mut passed_txs = txs; while peer.get_burn_block_height() < target_height { - latest_block = Some(peer.tenure_with_txs(&passed_txs, peer_nonce)); + info!( + "Advancing to block height: {} from {} with {} txs", + target_height, + peer.get_burn_block_height(), + passed_txs.len() + ); + // latest_block = Some(peer.tenure_with_txs(&passed_txs, peer_nonce)); + latest_block = Some(tenure_with_txs(peer, &passed_txs, peer_nonce, test_signers)); passed_txs = &[]; if tx_block.is_none() { tx_block = Some(observer.get_blocks().last().unwrap().clone()); @@ -4257,7 +4269,13 @@ fn advance_to_block_height( } let latest_block = latest_block.expect("Failed to get tip"); let tx_block = tx_block.expect("Failed to get tx block"); - (latest_block, tx_block) + let tx_block_receipts = if test_signers.is_some() { + tx_block.receipts[1..].to_vec() // remove TenureChange + } else { + tx_block.receipts.clone() + }; + // let tx_block_receipts = tx_block.receipts[2..].to_vec(); + (latest_block, tx_block, tx_block_receipts) } #[test] @@ -4456,12 +4474,14 @@ fn stack_agg_increase() { // Advance to next block in order to collect aggregate commit reward index target_height += 1; - let (latest_block, tx_block) = advance_to_block_height( + let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &txs, &mut peer_nonce, target_height.into(), + &mut None, + // Some(&mut test_signers), ); // Get Bob's aggregate commit reward index @@ -4602,12 +4622,14 @@ fn stack_agg_increase() { // Advance to next block in order to attempt aggregate increase target_height += 1; - let (latest_block, tx_block) = advance_to_block_height( + let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &txs, &mut peer_nonce, target_height.into(), + // &mut test_signers, + &mut None, ); // Fetch the error aggregate increase result & check that the err is ERR_INVALID_SIGNER_KEY @@ -6636,6 +6658,7 @@ pub fn pox_4_scenario_test_setup<'a>( test_name: &str, observer: &'a TestEventObserver, initial_balances: Vec<(PrincipalData, u64)>, + use_nakamoto: bool, ) -> ( TestPeer<'a>, usize, @@ -6643,9 +6666,12 @@ pub fn pox_4_scenario_test_setup<'a>( u128, u128, u128, - u128, TestPeerConfig, + Option, ) { + if use_nakamoto { + return pox_4_scenario_test_setup_nakamoto(test_name, observer, initial_balances); + } // Setup code extracted from your original test let test_signers = TestSigners::new(vec![]); let aggregate_public_key = test_signers.aggregate_public_key.clone(); @@ -6702,20 +6728,132 @@ pub fn pox_4_scenario_test_setup<'a>( peer, peer_nonce, burn_block_height, - target_height as u128, reward_cycle as u128, next_reward_cycle as u128, min_ustx as u128, peer_config.clone(), + None, + ) +} + +pub fn pox_4_scenario_test_setup_nakamoto<'a>( + test_name: &str, + observer: &'a TestEventObserver, + initial_balances: Vec<(PrincipalData, u64)>, +) -> ( + TestPeer<'a>, + usize, + u64, + u128, + u128, + u128, + TestPeerConfig, + Option, +) { + let (epochs, pox_constants) = make_test_epochs_pox(true); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let (peer, keys) = instantiate_pox_peer_with_epoch( + &burnchain, + test_name, + Some(epochs.clone()), + Some(observer), + ); + + let test_key = keys[3].clone(); + let test_keys = vec![test_key.clone()]; + let test_addr = key_to_stacks_addr(&test_key); + let test_signers = TestSigners::new(vec![test_key.clone()]); + let aggregate_public_key = test_signers.aggregate_public_key.clone(); + + let private_key = StacksPrivateKey::from_seed(&[2]); + let test_signers = TestSigners::new(test_keys.clone()); + let addrs: Vec = test_keys.iter().map(|pk| key_to_stacks_addr(pk)).collect(); + let initial_stacker_balance = initial_balances + .get(0) + .expect("Expected at least 1 initial balance") + .1; + let test_stackers = vec![TestStacker { + signer_private_key: test_key.clone(), + stacker_private_key: test_key.clone(), + amount: initial_stacker_balance as u128, + pox_addr: Some(pox_addr_from(&test_key)), + max_amount: None, + }]; + let mut peer_config = TestPeerConfig::default(); + peer_config.aggregate_public_key = Some(aggregate_public_key.clone()); + let mut pox_constants = peer_config.clone().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 41; + let mut boot_plan = NakamotoBootPlan::new(test_name) + .with_test_stackers(test_stackers) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key); + boot_plan.add_default_balance = false; + + // let balances: Vec<(PrincipalData, u64)> = addrs + // .clone() + // .into_iter() + // .map(|addr| (addr.into(), (1024 * POX_THRESHOLD_STEPS_USTX) as u64)) + // .collect(); + boot_plan.initial_balances = initial_balances; + boot_plan.pox_constants = pox_constants.clone(); + burnchain.pox_constants = pox_constants.clone(); + + info!("---- Booting into Nakamoto Peer ----"); + let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], Some(observer)); + let sort_db = peer.sortdb.as_ref().unwrap(); + let latest_block = sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + let coinbase_nonce = 0; + + let burn_block_height = get_tip(peer.sortdb.as_ref()).block_height; + // let reward_cycle = get_current_reward_cycle(&peer, &peer.config.burnchain); + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_block_height) + .unwrap() as u128; + let min_ustx = get_stacking_minimum(&mut peer, &latest_block); + + info!("Block height: {}", burn_block_height); + + // ( + // burnchain, + // peer, + // keys, + // latest_block, + // block_height, + // coinbase_nonce, + // Some(test_signers), + // ) + ( + peer, + coinbase_nonce, + burn_block_height, + reward_cycle as u128, + reward_cycle.wrapping_add(1), + min_ustx as u128, + peer_config.clone(), + Some(test_signers), ) } +#[apply(nakamoto_cases)] // In this test two solo stacker-signers Alice & Bob sign & stack // for two reward cycles. Alice provides a signature, Bob uses // 'set-signer-key-authorizations' to authorize. Two cycles later, // when no longer stacked, they both try replaying their auths. -#[test] -fn test_scenario_one() { +fn test_scenario_one(use_nakamoto: bool) { // Alice solo stacker-signer setup let mut alice = StackerSignerInfo::new(); // Bob solo stacker-signer setup @@ -6731,12 +6869,24 @@ fn test_scenario_one() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, peer_config, - ) = pox_4_scenario_test_setup("test_scenario_one", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_one", + &observer, + initial_balances, + use_nakamoto, + ); + + // Add alice and bob to test_signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers + .signer_keys + .extend(vec![alice.private_key.clone(), bob.private_key.clone()]); + } // Alice Signatures let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; @@ -6863,8 +7013,14 @@ fn test_scenario_one() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer.config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Verify Alice stacked let (pox_address, first_reward_cycle, lock_period, _indices) = @@ -6880,9 +7036,26 @@ fn test_scenario_one() { assert_eq!(first_reward_cycle, next_reward_cycle); assert_eq!(pox_address, bob.pox_address); + info!("Got {} receipts", receipts.clone().len()); + + for receipt in receipts.clone() { + info!("Receipt: {:?}", receipt); + } + + let signer_keys_len = test_signers + .clone() + .map(|t| t.signer_keys.len()) + .unwrap_or(0); + // let signer_keys_len = if let Some(ref test_signers) = test_signers { + // test_signers.signer_keys.len() + // } else { + // 0 + // }; + + info!("Test signers now has {} keys", signer_keys_len); + // 1. Check bob's low authorization transaction - let bob_tx_result_low = tx_block - .receipts + let bob_tx_result_low = receipts .get(1) .unwrap() .result @@ -6892,8 +7065,7 @@ fn test_scenario_one() { assert_eq!(bob_tx_result_low, Value::Bool(true)); // 2. Check bob's expected authorization transaction - let bob_tx_result_ok = tx_block - .receipts + let bob_tx_result_ok = receipts .get(2) .unwrap() .result @@ -6903,8 +7075,7 @@ fn test_scenario_one() { assert_eq!(bob_tx_result_ok, Value::Bool(true)); // 3. Check alice's low stack transaction - let alice_tx_result_err = tx_block - .receipts + let alice_tx_result_err = receipts .get(3) .unwrap() .result @@ -6914,8 +7085,7 @@ fn test_scenario_one() { assert_eq!(alice_tx_result_err, Value::Int(38)); // Get alice's expected stack transaction - let alice_tx_result_ok = tx_block - .receipts + let alice_tx_result_ok = receipts .get(4) .unwrap() .result @@ -6958,8 +7128,7 @@ fn test_scenario_one() { assert_eq!(unlock_height_expected, unlock_height_actual); // 5. Check bob's error stack transaction - let bob_tx_result_err = tx_block - .receipts + let bob_tx_result_err = receipts .get(5) .unwrap() .result @@ -6969,8 +7138,7 @@ fn test_scenario_one() { assert_eq!(bob_tx_result_err, Value::Int(38)); // Get bob's expected stack transaction - let bob_tx_result_ok = tx_block - .receipts + let bob_tx_result_ok = receipts .get(6) .unwrap() .result @@ -7044,14 +7212,30 @@ fn test_scenario_one() { bob.nonce += 1; let txs = vec![alice_vote, bob_vote]; - let target_reward_cycle = 8; + let target_reward_cycle = next_reward_cycle; // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) let mut target_height = peer .config .burnchain .reward_cycle_to_block_height(target_reward_cycle as u64); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + info!("Submitting block with vote transactions"); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height + 1, + &mut test_signers, + ); + info!("Submitting empty block."); + let (latest_block, _tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &vec![], + &mut peer_nonce, + target_height + 2, + &mut test_signers, + ); let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) .expect("No approved key found"); @@ -7089,12 +7273,17 @@ fn test_scenario_one() { // Commit replay txs & advance to the second burn block of reward cycle 8 (block 162) target_height += 1; - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check Alice replay, expect (err 35) - ERR_INVALID_SIGNATURE_PUBKEY - let alice_replay_result = tx_block - .receipts + let alice_replay_result = receipts .get(1) .unwrap() .result @@ -7104,8 +7293,7 @@ fn test_scenario_one() { assert_eq!(alice_replay_result, Value::Int(35)); // Check Bob replay, expect (err 19) - ERR_SIGNER_AUTH_USED - let bob_tx_result = tx_block - .receipts + let bob_tx_result = receipts .get(2) .unwrap() .result @@ -7115,11 +7303,11 @@ fn test_scenario_one() { assert_eq!(bob_tx_result, Value::Int(19)); } +#[apply(nakamoto_cases)] // In this test two solo service signers, Alice & Bob, provide auth // for Carl & Dave, solo stackers. Alice provides a signature for Carl, // Bob uses 'set-signer-key...' for Dave. -#[test] -fn test_scenario_two() { +fn test_scenario_two(use_nakamoto: bool) { // Alice service signer setup let mut alice = StackerSignerInfo::new(); // Bob service signer setup @@ -7141,12 +7329,17 @@ fn test_scenario_two() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, peer_config, - ) = pox_4_scenario_test_setup("test_scenario_two", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_two", + &observer, + initial_balances, + use_nakamoto, + ); // Alice Signature For Carl let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; @@ -7250,8 +7443,14 @@ fn test_scenario_two() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Verify Carl Stacked let (pox_address, first_reward_cycle, lock_period, _indices) = @@ -7266,8 +7465,7 @@ fn test_scenario_two() { assert_eq!(pox_address, dave.pox_address); // Check Carl's malformed signature stack transaction (err 35 - INVALID_SIGNATURE_PUBKEY) - let carl_tx_result_err = tx_block - .receipts + let carl_tx_result_err = receipts .get(2) .unwrap() .result @@ -7277,8 +7475,7 @@ fn test_scenario_two() { assert_eq!(carl_tx_result_err, Value::Int(35)); // Check Carl's expected stack transaction - let carl_tx_result_ok = tx_block - .receipts + let carl_tx_result_ok = receipts .get(3) .unwrap() .result @@ -7307,8 +7504,7 @@ fn test_scenario_two() { assert_eq!(signer_key_expected, signer_key_actual); // Check Dave's malformed pox stack transaction (err 19 - INVALID_SIGNER_AUTH) - let dave_tx_result_err = tx_block - .receipts + let dave_tx_result_err = receipts .get(4) .unwrap() .result @@ -7318,8 +7514,7 @@ fn test_scenario_two() { assert_eq!(dave_tx_result_err, Value::Int(19)); // Check Dave's expected stack transaction - let dave_tx_result_ok = tx_block - .receipts + let dave_tx_result_ok = receipts .get(5) .unwrap() .result @@ -7408,18 +7603,23 @@ fn test_scenario_two() { bob_vote_expected, ]; - let target_reward_cycle = 8; + let target_reward_cycle = next_reward_cycle; // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) let target_height = peer .config .burnchain .reward_cycle_to_block_height(target_reward_cycle as u64); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check Alice's expected vote - let alice_expected_vote = tx_block - .receipts + let alice_expected_vote = receipts .get(1) .unwrap() .result @@ -7429,8 +7629,7 @@ fn test_scenario_two() { assert_eq!(alice_expected_vote, Value::Bool(true)); // Check Alice's duplicate vote (err 15 - DUPLICATE_ROUND) - let alice_duplicate_vote = tx_block - .receipts + let alice_duplicate_vote = receipts .get(2) .unwrap() .result @@ -7440,8 +7639,7 @@ fn test_scenario_two() { assert_eq!(alice_duplicate_vote, Value::UInt(15)); // Check Bob's round err vote (err 17 - INVALID_ROUND) - let bob_round_err_vote = tx_block - .receipts + let bob_round_err_vote = receipts .get(3) .unwrap() .result @@ -7451,8 +7649,7 @@ fn test_scenario_two() { assert_eq!(bob_round_err_vote, Value::UInt(17)); // Check Bob's expected vote - let bob_expected_vote = tx_block - .receipts + let bob_expected_vote = receipts .get(4) .unwrap() .result @@ -7462,10 +7659,10 @@ fn test_scenario_two() { assert_eq!(bob_expected_vote, Value::Bool(true)); } +#[apply(nakamoto_cases)] // In this scenario, two solo stacker-signers (Alice, Bob), one service signer (Carl), // one stacking pool operator (Dave), & three pool stackers (Eve, Frank, Grace). -#[test] -fn test_scenario_three() { +fn test_scenario_three(use_nakamoto: bool) { // Alice stacker signer setup let mut alice = StackerSignerInfo::new(); // Bob stacker signer setup @@ -7496,12 +7693,17 @@ fn test_scenario_three() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, peer_config, - ) = pox_4_scenario_test_setup("test_scenario_three", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_three", + &observer, + initial_balances, + use_nakamoto, + ); let lock_period = 2; let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; @@ -7743,8 +7945,14 @@ fn test_scenario_three() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Start of test checks // 1. Check that Alice can't stack with an lock_period different than signature @@ -7937,11 +8145,11 @@ fn test_scenario_three() { assert_eq!(david_aggregate_commit_indexed_ok, Value::UInt(2)); } +#[apply(nakamoto_cases)] // In this test scenario two solo stacker-signers (Alice & Bob), // test out the updated stack-extend & stack-increase functions // across multiple cycles. -#[test] -fn test_scenario_four() { +fn test_scenario_four(use_nakamoto: bool) { // Alice service signer setup let mut alice = StackerSignerInfo::new(); // Bob service signer setup @@ -7957,12 +8165,17 @@ fn test_scenario_four() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, peer_config, - ) = pox_4_scenario_test_setup("test_scenario_four", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_four", + &observer, + initial_balances, + use_nakamoto, + ); // Initial Alice Signature let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; @@ -8044,8 +8257,14 @@ fn test_scenario_four() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Verify Alice Stacked let (pox_address, first_reward_cycle, lock_period, _indices) = @@ -8115,8 +8334,14 @@ fn test_scenario_four() { .burnchain .reward_cycle_to_block_height(7 as u64) .wrapping_add(15); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check Alice's err vote (err 10 - INVALID_SIGNER_INDEX) let alice_err_vote = tx_block @@ -8199,8 +8424,14 @@ fn test_scenario_four() { alice_vote_expected_err.clone(), ]; let target_height = target_height.wrapping_add(1); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check Alice's err stack-extend tx (err 35 - INVALID_SIGNATURE_PUBKEY) let alice_err_extend = tx_block @@ -9092,8 +9323,8 @@ fn no_lockups_2_5() { // 5. Carl stx-stacks & self-signs for 3 reward cycle // 6. In Carl's second reward cycle, he calls stx-extend for 3 more reward cycles // 7. In Carl's third reward cycle, he calls stx-increase and should fail as he is straddling 2 keys -#[test] -fn test_scenario_five() { +#[apply(nakamoto_cases)] +fn test_scenario_five(use_nakamoto: bool) { // Alice service signer setup let mut alice = StackerSignerInfo::new(); // Bob service signer setup @@ -9136,12 +9367,17 @@ fn test_scenario_five() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, mut peer_config, - ) = pox_4_scenario_test_setup("test_scenario_five", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_five", + &observer, + initial_balances, + use_nakamoto, + ); // Lock periods for each stacker let carl_lock_period = 3; @@ -9400,8 +9636,14 @@ fn test_scenario_five() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check that all of David's stackers have been added to the reward set for (stacker, stacker_lock_period) in davids_stackers { @@ -9481,12 +9723,13 @@ fn test_scenario_five() { .config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); - let (latest_block, tx_block) = advance_to_block_height( + let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &vote_txs, &mut peer_nonce, target_height, + &mut test_signers, ); let mut observed_txs = HashSet::new(); @@ -9587,8 +9830,14 @@ fn test_scenario_five() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check that all of David's stackers are stacked for (stacker, stacker_lock_period) in davids_stackers { @@ -9673,12 +9922,13 @@ fn test_scenario_five() { .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); // Submit vote transactions - let (latest_block, tx_block) = advance_to_block_height( + let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &vote_txs, &mut peer_nonce, target_height, + &mut test_signers, ); let mut observed_txs = HashSet::new(); @@ -9787,8 +10037,14 @@ fn test_scenario_five() { (heidi.clone(), heidi_lock_period), ]; - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); for (stacker, _) in davids_stackers { let (pox_address, first_reward_cycle, _lock_period, _indices) = From fb4a2fa63eebbd3b5566612e4fc0907a80e724d7 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 14 Aug 2024 18:20:03 -0700 Subject: [PATCH 0783/1400] feat: update (most) pox4 scenario tests --- stackslib/src/burnchains/mod.rs | 2 +- stackslib/src/chainstate/burn/db/sortdb.rs | 10 +- stackslib/src/chainstate/coordinator/mod.rs | 4 +- stackslib/src/chainstate/coordinator/tests.rs | 1 - .../chainstate/nakamoto/coordinator/mod.rs | 10 +- .../src/chainstate/stacks/boot/pox_4_tests.rs | 166 +++++++++++------- stackslib/src/net/api/getstackers.rs | 2 +- 7 files changed, 117 insertions(+), 78 deletions(-) diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 07a2f73c10..0bc68897cb 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -517,7 +517,7 @@ impl PoxConstants { } } - /// The first block of the prepare phase during `reward_cycle`. This is the prepare phase _for the next cycle_. + /// The first block of the prepare phase during `reward_cycle`. This is the prepare phase _for the next cycle_. pub fn prepare_phase_start(&self, first_block_height: u64, reward_cycle: u64) -> u64 { let reward_cycle_start = self.reward_cycle_to_block_height(first_block_height, reward_cycle); diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 909ea46b9f..3cf13a8a55 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3908,11 +3908,11 @@ impl<'a> SortitionDBConn<'a> { tip: &SortitionId, reward_cycle_id: u64, ) -> Result { - let reward_cycle_of_prepare_phase = reward_cycle_id.checked_sub(1).ok_or_else(|| db_error::Other("No prepare phase exists for cycle 0".into()))?; - let prepare_phase_start = pox_constants.prepare_phase_start( - first_block_height, - reward_cycle_of_prepare_phase, - ); + let reward_cycle_of_prepare_phase = reward_cycle_id + .checked_sub(1) + .ok_or_else(|| db_error::Other("No prepare phase exists for cycle 0".into()))?; + let prepare_phase_start = + pox_constants.prepare_phase_start(first_block_height, reward_cycle_of_prepare_phase); let first_sortition = get_ancestor_sort_id(self, prepare_phase_start, tip)?.ok_or_else(|| { diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 60d8699686..72e44f981c 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -299,7 +299,6 @@ pub trait RewardSetProvider { &self, chainstate: &mut StacksChainState, cycle: u64, - burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result; @@ -374,11 +373,10 @@ impl<'a, T: BlockEventDispatcher> RewardSetProvider for OnChainRewardSetProvider &self, chainstate: &mut StacksChainState, reward_cycle: u64, - burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result { - self.read_reward_set_nakamoto(chainstate, reward_cycle, burnchain, sortdb, block_id, false) + self.read_reward_set_nakamoto(chainstate, reward_cycle, sortdb, block_id, false) } } diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 81167c6462..50127af176 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -522,7 +522,6 @@ impl RewardSetProvider for StubbedRewardSetProvider { &self, chainstate: &mut StacksChainState, cycle: u64, - burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result { diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 31549d22b0..de884a8d9c 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -90,7 +90,6 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { &self, chainstate: &mut StacksChainState, cycle: u64, - burnchain: &Burnchain, sortdb: &SortitionDB, block_id: &StacksBlockId, debug_log: bool, @@ -547,13 +546,8 @@ pub fn load_nakamoto_reward_set( "cycle_start_height" => %cycle_start_height, "burnchain_height" => %anchor_block_sn.block_height); - let reward_set = provider.get_reward_set_nakamoto( - chain_state, - reward_cycle, - burnchain, - sort_db, - &block_id, - )?; + let reward_set = + provider.get_reward_set_nakamoto(chain_state, reward_cycle, sort_db, &block_id)?; debug!( "Stacks anchor block (ch {}) {} cycle {} is processed", &anchor_block_header.consensus_hash, &block_id, reward_cycle; diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index f879479d20..f7f0f21116 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -7210,30 +7210,45 @@ fn test_scenario_one(use_nakamoto: bool) { next_reward_cycle, ); bob.nonce += 1; - let txs = vec![alice_vote, bob_vote]; + let mut txs = vec![alice_vote, bob_vote]; + + // Also vote for aggregate key with default test signer if in Nakamoto: + if let Some(test_signers) = test_signers.clone() { + let tester_key = test_signers.signer_keys[0]; + let tester_addr = key_to_stacks_addr(&tester_key); + let tester_index = get_signer_index( + &mut peer, + latest_block, + tester_addr.clone(), + next_reward_cycle, + ); + let tester_vote = make_signers_vote_for_aggregate_public_key( + &tester_key, + 1, // only tx is a stack-stx + tester_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + txs.push(tester_vote); + } - let target_reward_cycle = next_reward_cycle; + let target_reward_cycle = next_reward_cycle + 1; // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) let mut target_height = peer .config .burnchain .reward_cycle_to_block_height(target_reward_cycle as u64); - info!("Submitting block with vote transactions"); + info!( + "Submitting block with vote transactions and advancing to reward cycle {} at block {}", + target_reward_cycle, target_height + ); let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &txs, &mut peer_nonce, - target_height + 1, - &mut test_signers, - ); - info!("Submitting empty block."); - let (latest_block, _tx_block, _receipts) = advance_to_block_height( - &mut peer, - &observer, - &vec![], - &mut peer_nonce, - target_height + 2, + target_height, &mut test_signers, ); @@ -7250,7 +7265,7 @@ fn test_scenario_one(use_nakamoto: bool) { &alice.pox_address, lock_period, &alice.public_key, - 161, + target_height, Some(alice_signature.clone()), u128::MAX, 1, @@ -7264,7 +7279,7 @@ fn test_scenario_one(use_nakamoto: bool) { &bob.pox_address, lock_period, &bob.public_key, - 161, + target_height, None, u128::MAX, 3, @@ -7303,10 +7318,10 @@ fn test_scenario_one(use_nakamoto: bool) { assert_eq!(bob_tx_result, Value::Int(19)); } -#[apply(nakamoto_cases)] // In this test two solo service signers, Alice & Bob, provide auth // for Carl & Dave, solo stackers. Alice provides a signature for Carl, // Bob uses 'set-signer-key...' for Dave. +#[apply(nakamoto_cases)] fn test_scenario_two(use_nakamoto: bool) { // Alice service signer setup let mut alice = StackerSignerInfo::new(); @@ -7341,6 +7356,13 @@ fn test_scenario_two(use_nakamoto: bool) { use_nakamoto, ); + // Add to test signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers + .signer_keys + .extend(vec![alice.private_key.clone(), bob.private_key.clone()]); + } + // Alice Signature For Carl let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; let lock_period = 1; @@ -7705,6 +7727,15 @@ fn test_scenario_three(use_nakamoto: bool) { use_nakamoto, ); + // Add to test signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers.signer_keys.extend(vec![ + alice.private_key.clone(), + bob.private_key.clone(), + carl.private_key.clone(), + ]); + } + let lock_period = 2; let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; let alice_signature_for_alice_err = make_signer_key_signature( @@ -7945,7 +7976,7 @@ fn test_scenario_three(use_nakamoto: bool) { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block, _receipts) = advance_to_block_height( + let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, &observer, &txs, @@ -7956,8 +7987,7 @@ fn test_scenario_three(use_nakamoto: bool) { // Start of test checks // 1. Check that Alice can't stack with an lock_period different than signature - let alice_stack_tx_err = tx_block - .receipts + let alice_stack_tx_err = receipts .get(1) .unwrap() .result @@ -7967,8 +7997,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(alice_stack_tx_err, Value::Int(35)); // 2. Check that Alice can solo stack-sign - let alice_stack_tx_ok = tx_block - .receipts + let alice_stack_tx_ok = receipts .get(2) .unwrap() .result @@ -7997,8 +8026,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(signer_key_expected, signer_key_actual); // 3. Check that Bob can't stack with a signature that points to a reward cycle in the past - let bob_stack_tx_err = tx_block - .receipts + let bob_stack_tx_err = receipts .get(3) .unwrap() .result @@ -8008,8 +8036,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(bob_stack_tx_err, Value::Int(35)); // 4. Check that Bob can solo stack-sign - let bob_stack_tx_ok = tx_block - .receipts + let bob_stack_tx_ok = receipts .get(4) .unwrap() .result @@ -8030,8 +8057,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(signer_key_actual, signer_key_actual); // 5. Check that David can't delegate-stack-stx Eve if delegation expires during lock period - let eve_delegate_stx_to_david_err = tx_block - .receipts + let eve_delegate_stx_to_david_err = receipts .get(9) .unwrap() .result @@ -8041,8 +8067,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(eve_delegate_stx_to_david_err, Value::Int(21)); // 6. Check that Frank is correctly delegated to David - let frank_delegate_stx_to_david_tx = tx_block - .receipts + let frank_delegate_stx_to_david_tx = receipts .get(10) .unwrap() .result @@ -8071,8 +8096,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(stacker_expected, stacker_actual); // 7. Check that Grace is correctly delegated to David - let grace_delegate_stx_to_david_tx = tx_block - .receipts + let grace_delegate_stx_to_david_tx = receipts .get(11) .unwrap() .result @@ -8101,8 +8125,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(stacker_expected, stacker_actual); // 8. Check that Alice can't delegate-stack if already stacking - let alice_delegate_stx_to_david_err = tx_block - .receipts + let alice_delegate_stx_to_david_err = receipts .get(12) .unwrap() .result @@ -8112,8 +8135,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(alice_delegate_stx_to_david_err, Value::Int(3)); // 9. Check that David can't aggregate-commit-indexed if pointing to a reward cycle in the future - let david_aggregate_commit_indexed_err = tx_block - .receipts + let david_aggregate_commit_indexed_err = receipts .get(13) .unwrap() .result @@ -8123,8 +8145,7 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(david_aggregate_commit_indexed_err, Value::Int(35)); // 10. Check that David can aggregate-commit-indexed if using the incorrect signature topic - let david_aggregate_commit_indexed_err = tx_block - .receipts + let david_aggregate_commit_indexed_err = receipts .get(14) .unwrap() .result @@ -8133,16 +8154,17 @@ fn test_scenario_three(use_nakamoto: bool) { .unwrap(); assert_eq!(david_aggregate_commit_indexed_err, Value::Int(35)); + let david_index = if use_nakamoto { 3 } else { 2 }; + // 11. Check that David can aggregate-commit-indexed successfully, checking stacking index = 2 - let david_aggregate_commit_indexed_ok = tx_block - .receipts + let david_aggregate_commit_indexed_ok = receipts .get(15) .unwrap() .result .clone() .expect_result_ok() .unwrap(); - assert_eq!(david_aggregate_commit_indexed_ok, Value::UInt(2)); + assert_eq!(david_aggregate_commit_indexed_ok, Value::UInt(david_index)); } #[apply(nakamoto_cases)] @@ -8177,6 +8199,13 @@ fn test_scenario_four(use_nakamoto: bool) { use_nakamoto, ); + // Add to test signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers + .signer_keys + .extend(vec![alice.private_key.clone(), bob.private_key.clone()]); + } + // Initial Alice Signature let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; let lock_period = 2; @@ -8193,17 +8222,21 @@ fn test_scenario_four(use_nakamoto: bool) { let alice_signature_extend_err = make_signer_key_signature( &bob.pox_address, &bob.private_key, - next_reward_cycle.wrapping_add(1), + next_reward_cycle, &Pox4SignatureTopic::StackExtend, lock_period, u128::MAX, 1, ); + info!( + "Generating stack-extend signature for cycle {}", + next_reward_cycle + ); // Extend Alice Signature Expected let alice_signature_extend = make_signer_key_signature( &alice.pox_address, &alice.private_key, - next_reward_cycle.wrapping_add(1), + next_reward_cycle, &Pox4SignatureTopic::StackExtend, lock_period, u128::MAX, @@ -8322,19 +8355,40 @@ fn test_scenario_four(use_nakamoto: bool) { next_reward_cycle, ); bob.nonce += 1; - let txs = vec![ + let mut txs = vec![ alice_vote_err.clone(), alice_vote_expected.clone(), bob_vote_expected.clone(), ]; + // Also vote for aggregate key with default test signer if in Nakamoto: + if let Some(test_signers) = test_signers.clone() { + let tester_key = test_signers.signer_keys[0]; + let tester_addr = key_to_stacks_addr(&tester_key); + let tester_index = get_signer_index( + &mut peer, + latest_block, + tester_addr.clone(), + next_reward_cycle, + ); + let tester_vote = make_signers_vote_for_aggregate_public_key( + &tester_key, + 1, // only tx is a stack-stx + tester_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + txs.push(tester_vote); + } + // Commit vote txs & move to the prepare phase of reward cycle 7 (block 155) let target_height = peer .config .burnchain - .reward_cycle_to_block_height(7 as u64) - .wrapping_add(15); - let (latest_block, tx_block, _receipts) = advance_to_block_height( + .reward_cycle_to_block_height(next_reward_cycle as u64 + 1) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64); + let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, &observer, &txs, @@ -8344,8 +8398,7 @@ fn test_scenario_four(use_nakamoto: bool) { ); // Check Alice's err vote (err 10 - INVALID_SIGNER_INDEX) - let alice_err_vote = tx_block - .receipts + let alice_err_vote = receipts .get(1) .unwrap() .result @@ -8355,8 +8408,7 @@ fn test_scenario_four(use_nakamoto: bool) { assert_eq!(alice_err_vote, Value::UInt(10)); // Check Alice's expected vote - let alice_expected_vote = tx_block - .receipts + let alice_expected_vote = receipts .get(2) .unwrap() .result @@ -8366,8 +8418,7 @@ fn test_scenario_four(use_nakamoto: bool) { assert_eq!(alice_expected_vote, Value::Bool(true)); // Check Bob's expected vote - let bob_expected_vote = tx_block - .receipts + let bob_expected_vote = receipts .get(3) .unwrap() .result @@ -8424,7 +8475,7 @@ fn test_scenario_four(use_nakamoto: bool) { alice_vote_expected_err.clone(), ]; let target_height = target_height.wrapping_add(1); - let (latest_block, tx_block, _receipts) = advance_to_block_height( + let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, &observer, &txs, @@ -8434,8 +8485,7 @@ fn test_scenario_four(use_nakamoto: bool) { ); // Check Alice's err stack-extend tx (err 35 - INVALID_SIGNATURE_PUBKEY) - let alice_err_extend = tx_block - .receipts + let alice_err_extend = receipts .get(1) .unwrap() .result @@ -8445,8 +8495,7 @@ fn test_scenario_four(use_nakamoto: bool) { assert_eq!(alice_err_extend, Value::Int(35)); // Check Alice's stack-extend tx - let alice_extend_receipt = tx_block - .receipts + let alice_extend_receipt = receipts .get(2) .unwrap() .result @@ -8455,8 +8504,7 @@ fn test_scenario_four(use_nakamoto: bool) { .unwrap(); // Check Alice's expected err vote (err 14 - DUPLICATE_AGGREGATE_PUBLIC_KEY) - let alice_expected_vote_err = tx_block - .receipts + let alice_expected_vote_err = receipts .get(3) .unwrap() .result diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 0b494d19a0..1ee61e853a 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -106,7 +106,7 @@ impl GetStackersResponse { let provider = OnChainRewardSetProvider::new(); let stacker_set = provider - .read_reward_set_nakamoto(chainstate, cycle_number, burnchain, sortdb, tip, true) + .read_reward_set_nakamoto(chainstate, cycle_number, sortdb, tip, true) .map_err(GetStackersErrors::NotAvailableYet)?; Ok(Self { stacker_set }) From 993d55b2d97044018f9e01771cf415860c1a7f85 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 09:54:32 -0400 Subject: [PATCH 0784/1400] WIP: missing stackerdb messages Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 4 ++- stackslib/src/net/stackerdb/mod.rs | 2 +- testnet/stacks-node/src/neon_node.rs | 32 ++++++++++----------- testnet/stacks-node/src/tests/signer/v0.rs | 33 ++++++++++++---------- 4 files changed, 37 insertions(+), 34 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 117a8c4912..f51d7965de 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -88,7 +88,9 @@ MinerSlotID { /// Block proposal from the miner BlockProposal = 0, /// Block pushed from the miner - BlockPushed = 1 + BlockPushed = 1, + /// Mock Miner Message from the miner + MockMinerMessage = 2 }); impl MessageSlotIDTrait for MessageSlotID { diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 847363b2e3..bfbb6e0a10 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -152,7 +152,7 @@ pub const STACKERDB_MAX_PAGE_COUNT: u32 = 2; pub const STACKERDB_SLOTS_FUNCTION: &str = "stackerdb-get-signer-slots"; pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; -pub const MINER_SLOT_COUNT: u32 = 2; +pub const MINER_SLOT_COUNT: u32 = 3; /// Final result of synchronizing state with a remote set of DB replicas #[derive(Clone)] diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 43eb114414..19d8bb966f 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2288,11 +2288,8 @@ impl BlockMinerThread { let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); let mut miners_stackerdb = StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); - let slot_id = MinerSlotID::BlockProposal.to_u8().into(); - if let Ok(messages) = - miners_stackerdb.get_latest_chunks(&[slot_id, slot_id * MINER_SLOT_COUNT]) - { - debug!("Miner got messages: {:?}", messages.len()); + let miner_slot_ids: Vec<_> = (0..MINER_SLOT_COUNT * 2).collect(); + if let Ok(messages) = miners_stackerdb.get_latest_chunks(&miner_slot_ids) { for message in messages { if let Some(message) = message { if message.is_empty() { @@ -2303,7 +2300,7 @@ impl BlockMinerThread { else { continue; }; - if miner_message.peer_info.burn_block_height == self.burn_block.block_height { + if miner_message.tenure_burn_block_height == self.burn_block.block_height { debug!( "Already sent mock miner message for tenure burn block height {:?}", self.burn_block.block_height @@ -2368,15 +2365,6 @@ impl BlockMinerThread { server_version, }; - info!("Sending mock miner message in response to mock signatures for burn block {:?}", &self.burn_block.block_height; - "stacks_tip_consensus_hash" => ?peer_info.stacks_tip_consensus_hash.clone(), - "stacks_tip" => ?peer_info.stacks_tip.clone(), - "peer_burn_block_height" => peer_info.burn_block_height, - "pox_consensus" => ?peer_info.pox_consensus.clone(), - "server_version" => peer_info.server_version.clone(), - "chain_id" => self.config.burnchain.chain_id, - "num_mock_signatures" => mock_signatures.len(), - ); let message = MockMinerMessage { peer_info, chain_id: self.config.burnchain.chain_id, @@ -2384,13 +2372,23 @@ impl BlockMinerThread { tenure_burn_block_height: self.burn_block.block_height, }; + info!("Sending mock miner message in response to mock signatures for burn block {:?}", message.tenure_burn_block_height; + "stacks_tip_consensus_hash" => ?message.peer_info.stacks_tip_consensus_hash.clone(), + "stacks_tip" => ?message.peer_info.stacks_tip.clone(), + "peer_burn_block_height" => message.peer_info.burn_block_height, + "pox_consensus" => ?message.peer_info.pox_consensus.clone(), + "server_version" => message.peer_info.server_version.clone(), + "chain_id" => message.chain_id, + "num_mock_signatures" => message.mock_signatures.len(), + ); + if let Err(e) = SignCoordinator::send_miners_message( &miner_config.mining_key.expect("BUG: no mining key"), &burn_db, &self.burn_block, &stackerdbs, - SignerMessage::MockMinerMessage(message), - MinerSlotID::BlockProposal, // We are sending a mock miner message NOT a block proposal, but since we do not propose blocks in epoch 2.5, it is fine + SignerMessage::MockMinerMessage(message.clone()), + MinerSlotID::MockMinerMessage, self.config.is_mainnet(), &mut miners_stackerdb, &self.burn_block.consensus_hash, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9034a8a523..0f94898244 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -23,7 +23,7 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use clarity::vm::StacksEpoch; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, + BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use rand::RngCore; @@ -2408,6 +2408,7 @@ fn mock_miner_message_epoch_25() { .unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; let epoch_3_start_height = epoch_3.start_height; + debug!("Epoch 3.0 starts at height {}", epoch_3_start_height); signer_test.boot_to_epoch_25_reward_cycle(); @@ -2447,20 +2448,22 @@ fn mock_miner_message_epoch_25() { }) .flatten() { - if chunk.slot_id == MinerSlotID::BlockProposal.to_u8() as u32 { - if chunk.data.is_empty() { - continue; - } - let SignerMessage::MockMinerMessage(message) = - SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize MockMinerMessage") - else { - continue; - }; - if message.peer_info.burn_block_height == current_burn_block_height { - mock_miner_message = Some(message); - break; - } + if chunk.data.is_empty() { + continue; + } + let SignerMessage::MockMinerMessage(message) = + SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage") + else { + continue; + }; + if message.tenure_burn_block_height == current_burn_block_height { + mock_miner_message = Some(message); + break; + } else { + info!( + "Received MockMinerMessage for burn block height {} but expected {current_burn_block_height}", message.tenure_burn_block_height + ); } } assert!( From d93d07da0068e92da97226ae0a52d086be0d1ac8 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 10:31:40 -0400 Subject: [PATCH 0785/1400] WIP: use latest election winner to send mock miner messages Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 19d8bb966f..b88ec5ffe2 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2381,6 +2381,15 @@ impl BlockMinerThread { "chain_id" => message.chain_id, "num_mock_signatures" => message.mock_signatures.len(), ); + let (_, miners_info) = + NakamotoChainState::make_miners_stackerdb_config(&burn_db, &self.burn_block)?; + + // find out which slot we're in. If we are not the latest sortition winner, we should not be sending anymore messages anyway + let idx = miners_info.get_latest_winner_index(); + let sortitions = miners_info.get_sortitions(); + let election_sortition = *sortitions + .get(idx as usize) + .expect("FATAL: latest winner index out of bounds"); if let Err(e) = SignCoordinator::send_miners_message( &miner_config.mining_key.expect("BUG: no mining key"), @@ -2391,7 +2400,7 @@ impl BlockMinerThread { MinerSlotID::MockMinerMessage, self.config.is_mainnet(), &mut miners_stackerdb, - &self.burn_block.consensus_hash, + &election_sortition, ) { warn!("Failed to send mock miner message: {:?}", &e); } From b597a119cab6c014e59cf4abd80d707ad5eff269 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 11:42:07 -0400 Subject: [PATCH 0786/1400] WIP: stuck at 250 Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 0f94898244..2fe0cf934c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2422,10 +2422,6 @@ fn mock_miner_message_epoch_25() { .get_headers_height() < epoch_3_start_height { - let current_burn_block_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); let mock_poll_time = Instant::now(); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, @@ -2433,6 +2429,10 @@ fn mock_miner_message_epoch_25() { || Ok(true), ) .unwrap(); + let current_burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); while mock_miner_message.is_none() { From 37a2533fcae14f2d61655940925e1ae2f60f5c2b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 12:32:58 -0400 Subject: [PATCH 0787/1400] WIP: need to fix stacks tip consensus hash and stacks tip Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2fe0cf934c..74790bd15d 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2407,8 +2407,7 @@ fn mock_miner_message_epoch_25() { .clone() .unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - let epoch_3_start_height = epoch_3.start_height; - debug!("Epoch 3.0 starts at height {}", epoch_3_start_height); + let epoch_3_boundary = epoch_3.start_height - 1; signer_test.boot_to_epoch_25_reward_cycle(); @@ -2416,11 +2415,12 @@ fn mock_miner_message_epoch_25() { let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); let main_poll_time = Instant::now(); let mut mock_miner_message = None; + // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. while signer_test .running_nodes .btc_regtest_controller .get_headers_height() - < epoch_3_start_height + < epoch_3_boundary { let mock_poll_time = Instant::now(); next_block_and( From be1f6ed8c2d1e5cc905d5e6fe76ec71db0794bc3 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 13:44:59 -0400 Subject: [PATCH 0788/1400] Fix consensus hash and stacks tip in MockMinerMessage Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 6 ---- testnet/stacks-node/src/neon_node.rs | 36 +++++++++++++--------- testnet/stacks-node/src/tests/signer/v0.rs | 6 +--- 3 files changed, 23 insertions(+), 25 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index f51d7965de..40a679d0f8 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -459,8 +459,6 @@ impl StacksMessageCodec for MockSignature { pub struct MockMinerMessage { /// The view of the stacks node peer information at the time of the mock signature pub peer_info: PeerInfo, - /// The burn block height of the miner's tenure - pub tenure_burn_block_height: u64, /// The chain id for the mock signature pub chain_id: u32, /// The mock signatures that the miner received @@ -470,7 +468,6 @@ pub struct MockMinerMessage { impl StacksMessageCodec for MockMinerMessage { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { self.peer_info.consensus_serialize(fd)?; - write_next(fd, &self.tenure_burn_block_height)?; write_next(fd, &self.chain_id)?; write_next(fd, &self.mock_signatures)?; Ok(()) @@ -478,12 +475,10 @@ impl StacksMessageCodec for MockMinerMessage { fn consensus_deserialize(fd: &mut R) -> Result { let peer_info = PeerInfo::consensus_deserialize(fd)?; - let tenure_burn_block_height = read_next::(fd)?; let chain_id = read_next::(fd)?; let mock_signatures = read_next::, _>(fd)?; Ok(Self { peer_info, - tenure_burn_block_height, chain_id, mock_signatures, }) @@ -1002,7 +997,6 @@ mod test { }; let mock_miner_message = MockMinerMessage { peer_info: random_peer_data(), - tenure_burn_block_height: thread_rng().next_u64(), chain_id: thread_rng().gen_range(0..=1), mock_signatures: vec![mock_signature_1, mock_signature_2], }; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index b88ec5ffe2..c4880b3980 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -194,6 +194,7 @@ use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs, MINER_S use stacks::net::{ Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, }; +use stacks::types::StacksEpoch; use stacks::util_lib::strings::{UrlString, VecDisplay}; use stacks::{monitoring, version_string}; use stacks_common::codec::StacksMessageCodec; @@ -2273,12 +2274,20 @@ impl BlockMinerThread { let burn_db_path = self.config.get_burn_db_file_path(); let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) .expect("FATAL: could not open sortition DB"); - let p2p_net = StacksNode::setup_peer_network( - &self.config, - &self.config.atlas, - self.burnchain.clone(), - ); - let epoch_id = p2p_net.get_current_epoch().epoch_id; + let epochs = SortitionDB::get_stacks_epochs(burn_db.conn()) + .expect("Error while loading stacks epochs"); + let epoch_index = StacksEpoch::find_epoch(&epochs, self.burn_block.block_height) + .unwrap_or_else(|| { + panic!( + "BUG: block {} is not in a known epoch", + self.burn_block.block_height + ) + }); + let epoch_id = epochs + .get(epoch_index) + .expect("BUG: no epoch at found index") + .epoch_id; + if epoch_id != StacksEpochId::Epoch25 { debug!("Mock miner messaging is disabled for non-epoch 2.5 blocks."; "epoch_id" => epoch_id.to_string() @@ -2300,7 +2309,7 @@ impl BlockMinerThread { else { continue; }; - if miner_message.tenure_burn_block_height == self.burn_block.block_height { + if miner_message.peer_info.burn_block_height == self.burn_block.block_height { debug!( "Already sent mock miner message for tenure burn block height {:?}", self.burn_block.block_height @@ -2350,11 +2359,11 @@ impl BlockMinerThread { .or(option_env!("CARGO_PKG_VERSION")) .unwrap_or("0.0.0.0"), ); - let stacks_tip_height = p2p_net.stacks_tip.height; - let stacks_tip = p2p_net.stacks_tip.block_hash.clone(); - let stacks_tip_consensus_hash = p2p_net.stacks_tip.consensus_hash.clone(); - let pox_consensus = p2p_net.burnchain_tip.consensus_hash.clone(); - let burn_block_height = p2p_net.chain_view.burn_block_height; + let stacks_tip_height = self.burn_block.canonical_stacks_tip_height; + let stacks_tip = self.burn_block.canonical_stacks_tip_hash; + let stacks_tip_consensus_hash = self.burn_block.canonical_stacks_tip_consensus_hash; + let pox_consensus = self.burn_block.consensus_hash; + let burn_block_height = self.burn_block.block_height; let peer_info = PeerInfo { burn_block_height, @@ -2369,10 +2378,9 @@ impl BlockMinerThread { peer_info, chain_id: self.config.burnchain.chain_id, mock_signatures, - tenure_burn_block_height: self.burn_block.block_height, }; - info!("Sending mock miner message in response to mock signatures for burn block {:?}", message.tenure_burn_block_height; + info!("Sending mock miner message in response to mock signatures for burn block {:?}", message.peer_info.burn_block_height; "stacks_tip_consensus_hash" => ?message.peer_info.stacks_tip_consensus_hash.clone(), "stacks_tip" => ?message.peer_info.stacks_tip.clone(), "peer_burn_block_height" => message.peer_info.burn_block_height, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 74790bd15d..631d92c83c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2457,13 +2457,9 @@ fn mock_miner_message_epoch_25() { else { continue; }; - if message.tenure_burn_block_height == current_burn_block_height { + if message.peer_info.burn_block_height == current_burn_block_height { mock_miner_message = Some(message); break; - } else { - info!( - "Received MockMinerMessage for burn block height {} but expected {current_burn_block_height}", message.tenure_burn_block_height - ); } } assert!( From 666119458c246822a7dca276b87fd0ddbb87ac24 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 13:49:49 -0400 Subject: [PATCH 0789/1400] CRC: get sort db from SortitionDB Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 4 +--- stackslib/src/net/stackerdb/mod.rs | 2 +- testnet/stacks-node/src/neon_node.rs | 18 +++--------------- 3 files changed, 5 insertions(+), 19 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 40a679d0f8..779497b196 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -88,9 +88,7 @@ MinerSlotID { /// Block proposal from the miner BlockProposal = 0, /// Block pushed from the miner - BlockPushed = 1, - /// Mock Miner Message from the miner - MockMinerMessage = 2 + BlockPushed = 1 }); impl MessageSlotIDTrait for MessageSlotID { diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index bfbb6e0a10..847363b2e3 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -152,7 +152,7 @@ pub const STACKERDB_MAX_PAGE_COUNT: u32 = 2; pub const STACKERDB_SLOTS_FUNCTION: &str = "stackerdb-get-signer-slots"; pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; -pub const MINER_SLOT_COUNT: u32 = 3; +pub const MINER_SLOT_COUNT: u32 = 2; /// Final result of synchronizing state with a remote set of DB replicas #[derive(Clone)] diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index c4880b3980..818a2cf00b 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -194,7 +194,6 @@ use stacks::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBs, MINER_S use stacks::net::{ Error as NetError, NetworkResult, PeerNetworkComms, RPCHandlerArgs, ServiceFlags, }; -use stacks::types::StacksEpoch; use stacks::util_lib::strings::{UrlString, VecDisplay}; use stacks::{monitoring, version_string}; use stacks_common::codec::StacksMessageCodec; @@ -2274,20 +2273,9 @@ impl BlockMinerThread { let burn_db_path = self.config.get_burn_db_file_path(); let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) .expect("FATAL: could not open sortition DB"); - let epochs = SortitionDB::get_stacks_epochs(burn_db.conn()) - .expect("Error while loading stacks epochs"); - let epoch_index = StacksEpoch::find_epoch(&epochs, self.burn_block.block_height) - .unwrap_or_else(|| { - panic!( - "BUG: block {} is not in a known epoch", - self.burn_block.block_height - ) - }); - let epoch_id = epochs - .get(epoch_index) - .expect("BUG: no epoch at found index") + let epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height)? + .expect("FATAL: no epoch defined") .epoch_id; - if epoch_id != StacksEpochId::Epoch25 { debug!("Mock miner messaging is disabled for non-epoch 2.5 blocks."; "epoch_id" => epoch_id.to_string() @@ -2405,7 +2393,7 @@ impl BlockMinerThread { &self.burn_block, &stackerdbs, SignerMessage::MockMinerMessage(message.clone()), - MinerSlotID::MockMinerMessage, + MinerSlotID::BlockProposal, // There is no specific slot for mock miner messages self.config.is_mainnet(), &mut miners_stackerdb, &election_sortition, From 22ea25684d50a45bda94a91c23b73f99edf88572 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 16:32:33 -0400 Subject: [PATCH 0790/1400] CRC: simulate block proposal, signatures, and appending a block in mock signing Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 315 ++++++++++-------- stacks-signer/src/v0/signer.rs | 61 ++-- .../src/nakamoto_node/sign_coordinator.rs | 10 +- testnet/stacks-node/src/neon_node.rs | 246 ++++++++------ testnet/stacks-node/src/tests/signer/v0.rs | 220 ++---------- 5 files changed, 374 insertions(+), 478 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 779497b196..b767431c60 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -77,9 +77,7 @@ define_u8_enum!( /// the contract index in the signers contracts (i.e., X in signers-0-X) MessageSlotID { /// Block Response message from signers - BlockResponse = 1, - /// Mock Signature message from Epoch 2.5 signers - MockSignature = 2 + BlockResponse = 1 }); define_u8_enum!( @@ -115,10 +113,12 @@ SignerMessageTypePrefix { BlockResponse = 1, /// Block Pushed message from miners BlockPushed = 2, - /// Mock Signature message from Epoch 2.5 signers - MockSignature = 3, - /// Mock Pre-Nakamoto message from Epoch 2.5 miners - MockMinerMessage = 4 + /// Mock block proposal message from Epoch 2.5 miners + MockProposal = 3, + /// Mock block signature message from Epoch 2.5 signers + MockSignature = 4, + /// Mock block message from Epoch 2.5 miners + MockBlock = 5 }); #[cfg_attr(test, mutants::skip)] @@ -161,8 +161,9 @@ impl From<&SignerMessage> for SignerMessageTypePrefix { SignerMessage::BlockProposal(_) => SignerMessageTypePrefix::BlockProposal, SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, SignerMessage::BlockPushed(_) => SignerMessageTypePrefix::BlockPushed, + SignerMessage::MockProposal(_) => SignerMessageTypePrefix::MockProposal, SignerMessage::MockSignature(_) => SignerMessageTypePrefix::MockSignature, - SignerMessage::MockMinerMessage(_) => SignerMessageTypePrefix::MockMinerMessage, + SignerMessage::MockBlock(_) => SignerMessageTypePrefix::MockBlock, } } } @@ -179,7 +180,9 @@ pub enum SignerMessage { /// A mock signature from the epoch 2.5 signers MockSignature(MockSignature), /// A mock message from the epoch 2.5 miners - MockMinerMessage(MockMinerMessage), + MockProposal(MockProposal), + /// A mock block from the epoch 2.5 miners + MockBlock(MockBlock), } impl SignerMessage { @@ -189,9 +192,11 @@ impl SignerMessage { #[cfg_attr(test, mutants::skip)] pub fn msg_id(&self) -> Option { match self { - Self::BlockProposal(_) | Self::BlockPushed(_) | Self::MockMinerMessage(_) => None, - Self::BlockResponse(_) => Some(MessageSlotID::BlockResponse), - Self::MockSignature(_) => Some(MessageSlotID::MockSignature), + Self::BlockProposal(_) + | Self::BlockPushed(_) + | Self::MockProposal(_) + | Self::MockBlock(_) => None, + Self::BlockResponse(_) | Self::MockSignature(_) => Some(MessageSlotID::BlockResponse), // Mock signature uses the same slot as block response since its exclusively for epoch 2.5 testing } } } @@ -206,7 +211,8 @@ impl StacksMessageCodec for SignerMessage { SignerMessage::BlockResponse(block_response) => block_response.consensus_serialize(fd), SignerMessage::BlockPushed(block) => block.consensus_serialize(fd), SignerMessage::MockSignature(signature) => signature.consensus_serialize(fd), - SignerMessage::MockMinerMessage(message) => message.consensus_serialize(fd), + SignerMessage::MockProposal(message) => message.consensus_serialize(fd), + SignerMessage::MockBlock(block) => block.consensus_serialize(fd), }?; Ok(()) } @@ -228,13 +234,17 @@ impl StacksMessageCodec for SignerMessage { let block = StacksMessageCodec::consensus_deserialize(fd)?; SignerMessage::BlockPushed(block) } + SignerMessageTypePrefix::MockProposal => { + let message = StacksMessageCodec::consensus_deserialize(fd)?; + SignerMessage::MockProposal(message) + } SignerMessageTypePrefix::MockSignature => { let signature = StacksMessageCodec::consensus_deserialize(fd)?; SignerMessage::MockSignature(signature) } - SignerMessageTypePrefix::MockMinerMessage => { - let message = StacksMessageCodec::consensus_deserialize(fd)?; - SignerMessage::MockMinerMessage(message) + SignerMessageTypePrefix::MockBlock => { + let block = StacksMessageCodec::consensus_deserialize(fd)?; + SignerMessage::MockBlock(block) } }; Ok(message) @@ -305,110 +315,75 @@ impl StacksMessageCodec for PeerInfo { } } -/// A snapshot of the signer view of the stacks node to be used for mock signing. +/// A mock block proposal for Epoch 2.5 mock signing #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct MockSignData { - /// The view of the stacks node peer information at the time of the mock signature +pub struct MockProposal { + /// The view of the stacks node peer information at the time of the mock proposal pub peer_info: PeerInfo, - /// The burn block height of the event that triggered the mock signature - pub event_burn_block_height: u64, - /// The chain id for the mock signature + /// The chain id for the mock proposal pub chain_id: u32, -} - -impl StacksMessageCodec for MockSignData { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.peer_info.consensus_serialize(fd)?; - write_next(fd, &self.event_burn_block_height)?; - write_next(fd, &self.chain_id)?; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let peer_info = PeerInfo::consensus_deserialize(fd)?; - let event_burn_block_height = read_next::(fd)?; - let chain_id = read_next::(fd)?; - Ok(Self { - peer_info, - event_burn_block_height, - chain_id, - }) - } -} - -/// A mock signature for the stacks node to be used for mock signing. -/// This is only used by Epoch 2.5 signers to simulate the signing of a block for every sortition. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct MockSignature { - /// The signature of the mock signature + /// The miner's signature across the peer info signature: MessageSignature, - /// The data that was signed across - pub sign_data: MockSignData, } -impl MockSignature { - /// Create a new mock sign data struct from the provided event burn block height, peer info, chain id, and private key. - /// Note that peer burn block height and event burn block height may not be the same if the peer view is stale. - pub fn new( - event_burn_block_height: u64, - peer_info: PeerInfo, - chain_id: u32, - stacks_private_key: &StacksPrivateKey, - ) -> Self { +impl MockProposal { + /// Create a new mock proposal data struct from the provided peer info, chain id, and private key. + pub fn new(peer_info: PeerInfo, chain_id: u32, stacks_private_key: &StacksPrivateKey) -> Self { let mut sig = Self { signature: MessageSignature::empty(), - sign_data: MockSignData { - peer_info, - event_burn_block_height, - chain_id, - }, + chain_id, + peer_info, }; sig.sign(stacks_private_key) - .expect("Failed to sign MockSignature"); + .expect("Failed to sign MockProposal"); sig } - /// The signature hash for the mock signature - pub fn signature_hash(&self) -> Sha256Sum { - let domain_tuple = - make_structured_data_domain("mock-signer", "1.0.0", self.sign_data.chain_id); + /// The signature hash for the mock proposal + pub fn miner_signature_hash(&self) -> Sha256Sum { + let domain_tuple = make_structured_data_domain("mock-miner", "1.0.0", self.chain_id); let data_tuple = Value::Tuple( TupleData::from_data(vec![ ( "stacks-tip-consensus-hash".into(), - Value::buff_from( - self.sign_data - .peer_info - .stacks_tip_consensus_hash - .as_bytes() - .into(), - ) - .unwrap(), + Value::buff_from(self.peer_info.stacks_tip_consensus_hash.as_bytes().into()) + .unwrap(), ), ( "stacks-tip".into(), - Value::buff_from(self.sign_data.peer_info.stacks_tip.as_bytes().into()) - .unwrap(), + Value::buff_from(self.peer_info.stacks_tip.as_bytes().into()).unwrap(), ), ( "stacks-tip-height".into(), - Value::UInt(self.sign_data.peer_info.stacks_tip_height.into()), + Value::UInt(self.peer_info.stacks_tip_height.into()), ), ( "server-version".into(), - Value::string_ascii_from_bytes( - self.sign_data.peer_info.server_version.clone().into(), - ) - .unwrap(), + Value::string_ascii_from_bytes(self.peer_info.server_version.clone().into()) + .unwrap(), + ), + ( + "pox-consensus".into(), + Value::buff_from(self.peer_info.pox_consensus.as_bytes().into()).unwrap(), ), + ]) + .expect("Error creating signature hash"), + ); + structured_data_message_hash(data_tuple, domain_tuple) + } + + /// The signature hash including the miner's signature. Used by signers. + fn signer_signature_hash(&self) -> Sha256Sum { + let domain_tuple = make_structured_data_domain("mock-signer", "1.0.0", self.chain_id); + let data_tuple = Value::Tuple( + TupleData::from_data(vec![ ( - "event-burn-block-height".into(), - Value::UInt(self.sign_data.event_burn_block_height.into()), + "miner-signature-hash".into(), + Value::buff_from(self.miner_signature_hash().as_bytes().into()).unwrap(), ), ( - "pox-consensus".into(), - Value::buff_from(self.sign_data.peer_info.pox_consensus.as_bytes().into()) - .unwrap(), + "miner-signature".into(), + Value::buff_from(self.signature.as_bytes().into()).unwrap(), ), ]) .expect("Error creating signature hash"), @@ -416,18 +391,79 @@ impl MockSignature { structured_data_message_hash(data_tuple, domain_tuple) } + /// Sign the mock proposal and set the internal signature field + fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { + let signature_hash = self.miner_signature_hash(); + self.signature = private_key.sign(signature_hash.as_bytes())?; + Ok(()) + } + /// Verify the mock proposal against the provided miner public key + pub fn verify(&self, public_key: &StacksPublicKey) -> Result { + if self.signature == MessageSignature::empty() { + return Ok(false); + } + let signature_hash = self.miner_signature_hash(); + public_key + .verify(&signature_hash.0, &self.signature) + .map_err(|e| e.to_string()) + } +} + +impl StacksMessageCodec for MockProposal { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + self.peer_info.consensus_serialize(fd)?; + write_next(fd, &self.chain_id)?; + write_next(fd, &self.signature)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let peer_info = PeerInfo::consensus_deserialize(fd)?; + let chain_id = read_next::(fd)?; + let signature = read_next::(fd)?; + Ok(Self { + peer_info, + chain_id, + signature, + }) + } +} + +/// A mock signature for the stacks node to be used for mock signing. +/// This is only used by Epoch 2.5 signers to simulate the signing of a block for every sortition. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct MockSignature { + /// The signer's signature across the mock proposal + signature: MessageSignature, + /// The mock block proposal that was signed across + pub mock_proposal: MockProposal, +} + +impl MockSignature { + /// Create a new mock signature from the provided proposal and signer private key. + pub fn new(mock_proposal: MockProposal, stacks_private_key: &StacksPrivateKey) -> Self { + let mut sig = Self { + signature: MessageSignature::empty(), + mock_proposal, + }; + sig.sign(stacks_private_key) + .expect("Failed to sign MockSignature"); + sig + } + /// Sign the mock signature and set the internal signature field fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { - let signature_hash = self.signature_hash(); + let signature_hash = self.mock_proposal.signer_signature_hash(); self.signature = private_key.sign(signature_hash.as_bytes())?; Ok(()) } - /// Verify the mock signature against the provided public key + + /// Verify the mock signature against the provided signer public key pub fn verify(&self, public_key: &StacksPublicKey) -> Result { if self.signature == MessageSignature::empty() { return Ok(false); } - let signature_hash = self.signature_hash(); + let signature_hash = self.mock_proposal.signer_signature_hash(); public_key .verify(&signature_hash.0, &self.signature) .map_err(|e| e.to_string()) @@ -437,47 +473,41 @@ impl MockSignature { impl StacksMessageCodec for MockSignature { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &self.signature)?; - self.sign_data.consensus_serialize(fd)?; + self.mock_proposal.consensus_serialize(fd)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { let signature = read_next::(fd)?; - let sign_data = read_next::(fd)?; + let mock_proposal = MockProposal::consensus_deserialize(fd)?; Ok(Self { signature, - sign_data, + mock_proposal, }) } } -/// A mock message for the stacks node to be used for mock mining messages -/// This is only used by Epoch 2.5 miners to simulate miners responding to mock signatures +/// The mock block data for epoch 2.5 miners to broadcast to simulate block signing #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct MockMinerMessage { - /// The view of the stacks node peer information at the time of the mock signature - pub peer_info: PeerInfo, - /// The chain id for the mock signature - pub chain_id: u32, +pub struct MockBlock { + /// The mock proposal that was signed across + pub mock_proposal: MockProposal, /// The mock signatures that the miner received pub mock_signatures: Vec, } -impl StacksMessageCodec for MockMinerMessage { +impl StacksMessageCodec for MockBlock { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.peer_info.consensus_serialize(fd)?; - write_next(fd, &self.chain_id)?; + self.mock_proposal.consensus_serialize(fd)?; write_next(fd, &self.mock_signatures)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { - let peer_info = PeerInfo::consensus_deserialize(fd)?; - let chain_id = read_next::(fd)?; + let mock_proposal = MockProposal::consensus_deserialize(fd)?; let mock_signatures = read_next::, _>(fd)?; Ok(Self { - peer_info, - chain_id, + mock_proposal, mock_signatures, }) } @@ -781,6 +811,7 @@ mod test { use clarity::types::PrivateKey; use clarity::util::hash::MerkleTree; use clarity::util::secp256k1::MessageSignature; + use rand::rngs::mock; use rand::{thread_rng, Rng, RngCore}; use rand_core::OsRng; use stacks_common::bitvec::BitVec; @@ -910,7 +941,7 @@ mod test { pox_consensus: ConsensusHash([pox_consensus_byte; 20]), } } - fn random_mock_sign_data() -> MockSignData { + fn random_mock_proposal() -> MockProposal { let chain_byte: u8 = thread_rng().gen_range(0..=1); let chain_id = if chain_byte == 1 { CHAIN_ID_TESTNET @@ -918,25 +949,23 @@ mod test { CHAIN_ID_MAINNET }; let peer_info = random_peer_data(); - MockSignData { + MockProposal { peer_info, - event_burn_block_height: thread_rng().next_u64(), chain_id, + signature: MessageSignature::empty(), } } #[test] - fn verify_sign_mock_signature() { + fn verify_sign_mock_proposal() { let private_key = StacksPrivateKey::new(); let public_key = StacksPublicKey::from_private(&private_key); let bad_private_key = StacksPrivateKey::new(); let bad_public_key = StacksPublicKey::from_private(&bad_private_key); - let mut mock_signature = MockSignature { - signature: MessageSignature::empty(), - sign_data: random_mock_sign_data(), - }; + let mut mock_signature = random_mock_proposal(); + mock_signature.sign(&private_key).unwrap(); assert!(!mock_signature .verify(&public_key) .expect("Failed to verify MockSignature")); @@ -962,12 +991,25 @@ mod test { assert_eq!(peer_data, deserialized_data); } + #[test] + fn serde_mock_proposal() { + let mut mock_signature = random_mock_proposal(); + mock_signature.sign(&StacksPrivateKey::new()).unwrap(); + let serialized_signature = mock_signature.serialize_to_vec(); + let deserialized_signature = read_next::(&mut &serialized_signature[..]) + .expect("Failed to deserialize MockSignature"); + assert_eq!(mock_signature, deserialized_signature); + } + #[test] fn serde_mock_signature() { - let mock_signature = MockSignature { + let mut mock_signature = MockSignature { signature: MessageSignature::empty(), - sign_data: random_mock_sign_data(), + mock_proposal: random_mock_proposal(), }; + mock_signature + .sign(&StacksPrivateKey::new()) + .expect("Failed to sign MockSignature"); let serialized_signature = mock_signature.serialize_to_vec(); let deserialized_signature = read_next::(&mut &serialized_signature[..]) .expect("Failed to deserialize MockSignature"); @@ -975,32 +1017,17 @@ mod test { } #[test] - fn serde_sign_data() { - let sign_data = random_mock_sign_data(); - let serialized_data = sign_data.serialize_to_vec(); - let deserialized_data = read_next::(&mut &serialized_data[..]) - .expect("Failed to deserialize MockSignData"); - assert_eq!(sign_data, deserialized_data); - } - - #[test] - fn serde_mock_miner_message() { - let mock_signature_1 = MockSignature { - signature: MessageSignature::empty(), - sign_data: random_mock_sign_data(), - }; - let mock_signature_2 = MockSignature { - signature: MessageSignature::empty(), - sign_data: random_mock_sign_data(), - }; - let mock_miner_message = MockMinerMessage { - peer_info: random_peer_data(), - chain_id: thread_rng().gen_range(0..=1), + fn serde_mock_block() { + let mock_proposal = random_mock_proposal(); + let mock_signature_1 = MockSignature::new(mock_proposal.clone(), &StacksPrivateKey::new()); + let mock_signature_2 = MockSignature::new(mock_proposal.clone(), &StacksPrivateKey::new()); + let mock_block = MockBlock { + mock_proposal, mock_signatures: vec![mock_signature_1, mock_signature_2], }; - let serialized_data = mock_miner_message.serialize_to_vec(); - let deserialized_data = read_next::(&mut &serialized_data[..]) + let serialized_data = mock_block.serialize_to_vec(); + let deserialized_data = read_next::(&mut &serialized_data[..]) .expect("Failed to deserialize MockSignData"); - assert_eq!(mock_miner_message, deserialized_data); + assert_eq!(mock_block, deserialized_data); } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index c32af06f3f..64622646e3 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -16,13 +16,12 @@ use std::fmt::Debug; use std::sync::mpsc::Sender; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; -use clarity::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; use clarity::util::secp256k1::Secp256k1PublicKey; use libsigner::v0::messages::{ - BlockResponse, MessageSlotID, MockSignature, RejectCode, SignerMessage, + BlockResponse, MessageSlotID, MockProposal, MockSignature, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; @@ -140,6 +139,25 @@ impl SignerTrait for Signer { "push_result" => ?block_push_result, ); } + SignerMessage::MockProposal(mock_proposal) => { + let epoch = match stacks_client.get_node_epoch() { + Ok(epoch) => epoch, + Err(e) => { + warn!("{self}: Failed to determine node epoch. Cannot mock sign: {e}"); + return; + } + }; + debug!("{self}: received a mock block proposal."; + "current_reward_cycle" => current_reward_cycle, + "epoch" => ?epoch + ); + if epoch == StacksEpochId::Epoch25 + && self.reward_cycle == current_reward_cycle + { + // We are in epoch 2.5, so we should mock mine to prove we are still alive. + self.mock_sign(mock_proposal.clone()); + } + } _ => {} } } @@ -165,22 +183,6 @@ impl SignerTrait for Signer { ); } *sortition_state = None; - let epoch = match stacks_client.get_node_epoch() { - Ok(epoch) => epoch, - Err(e) => { - warn!("{self}: Failed to determine node epoch. Cannot mock sign: {e}"); - return; - } - }; - debug!("{self}: Epoch 2.5 signer received a new burn block event."; - "burn_height" => burn_height, - "current_reward_cycle" => current_reward_cycle, - "epoch" => ?epoch - ); - if epoch == StacksEpochId::Epoch25 && self.reward_cycle == current_reward_cycle { - // We are in epoch 2.5, so we should mock mine to prove we are still alive. - self.mock_sign(*burn_height, stacks_client); - } } } } @@ -482,26 +484,9 @@ impl Signer { } /// Send a mock signature to stackerdb to prove we are still alive - fn mock_sign(&mut self, burn_block_height: u64, stacks_client: &StacksClient) { - let Ok(peer_info) = stacks_client.get_peer_info() else { - warn!("{self}: Failed to get peer info. Cannot mock sign."); - return; - }; - let chain_id = if self.mainnet { - CHAIN_ID_MAINNET - } else { - CHAIN_ID_TESTNET - }; - info!("Mock signing for burn block {burn_block_height:?}"; - "stacks_tip_consensus_hash" => ?peer_info.stacks_tip_consensus_hash.clone(), - "stacks_tip" => ?peer_info.stacks_tip.clone(), - "peer_burn_block_height" => peer_info.burn_block_height, - "pox_consensus" => ?peer_info.pox_consensus.clone(), - "server_version" => peer_info.server_version.clone(), - "chain_id" => chain_id - ); - let mock_signature = - MockSignature::new(burn_block_height, peer_info, chain_id, &self.private_key); + fn mock_sign(&mut self, mock_proposal: MockProposal) { + info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); + let mock_signature = MockSignature::new(mock_proposal, &self.private_key); let message = SignerMessage::MockSignature(mock_signature); if let Err(e) = self .stackerdb diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b366d93132..b266d700d4 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -770,12 +770,10 @@ impl SignCoordinator { debug!("Received block pushed message. Ignoring."); continue; } - SignerMessageV0::MockSignature(_) => { - debug!("Received mock signature message. Ignoring."); - continue; - } - SignerMessageV0::MockMinerMessage(_) => { - debug!("Received mock miner message. Ignoring."); + SignerMessageV0::MockSignature(_) + | SignerMessageV0::MockProposal(_) + | SignerMessageV0::MockBlock(_) => { + debug!("Received mock message. Ignoring."); continue; } }; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 818a2cf00b..fc5e0d8055 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -144,7 +144,7 @@ use std::io::{Read, Write}; use std::net::SocketAddr; use std::sync::mpsc::{Receiver, TrySendError}; use std::thread::JoinHandle; -use std::time::Duration; +use std::time::{Duration, Instant}; use std::{fs, mem, thread}; use clarity::boot_util::boot_code_id; @@ -152,7 +152,7 @@ use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use libsigner::v0::messages::{ - MessageSlotID, MinerSlotID, MockMinerMessage, PeerInfo, SignerMessage, + MessageSlotID, MinerSlotID, MockBlock, MockProposal, MockSignature, PeerInfo, SignerMessage, }; use libsigner::{SignerSession, StackerDBSession}; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; @@ -2262,26 +2262,79 @@ impl BlockMinerThread { return false; } - /// Read any mock signatures from stackerdb and respond to them - pub fn send_mock_miner_message(&mut self) -> Result<(), ChainstateError> { - let miner_config = self.config.get_miner_config(); - if !miner_config.pre_nakamoto_miner_messaging { - debug!("Pre-Nakamoto mock miner messaging is disabled"); - return Ok(()); + /// Only used in mock signing to generate a peer info view + fn generate_peer_info(&self) -> PeerInfo { + // Create a peer info view of the current state + let server_version = version_string( + "stacks-node", + option_env!("STACKS_NODE_VERSION") + .or(option_env!("CARGO_PKG_VERSION")) + .unwrap_or("0.0.0.0"), + ); + let stacks_tip_height = self.burn_block.canonical_stacks_tip_height; + let stacks_tip = self.burn_block.canonical_stacks_tip_hash; + let stacks_tip_consensus_hash = self.burn_block.canonical_stacks_tip_consensus_hash; + let pox_consensus = self.burn_block.consensus_hash; + let burn_block_height = self.burn_block.block_height; + + PeerInfo { + burn_block_height, + stacks_tip_consensus_hash, + stacks_tip, + stacks_tip_height, + pox_consensus, + server_version, } + } - let burn_db_path = self.config.get_burn_db_file_path(); - let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) - .expect("FATAL: could not open sortition DB"); - let epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height)? - .expect("FATAL: no epoch defined") - .epoch_id; - if epoch_id != StacksEpochId::Epoch25 { - debug!("Mock miner messaging is disabled for non-epoch 2.5 blocks."; - "epoch_id" => epoch_id.to_string() - ); - return Ok(()); + /// Only used in mock signing to retrieve the mock signatures for the given mock proposal + fn wait_for_mock_signatures( + &self, + mock_proposal: &MockProposal, + stackerdbs: &StackerDBs, + timeout: Duration, + ) -> Result, ChainstateError> { + let reward_cycle = self + .burnchain + .block_height_to_reward_cycle(self.burn_block.block_height) + .expect("BUG: block commit exists before first block height"); + let signers_contract_id = MessageSlotID::BlockResponse + .stacker_db_contract(self.config.is_mainnet(), reward_cycle); + let slot_ids: Vec<_> = stackerdbs + .get_signers(&signers_contract_id) + .expect("FATAL: could not get signers from stacker DB") + .into_iter() + .enumerate() + .map(|(slot_id, _)| { + u32::try_from(slot_id).expect("FATAL: too many signers to fit into u32 range") + }) + .collect(); + let mock_poll_start = Instant::now(); + let mut mock_signatures = vec![]; + // Because we don't care really if all signers reach quorum and this is just for testing purposes, + // we don't need to wait for ALL signers to sign the mock proposal and should not slow down mining too much + // Just wait a min amount of time for the mock signatures to come in + while mock_signatures.len() < slot_ids.len() && mock_poll_start.elapsed() < timeout { + let chunks = stackerdbs.get_latest_chunks(&signers_contract_id, &slot_ids)?; + for chunk in chunks { + if let Some(chunk) = chunk { + if let Ok(SignerMessage::MockSignature(mock_signature)) = + SignerMessage::consensus_deserialize(&mut chunk.as_slice()) + { + if mock_signature.mock_proposal == *mock_proposal + && !mock_signatures.contains(&mock_signature) + { + mock_signatures.push(mock_signature); + } + } + } + } } + Ok(mock_signatures) + } + + /// Only used in mock signing to determine if the peer info view was already signed across + fn mock_block_exists(&self, peer_info: &PeerInfo) -> bool { let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); let mut miners_stackerdb = StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); @@ -2292,113 +2345,110 @@ impl BlockMinerThread { if message.is_empty() { continue; } - let Ok(SignerMessage::MockMinerMessage(miner_message)) = + let Ok(SignerMessage::MockBlock(mock_block)) = SignerMessage::consensus_deserialize(&mut message.as_slice()) else { continue; }; - if miner_message.peer_info.burn_block_height == self.burn_block.block_height { - debug!( - "Already sent mock miner message for tenure burn block height {:?}", - self.burn_block.block_height - ); - return Ok(()); + if mock_block.mock_proposal.peer_info == *peer_info { + return true; } } } } - // Retrieve any MockSignatures from stackerdb - let mut mock_signatures = Vec::new(); - let reward_cycle = self - .burnchain - .block_height_to_reward_cycle(self.burn_block.block_height) - .expect("BUG: block commit exists before first block height"); - let signers_contract_id = MessageSlotID::MockSignature - .stacker_db_contract(self.config.is_mainnet(), reward_cycle); - // Get the slots for every signer - let stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), false)?; - let slot_ids: Vec<_> = stackerdbs - .get_signers(&signers_contract_id) - .expect("FATAL: could not get signers from stacker DB") - .into_iter() - .enumerate() - .map(|(slot_id, _)| { - u32::try_from(slot_id).expect("FATAL: too many signers to fit into u32 range") - }) - .collect(); - let chunks = stackerdbs.get_latest_chunks(&signers_contract_id, &slot_ids)?; - for chunk in chunks { - if let Some(chunk) = chunk { - if let Ok(SignerMessage::MockSignature(mock_signature)) = - SignerMessage::consensus_deserialize(&mut chunk.as_slice()) - { - if mock_signature.sign_data.event_burn_block_height - == self.burn_block.block_height - { - mock_signatures.push(mock_signature); - } - } - } + false + } + + /// Read any mock signatures from stackerdb and respond to them + pub fn send_mock_miner_messages(&mut self) -> Result<(), ChainstateError> { + let miner_config = self.config.get_miner_config(); + if !miner_config.pre_nakamoto_miner_messaging { + debug!("Pre-Nakamoto mock miner messaging is disabled"); + return Ok(()); } - let server_version = version_string( - "stacks-node", - option_env!("STACKS_NODE_VERSION") - .or(option_env!("CARGO_PKG_VERSION")) - .unwrap_or("0.0.0.0"), - ); - let stacks_tip_height = self.burn_block.canonical_stacks_tip_height; - let stacks_tip = self.burn_block.canonical_stacks_tip_hash; - let stacks_tip_consensus_hash = self.burn_block.canonical_stacks_tip_consensus_hash; - let pox_consensus = self.burn_block.consensus_hash; - let burn_block_height = self.burn_block.block_height; + let burn_db_path = self.config.get_burn_db_file_path(); + let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + let epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height)? + .expect("FATAL: no epoch defined") + .epoch_id; + if epoch_id != StacksEpochId::Epoch25 { + debug!("Mock miner messaging is disabled for non-epoch 2.5 blocks."; + "epoch_id" => epoch_id.to_string() + ); + return Ok(()); + } - let peer_info = PeerInfo { - burn_block_height, - stacks_tip_consensus_hash, - stacks_tip, - stacks_tip_height, - pox_consensus, - server_version, - }; + let mining_key = miner_config + .mining_key + .expect("Cannot mock sign without mining key"); - let message = MockMinerMessage { - peer_info, - chain_id: self.config.burnchain.chain_id, - mock_signatures, - }; + // Create a peer info view of the current state + let peer_info = self.generate_peer_info(); + if self.mock_block_exists(&peer_info) { + debug!( + "Already sent mock miner block proposal for current peer info view. Not sending another mock proposal." + ); + return Ok(()); + } - info!("Sending mock miner message in response to mock signatures for burn block {:?}", message.peer_info.burn_block_height; - "stacks_tip_consensus_hash" => ?message.peer_info.stacks_tip_consensus_hash.clone(), - "stacks_tip" => ?message.peer_info.stacks_tip.clone(), - "peer_burn_block_height" => message.peer_info.burn_block_height, - "pox_consensus" => ?message.peer_info.pox_consensus.clone(), - "server_version" => message.peer_info.server_version.clone(), - "chain_id" => message.chain_id, - "num_mock_signatures" => message.mock_signatures.len(), - ); + // find out which slot we're in. If we are not the latest sortition winner, we should not be sending anymore messages anyway + let stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), false)?; let (_, miners_info) = NakamotoChainState::make_miners_stackerdb_config(&burn_db, &self.burn_block)?; - - // find out which slot we're in. If we are not the latest sortition winner, we should not be sending anymore messages anyway let idx = miners_info.get_latest_winner_index(); let sortitions = miners_info.get_sortitions(); let election_sortition = *sortitions .get(idx as usize) .expect("FATAL: latest winner index out of bounds"); + let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); + let mut miners_stackerdb = + StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); + + let mock_proposal = + MockProposal::new(peer_info, self.config.burnchain.chain_id, &mining_key); + + info!("Sending mock proposal to stackerdb: {mock_proposal:?}"); + + if let Err(e) = SignCoordinator::send_miners_message( + &mining_key, + &burn_db, + &self.burn_block, + &stackerdbs, + SignerMessage::MockProposal(mock_proposal.clone()), + MinerSlotID::BlockProposal, // There is no specific slot for mock miner messages. We use BlockProposal for MockProposal as well. + self.config.is_mainnet(), + &mut miners_stackerdb, + &election_sortition, + ) { + warn!("Failed to send mock proposal to stackerdb: {:?}", &e); + return Ok(()); + } + + // Retrieve any MockSignatures from stackerdb + let mock_signatures = + self.wait_for_mock_signatures(&mock_proposal, &stackerdbs, Duration::from_secs(10))?; + + let mock_block = MockBlock { + mock_proposal, + mock_signatures, + }; + + info!("Sending mock block to stackerdb: {mock_block:?}"); if let Err(e) = SignCoordinator::send_miners_message( &miner_config.mining_key.expect("BUG: no mining key"), &burn_db, &self.burn_block, &stackerdbs, - SignerMessage::MockMinerMessage(message.clone()), - MinerSlotID::BlockProposal, // There is no specific slot for mock miner messages + SignerMessage::MockBlock(mock_block.clone()), + MinerSlotID::BlockPushed, // There is no specific slot for mock miner messages self.config.is_mainnet(), &mut miners_stackerdb, &election_sortition, ) { - warn!("Failed to send mock miner message: {:?}", &e); + warn!("Failed to send mock block to stackerdb: {:?}", &e); } Ok(()) } @@ -3744,8 +3794,8 @@ impl RelayerThread { .name(format!("miner-block-{}", self.local_peer.data_url)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { - if let Err(e) = miner_thread_state.send_mock_miner_message() { - warn!("Failed to send mock miner message: {}", e); + if let Err(e) = miner_thread_state.send_mock_miner_messages() { + warn!("Failed to send mock miner messages: {}", e); } miner_thread_state.run_tenure() }) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 631d92c83c..79bed1739f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2216,205 +2216,27 @@ fn mock_sign_epoch_25() { .clone() .unwrap(); let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - let epoch_3_start_height = epoch_3.start_height; + let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary signer_test.boot_to_epoch_25_reward_cycle(); info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); // Mine until epoch 3.0 and ensure that no more mock signatures are received - let mut reward_cycle = signer_test.get_current_reward_cycle(); - let mut stackerdb = StackerDB::new( - &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is - false, - reward_cycle, - SignerSlotID(0), // We are just reading so again, don't care about index. - ); - let mut signer_slot_ids: Vec<_> = signer_test + let reward_cycle = signer_test.get_current_reward_cycle(); + let signer_slot_ids: Vec<_> = signer_test .get_signer_indices(reward_cycle) .iter() .map(|id| id.0) .collect(); + let signer_keys = signer_test.get_signer_public_keys(reward_cycle); + let signer_public_keys: Vec<_> = signer_keys.signers.into_values().collect(); assert_eq!(signer_slot_ids.len(), num_signers); - // Mine until epoch 3.0 and ensure we get a new mock signature per epoch 2.5 sortition - let main_poll_time = Instant::now(); - while signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height() - < epoch_3_start_height - { - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); - let current_burn_block_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); - if current_burn_block_height - % signer_test - .running_nodes - .conf - .get_burnchain() - .pox_constants - .reward_cycle_length as u64 - == 0 - { - reward_cycle += 1; - debug!("Rolling over reward cycle to {:?}", reward_cycle); - stackerdb = StackerDB::new( - &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is - false, - reward_cycle, - SignerSlotID(0), // We are just reading so again, don't care about index. - ); - signer_slot_ids = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - assert_eq!(signer_slot_ids.len(), num_signers); - } - let mut mock_signatures = vec![]; - let mock_poll_time = Instant::now(); - debug!("Waiting for mock signatures for burn block height {current_burn_block_height}"); - while mock_signatures.len() != num_signers { - std::thread::sleep(Duration::from_millis(100)); - let messages: Vec = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::MockSignature) - .expect("Failed to get BlockResponse stackerdb session"), - &signer_slot_ids, - ) - .expect("Failed to get message from stackerdb"); - for message in messages { - if let SignerMessage::MockSignature(mock_signature) = message { - if mock_signature.sign_data.event_burn_block_height == current_burn_block_height - { - if !mock_signatures.contains(&mock_signature) { - mock_signatures.push(mock_signature); - } - } - } - } - assert!( - mock_poll_time.elapsed() <= Duration::from_secs(15), - "Failed to find mock signatures within timeout" - ); - } - assert!( - main_poll_time.elapsed() <= Duration::from_secs(45), - "Timed out waiting to advance epoch 3.0" - ); - } - info!("------------------------- Test Processing Epoch 3.0 Tenure -------------------------"); - let old_messages: Vec = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::MockSignature) - .expect("Failed to get BlockResponse stackerdb session"), - &signer_slot_ids, - ) - .expect("Failed to get message from stackerdb"); - let old_signatures = old_messages - .iter() - .filter_map(|message| { - if let SignerMessage::MockSignature(mock_signature) = message { - Some(mock_signature) - } else { - None - } - }) - .collect::>(); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); - // Wait a bit to ensure no new mock signatures show up - std::thread::sleep(Duration::from_secs(5)); - let new_messages: Vec = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::MockSignature) - .expect("Failed to get BlockResponse stackerdb session"), - &signer_slot_ids, - ) - .expect("Failed to get message from stackerdb"); - let new_signatures = new_messages - .iter() - .filter_map(|message| { - if let SignerMessage::MockSignature(mock_signature) = message { - Some(mock_signature) - } else { - None - } - }) - .collect::>(); - assert_eq!(old_signatures, new_signatures); -} - -#[test] -#[ignore] -/// This test checks that Epoch 2.5 miners will issue a MockMinerMessage per burn block they receive -/// including the mock signature from the signers. -fn mock_miner_message_epoch_25() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 100; - let send_fee = 180; - - let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(5)), - |_| {}, - |node_config| { - let epochs = node_config.burnchain.epochs.as_mut().unwrap(); - for epoch in epochs.iter_mut() { - if epoch.epoch_id == StacksEpochId::Epoch25 { - epoch.end_height = 251; - } - if epoch.epoch_id == StacksEpochId::Epoch30 { - epoch.start_height = 251; - } - } - }, - &[], - ); - - let epochs = signer_test - .running_nodes - .conf - .burnchain - .epochs - .clone() - .unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - let epoch_3_boundary = epoch_3.start_height - 1; - - signer_test.boot_to_epoch_25_reward_cycle(); - - info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); + + // Mine until epoch 3.0 and ensure we get a new mock block per epoch 2.5 sortition let main_poll_time = Instant::now(); - let mut mock_miner_message = None; // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. while signer_test .running_nodes @@ -2422,6 +2244,7 @@ fn mock_miner_message_epoch_25() { .get_headers_height() < epoch_3_boundary { + let mut mock_block_mesage = None; let mock_poll_time = Instant::now(); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, @@ -2434,8 +2257,7 @@ fn mock_miner_message_epoch_25() { .btc_regtest_controller .get_headers_height(); debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); - - while mock_miner_message.is_none() { + while mock_block_mesage.is_none() { std::thread::sleep(Duration::from_millis(100)); let chunks = test_observer::get_stackerdb_chunks(); for chunk in chunks @@ -2451,14 +2273,29 @@ fn mock_miner_message_epoch_25() { if chunk.data.is_empty() { continue; } - let SignerMessage::MockMinerMessage(message) = + let SignerMessage::MockBlock(mock_block) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage") else { continue; }; - if message.peer_info.burn_block_height == current_burn_block_height { - mock_miner_message = Some(message); + if mock_block.mock_proposal.peer_info.burn_block_height == current_burn_block_height + { + assert_eq!(mock_block.mock_signatures.len(), num_signers); + mock_block + .mock_signatures + .iter() + .for_each(|mock_signature| { + assert!(signer_public_keys.iter().any(|signer| { + mock_signature + .verify( + &StacksPublicKey::from_slice(signer.to_bytes().as_slice()) + .unwrap(), + ) + .expect("Failed to verify mock signature") + })); + }); + mock_block_mesage = Some(mock_block); break; } } @@ -2467,10 +2304,9 @@ fn mock_miner_message_epoch_25() { "Failed to find mock miner message within timeout" ); } - mock_miner_message = None; assert!( main_poll_time.elapsed() <= Duration::from_secs(45), - "Timed out waiting to advance epoch 3.0" + "Timed out waiting to advance epoch 3.0 boundary" ); } } From f12961e7b704705bcca56bad154f2326afa101bb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 16:36:40 -0400 Subject: [PATCH 0791/1400] Rename pre_nakamoto_miner_messaging to pre_nakamoto_mock_signing Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/config.rs | 10 +++++----- testnet/stacks-node/src/neon_node.rs | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 4528e07222..c6c0abfd25 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2332,8 +2332,8 @@ pub struct MinerConfig { pub max_reorg_depth: u64, /// Amount of time while mining in nakamoto to wait for signers to respond to a proposed block pub wait_on_signers: Duration, - /// Whether to send miner messages in Epoch 2.5 through the .miners contract. This is used for testing. - pub pre_nakamoto_miner_messaging: bool, + /// Whether to mock sign in Epoch 2.5 through the .miners and .signers contracts. This is used for testing purposes in Epoch 2.5 only. + pub pre_nakamoto_mock_signing: bool, } impl Default for MinerConfig { @@ -2364,7 +2364,7 @@ impl Default for MinerConfig { max_reorg_depth: 3, // TODO: update to a sane value based on stackerdb benchmarking wait_on_signers: Duration::from_secs(200), - pre_nakamoto_miner_messaging: true, + pre_nakamoto_mock_signing: true, } } } @@ -2696,7 +2696,7 @@ pub struct MinerConfigFile { pub filter_origins: Option, pub max_reorg_depth: Option, pub wait_on_signers_ms: Option, - pub pre_nakamoto_miner_messaging: Option, + pub pre_nakamoto_mock_signing: Option, } impl MinerConfigFile { @@ -2799,7 +2799,7 @@ impl MinerConfigFile { .wait_on_signers_ms .map(Duration::from_millis) .unwrap_or(miner_default_config.wait_on_signers), - pre_nakamoto_miner_messaging: self.pre_nakamoto_miner_messaging.unwrap_or(true), + pre_nakamoto_mock_signing: self.pre_nakamoto_mock_signing.unwrap_or(true), }) } } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index fc5e0d8055..238a677e4d 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2362,8 +2362,8 @@ impl BlockMinerThread { /// Read any mock signatures from stackerdb and respond to them pub fn send_mock_miner_messages(&mut self) -> Result<(), ChainstateError> { let miner_config = self.config.get_miner_config(); - if !miner_config.pre_nakamoto_miner_messaging { - debug!("Pre-Nakamoto mock miner messaging is disabled"); + if !miner_config.pre_nakamoto_mock_signing { + debug!("Pre-Nakamoto mock signing is disabled"); return Ok(()); } From 82e390d19ba3d782d3a2a21d0bd85933afc604ad Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 15 Aug 2024 16:41:07 -0400 Subject: [PATCH 0792/1400] Add a bit more logging Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/neon_node.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 238a677e4d..5f0720d1a4 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2418,7 +2418,7 @@ impl BlockMinerThread { &self.burn_block, &stackerdbs, SignerMessage::MockProposal(mock_proposal.clone()), - MinerSlotID::BlockProposal, // There is no specific slot for mock miner messages. We use BlockProposal for MockProposal as well. + MinerSlotID::BlockProposal, // There is no specific slot for mock miner messages so we use BlockProposal for MockProposal as well. self.config.is_mainnet(), &mut miners_stackerdb, &election_sortition, @@ -2428,6 +2428,7 @@ impl BlockMinerThread { } // Retrieve any MockSignatures from stackerdb + info!("Waiting for mock signatures..."); let mock_signatures = self.wait_for_mock_signatures(&mock_proposal, &stackerdbs, Duration::from_secs(10))?; @@ -2443,7 +2444,7 @@ impl BlockMinerThread { &self.burn_block, &stackerdbs, SignerMessage::MockBlock(mock_block.clone()), - MinerSlotID::BlockPushed, // There is no specific slot for mock miner messages + MinerSlotID::BlockPushed, // There is no specific slot for mock miner messages. Let's use BlockPushed for MockBlock since MockProposal uses BlockProposal. self.config.is_mainnet(), &mut miners_stackerdb, &election_sortition, From 4ada7e2c8dc75b4fb287acf22d652df6c70ebbc1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 15 Aug 2024 16:57:15 -0400 Subject: [PATCH 0793/1400] fix: shorten miner thread ID string, and add a bump-blocks-processed call when we process a sortition (just as we have in neon node) --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 148c80d030..435305472a 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -453,6 +453,7 @@ impl RelayerThread { increment_stx_blocks_mined_counter(); } self.globals.set_last_sortition(sn.clone()); + self.globals.counters.bump_blocks_processed(); // there may be a bufferred stacks block to process, so wake up the coordinator to check self.globals.coord_comms.announce_new_stacks_block(); @@ -812,10 +813,7 @@ impl RelayerThread { )?; let new_miner_handle = std::thread::Builder::new() - .name(format!( - "miner.{parent_tenure_start} (bound ({},{})", - &self.config.node.p2p_bind, &self.config.node.rpc_bind - )) + .name(format!("miner.{parent_tenure_start}",)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || new_miner_state.run_miner(prior_tenure_thread)) .map_err(|e| { From 572c5476e3e2a0b529eef052414180084de59348 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 15 Aug 2024 16:58:06 -0400 Subject: [PATCH 0794/1400] fix: remove sleeps in favor of wait_for() --- .../src/tests/nakamoto_integrations.rs | 121 +++++++++++++++--- testnet/stacks-node/src/tests/signer/v0.rs | 15 ++- 2 files changed, 112 insertions(+), 24 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index df8c512756..c370ca53f6 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3914,8 +3914,6 @@ fn forked_tenure_is_ignored() { thread::sleep(Duration::from_secs(1)); } - sleep_ms(1000); - info!("Tenure B broadcasted but did not process a block. Issue the next bitcon block and unstall block commits."); // the block will be stored, not processed, so load it out of staging @@ -3947,18 +3945,25 @@ fn forked_tenure_is_ignored() { // It should also build on block A, since the node has paused processing of block B. let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); next_block_and(&mut btc_regtest_controller, 60, || { test_skip_commit_op.0.lock().unwrap().replace(false); TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false); let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); - Ok(commits_count > commits_before && blocks_count > blocks_before) + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(commits_count > commits_before + && blocks_count > blocks_before + && blocks_processed > blocks_processed_before) }) .unwrap(); - // allow blocks B and C to be processed - sleep_ms(1000); - info!("Tenure C produced a block!"); let block_tenure_c = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() @@ -3976,6 +3981,10 @@ fn forked_tenure_is_ignored() { // Now let's produce a second block for tenure C and ensure it builds off of block C. let blocks_before = mined_blocks.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); let start_time = Instant::now(); // submit a tx so that the miner will mine an extra block @@ -3993,8 +4002,14 @@ fn forked_tenure_is_ignored() { thread::sleep(Duration::from_secs(1)); } - // give C's second block a moment to process - sleep_ms(1000); + wait_for(10, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); info!("Tenure C produced a second block!"); @@ -4014,16 +4029,23 @@ fn forked_tenure_is_ignored() { // Submit a block commit op for tenure D and mine a stacks block let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); - Ok(commits_count > commits_before && blocks_count > blocks_before) + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(commits_count > commits_before + && blocks_count > blocks_before + && blocks_processed > blocks_processed_before) }) .unwrap(); - // give tenure D's block a moment to process - sleep_ms(1000); - let block_tenure_d = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); @@ -5703,6 +5725,11 @@ fn continue_tenure_extend() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + wait_for_first_naka_block_commit(60, &commits_submitted); // Mine a regular nakamoto tenure @@ -5720,7 +5747,20 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); - sleep_ms(5_000); + + wait_for(5, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); info!("Pausing commit ops to trigger a tenure extend."); test_skip_commit_op.0.lock().unwrap().replace(true); @@ -5733,7 +5773,15 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); - sleep_ms(5_000); + + wait_for(5, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); // Submit a TX let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); @@ -5759,6 +5807,11 @@ fn continue_tenure_extend() { ) .unwrap(); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -5768,7 +5821,20 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); - sleep_ms(5_000); + + wait_for(5, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); @@ -5778,7 +5844,15 @@ fn continue_tenure_extend() { &[sender_signer_sk], &signers, ); - sleep_ms(5_000); + + wait_for(5, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); info!("Resuming commit ops to mine regular tenures."); test_skip_commit_op.0.lock().unwrap().replace(false); @@ -5792,11 +5866,7 @@ fn continue_tenure_extend() { .get_stacks_blocks_processed(); next_block_and(&mut btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - Ok(commits_count > commits_before && blocks_processed > blocks_processed_before) + Ok(commits_count > commits_before) }) .unwrap(); @@ -5807,6 +5877,15 @@ fn continue_tenure_extend() { &signers, ); + wait_for(5, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + sleep_ms(5_000); } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5e366ba488..c2ce878e28 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -540,11 +540,19 @@ fn miner_gather_signatures() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); - signer_test.boot_to_epoch_3(); let timeout = Duration::from_secs(30); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_mined_before = mined_blocks.load(Ordering::SeqCst); - // give the system a chance to mine a Nakamoto block - sleep_ms(30_000); + signer_test.boot_to_epoch_3(); + + // give the system a chance to reach the Nakamoto start tip + // mine a Nakamoto block + wait_for(30, || { + let blocks_mined = mined_blocks.load(Ordering::SeqCst); + Ok(blocks_mined > blocks_mined_before) + }) + .unwrap(); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); @@ -943,6 +951,7 @@ fn forked_tenure_testing( // In the next block, the miner should win the tenure and submit a stacks block let commits_before = commits_submitted.load(Ordering::SeqCst); let blocks_before = mined_blocks.load(Ordering::SeqCst); + next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, From b1b60d14d8c32fb5266a205320156481260b3fed Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 15 Aug 2024 14:07:24 -0700 Subject: [PATCH 0795/1400] fix: update scenario_five_test --- stackslib/src/chainstate/stacks/boot/mod.rs | 1 + .../src/chainstate/stacks/boot/pox_4_tests.rs | 51 ++++++++++++++++--- 2 files changed, 44 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 0f45d7a6d0..88ecc8887e 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -1773,6 +1773,7 @@ pub mod test { let data = if let Some(d) = value_opt.expect_optional().unwrap() { d } else { + warn!("get_stacker_info: No PoX info for {}", addr); return None; }; diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index f7f0f21116..70a175b87b 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -6793,6 +6793,7 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( pox_constants.pox_3_activation_height = 26; pox_constants.v3_unlock_height = 27; pox_constants.pox_4_activation_height = 41; + pox_constants.prepare_length = 5; let mut boot_plan = NakamotoBootPlan::new(test_name) .with_test_stackers(test_stackers) .with_test_signers(test_signers.clone()) @@ -6807,6 +6808,8 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( boot_plan.initial_balances = initial_balances; boot_plan.pox_constants = pox_constants.clone(); burnchain.pox_constants = pox_constants.clone(); + peer_config.burnchain = burnchain.clone(); + peer_config.test_signers = Some(test_signers.clone()); info!("---- Booting into Nakamoto Peer ----"); let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], Some(observer)); @@ -8873,6 +8876,7 @@ pub fn prepare_pox4_test<'a>( pox_constants.pox_3_activation_height = 26; pox_constants.v3_unlock_height = 27; pox_constants.pox_4_activation_height = 41; + pox_constants.prepare_length = 5; let mut boot_plan = NakamotoBootPlan::new(test_name) .with_test_stackers(test_stackers) .with_test_signers(test_signers.clone()) @@ -9427,6 +9431,17 @@ fn test_scenario_five(use_nakamoto: bool) { use_nakamoto, ); + // Add to test signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers.signer_keys.extend(vec![ + alice.private_key.clone(), + bob.private_key.clone(), + carl.private_key.clone(), + david.private_key.clone(), + eve.private_key.clone(), + ]); + } + // Lock periods for each stacker let carl_lock_period = 3; let frank_lock_period = 1; @@ -9684,6 +9699,12 @@ fn test_scenario_five(use_nakamoto: bool) { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); + info!( + "Scenario five: submitting stacking txs."; + "target_height" => target_height, + "next_reward_cycle" => next_reward_cycle, + "prepare_length" => peer_config.burnchain.pox_constants.prepare_length, + ); let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, @@ -9765,12 +9786,15 @@ fn test_scenario_five(use_nakamoto: bool) { alice.nonce += 1; bob.nonce += 1; carl.nonce += 1; - // Mine vote txs & advance to the reward set calculation of the next reward cycle let target_height = peer .config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); + info!( + "Scenario five: submitting votes. Target height: {}", + target_height + ); let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, @@ -9878,7 +9902,11 @@ fn test_scenario_five(use_nakamoto: bool) { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block, _receipts) = advance_to_block_height( + info!( + "Scenario five: submitting extend and aggregate commit txs. Target height: {}", + target_height + ); + let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, &observer, &txs, @@ -9888,17 +9916,19 @@ fn test_scenario_five(use_nakamoto: bool) { ); // Check that all of David's stackers are stacked - for (stacker, stacker_lock_period) in davids_stackers { + for (idx, (stacker, stacker_lock_period)) in davids_stackers.iter().enumerate() { let (pox_address, first_reward_cycle, lock_period, _indices) = - get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + get_stacker_info_pox_4(&mut peer, &stacker.principal) + .expect(format!("Failed to find stacker {}", idx).as_str()); assert_eq!(first_reward_cycle, reward_cycle); assert_eq!(pox_address, david.pox_address); assert_eq!(lock_period, *stacker_lock_period); } // Check that all of Eve's stackers are stacked - for (stacker, stacker_lock_period) in eves_stackers { + for (idx, (stacker, stacker_lock_period)) in eves_stackers.iter().enumerate() { let (pox_address, first_reward_cycle, lock_period, _indices) = - get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + get_stacker_info_pox_4(&mut peer, &stacker.principal) + .expect(format!("Failed to find stacker {}", idx).as_str()); assert_eq!(first_reward_cycle, reward_cycle); assert_eq!(pox_address, eve.pox_address); assert_eq!(lock_period, *stacker_lock_period); @@ -9970,6 +10000,10 @@ fn test_scenario_five(use_nakamoto: bool) { .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); // Submit vote transactions + info!( + "Scenario five: submitting votes. Target height: {}", + target_height + ); let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, @@ -10085,7 +10119,8 @@ fn test_scenario_five(use_nakamoto: bool) { (heidi.clone(), heidi_lock_period), ]; - let (latest_block, tx_block, _receipts) = advance_to_block_height( + info!("Scenario five: submitting increase and aggregate-commit txs"); + let (latest_block, tx_block, receipts) = advance_to_block_height( &mut peer, &observer, &txs, @@ -10116,6 +10151,6 @@ fn test_scenario_five(use_nakamoto: bool) { assert_eq!(pox_address, carl.pox_address); // Assert that carl's error is err(40) - let carl_increase_err = tx_block.receipts[1].clone().result; + let carl_increase_err = receipts[1].clone().result; assert_eq!(carl_increase_err, Value::error(Value::Int(40)).unwrap()); } From fe3d7dc2b7e866621ef81aa49eecc1bcb1307bc3 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 15 Aug 2024 17:12:16 -0400 Subject: [PATCH 0796/1400] chore: add warn logs for block validate rejections --- stackslib/src/net/api/postblock_proposal.rs | 24 +++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 6c1d5526b5..853cf8fc62 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -215,6 +215,14 @@ impl NakamotoBlockProposal { let mainnet = self.chain_id == CHAIN_ID_MAINNET; if self.chain_id != chainstate.chain_id || mainnet != chainstate.mainnet { + warn!( + "Rejected block proposal"; + "reason" => "Wrong network/chain_id", + "expected_chain_id" => chainstate.chain_id, + "expected_mainnet" => chainstate.mainnet, + "received_chain_id" => self.chain_id, + "received_mainnet" => mainnet, + ); return Err(BlockValidateRejectReason { reason_code: ValidateRejectCode::InvalidBlock, reason: "Wrong network/chain_id".into(), @@ -227,6 +235,10 @@ impl NakamotoBlockProposal { let expected_burn_opt = NakamotoChainState::get_expected_burns(&mut db_handle, chainstate.db(), &self.block)?; if expected_burn_opt.is_none() { + warn!( + "Rejected block proposal"; + "reason" => "Failed to find parent expected burns", + ); return Err(BlockValidateRejectReason { reason_code: ValidateRejectCode::UnknownParent, reason: "Failed to find parent expected burns".into(), @@ -259,6 +271,12 @@ impl NakamotoBlockProposal { &parent_stacks_header.anchored_header { if self.block.header.timestamp <= parent_nakamoto_header.timestamp { + warn!( + "Rejected block proposal"; + "reason" => "Block timestamp is not greater than parent block", + "block_timestamp" => self.block.header.timestamp, + "parent_block_timestamp" => parent_nakamoto_header.timestamp, + ); return Err(BlockValidateRejectReason { reason_code: ValidateRejectCode::InvalidBlock, reason: "Block timestamp is not greater than parent block".into(), @@ -266,6 +284,12 @@ impl NakamotoBlockProposal { } } if self.block.header.timestamp > get_epoch_time_secs() + 15 { + warn!( + "Rejected block proposal"; + "reason" => "Block timestamp is too far into the future", + "block_timestamp" => self.block.header.timestamp, + "current_time" => get_epoch_time_secs(), + ); return Err(BlockValidateRejectReason { reason_code: ValidateRejectCode::InvalidBlock, reason: "Block timestamp is too far into the future".into(), From f18a6b9d1861327b71329c08775de9c7e6448148 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 15 Aug 2024 15:26:53 -0700 Subject: [PATCH 0797/1400] fix: remove unneeded comments --- .../src/chainstate/stacks/boot/pox_4_tests.rs | 58 ++----------------- 1 file changed, 5 insertions(+), 53 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 70a175b87b..0968cc4de3 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -3300,12 +3300,12 @@ fn verify_signer_key_signatures() { assert_eq!(result, Value::okay_true()); } -#[test] -fn stack_stx_verify_signer_sig() { +#[apply(nakamoto_cases)] +fn stack_stx_verify_signer_sig(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = - prepare_pox4_test(function_name!(), Some(&observer), false); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -3564,7 +3564,7 @@ fn stack_stx_verify_signer_sig() { valid_tx, ]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacker_txs = get_last_block_sender_transactions(&observer, stacker_addr); let expected_error = Value::error(Value::Int(35)).unwrap(); @@ -4260,7 +4260,6 @@ fn advance_to_block_height( peer.get_burn_block_height(), passed_txs.len() ); - // latest_block = Some(peer.tenure_with_txs(&passed_txs, peer_nonce)); latest_block = Some(tenure_with_txs(peer, &passed_txs, peer_nonce, test_signers)); passed_txs = &[]; if tx_block.is_none() { @@ -4274,7 +4273,6 @@ fn advance_to_block_height( } else { tx_block.receipts.clone() }; - // let tx_block_receipts = tx_block.receipts[2..].to_vec(); (latest_block, tx_block, tx_block_receipts) } @@ -4481,7 +4479,6 @@ fn stack_agg_increase() { &mut peer_nonce, target_height.into(), &mut None, - // Some(&mut test_signers), ); // Get Bob's aggregate commit reward index @@ -4628,7 +4625,6 @@ fn stack_agg_increase() { &txs, &mut peer_nonce, target_height.into(), - // &mut test_signers, &mut None, ); @@ -5200,8 +5196,6 @@ fn stack_stx_signer_key(use_nakamoto: bool) { mut test_signers, ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); - info!("--- starting stack-stx test ---"); - let stacker_nonce = 0; let stacker_key = &keys[0]; let min_ustx = get_stacking_minimum(&mut peer, &latest_block); @@ -5210,7 +5204,6 @@ fn stack_stx_signer_key(use_nakamoto: bool) { let signer_key_val = Value::buff_from(signer_public_key.to_bytes_compressed()).unwrap(); let reward_cycle = get_current_reward_cycle(&peer, &burnchain); - info!("Reward cycle: {reward_cycle}"); // (define-public (stack-stx (amount-ustx uint) // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) @@ -5245,9 +5238,7 @@ fn stack_stx_signer_key(use_nakamoto: bool) { ], )]; - // let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); - // peer.make_nakamoto_tenure(tenure_change, coinbase, signers, block_builder) let stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -6800,11 +6791,6 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( .with_private_key(private_key); boot_plan.add_default_balance = false; - // let balances: Vec<(PrincipalData, u64)> = addrs - // .clone() - // .into_iter() - // .map(|addr| (addr.into(), (1024 * POX_THRESHOLD_STEPS_USTX) as u64)) - // .collect(); boot_plan.initial_balances = initial_balances; boot_plan.pox_constants = pox_constants.clone(); burnchain.pox_constants = pox_constants.clone(); @@ -6822,23 +6808,11 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( let coinbase_nonce = 0; let burn_block_height = get_tip(peer.sortdb.as_ref()).block_height; - // let reward_cycle = get_current_reward_cycle(&peer, &peer.config.burnchain); let reward_cycle = burnchain .block_height_to_reward_cycle(burn_block_height) .unwrap() as u128; let min_ustx = get_stacking_minimum(&mut peer, &latest_block); - info!("Block height: {}", burn_block_height); - - // ( - // burnchain, - // peer, - // keys, - // latest_block, - // block_height, - // coinbase_nonce, - // Some(test_signers), - // ) ( peer, coinbase_nonce, @@ -7039,24 +7013,6 @@ fn test_scenario_one(use_nakamoto: bool) { assert_eq!(first_reward_cycle, next_reward_cycle); assert_eq!(pox_address, bob.pox_address); - info!("Got {} receipts", receipts.clone().len()); - - for receipt in receipts.clone() { - info!("Receipt: {:?}", receipt); - } - - let signer_keys_len = test_signers - .clone() - .map(|t| t.signer_keys.len()) - .unwrap_or(0); - // let signer_keys_len = if let Some(ref test_signers) = test_signers { - // test_signers.signer_keys.len() - // } else { - // 0 - // }; - - info!("Test signers now has {} keys", signer_keys_len); - // 1. Check bob's low authorization transaction let bob_tx_result_low = receipts .get(1) @@ -8981,15 +8937,11 @@ pub fn tenure_with_txs( let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); - // let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) - // .unwrap() - // .unwrap(); let latest_block = sort_db .index_handle_at_tip() .get_nakamoto_tip_block_id() .unwrap() .unwrap(); - // let tip = StacksBlockId:: latest_block } else { peer.tenure_with_txs(txs, coinbase_nonce) From 3e4b875ca8ac7f920810e4ed5de876efd66ac00b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 15 Aug 2024 19:59:58 -0400 Subject: [PATCH 0798/1400] docs: improve docs on new tests --- testnet/stacks-node/src/tests/signer/v0.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3cf7a14804..ce95049744 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2645,7 +2645,9 @@ fn signer_set_rollover() { #[test] #[ignore] -// This test involves two miners, each mining tenures with 6 blocks each. +/// This test involves two miners, each mining tenures with 6 blocks each. Half +/// of the signers are attached to each miner, so the test also verifies that +/// the signers' messages successfully make their way to the active miner. fn multiple_miners_with_nakamoto_blocks() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; @@ -2892,11 +2894,11 @@ fn multiple_miners_with_nakamoto_blocks() { #[test] #[ignore] -// This test involves two miners, 1 and 2. During miner 1's first tenure, miner -// 2 is forced to ignore one of the blocks in that tenure. The next time miner -// 2 mines a block, it should attempt to fork the chain at that point. The test -// verifies that the fork is not successful and that miner 1 is able to -// continue mining after this fork attempt. +/// This test involves two miners, 1 and 2. During miner 1's first tenure, miner +/// 2 is forced to ignore one of the blocks in that tenure. The next time miner +/// 2 mines a block, it should attempt to fork the chain at that point. The test +/// verifies that the fork is not successful and that miner 1 is able to +/// continue mining after this fork attempt. fn partial_tenure_fork() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; From b4987f7ed7b2324db70196ed1e495cdf03c16f24 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 16 Aug 2024 08:57:40 -0400 Subject: [PATCH 0799/1400] CRC: improve logging Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 64622646e3..9245220e94 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -147,14 +147,14 @@ impl SignerTrait for Signer { return; } }; - debug!("{self}: received a mock block proposal."; + info!("{self}: received a mock block proposal."; "current_reward_cycle" => current_reward_cycle, "epoch" => ?epoch ); if epoch == StacksEpochId::Epoch25 && self.reward_cycle == current_reward_cycle { - // We are in epoch 2.5, so we should mock mine to prove we are still alive. + // We are in epoch 2.5, so we should mock sign to prove we are still alive. self.mock_sign(mock_proposal.clone()); } } From aae44aba19a9570a264bc07e94196b91efd11197 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 16 Aug 2024 10:28:29 -0400 Subject: [PATCH 0800/1400] chore: Address Brice's PR comments --- testnet/stacks-node/src/tests/neon_integrations.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 64b1ca70da..03c8eb2df8 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12408,10 +12408,9 @@ fn next_block_and_wait_all( let finished = follower_blocks_processed .iter() .zip(followers_current.iter()) - .map(|(blocks_processed, start_count)| { + .all(|(blocks_processed, start_count)| { blocks_processed.load(Ordering::SeqCst) > *start_count - }) - .all(|b| b); + }); if finished { break; @@ -12702,7 +12701,7 @@ fn mock_miner_replay() { thread::sleep(block_gap); - // first block will hold our VRF registration + // second block will hold our VRF registration next_block_and_wait_all( &mut btc_regtest_controller, &miner_blocks_processed, From 314b4b88b4062ce5d01d0fa470f6f34f7e18747b Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 19:11:53 +0300 Subject: [PATCH 0801/1400] action membership run --- .github/workflows/pr-differences-mutants.yml | 50 +- stackslib/src/net/download/nakamoto/mod.rs | 6 + .../nakamoto/tenure_downloader_copy.rs | 693 ++++++++++++++ .../nakamoto/tenure_downloader_opy.rs | 693 ++++++++++++++ .../nakamoto/tenure_downloader_set_copy.rs | 660 +++++++++++++ .../nakamoto/tenure_downloader_set_opy.rs | 660 +++++++++++++ .../tenure_downloader_unconfirmed_copy.rs | 867 ++++++++++++++++++ .../tenure_downloader_unconfirmed_opy.rs | 867 ++++++++++++++++++ 8 files changed, 4487 insertions(+), 9 deletions(-) create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index fc4a725687..9c5cc34c7b 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -9,6 +9,16 @@ on: - ready_for_review paths: - '**.rs' + workflow_dispatch: + inputs: + ignore_timeout: + description: "Ignore mutants timeout limit" + required: false + type: choice + options: + - true + # - false + default: 'true' concurrency: group: pr-differences-${{ github.head_ref || github.ref || github.run_id }} @@ -16,9 +26,27 @@ concurrency: cancel-in-progress: true jobs: + check-right-permissions: + name: Check Right Permissions + runs-on: ubuntu-latest + + steps: + - name: Check Right Permissions To Trigger This + id: check_right_permissions + uses: stacks-network/actions/team-membership@feat/mutation-testing + with: + username: ${{ github.actor }} + team: 'Blockchain Team' + GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} + + - name: Fail if the user does not have the right permissions + if: ${{ inputs.ignore_timeout == true && steps.check_right_permissions.outputs.is_team_member != 'true' }} + run: exit 1 + # Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards check-big-packages-and-shards: name: Check Packages and Shards + needs: check-right-permissions runs-on: ubuntu-latest @@ -30,10 +58,13 @@ jobs: run_small_packages: ${{ steps.check_packages_and_shards.outputs.run_small_packages }} small_packages_with_shards: ${{ steps.check_packages_and_shards.outputs.small_packages_with_shards }} run_stacks_signer: ${{ steps.check_packages_and_shards.outputs.run_stacks_signer }} + too_many_mutants: ${{ steps.check_packages_and_shards.outputs.too_many_mutants }} steps: - id: check_packages_and_shards - uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main + uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@feat/mutation-testing + with: + ignore_timeout: ${{ inputs.ignore_timeout }} # Mutation testing - Execute on PR on small packages that have functions modified (normal run, no shards) pr-differences-mutants-small-normal: @@ -49,7 +80,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: package: 'small' @@ -72,7 +103,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: shard: ${{ matrix.shard }} package: 'small' @@ -94,7 +125,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: package: 'stackslib' @@ -120,7 +151,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: shard: ${{ matrix.shard }} package: 'stackslib' @@ -142,7 +173,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: package: 'stacks-node' @@ -168,7 +199,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: shard: ${{ matrix.shard }} package: 'stacks-node' @@ -186,7 +217,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing with: package: 'stacks-signer' @@ -211,7 +242,7 @@ jobs: steps: - name: Output Mutants - uses: stacks-network/actions/stacks-core/mutation-testing/output-pr-mutants@main + uses: stacks-network/actions/stacks-core/mutation-testing/output-pr-mutants@feat/mutation-testing with: stackslib_package: ${{ needs.check-big-packages-and-shards.outputs.run_stackslib }} shards_for_stackslib_package: ${{ needs.check-big-packages-and-shards.outputs.stackslib_with_shards }} @@ -220,3 +251,4 @@ jobs: small_packages: ${{ needs.check-big-packages-and-shards.outputs.run_small_packages }} shards_for_small_packages: ${{ needs.check-big-packages-and-shards.outputs.small_packages_with_shards }} stacks_signer: ${{ needs.check-big-packages-and-shards.outputs.run_stacks_signer }} + too_many_mutants: ${{ needs.check-big-packages-and-shards.outputs.too_many_mutants }} diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index dd440ac110..7643c54ff7 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -161,8 +161,14 @@ use crate::util_lib::db::{DBConn, Error as DBError}; mod download_state_machine; mod tenure; mod tenure_downloader; +mod tenure_downloader_copy; +mod tenure_downloader_opy; mod tenure_downloader_set; +mod tenure_downloader_set_copy; +mod tenure_downloader_set_opy; mod tenure_downloader_unconfirmed; +mod tenure_downloader_unconfirmed_copy; +mod tenure_downloader_unconfirmed_opy; pub use crate::net::download::nakamoto::download_state_machine::{ NakamotoDownloadState, NakamotoDownloadStateMachine, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs new file mode 100644 index 0000000000..f7fb970bb6 --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs @@ -0,0 +1,693 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for an historic tenure. This is a tenure for which we know the hashes of the +/// start and end block. This includes all tenures except for the two most recent ones. +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoTenureDownloadState { + /// Getting the tenure-start block (the given StacksBlockId is it's block ID). + GetTenureStartBlock(StacksBlockId), + /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not + /// always) handled by the execution of another NakamotoTenureDownloader. The only + /// exceptions are as follows: + /// + /// * if this tenure contains the anchor block, and it's the last tenure in the + /// reward cycle. In this case, the end-block must be directly fetched, since there will be no + /// follow-on NakamotTenureDownloader in the same reward cycle who can provide this. + /// + /// * if this tenure is the highest complete tenure, and we just learned the start-block of the + /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block + /// already known. This step will be skipped because the end-block is already present in the + /// state machine. + /// + /// * if the deadline (second parameter) is exceeded, the state machine transitions to + /// GetTenureEndBlock. + /// + /// The two fields here are: + /// * the block ID of the last block in the tenure (which happens to be the block ID of the + /// start block of the next tenure) + /// * the deadline by which this state machine needs to have obtained the tenure end-block + /// before transitioning to `GetTenureEndBlock`. + WaitForTenureEndBlock(StacksBlockId, Instant), + /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks + /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in + /// which we cannot quickly get the tenure-end block. + /// + /// The field here is the block ID of the tenure end block. + GetTenureEndBlock(StacksBlockId), + /// Receiving tenure blocks. + /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This + /// is because a tenure is fetched in order from highest block to lowest block. + GetTenureBlocks(StacksBlockId), + /// We have gotten all the blocks for this tenure + Done, +} + +pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; + +impl fmt::Display for NakamotoTenureDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs +/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent +/// tenures). +/// +/// This state machine works as follows: +/// +/// 1. Fetch the first block in the given tenure +/// 2. Obtain the last block in the given tenure, via one of the following means: +/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this +/// machine's tenure, and can be copied into this machine. +/// b. This machine is configured to directly fetch the end-block. This only happens if this +/// tenure both contains the anchor block for the next reward cycle and happens to be the last +/// tenure in the current reward cycle. +/// c. This machine is given the end-block on instantiation. This only happens when the machine +/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); +/// in this case, the end-block is the start-block of the ongoing tenure. +/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse +/// order. As blocks are found, their signer signatures will be validated against the signer +/// public keys for this tenure; their hash-chain continuity will be validated against the start +/// and end block hashes; their quantity will be validated against the tenure-change transaction +/// in the end-block. +/// +/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto +/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of +/// whether or not it straddles a reward cycle boundary). +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoTenureDownloader { + /// Consensus hash that identifies this tenure + pub tenure_id_consensus_hash: ConsensusHash, + /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and + /// sortition DB. + pub tenure_start_block_id: StacksBlockId, + /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID + /// for some other tenure). Learned from the inventory state machine and sortition DB. + pub tenure_end_block_id: StacksBlockId, + /// Address of who we're asking for blocks + pub naddr: NeighborAddress, + /// Signer public keys that signed the start-block of this tenure, in reward cycle order + pub start_signer_keys: RewardSet, + /// Signer public keys that signed the end-block of this tenure + pub end_signer_keys: RewardSet, + /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with + /// this state machine. + pub idle: bool, + + /// What state we're in for downloading this tenure + pub state: NakamotoTenureDownloadState, + /// Tenure-start block + pub tenure_start_block: Option, + /// Pre-stored tenure end block (used by the unconfirmed block downloader). + /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once + /// the start-block for the current tenure is downloaded. This is that start-block, which is + /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. + pub tenure_end_block: Option, + /// Tenure-end block header and TenureChange + pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, + /// Tenure blocks + pub tenure_blocks: Option>, +} + +impl NakamotoTenureDownloader { + pub fn new( + tenure_id_consensus_hash: ConsensusHash, + tenure_start_block_id: StacksBlockId, + tenure_end_block_id: StacksBlockId, + naddr: NeighborAddress, + start_signer_keys: RewardSet, + end_signer_keys: RewardSet, + ) -> Self { + debug!( + "Instantiate downloader to {} for tenure {}: {}-{}", + &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, + ); + Self { + tenure_id_consensus_hash, + tenure_start_block_id, + tenure_end_block_id, + naddr, + start_signer_keys, + end_signer_keys, + idle: false, + state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), + tenure_start_block: None, + tenure_end_header: None, + tenure_end_block: None, + tenure_blocks: None, + } + } + + /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed + /// tenure. This supplies the tenure end-block if known in advance. + pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { + self.tenure_end_block = Some(tenure_end_block); + self + } + + /// Is this downloader waiting for the tenure-end block data from some other downloader? Per + /// the struct documentation, this is case 2(a). + pub fn is_waiting(&self) -> bool { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { + return true; + } else { + return false; + } + } + + /// Validate and accept a given tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the start-block is valid. + /// Returns Err(..) if it is not valid. + pub fn try_accept_tenure_start_block( + &mut self, + tenure_start_block: NakamotoBlock, + ) -> Result<(), NetError> { + let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { + // not the right state for this + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if self.tenure_start_block_id != tenure_start_block.header.block_id() { + // not the block we were expecting + warn!("Invalid tenure-start block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_start_block" => %self.tenure_start_block_id, + "tenure_start_block ID" => %tenure_start_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_start_block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + // signature verification failed + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-start block for tenure {} block={}", + &self.tenure_id_consensus_hash, + &tenure_start_block.block_id() + ); + self.tenure_start_block = Some(tenure_start_block); + + if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { + // tenure_end_header supplied externally + self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); + } else if let Some(tenure_end_block) = self.tenure_end_block.take() { + // we already have the tenure-end block, so immediately proceed to accept it. + debug!( + "Preemptively process tenure-end block {} for tenure {}", + tenure_end_block.block_id(), + &self.tenure_id_consensus_hash + ); + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + tenure_end_block.block_id(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + self.try_accept_tenure_end_block(&tenure_end_block)?; + } else { + // need to get tenure_end_header. By default, assume that another + // NakamotoTenureDownloader will provide this block, and allow the + // NakamotoTenureDownloaderSet instance that manages a collection of these + // state-machines make the call to require this one to fetch the block directly. + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + self.tenure_end_block_id.clone(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + } + Ok(()) + } + + /// Transition this state-machine from waiting for its tenure-end block from another + /// state-machine to directly fetching it. This only needs to happen if the tenure this state + /// machine is downloading contains the PoX anchor block, and it's also the last confirmed + /// tenurein this reward cycle. + /// + /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and + /// runs a set of these machines based on the peers' inventory vectors. But because we don't + /// know if this is the PoX anchor block tenure (or even the last tenure) until we have + /// inventory vectors for this tenure's reward cycle, this state-transition must be driven + /// after this machine's instantiation. + pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { + let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state + else { + return Err(NetError::InvalidState); + }; + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + Ok(()) + } + + /// Transition to fetching the tenure-end block directly if waiting has taken too long. + pub fn transition_to_fetch_end_block_on_timeout(&mut self) { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = + self.state + { + if wait_deadline < Instant::now() { + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + } + } + } + + /// Validate and accept a tenure-end block. If accepted, then advance the state. + /// Once accepted, this function extracts the tenure-change transaction and block header from + /// this block (it does not need the entire block). + /// + /// Returns Ok(()) if the block was valid + /// Returns Err(..) if the block was invalid + pub fn try_accept_tenure_end_block( + &mut self, + tenure_end_block: &NakamotoBlock, + ) -> Result<(), NetError> { + if !matches!( + &self.state, + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) + | NakamotoTenureDownloadState::GetTenureEndBlock(_) + ) { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + warn!("Invalid state -- tenure_start_block is not set"); + return Err(NetError::InvalidState); + }; + + if self.tenure_end_block_id != tenure_end_block.header.block_id() { + // not the block we asked for + warn!("Invalid tenure-end block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_end_block" => %self.tenure_end_block_id, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_end_block + .header + .verify_signer_signatures(&self.end_signer_keys) + { + // bad signature + warn!("Invalid tenure-end block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // extract the needful -- need the tenure-change payload (which proves that the tenure-end + // block is the tenure-start block for the next tenure) and the parent block ID (which is + // the next block to download). + let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { + warn!("Invalid tenure-end block: failed to validate tenure-start"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + }; + + if !valid { + warn!("Invalid tenure-end block: not a well-formed tenure-start block"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + } + + let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { + warn!("Invalid tenure-end block: no tenure-change transaction"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + }; + + // tc_payload must point to the tenure-start block's header + if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { + warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; + "start_block_id" => %tenure_start_block.block_id(), + "end_block_id" => %tenure_end_block.block_id(), + "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, + "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-end header for tenure {} block={}; expect {} blocks", + &self.tenure_id_consensus_hash, + &tenure_end_block.block_id(), + tc_payload.previous_tenure_blocks + ); + self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); + self.state = NakamotoTenureDownloadState::GetTenureBlocks( + tenure_end_block.header.parent_block_id.clone(), + ); + Ok(()) + } + + /// Determine how many blocks must be in this tenure. + /// Returns None if we don't have the start and end blocks yet. + pub fn tenure_length(&self) -> Option { + self.tenure_end_header + .as_ref() + .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) + } + + /// Add downloaded tenure blocks to this machine. + /// If we have collected all tenure blocks, then return them and transition to the Done state. + /// + /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in + /// ascending order by height, and will include the tenure-start block but exclude the + /// tenure-end block. + /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to + /// the next block to fetch (stored in self.state) will be updated. + /// Returns Err(..) if the blocks were invalid. + pub fn try_accept_tenure_blocks( + &mut self, + mut tenure_blocks: Vec, + ) -> Result>, NetError> { + let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if tenure_blocks.is_empty() { + // nothing to do + return Ok(None); + } + + // blocks must be contiguous and in order from highest to lowest + let mut expected_block_id = block_cursor; + let mut count = 0; + for block in tenure_blocks.iter() { + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- not part of tenure"; + "expected_block_id" => %expected_block_id, + "block_id" => %block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + expected_block_id = &block.header.parent_block_id; + count += 1; + if self + .tenure_blocks + .as_ref() + .map(|blocks| blocks.len()) + .unwrap_or(0) + .saturating_add(count) + > self.tenure_length().unwrap_or(0) as usize + { + // there are more blocks downloaded than indicated by the end-blocks tenure-change + // transaction. + warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); + "tenure_id" => %self.tenure_id_consensus_hash, + "count" => %count, + "tenure_length" => self.tenure_length().unwrap_or(0), + "num_blocks" => tenure_blocks.len()); + return Err(NetError::InvalidMessage); + } + } + + if let Some(blocks) = self.tenure_blocks.as_mut() { + blocks.append(&mut tenure_blocks); + } else { + self.tenure_blocks = Some(tenure_blocks); + } + + // did we reach the tenure start block? + let Some(blocks) = self.tenure_blocks.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got None)"); + return Err(NetError::InvalidState); + }; + + let Some(earliest_block) = blocks.last() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no tenure-start block (infallible)"); + return Err(NetError::InvalidState); + }; + + debug!( + "Accepted tenure blocks for tenure {} cursor={} ({})", + &self.tenure_id_consensus_hash, &block_cursor, count + ); + if earliest_block.block_id() != tenure_start_block.block_id() { + // still have more blocks to download + let next_block_id = earliest_block.header.parent_block_id.clone(); + debug!( + "Need more blocks for tenure {} (went from {} to {}, next is {})", + &self.tenure_id_consensus_hash, + &block_cursor, + &earliest_block.block_id(), + &next_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); + return Ok(None); + } + + // finished! + self.state = NakamotoTenureDownloadState::Done; + Ok(self + .tenure_blocks + .take() + .map(|blocks| blocks.into_iter().rev().collect())) + } + + /// Produce the next HTTP request that, when successfully executed, will fetch the data needed + /// to advance this state machine. + /// Not all states require an HTTP request for advanceement. + /// + /// Returns Ok(Some(request)) if a request is needed + /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's + /// state) + /// Returns Err(()) if we're done. + pub fn make_next_download_request( + &self, + peerhost: PeerHost, + ) -> Result, ()> { + let request = match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { + debug!("Request tenure-start block {}", &start_block_id); + StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) + } + NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { + // we're waiting for some other downloader's block-fetch to complete + debug!( + "Waiting for tenure-end block {} until {:?}", + &_block_id, _deadline + ); + return Ok(None); + } + NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { + debug!("Request tenure-end block {}", &end_block_id); + StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) + } + NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { + debug!("Downloading tenure ending at {}", &end_block_id); + StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) + } + NakamotoTenureDownloadState::Done => { + // nothing more to do + return Err(()); + } + }; + Ok(Some(request)) + } + + /// Begin the next download request for this state machine. The request will be sent to the + /// data URL corresponding to self.naddr. + /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The + /// caller should try this again until it gets one of the other possible return values. + /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) + /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to + /// resolve its data URL to a socket address. + pub fn send_next_download_request( + &mut self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> Result { + if neighbor_rpc.has_inflight(&self.naddr) { + debug!("Peer {} has an inflight request", &self.naddr); + return Ok(true); + } + if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { + return Err(NetError::PeerNotConnected); + } + + let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { + // no conversation open to this neighbor + neighbor_rpc.add_dead(network, &self.naddr); + return Err(NetError::PeerNotConnected); + }; + + let request = match self.make_next_download_request(peerhost) { + Ok(Some(request)) => request, + Ok(None) => { + return Ok(true); + } + Err(_) => { + return Ok(false); + } + }; + + neighbor_rpc.send_request(network, self.naddr.clone(), request)?; + self.idle = false; + Ok(true) + } + + /// Handle a received StacksHttpResponse and advance the state machine. + /// If we get the full tenure's blocks, then return them. + /// Returns Ok(Some([blocks])) if we successfully complete the state machine. + /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done + /// yet. The caller should now call `send_next_download_request()` + /// Returns Err(..) on failure to process the response. + pub fn handle_next_download_response( + &mut self, + response: StacksHttpResponse, + ) -> Result>, NetError> { + let handle_result = match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { + debug!( + "Got download response for tenure-start block {}", + &_block_id + ); + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; + self.try_accept_tenure_start_block(block)?; + Ok(None) + } + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { + debug!("Invalid state -- Got download response for WaitForTenureBlock"); + Err(NetError::InvalidState) + } + NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { + debug!("Got download response to tenure-end block {}", &_block_id); + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; + self.try_accept_tenure_end_block(&block)?; + Ok(None) + } + NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { + debug!( + "Got download response for tenure blocks ending at {}", + &_end_block_id + ); + let blocks = response.decode_nakamoto_tenure().map_err(|e| { + warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); + e + })?; + let blocks_opt = self.try_accept_tenure_blocks(blocks)?; + Ok(blocks_opt) + } + NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), + }; + self.idle = true; + handle_result + } + + pub fn is_done(&self) -> bool { + self.state == NakamotoTenureDownloadState::Done + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs new file mode 100644 index 0000000000..f7fb970bb6 --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs @@ -0,0 +1,693 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for an historic tenure. This is a tenure for which we know the hashes of the +/// start and end block. This includes all tenures except for the two most recent ones. +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoTenureDownloadState { + /// Getting the tenure-start block (the given StacksBlockId is it's block ID). + GetTenureStartBlock(StacksBlockId), + /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not + /// always) handled by the execution of another NakamotoTenureDownloader. The only + /// exceptions are as follows: + /// + /// * if this tenure contains the anchor block, and it's the last tenure in the + /// reward cycle. In this case, the end-block must be directly fetched, since there will be no + /// follow-on NakamotTenureDownloader in the same reward cycle who can provide this. + /// + /// * if this tenure is the highest complete tenure, and we just learned the start-block of the + /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block + /// already known. This step will be skipped because the end-block is already present in the + /// state machine. + /// + /// * if the deadline (second parameter) is exceeded, the state machine transitions to + /// GetTenureEndBlock. + /// + /// The two fields here are: + /// * the block ID of the last block in the tenure (which happens to be the block ID of the + /// start block of the next tenure) + /// * the deadline by which this state machine needs to have obtained the tenure end-block + /// before transitioning to `GetTenureEndBlock`. + WaitForTenureEndBlock(StacksBlockId, Instant), + /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks + /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in + /// which we cannot quickly get the tenure-end block. + /// + /// The field here is the block ID of the tenure end block. + GetTenureEndBlock(StacksBlockId), + /// Receiving tenure blocks. + /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This + /// is because a tenure is fetched in order from highest block to lowest block. + GetTenureBlocks(StacksBlockId), + /// We have gotten all the blocks for this tenure + Done, +} + +pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; + +impl fmt::Display for NakamotoTenureDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs +/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent +/// tenures). +/// +/// This state machine works as follows: +/// +/// 1. Fetch the first block in the given tenure +/// 2. Obtain the last block in the given tenure, via one of the following means: +/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this +/// machine's tenure, and can be copied into this machine. +/// b. This machine is configured to directly fetch the end-block. This only happens if this +/// tenure both contains the anchor block for the next reward cycle and happens to be the last +/// tenure in the current reward cycle. +/// c. This machine is given the end-block on instantiation. This only happens when the machine +/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); +/// in this case, the end-block is the start-block of the ongoing tenure. +/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse +/// order. As blocks are found, their signer signatures will be validated against the signer +/// public keys for this tenure; their hash-chain continuity will be validated against the start +/// and end block hashes; their quantity will be validated against the tenure-change transaction +/// in the end-block. +/// +/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto +/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of +/// whether or not it straddles a reward cycle boundary). +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoTenureDownloader { + /// Consensus hash that identifies this tenure + pub tenure_id_consensus_hash: ConsensusHash, + /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and + /// sortition DB. + pub tenure_start_block_id: StacksBlockId, + /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID + /// for some other tenure). Learned from the inventory state machine and sortition DB. + pub tenure_end_block_id: StacksBlockId, + /// Address of who we're asking for blocks + pub naddr: NeighborAddress, + /// Signer public keys that signed the start-block of this tenure, in reward cycle order + pub start_signer_keys: RewardSet, + /// Signer public keys that signed the end-block of this tenure + pub end_signer_keys: RewardSet, + /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with + /// this state machine. + pub idle: bool, + + /// What state we're in for downloading this tenure + pub state: NakamotoTenureDownloadState, + /// Tenure-start block + pub tenure_start_block: Option, + /// Pre-stored tenure end block (used by the unconfirmed block downloader). + /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once + /// the start-block for the current tenure is downloaded. This is that start-block, which is + /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. + pub tenure_end_block: Option, + /// Tenure-end block header and TenureChange + pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, + /// Tenure blocks + pub tenure_blocks: Option>, +} + +impl NakamotoTenureDownloader { + pub fn new( + tenure_id_consensus_hash: ConsensusHash, + tenure_start_block_id: StacksBlockId, + tenure_end_block_id: StacksBlockId, + naddr: NeighborAddress, + start_signer_keys: RewardSet, + end_signer_keys: RewardSet, + ) -> Self { + debug!( + "Instantiate downloader to {} for tenure {}: {}-{}", + &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, + ); + Self { + tenure_id_consensus_hash, + tenure_start_block_id, + tenure_end_block_id, + naddr, + start_signer_keys, + end_signer_keys, + idle: false, + state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), + tenure_start_block: None, + tenure_end_header: None, + tenure_end_block: None, + tenure_blocks: None, + } + } + + /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed + /// tenure. This supplies the tenure end-block if known in advance. + pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { + self.tenure_end_block = Some(tenure_end_block); + self + } + + /// Is this downloader waiting for the tenure-end block data from some other downloader? Per + /// the struct documentation, this is case 2(a). + pub fn is_waiting(&self) -> bool { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { + return true; + } else { + return false; + } + } + + /// Validate and accept a given tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the start-block is valid. + /// Returns Err(..) if it is not valid. + pub fn try_accept_tenure_start_block( + &mut self, + tenure_start_block: NakamotoBlock, + ) -> Result<(), NetError> { + let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { + // not the right state for this + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if self.tenure_start_block_id != tenure_start_block.header.block_id() { + // not the block we were expecting + warn!("Invalid tenure-start block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_start_block" => %self.tenure_start_block_id, + "tenure_start_block ID" => %tenure_start_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_start_block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + // signature verification failed + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-start block for tenure {} block={}", + &self.tenure_id_consensus_hash, + &tenure_start_block.block_id() + ); + self.tenure_start_block = Some(tenure_start_block); + + if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { + // tenure_end_header supplied externally + self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); + } else if let Some(tenure_end_block) = self.tenure_end_block.take() { + // we already have the tenure-end block, so immediately proceed to accept it. + debug!( + "Preemptively process tenure-end block {} for tenure {}", + tenure_end_block.block_id(), + &self.tenure_id_consensus_hash + ); + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + tenure_end_block.block_id(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + self.try_accept_tenure_end_block(&tenure_end_block)?; + } else { + // need to get tenure_end_header. By default, assume that another + // NakamotoTenureDownloader will provide this block, and allow the + // NakamotoTenureDownloaderSet instance that manages a collection of these + // state-machines make the call to require this one to fetch the block directly. + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + self.tenure_end_block_id.clone(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + } + Ok(()) + } + + /// Transition this state-machine from waiting for its tenure-end block from another + /// state-machine to directly fetching it. This only needs to happen if the tenure this state + /// machine is downloading contains the PoX anchor block, and it's also the last confirmed + /// tenurein this reward cycle. + /// + /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and + /// runs a set of these machines based on the peers' inventory vectors. But because we don't + /// know if this is the PoX anchor block tenure (or even the last tenure) until we have + /// inventory vectors for this tenure's reward cycle, this state-transition must be driven + /// after this machine's instantiation. + pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { + let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state + else { + return Err(NetError::InvalidState); + }; + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + Ok(()) + } + + /// Transition to fetching the tenure-end block directly if waiting has taken too long. + pub fn transition_to_fetch_end_block_on_timeout(&mut self) { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = + self.state + { + if wait_deadline < Instant::now() { + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + } + } + } + + /// Validate and accept a tenure-end block. If accepted, then advance the state. + /// Once accepted, this function extracts the tenure-change transaction and block header from + /// this block (it does not need the entire block). + /// + /// Returns Ok(()) if the block was valid + /// Returns Err(..) if the block was invalid + pub fn try_accept_tenure_end_block( + &mut self, + tenure_end_block: &NakamotoBlock, + ) -> Result<(), NetError> { + if !matches!( + &self.state, + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) + | NakamotoTenureDownloadState::GetTenureEndBlock(_) + ) { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + warn!("Invalid state -- tenure_start_block is not set"); + return Err(NetError::InvalidState); + }; + + if self.tenure_end_block_id != tenure_end_block.header.block_id() { + // not the block we asked for + warn!("Invalid tenure-end block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_end_block" => %self.tenure_end_block_id, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_end_block + .header + .verify_signer_signatures(&self.end_signer_keys) + { + // bad signature + warn!("Invalid tenure-end block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // extract the needful -- need the tenure-change payload (which proves that the tenure-end + // block is the tenure-start block for the next tenure) and the parent block ID (which is + // the next block to download). + let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { + warn!("Invalid tenure-end block: failed to validate tenure-start"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + }; + + if !valid { + warn!("Invalid tenure-end block: not a well-formed tenure-start block"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + } + + let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { + warn!("Invalid tenure-end block: no tenure-change transaction"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + }; + + // tc_payload must point to the tenure-start block's header + if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { + warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; + "start_block_id" => %tenure_start_block.block_id(), + "end_block_id" => %tenure_end_block.block_id(), + "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, + "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-end header for tenure {} block={}; expect {} blocks", + &self.tenure_id_consensus_hash, + &tenure_end_block.block_id(), + tc_payload.previous_tenure_blocks + ); + self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); + self.state = NakamotoTenureDownloadState::GetTenureBlocks( + tenure_end_block.header.parent_block_id.clone(), + ); + Ok(()) + } + + /// Determine how many blocks must be in this tenure. + /// Returns None if we don't have the start and end blocks yet. + pub fn tenure_length(&self) -> Option { + self.tenure_end_header + .as_ref() + .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) + } + + /// Add downloaded tenure blocks to this machine. + /// If we have collected all tenure blocks, then return them and transition to the Done state. + /// + /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in + /// ascending order by height, and will include the tenure-start block but exclude the + /// tenure-end block. + /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to + /// the next block to fetch (stored in self.state) will be updated. + /// Returns Err(..) if the blocks were invalid. + pub fn try_accept_tenure_blocks( + &mut self, + mut tenure_blocks: Vec, + ) -> Result>, NetError> { + let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if tenure_blocks.is_empty() { + // nothing to do + return Ok(None); + } + + // blocks must be contiguous and in order from highest to lowest + let mut expected_block_id = block_cursor; + let mut count = 0; + for block in tenure_blocks.iter() { + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- not part of tenure"; + "expected_block_id" => %expected_block_id, + "block_id" => %block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + expected_block_id = &block.header.parent_block_id; + count += 1; + if self + .tenure_blocks + .as_ref() + .map(|blocks| blocks.len()) + .unwrap_or(0) + .saturating_add(count) + > self.tenure_length().unwrap_or(0) as usize + { + // there are more blocks downloaded than indicated by the end-blocks tenure-change + // transaction. + warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); + "tenure_id" => %self.tenure_id_consensus_hash, + "count" => %count, + "tenure_length" => self.tenure_length().unwrap_or(0), + "num_blocks" => tenure_blocks.len()); + return Err(NetError::InvalidMessage); + } + } + + if let Some(blocks) = self.tenure_blocks.as_mut() { + blocks.append(&mut tenure_blocks); + } else { + self.tenure_blocks = Some(tenure_blocks); + } + + // did we reach the tenure start block? + let Some(blocks) = self.tenure_blocks.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got None)"); + return Err(NetError::InvalidState); + }; + + let Some(earliest_block) = blocks.last() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no tenure-start block (infallible)"); + return Err(NetError::InvalidState); + }; + + debug!( + "Accepted tenure blocks for tenure {} cursor={} ({})", + &self.tenure_id_consensus_hash, &block_cursor, count + ); + if earliest_block.block_id() != tenure_start_block.block_id() { + // still have more blocks to download + let next_block_id = earliest_block.header.parent_block_id.clone(); + debug!( + "Need more blocks for tenure {} (went from {} to {}, next is {})", + &self.tenure_id_consensus_hash, + &block_cursor, + &earliest_block.block_id(), + &next_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); + return Ok(None); + } + + // finished! + self.state = NakamotoTenureDownloadState::Done; + Ok(self + .tenure_blocks + .take() + .map(|blocks| blocks.into_iter().rev().collect())) + } + + /// Produce the next HTTP request that, when successfully executed, will fetch the data needed + /// to advance this state machine. + /// Not all states require an HTTP request for advanceement. + /// + /// Returns Ok(Some(request)) if a request is needed + /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's + /// state) + /// Returns Err(()) if we're done. + pub fn make_next_download_request( + &self, + peerhost: PeerHost, + ) -> Result, ()> { + let request = match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { + debug!("Request tenure-start block {}", &start_block_id); + StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) + } + NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { + // we're waiting for some other downloader's block-fetch to complete + debug!( + "Waiting for tenure-end block {} until {:?}", + &_block_id, _deadline + ); + return Ok(None); + } + NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { + debug!("Request tenure-end block {}", &end_block_id); + StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) + } + NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { + debug!("Downloading tenure ending at {}", &end_block_id); + StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) + } + NakamotoTenureDownloadState::Done => { + // nothing more to do + return Err(()); + } + }; + Ok(Some(request)) + } + + /// Begin the next download request for this state machine. The request will be sent to the + /// data URL corresponding to self.naddr. + /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The + /// caller should try this again until it gets one of the other possible return values. + /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) + /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to + /// resolve its data URL to a socket address. + pub fn send_next_download_request( + &mut self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> Result { + if neighbor_rpc.has_inflight(&self.naddr) { + debug!("Peer {} has an inflight request", &self.naddr); + return Ok(true); + } + if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { + return Err(NetError::PeerNotConnected); + } + + let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { + // no conversation open to this neighbor + neighbor_rpc.add_dead(network, &self.naddr); + return Err(NetError::PeerNotConnected); + }; + + let request = match self.make_next_download_request(peerhost) { + Ok(Some(request)) => request, + Ok(None) => { + return Ok(true); + } + Err(_) => { + return Ok(false); + } + }; + + neighbor_rpc.send_request(network, self.naddr.clone(), request)?; + self.idle = false; + Ok(true) + } + + /// Handle a received StacksHttpResponse and advance the state machine. + /// If we get the full tenure's blocks, then return them. + /// Returns Ok(Some([blocks])) if we successfully complete the state machine. + /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done + /// yet. The caller should now call `send_next_download_request()` + /// Returns Err(..) on failure to process the response. + pub fn handle_next_download_response( + &mut self, + response: StacksHttpResponse, + ) -> Result>, NetError> { + let handle_result = match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { + debug!( + "Got download response for tenure-start block {}", + &_block_id + ); + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; + self.try_accept_tenure_start_block(block)?; + Ok(None) + } + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { + debug!("Invalid state -- Got download response for WaitForTenureBlock"); + Err(NetError::InvalidState) + } + NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { + debug!("Got download response to tenure-end block {}", &_block_id); + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; + self.try_accept_tenure_end_block(&block)?; + Ok(None) + } + NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { + debug!( + "Got download response for tenure blocks ending at {}", + &_end_block_id + ); + let blocks = response.decode_nakamoto_tenure().map_err(|e| { + warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); + e + })?; + let blocks_opt = self.try_accept_tenure_blocks(blocks)?; + Ok(blocks_opt) + } + NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), + }; + self.idle = true; + handle_result + } + + pub fn is_done(&self) -> bool { + self.state == NakamotoTenureDownloadState::Done + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs new file mode 100644 index 0000000000..28a40e7eb5 --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs @@ -0,0 +1,660 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::download::nakamoto::{ + AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, + NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, +}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// A set of confirmed downloader state machines assigned to one or more neighbors. The block +/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure +/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer +/// connections to downloader state machines, such that each peer is assigned to at most one +/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at +/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine +/// can make progress even if there is only one available peer (in which case, that peer will get +/// scheduled across multiple machines to drive their progress in the right sequence such that +/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). +pub struct NakamotoTenureDownloaderSet { + /// A list of instantiated downloaders that are in progress + pub(crate) downloaders: Vec>, + /// An assignment of peers to downloader machines in the `downloaders` list. + pub(crate) peers: HashMap, + /// The set of tenures that have been successfully downloaded (but possibly not yet stored or + /// processed) + pub(crate) completed_tenures: HashSet, +} + +impl NakamotoTenureDownloaderSet { + pub fn new() -> Self { + Self { + downloaders: vec![], + peers: HashMap::new(), + completed_tenures: HashSet::new(), + } + } + + /// Assign the given peer to the given downloader state machine. Allocate a slot for it if + /// needed. + fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { + debug!( + "Add downloader for tenure {} driven by {}", + &downloader.tenure_id_consensus_hash, &naddr + ); + if let Some(idx) = self.peers.get(&naddr) { + self.downloaders[*idx] = Some(downloader); + } else { + self.downloaders.push(Some(downloader)); + self.peers.insert(naddr, self.downloaders.len() - 1); + } + } + + /// Does the given neighbor have an assigned downloader state machine? + pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { + let Some(idx) = self.peers.get(naddr) else { + return false; + }; + let Some(downloader_opt) = self.downloaders.get(*idx) else { + return false; + }; + downloader_opt.is_some() + } + + /// Drop the downloader associated with the given neighbor, if any. + pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { + let Some(index) = self.peers.remove(naddr) else { + return; + }; + self.downloaders[index] = None; + } + + /// How many downloaders are there? + pub fn num_downloaders(&self) -> usize { + self.downloaders + .iter() + .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) + } + + /// How many downloaders are there, which are scheduled? + pub fn num_scheduled_downloaders(&self) -> usize { + let mut cnt = 0; + for (_, idx) in self.peers.iter() { + if let Some(Some(_)) = self.downloaders.get(*idx) { + cnt += 1; + } + } + cnt + } + + /// Add a sequence of (address, downloader) pairs to this downloader set. + pub(crate) fn add_downloaders( + &mut self, + iter: impl IntoIterator, + ) { + for (naddr, downloader) in iter { + if self.has_downloader(&naddr) { + debug!("Already have downloader for {}", &naddr); + continue; + } + self.add_downloader(naddr, downloader); + } + } + + /// Count up the number of in-flight messages, based on the states of each instantiated + /// downloader. + pub fn inflight(&self) -> usize { + let mut cnt = 0; + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.idle { + continue; + } + if downloader.is_waiting() { + continue; + } + if downloader.is_done() { + continue; + } + cnt += 1; + } + cnt + } + + /// Determine whether or not there exists a downloader for the given tenure, identified by its + /// consensus hash. + pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { + self.downloaders + .iter() + .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) + .is_some() + } + + /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. + pub fn is_empty(&self) -> bool { + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.is_done() { + continue; + } + debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); + return false; + } + true + } + + /// Try to resume processing a download state machine with a given peer. Since a peer is + /// detached from the machine after a single RPC call, this call is needed to re-attach it to a + /// (potentially different, unblocked) machine for the next RPC call to this peer. + /// + /// Returns true if the peer gets scheduled. + /// Returns false if not. + pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { + debug!("Try resume {}", &naddr); + if let Some(idx) = self.peers.get(&naddr) { + let Some(Some(_downloader)) = self.downloaders.get(*idx) else { + return false; + }; + + debug!( + "Peer {} already bound to downloader for {}", + &naddr, &_downloader.tenure_id_consensus_hash + ); + return true; + } + for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { + let Some(downloader) = downloader_opt else { + continue; + }; + if !downloader.idle { + continue; + } + if downloader.is_waiting() { + continue; + } + if downloader.naddr != naddr { + continue; + } + debug!( + "Assign peer {} to work on downloader for {} in state {}", + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + ); + self.peers.insert(naddr, i); + return true; + } + return false; + } + + /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to + /// blocked downloaders. + pub fn clear_available_peers(&mut self) { + let mut idled: Vec = vec![]; + for (naddr, i) in self.peers.iter() { + let Some(downloader_opt) = self.downloaders.get(*i) else { + // should be unreachable + idled.push(naddr.clone()); + continue; + }; + let Some(downloader) = downloader_opt else { + debug!("Remove peer {} for null download {}", &naddr, i); + idled.push(naddr.clone()); + continue; + }; + if downloader.idle || downloader.is_waiting() { + debug!( + "Remove idled peer {} for tenure download {}", + &naddr, &downloader.tenure_id_consensus_hash + ); + idled.push(naddr.clone()); + } + } + for naddr in idled.into_iter() { + self.peers.remove(&naddr); + } + } + + /// Clear out downloaders (but not their peers) that have finished. The caller should follow + /// this up with a call to `clear_available_peers()`. + pub fn clear_finished_downloaders(&mut self) { + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.is_done() { + *downloader_opt = None; + } + } + } + + /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These + /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. + pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap { + let mut ret = HashMap::new(); + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + let Some(block) = downloader.tenure_start_block.as_ref() else { + continue; + }; + ret.insert(block.block_id(), block.clone()); + } + ret + } + + /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their + /// tenure-end blocks. + /// Return a list of peers driving downloaders with failing `tenure_start_blocks` + pub(crate) fn handle_tenure_end_blocks( + &mut self, + tenure_start_blocks: &HashMap, + ) -> Vec { + debug!( + "handle tenure-end blocks: {:?}", + &tenure_start_blocks.keys().collect::>() + ); + let mut dead = vec![]; + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt else { + continue; + }; + let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = + &downloader.state + else { + continue; + }; + let Some(end_block) = tenure_start_blocks.get(end_block_id) else { + continue; + }; + if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { + warn!( + "Failed to accept tenure end-block {} for tenure {}: {:?}", + &end_block.block_id(), + &downloader.tenure_id_consensus_hash, + &e + ); + dead.push(downloader.naddr.clone()); + } + } + dead + } + + /// Does there exist a downloader (possibly unscheduled) for the given tenure? + pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if &downloader.tenure_id_consensus_hash == tenure_id { + debug!( + "Have downloader for tenure {} already (idle={}, waiting={}, state={})", + tenure_id, + downloader.idle, + downloader.is_waiting(), + &downloader.state + ); + return true; + } + } + false + } + + /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor + /// block, we need to go and directly fetch its end block instead of waiting for another + /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method + /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. + pub(crate) fn try_transition_fetch_tenure_end_blocks( + &mut self, + tenure_block_ids: &HashMap, + ) { + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt.as_mut() else { + continue; + }; + downloader.transition_to_fetch_end_block_on_timeout(); + } + + // find tenures in which we need to fetch the tenure-end block directly. + let mut last_available_tenures: HashSet = HashSet::new(); + for (_, all_available) in tenure_block_ids.iter() { + for (_, available) in all_available.iter() { + if available.fetch_end_block { + last_available_tenures.insert(available.end_block_id.clone()); + } + } + } + + // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to + // fetching + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt.as_mut() else { + continue; + }; + if !downloader.idle { + continue; + } + if !downloader.is_waiting() { + continue; + } + if !last_available_tenures.contains(&downloader.tenure_end_block_id) { + continue; + } + debug!( + "Transition downloader for {} from waiting to fetching", + &downloader.tenure_id_consensus_hash + ); + if let Err(e) = downloader.transition_to_fetch_end_block() { + warn!( + "Downloader for {} failed to transition to fetch end block: {:?}", + &downloader.tenure_id_consensus_hash, &e + ); + } + } + } + + /// Create a given number of downloads from a schedule and availability set. + /// Removes items from the schedule, and neighbors from the availability set. + /// A neighbor will be issued at most one request. + pub(crate) fn make_tenure_downloaders( + &mut self, + schedule: &mut VecDeque, + available: &mut HashMap>, + tenure_block_ids: &HashMap, + count: usize, + current_reward_cycles: &BTreeMap, + ) { + debug!("make_tenure_downloaders"; + "schedule" => ?schedule, + "available" => ?available, + "tenure_block_ids" => ?tenure_block_ids, + "inflight" => %self.inflight(), + "count" => count, + "running" => self.num_downloaders(), + "scheduled" => self.num_scheduled_downloaders()); + + self.clear_finished_downloaders(); + self.clear_available_peers(); + self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); + while self.inflight() < count { + let Some(ch) = schedule.front() else { + break; + }; + if self.completed_tenures.contains(&ch) { + debug!("Already successfully downloaded tenure {}", &ch); + schedule.pop_front(); + continue; + } + let Some(neighbors) = available.get_mut(ch) else { + // not found on any neighbors, so stop trying this tenure + debug!("No neighbors have tenure {}", ch); + schedule.pop_front(); + continue; + }; + if neighbors.is_empty() { + // no more neighbors to try + debug!("No more neighbors can serve tenure {}", ch); + schedule.pop_front(); + continue; + } + let Some(naddr) = neighbors.pop() else { + debug!("No more neighbors can serve tenure {}", ch); + schedule.pop_front(); + continue; + }; + if self.try_resume_peer(naddr.clone()) { + continue; + }; + if self.has_downloader_for_tenure(&ch) { + schedule.pop_front(); + continue; + } + + let Some(available_tenures) = tenure_block_ids.get(&naddr) else { + // this peer doesn't have any known tenures, so try the others + debug!("No tenures available from {}", &naddr); + continue; + }; + let Some(tenure_info) = available_tenures.get(ch) else { + // this peer does not have a tenure start/end block for this tenure, so try the + // others. + debug!("Neighbor {} does not serve tenure {}", &naddr, ch); + continue; + }; + let Some(Some(start_reward_set)) = current_reward_cycles + .get(&tenure_info.start_reward_cycle) + .map(|cycle_info| cycle_info.reward_set()) + else { + debug!( + "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", + tenure_info.start_reward_cycle, + &tenure_info + ); + schedule.pop_front(); + continue; + }; + let Some(Some(end_reward_set)) = current_reward_cycles + .get(&tenure_info.end_reward_cycle) + .map(|cycle_info| cycle_info.reward_set()) + else { + debug!( + "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", + tenure_info.end_reward_cycle, + &tenure_info + ); + schedule.pop_front(); + continue; + }; + + debug!( + "Download tenure {} (start={}, end={}) (rc {},{})", + &ch, + &tenure_info.start_block_id, + &tenure_info.end_block_id, + tenure_info.start_reward_cycle, + tenure_info.end_reward_cycle + ); + let tenure_download = NakamotoTenureDownloader::new( + ch.clone(), + tenure_info.start_block_id.clone(), + tenure_info.end_block_id.clone(), + naddr.clone(), + start_reward_set.clone(), + end_reward_set.clone(), + ); + + debug!("Request tenure {} from neighbor {}", ch, &naddr); + self.add_downloader(naddr, tenure_download); + schedule.pop_front(); + } + } + + /// Run all confirmed downloaders. + /// * Identify neighbors for which we do not have an inflight request + /// * Get each such neighbor's downloader, and generate its next HTTP reqeust. Send that + /// request to the neighbor and begin driving the underlying socket I/O. + /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance + /// its state. + /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. + /// + /// Returns the set of downloaded blocks obtained for completed downloaders. These will be + /// full confirmed tenures. + pub fn run( + &mut self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> HashMap> { + let addrs: Vec<_> = self.peers.keys().cloned().collect(); + let mut finished = vec![]; + let mut finished_tenures = vec![]; + let mut new_blocks = HashMap::new(); + + // send requests + for (naddr, index) in self.peers.iter() { + if neighbor_rpc.has_inflight(&naddr) { + debug!("Peer {} has an inflight request", &naddr); + continue; + } + let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { + debug!("No downloader for {}", &naddr); + continue; + }; + if downloader.is_done() { + debug!("Downloader for {} is done", &naddr); + finished.push(naddr.clone()); + finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + continue; + } + debug!( + "Send request to {} for tenure {} (state {})", + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + ); + let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { + debug!("Downloader for {} failed; this peer is dead", &naddr); + neighbor_rpc.add_dead(network, naddr); + continue; + }; + if !sent { + // this downloader is dead or broken + finished.push(naddr.clone()); + continue; + } + } + + // clear dead, broken, and done + for naddr in addrs.iter() { + if neighbor_rpc.is_dead_or_broken(network, naddr) { + debug!("Remove dead/broken downloader for {}", &naddr); + self.clear_downloader(&naddr); + } + } + for done_naddr in finished.drain(..) { + debug!("Remove finished downloader for {}", &done_naddr); + self.clear_downloader(&done_naddr); + } + for done_tenure in finished_tenures.drain(..) { + self.completed_tenures.insert(done_tenure); + } + + // handle responses + for (naddr, response) in neighbor_rpc.collect_replies(network) { + let Some(index) = self.peers.get(&naddr) else { + debug!("No downloader for {}", &naddr); + continue; + }; + let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { + debug!("No downloader for {}", &naddr); + continue; + }; + debug!("Got response from {}", &naddr); + + let Ok(blocks_opt) = downloader + .handle_next_download_response(response) + .map_err(|e| { + debug!("Failed to handle response from {}: {:?}", &naddr, &e); + e + }) + else { + debug!("Failed to handle download response from {}", &naddr); + neighbor_rpc.add_dead(network, &naddr); + continue; + }; + + let Some(blocks) = blocks_opt else { + continue; + }; + + debug!( + "Got {} blocks for tenure {}", + blocks.len(), + &downloader.tenure_id_consensus_hash + ); + new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); + if downloader.is_done() { + finished.push(naddr.clone()); + finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + continue; + } + } + + // clear dead, broken, and done + for naddr in addrs.iter() { + if neighbor_rpc.is_dead_or_broken(network, naddr) { + debug!("Remove dead/broken downloader for {}", &naddr); + self.clear_downloader(naddr); + } + } + for done_naddr in finished.drain(..) { + debug!("Remove finished downloader for {}", &done_naddr); + self.clear_downloader(&done_naddr); + } + for done_tenure in finished_tenures.drain(..) { + self.completed_tenures.insert(done_tenure); + } + + new_blocks + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs new file mode 100644 index 0000000000..28a40e7eb5 --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs @@ -0,0 +1,660 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::download::nakamoto::{ + AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, + NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, +}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// A set of confirmed downloader state machines assigned to one or more neighbors. The block +/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure +/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer +/// connections to downloader state machines, such that each peer is assigned to at most one +/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at +/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine +/// can make progress even if there is only one available peer (in which case, that peer will get +/// scheduled across multiple machines to drive their progress in the right sequence such that +/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). +pub struct NakamotoTenureDownloaderSet { + /// A list of instantiated downloaders that are in progress + pub(crate) downloaders: Vec>, + /// An assignment of peers to downloader machines in the `downloaders` list. + pub(crate) peers: HashMap, + /// The set of tenures that have been successfully downloaded (but possibly not yet stored or + /// processed) + pub(crate) completed_tenures: HashSet, +} + +impl NakamotoTenureDownloaderSet { + pub fn new() -> Self { + Self { + downloaders: vec![], + peers: HashMap::new(), + completed_tenures: HashSet::new(), + } + } + + /// Assign the given peer to the given downloader state machine. Allocate a slot for it if + /// needed. + fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { + debug!( + "Add downloader for tenure {} driven by {}", + &downloader.tenure_id_consensus_hash, &naddr + ); + if let Some(idx) = self.peers.get(&naddr) { + self.downloaders[*idx] = Some(downloader); + } else { + self.downloaders.push(Some(downloader)); + self.peers.insert(naddr, self.downloaders.len() - 1); + } + } + + /// Does the given neighbor have an assigned downloader state machine? + pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { + let Some(idx) = self.peers.get(naddr) else { + return false; + }; + let Some(downloader_opt) = self.downloaders.get(*idx) else { + return false; + }; + downloader_opt.is_some() + } + + /// Drop the downloader associated with the given neighbor, if any. + pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { + let Some(index) = self.peers.remove(naddr) else { + return; + }; + self.downloaders[index] = None; + } + + /// How many downloaders are there? + pub fn num_downloaders(&self) -> usize { + self.downloaders + .iter() + .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) + } + + /// How many downloaders are there, which are scheduled? + pub fn num_scheduled_downloaders(&self) -> usize { + let mut cnt = 0; + for (_, idx) in self.peers.iter() { + if let Some(Some(_)) = self.downloaders.get(*idx) { + cnt += 1; + } + } + cnt + } + + /// Add a sequence of (address, downloader) pairs to this downloader set. + pub(crate) fn add_downloaders( + &mut self, + iter: impl IntoIterator, + ) { + for (naddr, downloader) in iter { + if self.has_downloader(&naddr) { + debug!("Already have downloader for {}", &naddr); + continue; + } + self.add_downloader(naddr, downloader); + } + } + + /// Count up the number of in-flight messages, based on the states of each instantiated + /// downloader. + pub fn inflight(&self) -> usize { + let mut cnt = 0; + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.idle { + continue; + } + if downloader.is_waiting() { + continue; + } + if downloader.is_done() { + continue; + } + cnt += 1; + } + cnt + } + + /// Determine whether or not there exists a downloader for the given tenure, identified by its + /// consensus hash. + pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { + self.downloaders + .iter() + .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) + .is_some() + } + + /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. + pub fn is_empty(&self) -> bool { + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.is_done() { + continue; + } + debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); + return false; + } + true + } + + /// Try to resume processing a download state machine with a given peer. Since a peer is + /// detached from the machine after a single RPC call, this call is needed to re-attach it to a + /// (potentially different, unblocked) machine for the next RPC call to this peer. + /// + /// Returns true if the peer gets scheduled. + /// Returns false if not. + pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { + debug!("Try resume {}", &naddr); + if let Some(idx) = self.peers.get(&naddr) { + let Some(Some(_downloader)) = self.downloaders.get(*idx) else { + return false; + }; + + debug!( + "Peer {} already bound to downloader for {}", + &naddr, &_downloader.tenure_id_consensus_hash + ); + return true; + } + for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { + let Some(downloader) = downloader_opt else { + continue; + }; + if !downloader.idle { + continue; + } + if downloader.is_waiting() { + continue; + } + if downloader.naddr != naddr { + continue; + } + debug!( + "Assign peer {} to work on downloader for {} in state {}", + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + ); + self.peers.insert(naddr, i); + return true; + } + return false; + } + + /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to + /// blocked downloaders. + pub fn clear_available_peers(&mut self) { + let mut idled: Vec = vec![]; + for (naddr, i) in self.peers.iter() { + let Some(downloader_opt) = self.downloaders.get(*i) else { + // should be unreachable + idled.push(naddr.clone()); + continue; + }; + let Some(downloader) = downloader_opt else { + debug!("Remove peer {} for null download {}", &naddr, i); + idled.push(naddr.clone()); + continue; + }; + if downloader.idle || downloader.is_waiting() { + debug!( + "Remove idled peer {} for tenure download {}", + &naddr, &downloader.tenure_id_consensus_hash + ); + idled.push(naddr.clone()); + } + } + for naddr in idled.into_iter() { + self.peers.remove(&naddr); + } + } + + /// Clear out downloaders (but not their peers) that have finished. The caller should follow + /// this up with a call to `clear_available_peers()`. + pub fn clear_finished_downloaders(&mut self) { + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.is_done() { + *downloader_opt = None; + } + } + } + + /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These + /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. + pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap { + let mut ret = HashMap::new(); + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + let Some(block) = downloader.tenure_start_block.as_ref() else { + continue; + }; + ret.insert(block.block_id(), block.clone()); + } + ret + } + + /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their + /// tenure-end blocks. + /// Return a list of peers driving downloaders with failing `tenure_start_blocks` + pub(crate) fn handle_tenure_end_blocks( + &mut self, + tenure_start_blocks: &HashMap, + ) -> Vec { + debug!( + "handle tenure-end blocks: {:?}", + &tenure_start_blocks.keys().collect::>() + ); + let mut dead = vec![]; + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt else { + continue; + }; + let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = + &downloader.state + else { + continue; + }; + let Some(end_block) = tenure_start_blocks.get(end_block_id) else { + continue; + }; + if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { + warn!( + "Failed to accept tenure end-block {} for tenure {}: {:?}", + &end_block.block_id(), + &downloader.tenure_id_consensus_hash, + &e + ); + dead.push(downloader.naddr.clone()); + } + } + dead + } + + /// Does there exist a downloader (possibly unscheduled) for the given tenure? + pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if &downloader.tenure_id_consensus_hash == tenure_id { + debug!( + "Have downloader for tenure {} already (idle={}, waiting={}, state={})", + tenure_id, + downloader.idle, + downloader.is_waiting(), + &downloader.state + ); + return true; + } + } + false + } + + /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor + /// block, we need to go and directly fetch its end block instead of waiting for another + /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method + /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. + pub(crate) fn try_transition_fetch_tenure_end_blocks( + &mut self, + tenure_block_ids: &HashMap, + ) { + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt.as_mut() else { + continue; + }; + downloader.transition_to_fetch_end_block_on_timeout(); + } + + // find tenures in which we need to fetch the tenure-end block directly. + let mut last_available_tenures: HashSet = HashSet::new(); + for (_, all_available) in tenure_block_ids.iter() { + for (_, available) in all_available.iter() { + if available.fetch_end_block { + last_available_tenures.insert(available.end_block_id.clone()); + } + } + } + + // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to + // fetching + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt.as_mut() else { + continue; + }; + if !downloader.idle { + continue; + } + if !downloader.is_waiting() { + continue; + } + if !last_available_tenures.contains(&downloader.tenure_end_block_id) { + continue; + } + debug!( + "Transition downloader for {} from waiting to fetching", + &downloader.tenure_id_consensus_hash + ); + if let Err(e) = downloader.transition_to_fetch_end_block() { + warn!( + "Downloader for {} failed to transition to fetch end block: {:?}", + &downloader.tenure_id_consensus_hash, &e + ); + } + } + } + + /// Create a given number of downloads from a schedule and availability set. + /// Removes items from the schedule, and neighbors from the availability set. + /// A neighbor will be issued at most one request. + pub(crate) fn make_tenure_downloaders( + &mut self, + schedule: &mut VecDeque, + available: &mut HashMap>, + tenure_block_ids: &HashMap, + count: usize, + current_reward_cycles: &BTreeMap, + ) { + debug!("make_tenure_downloaders"; + "schedule" => ?schedule, + "available" => ?available, + "tenure_block_ids" => ?tenure_block_ids, + "inflight" => %self.inflight(), + "count" => count, + "running" => self.num_downloaders(), + "scheduled" => self.num_scheduled_downloaders()); + + self.clear_finished_downloaders(); + self.clear_available_peers(); + self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); + while self.inflight() < count { + let Some(ch) = schedule.front() else { + break; + }; + if self.completed_tenures.contains(&ch) { + debug!("Already successfully downloaded tenure {}", &ch); + schedule.pop_front(); + continue; + } + let Some(neighbors) = available.get_mut(ch) else { + // not found on any neighbors, so stop trying this tenure + debug!("No neighbors have tenure {}", ch); + schedule.pop_front(); + continue; + }; + if neighbors.is_empty() { + // no more neighbors to try + debug!("No more neighbors can serve tenure {}", ch); + schedule.pop_front(); + continue; + } + let Some(naddr) = neighbors.pop() else { + debug!("No more neighbors can serve tenure {}", ch); + schedule.pop_front(); + continue; + }; + if self.try_resume_peer(naddr.clone()) { + continue; + }; + if self.has_downloader_for_tenure(&ch) { + schedule.pop_front(); + continue; + } + + let Some(available_tenures) = tenure_block_ids.get(&naddr) else { + // this peer doesn't have any known tenures, so try the others + debug!("No tenures available from {}", &naddr); + continue; + }; + let Some(tenure_info) = available_tenures.get(ch) else { + // this peer does not have a tenure start/end block for this tenure, so try the + // others. + debug!("Neighbor {} does not serve tenure {}", &naddr, ch); + continue; + }; + let Some(Some(start_reward_set)) = current_reward_cycles + .get(&tenure_info.start_reward_cycle) + .map(|cycle_info| cycle_info.reward_set()) + else { + debug!( + "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", + tenure_info.start_reward_cycle, + &tenure_info + ); + schedule.pop_front(); + continue; + }; + let Some(Some(end_reward_set)) = current_reward_cycles + .get(&tenure_info.end_reward_cycle) + .map(|cycle_info| cycle_info.reward_set()) + else { + debug!( + "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", + tenure_info.end_reward_cycle, + &tenure_info + ); + schedule.pop_front(); + continue; + }; + + debug!( + "Download tenure {} (start={}, end={}) (rc {},{})", + &ch, + &tenure_info.start_block_id, + &tenure_info.end_block_id, + tenure_info.start_reward_cycle, + tenure_info.end_reward_cycle + ); + let tenure_download = NakamotoTenureDownloader::new( + ch.clone(), + tenure_info.start_block_id.clone(), + tenure_info.end_block_id.clone(), + naddr.clone(), + start_reward_set.clone(), + end_reward_set.clone(), + ); + + debug!("Request tenure {} from neighbor {}", ch, &naddr); + self.add_downloader(naddr, tenure_download); + schedule.pop_front(); + } + } + + /// Run all confirmed downloaders. + /// * Identify neighbors for which we do not have an inflight request + /// * Get each such neighbor's downloader, and generate its next HTTP reqeust. Send that + /// request to the neighbor and begin driving the underlying socket I/O. + /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance + /// its state. + /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. + /// + /// Returns the set of downloaded blocks obtained for completed downloaders. These will be + /// full confirmed tenures. + pub fn run( + &mut self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> HashMap> { + let addrs: Vec<_> = self.peers.keys().cloned().collect(); + let mut finished = vec![]; + let mut finished_tenures = vec![]; + let mut new_blocks = HashMap::new(); + + // send requests + for (naddr, index) in self.peers.iter() { + if neighbor_rpc.has_inflight(&naddr) { + debug!("Peer {} has an inflight request", &naddr); + continue; + } + let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { + debug!("No downloader for {}", &naddr); + continue; + }; + if downloader.is_done() { + debug!("Downloader for {} is done", &naddr); + finished.push(naddr.clone()); + finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + continue; + } + debug!( + "Send request to {} for tenure {} (state {})", + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + ); + let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { + debug!("Downloader for {} failed; this peer is dead", &naddr); + neighbor_rpc.add_dead(network, naddr); + continue; + }; + if !sent { + // this downloader is dead or broken + finished.push(naddr.clone()); + continue; + } + } + + // clear dead, broken, and done + for naddr in addrs.iter() { + if neighbor_rpc.is_dead_or_broken(network, naddr) { + debug!("Remove dead/broken downloader for {}", &naddr); + self.clear_downloader(&naddr); + } + } + for done_naddr in finished.drain(..) { + debug!("Remove finished downloader for {}", &done_naddr); + self.clear_downloader(&done_naddr); + } + for done_tenure in finished_tenures.drain(..) { + self.completed_tenures.insert(done_tenure); + } + + // handle responses + for (naddr, response) in neighbor_rpc.collect_replies(network) { + let Some(index) = self.peers.get(&naddr) else { + debug!("No downloader for {}", &naddr); + continue; + }; + let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { + debug!("No downloader for {}", &naddr); + continue; + }; + debug!("Got response from {}", &naddr); + + let Ok(blocks_opt) = downloader + .handle_next_download_response(response) + .map_err(|e| { + debug!("Failed to handle response from {}: {:?}", &naddr, &e); + e + }) + else { + debug!("Failed to handle download response from {}", &naddr); + neighbor_rpc.add_dead(network, &naddr); + continue; + }; + + let Some(blocks) = blocks_opt else { + continue; + }; + + debug!( + "Got {} blocks for tenure {}", + blocks.len(), + &downloader.tenure_id_consensus_hash + ); + new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); + if downloader.is_done() { + finished.push(naddr.clone()); + finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + continue; + } + } + + // clear dead, broken, and done + for naddr in addrs.iter() { + if neighbor_rpc.is_dead_or_broken(network, naddr) { + debug!("Remove dead/broken downloader for {}", &naddr); + self.clear_downloader(naddr); + } + } + for done_naddr in finished.drain(..) { + debug!("Remove finished downloader for {}", &done_naddr); + self.clear_downloader(&done_naddr); + } + for done_tenure in finished_tenures.drain(..) { + self.completed_tenures.insert(done_tenure); + } + + new_blocks + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs new file mode 100644 index 0000000000..c96f718d2b --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs @@ -0,0 +1,867 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::RewardCycleInfo; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::download::nakamoto::{ + AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, + WantedTenure, +}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for a unconfirmed tenures. These include the ongoing tenure, as well as the +/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but +/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoUnconfirmedDownloadState { + /// Getting the tenure tip information + GetTenureInfo, + /// Get the tenure start block for the ongoing tenure. + /// The inner value is tenure-start block ID of the ongoing tenure. + GetTenureStartBlock(StacksBlockId), + /// Receiving unconfirmed tenure blocks. + /// The inner value is the block ID of the next block to fetch. + GetUnconfirmedTenureBlocks(StacksBlockId), + /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block + /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). + Done, +} + +impl fmt::Display for NakamotoUnconfirmedDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for the unconfirmed tenures. It operates in the following steps: +/// +/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip +/// 2. Get the tenure-start block for the unconfirmed chain tip +/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the +/// immediate child of the one obtained in (2) +/// +/// Once this state-machine finishes execution, the tenure-start block is used to construct a +/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. +/// +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoUnconfirmedTenureDownloader { + /// state of this machine + pub state: NakamotoUnconfirmedDownloadState, + /// Address of who we're asking + pub naddr: NeighborAddress, + /// reward set of the highest confirmed tenure + pub confirmed_signer_keys: Option, + /// reward set of the unconfirmed (ongoing) tenure + pub unconfirmed_signer_keys: Option, + /// Block ID of this node's highest-processed block. + /// We will not download any blocks lower than this, if it's set. + pub highest_processed_block_id: Option, + /// Highest processed block height (which may not need to be loaded) + pub highest_processed_block_height: Option, + + /// Tenure tip info we obtained for this peer + pub tenure_tip: Option, + /// Tenure start block for the ongoing tip. + /// This is also the tenure-end block for the highest-complete tip. + pub unconfirmed_tenure_start_block: Option, + /// Unconfirmed tenure blocks obtained + pub unconfirmed_tenure_blocks: Option>, +} + +impl NakamotoUnconfirmedTenureDownloader { + /// Make a new downloader which will download blocks from the tip back down to the optional + /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). + pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self { + Self { + state: NakamotoUnconfirmedDownloadState::GetTenureInfo, + naddr, + confirmed_signer_keys: None, + unconfirmed_signer_keys: None, + highest_processed_block_id, + highest_processed_block_height: None, + tenure_tip: None, + unconfirmed_tenure_start_block: None, + unconfirmed_tenure_blocks: None, + } + } + + /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is + /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote + /// node). + pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { + self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) + } + + /// Set the highest-processed block. + /// This can be performed by the downloader itself in order to inform ongoing requests for + /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node + /// has already handled. + pub fn set_highest_processed_block( + &mut self, + highest_processed_block_id: StacksBlockId, + highest_processed_block_height: u64, + ) { + self.highest_processed_block_id = Some(highest_processed_block_id); + self.highest_processed_block_height = Some(highest_processed_block_height); + } + + /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. + /// + /// * tenure_tip.consensus_hash + /// This is the consensus hash of the remote node's ongoing tenure. It may not be the + /// sortition tip, e.g. if the tenure spans multiple sortitions. + /// * tenure_tip.tenure_start_block_id + /// This is the first block ID of the ongoing unconfirmed tenure. + /// * tenure_tip.parent_consensus_hash + /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest + /// complete tenure, for which we know the start and end block IDs. + /// * tenure_tip.parent_tenure_start_block_id + /// This is the tenure start block for the highest complete tenure. It should be equal to + /// the winning Stacks block hash of the snapshot for the ongoing tenure. + /// + /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go + /// fetch it again; just get the new unconfirmed blocks. + pub fn try_accept_tenure_info( + &mut self, + sortdb: &SortitionDB, + local_sort_tip: &BlockSnapshot, + chainstate: &StacksChainState, + remote_tenure_tip: RPCGetTenureInfo, + current_reward_sets: &BTreeMap, + ) -> Result<(), NetError> { + if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { + return Err(NetError::InvalidState); + } + if self.tenure_tip.is_some() { + return Err(NetError::InvalidState); + } + + debug!("Got tenure info {:?}", remote_tenure_tip); + debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); + + // authenticate consensus hashes against canonical chain history + let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &remote_tenure_tip.consensus_hash, + )? + .ok_or_else(|| { + debug!( + "No snapshot for tenure {}", + &remote_tenure_tip.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &remote_tenure_tip.parent_consensus_hash, + )? + .ok_or_else(|| { + debug!( + "No snapshot for parent tenure {}", + &remote_tenure_tip.parent_consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + + let ih = sortdb.index_handle(&local_sort_tip.sortition_id); + let ancestor_local_tenure_sn = ih + .get_block_snapshot_by_height(local_tenure_sn.block_height)? + .ok_or_else(|| { + debug!( + "No tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + + if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { + // .consensus_hash is not on the canonical fork + warn!("Unconfirmed tenure consensus hash is not canonical"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.consensus_hash); + return Err(DBError::NotFoundError.into()); + } + let ancestor_parent_local_tenure_sn = ih + .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? + .ok_or_else(|| { + debug!( + "No parent tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError.into()) + })?; + + if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { + // .parent_consensus_hash is not on the canonical fork + warn!("Parent unconfirmed tenure consensus hash is not canonical"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); + return Err(DBError::NotFoundError.into()); + } + + // parent tenure sortition must precede the ongoing tenure sortition + if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { + warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.consensus_hash, + "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); + return Err(NetError::InvalidMessage); + } + + // parent tenure start block ID must be the winning block hash for the ongoing tenure's + // snapshot + if local_tenure_sn.winning_stacks_block_hash.0 + != remote_tenure_tip.parent_tenure_start_block_id.0 + { + debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; + "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, + "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); + return Err(NetError::StaleView); + } + + if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { + // we've synchronized this tenure before, so don't get anymore blocks before it. + let highest_processed_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(highest_processed_block_id)? + .ok_or_else(|| { + debug!("No such Nakamoto block {}", &highest_processed_block_id); + NetError::DBError(DBError::NotFoundError) + })? + .0; + + let highest_processed_block_height = highest_processed_block.header.chain_length; + self.highest_processed_block_height = Some(highest_processed_block_height); + + if &remote_tenure_tip.tip_block_id == highest_processed_block_id + || highest_processed_block_height > remote_tenure_tip.tip_height + { + // nothing to do -- we're at or ahead of the remote peer, so finish up. + // If we don't have the tenure-start block for the confirmed tenure that the remote + // peer claims to have, then the remote peer has sent us invalid data and we should + // treat it as such. + let unconfirmed_tenure_start_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? + .ok_or(NetError::InvalidMessage)? + .0; + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::Done; + } + } + + if self.state == NakamotoUnconfirmedDownloadState::Done { + // only need to remember the tenure tip + self.tenure_tip = Some(remote_tenure_tip); + return Ok(()); + } + + // we're not finished + let tenure_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) + .expect("FATAL: sortition from before system start"); + let parent_tenure_rc = sortdb + .pox_constants + .block_height_to_reward_cycle( + sortdb.first_block_height, + parent_local_tenure_sn.block_height, + ) + .expect("FATAL: sortition from before system start"); + + // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions + let Some(Some(confirmed_reward_set)) = current_reward_sets + .get(&parent_tenure_rc) + .map(|cycle_info| cycle_info.reward_set()) + else { + warn!( + "No signer public keys for confirmed tenure {} (rc {})", + &parent_local_tenure_sn.consensus_hash, parent_tenure_rc + ); + return Err(NetError::InvalidState); + }; + + let Some(Some(unconfirmed_reward_set)) = current_reward_sets + .get(&tenure_rc) + .map(|cycle_info| cycle_info.reward_set()) + else { + warn!( + "No signer public keys for unconfirmed tenure {} (rc {})", + &local_tenure_sn.consensus_hash, tenure_rc + ); + return Err(NetError::InvalidState); + }; + + if chainstate + .nakamoto_blocks_db() + .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? + { + // proceed to get unconfirmed blocks. We already have the tenure-start block. + let unconfirmed_tenure_start_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? + .ok_or_else(|| { + debug!( + "No such tenure-start Nakamoto block {}", + &remote_tenure_tip.tenure_start_block_id + ); + NetError::DBError(DBError::NotFoundError) + })? + .0; + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( + remote_tenure_tip.tip_block_id.clone(), + ); + } else { + // get the tenure-start block first + self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( + remote_tenure_tip.tenure_start_block_id.clone(), + ); + } + + debug!( + "Will validate unconfirmed blocks with reward sets in ({},{})", + parent_tenure_rc, tenure_rc + ); + self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); + self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); + self.tenure_tip = Some(remote_tenure_tip); + + Ok(()) + } + + /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the unconfirmed tenure start block was valid + /// Returns Err(..) if it was not valid, or if this function was called out of sequence. + pub fn try_accept_unconfirmed_tenure_start_block( + &mut self, + unconfirmed_tenure_start_block: NakamotoBlock, + ) -> Result<(), NetError> { + let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = + &self.state + else { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); + return Err(NetError::InvalidState); + }; + + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); + return Err(NetError::InvalidState); + }; + + // stacker signature has to match the current reward set + if let Err(e) = unconfirmed_tenure_start_block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // block has to match the expected hash + if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { + warn!("Invalid tenure-start block"; + "tenure_id_start_block" => %tenure_start_block_id, + "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + // furthermore, the block has to match the expected tenure ID + if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { + warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; + "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); + return Err(NetError::InvalidMessage); + } + + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( + tenure_tip.tip_block_id.clone(), + ); + Ok(()) + } + + /// Add downloaded unconfirmed tenure blocks. + /// If we have collected all tenure blocks, then return them. + /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the + /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come + /// after the highest-processed block (if set). + /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call + /// `send_next_download_request()` + /// Returns Err(..) on invalid state or invalid block. + pub fn try_accept_unconfirmed_tenure_blocks( + &mut self, + mut tenure_blocks: Vec, + ) -> Result>, NetError> { + let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = + &self.state + else { + return Err(NetError::InvalidState); + }; + + let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); + return Err(NetError::InvalidState); + }; + + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); + return Err(NetError::InvalidState); + }; + + if tenure_blocks.is_empty() { + // nothing to do + debug!("No tenure blocks obtained"); + return Ok(None); + } + + // blocks must be contiguous and in order from highest to lowest. + // If there's a tenure-start block, it must be last. + let mut expected_block_id = last_block_id; + let mut finished_download = false; + let mut last_block_index = None; + for (cnt, block) in tenure_blocks.iter().enumerate() { + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- not part of tenure"; + "expected_block_id" => %expected_block_id, + "block_id" => %block.header.block_id()); + return Err(NetError::InvalidMessage); + } + if let Err(e) = block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // we may or may not need the tenure-start block for the unconfirmed tenure. But if we + // do, make sure it's valid, and it's the last block we receive. + let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { + warn!("Invalid tenure-start block"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + }; + if is_tenure_start { + // this is the tenure-start block, so make sure it matches our /v3/tenure/info + if block.header.block_id() != tenure_tip.tenure_start_block_id { + warn!("Unexpected tenure-start block"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); + return Err(NetError::InvalidMessage); + } + + if cnt.saturating_add(1) != tenure_blocks.len() { + warn!("Invalid tenure stream -- got tenure-start before end of tenure"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "cnt" => cnt, + "len" => tenure_blocks.len(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + finished_download = true; + last_block_index = Some(cnt); + break; + } + + debug!("Got unconfirmed tenure block {}", &block.header.block_id()); + + // NOTE: this field can get updated by the downloader while this state-machine is in + // this state. + if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { + if expected_block_id == highest_processed_block_id { + // got all the blocks we asked for + debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); + finished_download = true; + last_block_index = Some(cnt); + break; + } + } + + // NOTE: this field can get updated by the downloader while this state-machine is in + // this state. + if let Some(highest_processed_block_height) = + self.highest_processed_block_height.as_ref() + { + if &block.header.chain_length <= highest_processed_block_height { + // no need to continue this download + debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); + finished_download = true; + last_block_index = Some(cnt); + break; + } + } + + expected_block_id = &block.header.parent_block_id; + last_block_index = Some(cnt); + } + + // blocks after the last_block_index were not processed, so should be dropped + if let Some(last_block_index) = last_block_index { + tenure_blocks.truncate(last_block_index + 1); + } + + if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { + blocks.append(&mut tenure_blocks); + } else { + self.unconfirmed_tenure_blocks = Some(tenure_blocks); + } + + if finished_download { + // we have all of the unconfirmed tenure blocks that were requested. + // only return those newer than the highest block. + self.state = NakamotoUnconfirmedDownloadState::Done; + let highest_processed_block_height = + *self.highest_processed_block_height.as_ref().unwrap_or(&0); + + debug!("Finished receiving unconfirmed tenure"); + return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { + blocks + .into_iter() + .filter(|block| block.header.chain_length > highest_processed_block_height) + .rev() + .collect() + })); + } + + let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + + // still have more to get + let Some(earliest_block) = blocks.last() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + let next_block_id = earliest_block.header.parent_block_id.clone(); + + debug!( + "Will resume fetching unconfirmed tenure blocks starting at {}", + &next_block_id + ); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); + Ok(None) + } + + /// Once this machine runs to completion, examine its state to see if we still need to fetch + /// the highest complete tenure. We may not need to, especially if we're just polling for new + /// unconfirmed blocks. + /// + /// Return Ok(true) if we need it still + /// Return Ok(false) if we already have it + /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. + pub fn need_highest_complete_tenure( + &self, + chainstate: &StacksChainState, + ) -> Result { + if self.state != NakamotoUnconfirmedDownloadState::Done { + return Err(NetError::InvalidState); + } + let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() + else { + return Err(NetError::InvalidState); + }; + + // if we've processed the unconfirmed tenure-start block already, then we've necessarily + // downloaded and processed the highest-complete tenure already. + Ok(!NakamotoChainState::has_block_header( + chainstate.db(), + &unconfirmed_tenure_start_block.header.block_id(), + false, + )?) + } + + /// Determine if we can produce a highest-complete tenure request. + /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure + pub fn can_make_highest_complete_tenure_downloader( + &self, + sortdb: &SortitionDB, + ) -> Result { + let Some(tenure_tip) = &self.tenure_tip else { + return Ok(false); + }; + + let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &tenure_tip.parent_consensus_hash, + )? + else { + return Ok(false); + }; + + let Some(tip_sn) = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? + else { + return Ok(false); + }; + + let Some(parent_tenure) = + SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? + else { + return Ok(false); + }; + + let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? + else { + return Ok(false); + }; + + if parent_tenure.epoch_id < StacksEpochId::Epoch30 + || tip_tenure.epoch_id < StacksEpochId::Epoch30 + { + debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; + "start_tenure" => %tenure_tip.parent_consensus_hash, + "end_tenure" => %tenure_tip.consensus_hash, + "start_tenure_epoch" => %parent_tenure.epoch_id, + "end_tenure_epoch" => %tip_tenure.epoch_id + ); + return Ok(false); + } + + Ok(true) + } + + /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the + /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get + /// its tenure-start block. + /// + /// Returns Ok(downloader) on success + /// Returns Err(..) if we call this function out of sequence. + pub fn make_highest_complete_tenure_downloader( + &self, + ) -> Result { + if self.state != NakamotoUnconfirmedDownloadState::Done { + return Err(NetError::InvalidState); + } + let Some(tenure_tip) = &self.tenure_tip else { + return Err(NetError::InvalidState); + }; + let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + + debug!( + "Create downloader for highest complete tenure {} known by {}", + &tenure_tip.parent_consensus_hash, &self.naddr, + ); + let ntd = NakamotoTenureDownloader::new( + tenure_tip.parent_consensus_hash.clone(), + tenure_tip.parent_tenure_start_block_id.clone(), + tenure_tip.tenure_start_block_id.clone(), + self.naddr.clone(), + confirmed_signer_keys.clone(), + unconfirmed_signer_keys.clone(), + ); + + Ok(ntd) + } + + /// Produce the next HTTP request that, when successfully executed, will advance this state + /// machine. + /// + /// Returns Some(request) if a request must be sent. + /// Returns None if we're done + pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option { + match &self.state { + NakamotoUnconfirmedDownloadState::GetTenureInfo => { + // need to get the tenure tip + return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost)); + } + NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => { + return Some(StacksHttpRequest::new_get_nakamoto_block( + peerhost, + block_id.clone(), + )); + } + NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => { + return Some(StacksHttpRequest::new_get_nakamoto_tenure( + peerhost, + tip_block_id.clone(), + self.highest_processed_block_id.clone(), + )); + } + NakamotoUnconfirmedDownloadState::Done => { + // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed + // tenure downloader using the earliest unconfirmed tenure block. + return None; + } + } + } + + /// Begin the next download request for this state machine. + /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The + /// caller should try this again until it gets one of the other possible return values. It's + /// up to the caller to determine when it's appropriate to convert this state machine into a + /// `NakamotoTenureDownloader`. + /// Returns Err(..) if the neighbor is dead or broken. + pub fn send_next_download_request( + &self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> Result<(), NetError> { + if neighbor_rpc.has_inflight(&self.naddr) { + debug!("Peer {} has an inflight request", &self.naddr); + return Ok(()); + } + if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { + return Err(NetError::PeerNotConnected); + } + + let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { + // no conversation open to this neighbor + neighbor_rpc.add_dead(network, &self.naddr); + return Err(NetError::PeerNotConnected); + }; + + let Some(request) = self.make_next_download_request(peerhost) else { + // treat this downloader as still in-flight since the overall state machine will need + // to keep it around long enough to convert it into a tenure downloader for the highest + // complete tenure. + return Ok(()); + }; + + neighbor_rpc.send_request(network, self.naddr.clone(), request)?; + Ok(()) + } + + /// Handle a received StacksHttpResponse and advance this machine's state + /// If we get the full tenure, return it. + /// + /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure + /// Returns Ok(None) if we're still working, in which case the caller should call + /// `send_next_download_request()` + /// Returns Err(..) on unrecoverable failure to advance state + pub fn handle_next_download_response( + &mut self, + response: StacksHttpResponse, + sortdb: &SortitionDB, + local_sort_tip: &BlockSnapshot, + chainstate: &StacksChainState, + current_reward_sets: &BTreeMap, + ) -> Result>, NetError> { + match &self.state { + NakamotoUnconfirmedDownloadState::GetTenureInfo => { + debug!("Got tenure-info response"); + let remote_tenure_info = response.decode_nakamoto_tenure_info()?; + debug!("Got tenure-info response: {:?}", &remote_tenure_info); + self.try_accept_tenure_info( + sortdb, + local_sort_tip, + chainstate, + remote_tenure_info, + current_reward_sets, + )?; + Ok(None) + } + NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { + debug!("Got tenure start-block response"); + let block = response.decode_nakamoto_block()?; + self.try_accept_unconfirmed_tenure_start_block(block)?; + Ok(None) + } + NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { + debug!("Got unconfirmed tenure blocks response"); + let blocks = response.decode_nakamoto_tenure()?; + let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; + debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); + Ok(accepted_opt) + } + NakamotoUnconfirmedDownloadState::Done => { + return Err(NetError::InvalidState); + } + } + } + + /// Is this machine finished? + pub fn is_done(&self) -> bool { + self.state == NakamotoUnconfirmedDownloadState::Done + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs new file mode 100644 index 0000000000..c96f718d2b --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs @@ -0,0 +1,867 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::RewardCycleInfo; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::download::nakamoto::{ + AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, + WantedTenure, +}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for a unconfirmed tenures. These include the ongoing tenure, as well as the +/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but +/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoUnconfirmedDownloadState { + /// Getting the tenure tip information + GetTenureInfo, + /// Get the tenure start block for the ongoing tenure. + /// The inner value is tenure-start block ID of the ongoing tenure. + GetTenureStartBlock(StacksBlockId), + /// Receiving unconfirmed tenure blocks. + /// The inner value is the block ID of the next block to fetch. + GetUnconfirmedTenureBlocks(StacksBlockId), + /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block + /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). + Done, +} + +impl fmt::Display for NakamotoUnconfirmedDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for the unconfirmed tenures. It operates in the following steps: +/// +/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip +/// 2. Get the tenure-start block for the unconfirmed chain tip +/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the +/// immediate child of the one obtained in (2) +/// +/// Once this state-machine finishes execution, the tenure-start block is used to construct a +/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. +/// +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoUnconfirmedTenureDownloader { + /// state of this machine + pub state: NakamotoUnconfirmedDownloadState, + /// Address of who we're asking + pub naddr: NeighborAddress, + /// reward set of the highest confirmed tenure + pub confirmed_signer_keys: Option, + /// reward set of the unconfirmed (ongoing) tenure + pub unconfirmed_signer_keys: Option, + /// Block ID of this node's highest-processed block. + /// We will not download any blocks lower than this, if it's set. + pub highest_processed_block_id: Option, + /// Highest processed block height (which may not need to be loaded) + pub highest_processed_block_height: Option, + + /// Tenure tip info we obtained for this peer + pub tenure_tip: Option, + /// Tenure start block for the ongoing tip. + /// This is also the tenure-end block for the highest-complete tip. + pub unconfirmed_tenure_start_block: Option, + /// Unconfirmed tenure blocks obtained + pub unconfirmed_tenure_blocks: Option>, +} + +impl NakamotoUnconfirmedTenureDownloader { + /// Make a new downloader which will download blocks from the tip back down to the optional + /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). + pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self { + Self { + state: NakamotoUnconfirmedDownloadState::GetTenureInfo, + naddr, + confirmed_signer_keys: None, + unconfirmed_signer_keys: None, + highest_processed_block_id, + highest_processed_block_height: None, + tenure_tip: None, + unconfirmed_tenure_start_block: None, + unconfirmed_tenure_blocks: None, + } + } + + /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is + /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote + /// node). + pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { + self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) + } + + /// Set the highest-processed block. + /// This can be performed by the downloader itself in order to inform ongoing requests for + /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node + /// has already handled. + pub fn set_highest_processed_block( + &mut self, + highest_processed_block_id: StacksBlockId, + highest_processed_block_height: u64, + ) { + self.highest_processed_block_id = Some(highest_processed_block_id); + self.highest_processed_block_height = Some(highest_processed_block_height); + } + + /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. + /// + /// * tenure_tip.consensus_hash + /// This is the consensus hash of the remote node's ongoing tenure. It may not be the + /// sortition tip, e.g. if the tenure spans multiple sortitions. + /// * tenure_tip.tenure_start_block_id + /// This is the first block ID of the ongoing unconfirmed tenure. + /// * tenure_tip.parent_consensus_hash + /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest + /// complete tenure, for which we know the start and end block IDs. + /// * tenure_tip.parent_tenure_start_block_id + /// This is the tenure start block for the highest complete tenure. It should be equal to + /// the winning Stacks block hash of the snapshot for the ongoing tenure. + /// + /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go + /// fetch it again; just get the new unconfirmed blocks. + pub fn try_accept_tenure_info( + &mut self, + sortdb: &SortitionDB, + local_sort_tip: &BlockSnapshot, + chainstate: &StacksChainState, + remote_tenure_tip: RPCGetTenureInfo, + current_reward_sets: &BTreeMap, + ) -> Result<(), NetError> { + if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { + return Err(NetError::InvalidState); + } + if self.tenure_tip.is_some() { + return Err(NetError::InvalidState); + } + + debug!("Got tenure info {:?}", remote_tenure_tip); + debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); + + // authenticate consensus hashes against canonical chain history + let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &remote_tenure_tip.consensus_hash, + )? + .ok_or_else(|| { + debug!( + "No snapshot for tenure {}", + &remote_tenure_tip.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &remote_tenure_tip.parent_consensus_hash, + )? + .ok_or_else(|| { + debug!( + "No snapshot for parent tenure {}", + &remote_tenure_tip.parent_consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + + let ih = sortdb.index_handle(&local_sort_tip.sortition_id); + let ancestor_local_tenure_sn = ih + .get_block_snapshot_by_height(local_tenure_sn.block_height)? + .ok_or_else(|| { + debug!( + "No tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + + if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { + // .consensus_hash is not on the canonical fork + warn!("Unconfirmed tenure consensus hash is not canonical"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.consensus_hash); + return Err(DBError::NotFoundError.into()); + } + let ancestor_parent_local_tenure_sn = ih + .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? + .ok_or_else(|| { + debug!( + "No parent tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError.into()) + })?; + + if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { + // .parent_consensus_hash is not on the canonical fork + warn!("Parent unconfirmed tenure consensus hash is not canonical"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); + return Err(DBError::NotFoundError.into()); + } + + // parent tenure sortition must precede the ongoing tenure sortition + if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { + warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.consensus_hash, + "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); + return Err(NetError::InvalidMessage); + } + + // parent tenure start block ID must be the winning block hash for the ongoing tenure's + // snapshot + if local_tenure_sn.winning_stacks_block_hash.0 + != remote_tenure_tip.parent_tenure_start_block_id.0 + { + debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; + "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, + "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); + return Err(NetError::StaleView); + } + + if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { + // we've synchronized this tenure before, so don't get anymore blocks before it. + let highest_processed_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(highest_processed_block_id)? + .ok_or_else(|| { + debug!("No such Nakamoto block {}", &highest_processed_block_id); + NetError::DBError(DBError::NotFoundError) + })? + .0; + + let highest_processed_block_height = highest_processed_block.header.chain_length; + self.highest_processed_block_height = Some(highest_processed_block_height); + + if &remote_tenure_tip.tip_block_id == highest_processed_block_id + || highest_processed_block_height > remote_tenure_tip.tip_height + { + // nothing to do -- we're at or ahead of the remote peer, so finish up. + // If we don't have the tenure-start block for the confirmed tenure that the remote + // peer claims to have, then the remote peer has sent us invalid data and we should + // treat it as such. + let unconfirmed_tenure_start_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? + .ok_or(NetError::InvalidMessage)? + .0; + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::Done; + } + } + + if self.state == NakamotoUnconfirmedDownloadState::Done { + // only need to remember the tenure tip + self.tenure_tip = Some(remote_tenure_tip); + return Ok(()); + } + + // we're not finished + let tenure_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) + .expect("FATAL: sortition from before system start"); + let parent_tenure_rc = sortdb + .pox_constants + .block_height_to_reward_cycle( + sortdb.first_block_height, + parent_local_tenure_sn.block_height, + ) + .expect("FATAL: sortition from before system start"); + + // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions + let Some(Some(confirmed_reward_set)) = current_reward_sets + .get(&parent_tenure_rc) + .map(|cycle_info| cycle_info.reward_set()) + else { + warn!( + "No signer public keys for confirmed tenure {} (rc {})", + &parent_local_tenure_sn.consensus_hash, parent_tenure_rc + ); + return Err(NetError::InvalidState); + }; + + let Some(Some(unconfirmed_reward_set)) = current_reward_sets + .get(&tenure_rc) + .map(|cycle_info| cycle_info.reward_set()) + else { + warn!( + "No signer public keys for unconfirmed tenure {} (rc {})", + &local_tenure_sn.consensus_hash, tenure_rc + ); + return Err(NetError::InvalidState); + }; + + if chainstate + .nakamoto_blocks_db() + .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? + { + // proceed to get unconfirmed blocks. We already have the tenure-start block. + let unconfirmed_tenure_start_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? + .ok_or_else(|| { + debug!( + "No such tenure-start Nakamoto block {}", + &remote_tenure_tip.tenure_start_block_id + ); + NetError::DBError(DBError::NotFoundError) + })? + .0; + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( + remote_tenure_tip.tip_block_id.clone(), + ); + } else { + // get the tenure-start block first + self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( + remote_tenure_tip.tenure_start_block_id.clone(), + ); + } + + debug!( + "Will validate unconfirmed blocks with reward sets in ({},{})", + parent_tenure_rc, tenure_rc + ); + self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); + self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); + self.tenure_tip = Some(remote_tenure_tip); + + Ok(()) + } + + /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the unconfirmed tenure start block was valid + /// Returns Err(..) if it was not valid, or if this function was called out of sequence. + pub fn try_accept_unconfirmed_tenure_start_block( + &mut self, + unconfirmed_tenure_start_block: NakamotoBlock, + ) -> Result<(), NetError> { + let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = + &self.state + else { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); + return Err(NetError::InvalidState); + }; + + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); + return Err(NetError::InvalidState); + }; + + // stacker signature has to match the current reward set + if let Err(e) = unconfirmed_tenure_start_block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // block has to match the expected hash + if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { + warn!("Invalid tenure-start block"; + "tenure_id_start_block" => %tenure_start_block_id, + "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + // furthermore, the block has to match the expected tenure ID + if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { + warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; + "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); + return Err(NetError::InvalidMessage); + } + + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( + tenure_tip.tip_block_id.clone(), + ); + Ok(()) + } + + /// Add downloaded unconfirmed tenure blocks. + /// If we have collected all tenure blocks, then return them. + /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the + /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come + /// after the highest-processed block (if set). + /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call + /// `send_next_download_request()` + /// Returns Err(..) on invalid state or invalid block. + pub fn try_accept_unconfirmed_tenure_blocks( + &mut self, + mut tenure_blocks: Vec, + ) -> Result>, NetError> { + let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = + &self.state + else { + return Err(NetError::InvalidState); + }; + + let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); + return Err(NetError::InvalidState); + }; + + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); + return Err(NetError::InvalidState); + }; + + if tenure_blocks.is_empty() { + // nothing to do + debug!("No tenure blocks obtained"); + return Ok(None); + } + + // blocks must be contiguous and in order from highest to lowest. + // If there's a tenure-start block, it must be last. + let mut expected_block_id = last_block_id; + let mut finished_download = false; + let mut last_block_index = None; + for (cnt, block) in tenure_blocks.iter().enumerate() { + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- not part of tenure"; + "expected_block_id" => %expected_block_id, + "block_id" => %block.header.block_id()); + return Err(NetError::InvalidMessage); + } + if let Err(e) = block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // we may or may not need the tenure-start block for the unconfirmed tenure. But if we + // do, make sure it's valid, and it's the last block we receive. + let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { + warn!("Invalid tenure-start block"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + }; + if is_tenure_start { + // this is the tenure-start block, so make sure it matches our /v3/tenure/info + if block.header.block_id() != tenure_tip.tenure_start_block_id { + warn!("Unexpected tenure-start block"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); + return Err(NetError::InvalidMessage); + } + + if cnt.saturating_add(1) != tenure_blocks.len() { + warn!("Invalid tenure stream -- got tenure-start before end of tenure"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "cnt" => cnt, + "len" => tenure_blocks.len(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + finished_download = true; + last_block_index = Some(cnt); + break; + } + + debug!("Got unconfirmed tenure block {}", &block.header.block_id()); + + // NOTE: this field can get updated by the downloader while this state-machine is in + // this state. + if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { + if expected_block_id == highest_processed_block_id { + // got all the blocks we asked for + debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); + finished_download = true; + last_block_index = Some(cnt); + break; + } + } + + // NOTE: this field can get updated by the downloader while this state-machine is in + // this state. + if let Some(highest_processed_block_height) = + self.highest_processed_block_height.as_ref() + { + if &block.header.chain_length <= highest_processed_block_height { + // no need to continue this download + debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); + finished_download = true; + last_block_index = Some(cnt); + break; + } + } + + expected_block_id = &block.header.parent_block_id; + last_block_index = Some(cnt); + } + + // blocks after the last_block_index were not processed, so should be dropped + if let Some(last_block_index) = last_block_index { + tenure_blocks.truncate(last_block_index + 1); + } + + if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { + blocks.append(&mut tenure_blocks); + } else { + self.unconfirmed_tenure_blocks = Some(tenure_blocks); + } + + if finished_download { + // we have all of the unconfirmed tenure blocks that were requested. + // only return those newer than the highest block. + self.state = NakamotoUnconfirmedDownloadState::Done; + let highest_processed_block_height = + *self.highest_processed_block_height.as_ref().unwrap_or(&0); + + debug!("Finished receiving unconfirmed tenure"); + return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { + blocks + .into_iter() + .filter(|block| block.header.chain_length > highest_processed_block_height) + .rev() + .collect() + })); + } + + let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + + // still have more to get + let Some(earliest_block) = blocks.last() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + let next_block_id = earliest_block.header.parent_block_id.clone(); + + debug!( + "Will resume fetching unconfirmed tenure blocks starting at {}", + &next_block_id + ); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); + Ok(None) + } + + /// Once this machine runs to completion, examine its state to see if we still need to fetch + /// the highest complete tenure. We may not need to, especially if we're just polling for new + /// unconfirmed blocks. + /// + /// Return Ok(true) if we need it still + /// Return Ok(false) if we already have it + /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. + pub fn need_highest_complete_tenure( + &self, + chainstate: &StacksChainState, + ) -> Result { + if self.state != NakamotoUnconfirmedDownloadState::Done { + return Err(NetError::InvalidState); + } + let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() + else { + return Err(NetError::InvalidState); + }; + + // if we've processed the unconfirmed tenure-start block already, then we've necessarily + // downloaded and processed the highest-complete tenure already. + Ok(!NakamotoChainState::has_block_header( + chainstate.db(), + &unconfirmed_tenure_start_block.header.block_id(), + false, + )?) + } + + /// Determine if we can produce a highest-complete tenure request. + /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure + pub fn can_make_highest_complete_tenure_downloader( + &self, + sortdb: &SortitionDB, + ) -> Result { + let Some(tenure_tip) = &self.tenure_tip else { + return Ok(false); + }; + + let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &tenure_tip.parent_consensus_hash, + )? + else { + return Ok(false); + }; + + let Some(tip_sn) = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? + else { + return Ok(false); + }; + + let Some(parent_tenure) = + SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? + else { + return Ok(false); + }; + + let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? + else { + return Ok(false); + }; + + if parent_tenure.epoch_id < StacksEpochId::Epoch30 + || tip_tenure.epoch_id < StacksEpochId::Epoch30 + { + debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; + "start_tenure" => %tenure_tip.parent_consensus_hash, + "end_tenure" => %tenure_tip.consensus_hash, + "start_tenure_epoch" => %parent_tenure.epoch_id, + "end_tenure_epoch" => %tip_tenure.epoch_id + ); + return Ok(false); + } + + Ok(true) + } + + /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the + /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get + /// its tenure-start block. + /// + /// Returns Ok(downloader) on success + /// Returns Err(..) if we call this function out of sequence. + pub fn make_highest_complete_tenure_downloader( + &self, + ) -> Result { + if self.state != NakamotoUnconfirmedDownloadState::Done { + return Err(NetError::InvalidState); + } + let Some(tenure_tip) = &self.tenure_tip else { + return Err(NetError::InvalidState); + }; + let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + + debug!( + "Create downloader for highest complete tenure {} known by {}", + &tenure_tip.parent_consensus_hash, &self.naddr, + ); + let ntd = NakamotoTenureDownloader::new( + tenure_tip.parent_consensus_hash.clone(), + tenure_tip.parent_tenure_start_block_id.clone(), + tenure_tip.tenure_start_block_id.clone(), + self.naddr.clone(), + confirmed_signer_keys.clone(), + unconfirmed_signer_keys.clone(), + ); + + Ok(ntd) + } + + /// Produce the next HTTP request that, when successfully executed, will advance this state + /// machine. + /// + /// Returns Some(request) if a request must be sent. + /// Returns None if we're done + pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option { + match &self.state { + NakamotoUnconfirmedDownloadState::GetTenureInfo => { + // need to get the tenure tip + return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost)); + } + NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => { + return Some(StacksHttpRequest::new_get_nakamoto_block( + peerhost, + block_id.clone(), + )); + } + NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => { + return Some(StacksHttpRequest::new_get_nakamoto_tenure( + peerhost, + tip_block_id.clone(), + self.highest_processed_block_id.clone(), + )); + } + NakamotoUnconfirmedDownloadState::Done => { + // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed + // tenure downloader using the earliest unconfirmed tenure block. + return None; + } + } + } + + /// Begin the next download request for this state machine. + /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The + /// caller should try this again until it gets one of the other possible return values. It's + /// up to the caller to determine when it's appropriate to convert this state machine into a + /// `NakamotoTenureDownloader`. + /// Returns Err(..) if the neighbor is dead or broken. + pub fn send_next_download_request( + &self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> Result<(), NetError> { + if neighbor_rpc.has_inflight(&self.naddr) { + debug!("Peer {} has an inflight request", &self.naddr); + return Ok(()); + } + if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { + return Err(NetError::PeerNotConnected); + } + + let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { + // no conversation open to this neighbor + neighbor_rpc.add_dead(network, &self.naddr); + return Err(NetError::PeerNotConnected); + }; + + let Some(request) = self.make_next_download_request(peerhost) else { + // treat this downloader as still in-flight since the overall state machine will need + // to keep it around long enough to convert it into a tenure downloader for the highest + // complete tenure. + return Ok(()); + }; + + neighbor_rpc.send_request(network, self.naddr.clone(), request)?; + Ok(()) + } + + /// Handle a received StacksHttpResponse and advance this machine's state + /// If we get the full tenure, return it. + /// + /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure + /// Returns Ok(None) if we're still working, in which case the caller should call + /// `send_next_download_request()` + /// Returns Err(..) on unrecoverable failure to advance state + pub fn handle_next_download_response( + &mut self, + response: StacksHttpResponse, + sortdb: &SortitionDB, + local_sort_tip: &BlockSnapshot, + chainstate: &StacksChainState, + current_reward_sets: &BTreeMap, + ) -> Result>, NetError> { + match &self.state { + NakamotoUnconfirmedDownloadState::GetTenureInfo => { + debug!("Got tenure-info response"); + let remote_tenure_info = response.decode_nakamoto_tenure_info()?; + debug!("Got tenure-info response: {:?}", &remote_tenure_info); + self.try_accept_tenure_info( + sortdb, + local_sort_tip, + chainstate, + remote_tenure_info, + current_reward_sets, + )?; + Ok(None) + } + NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { + debug!("Got tenure start-block response"); + let block = response.decode_nakamoto_block()?; + self.try_accept_unconfirmed_tenure_start_block(block)?; + Ok(None) + } + NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { + debug!("Got unconfirmed tenure blocks response"); + let blocks = response.decode_nakamoto_tenure()?; + let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; + debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); + Ok(accepted_opt) + } + NakamotoUnconfirmedDownloadState::Done => { + return Err(NetError::InvalidState); + } + } + } + + /// Is this machine finished? + pub fn is_done(&self) -> bool { + self.state == NakamotoUnconfirmedDownloadState::Done + } +} From 8107bf97dbdc26fb822ba57175ac373df600dd64 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 19:50:52 +0300 Subject: [PATCH 0802/1400] update team --- .github/workflows/pr-differences-mutants.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index 9c5cc34c7b..51456c9172 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -36,7 +36,7 @@ jobs: uses: stacks-network/actions/team-membership@feat/mutation-testing with: username: ${{ github.actor }} - team: 'Blockchain Team' + team: 'blockchain-team' GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - name: Fail if the user does not have the right permissions From febf3cef5dd8acf95b3f692c1d62ec36bad2a75c Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 20:47:43 +0300 Subject: [PATCH 0803/1400] update to actions' repo main branch --- .github/workflows/pr-differences-mutants.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index 51456c9172..12d891297b 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -33,7 +33,7 @@ jobs: steps: - name: Check Right Permissions To Trigger This id: check_right_permissions - uses: stacks-network/actions/team-membership@feat/mutation-testing + uses: stacks-network/actions/team-membership@main with: username: ${{ github.actor }} team: 'blockchain-team' @@ -62,7 +62,7 @@ jobs: steps: - id: check_packages_and_shards - uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main with: ignore_timeout: ${{ inputs.ignore_timeout }} @@ -80,7 +80,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: package: 'small' @@ -103,7 +103,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: shard: ${{ matrix.shard }} package: 'small' @@ -125,7 +125,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: package: 'stackslib' @@ -151,7 +151,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: shard: ${{ matrix.shard }} package: 'stackslib' @@ -173,7 +173,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: package: 'stacks-node' @@ -199,7 +199,7 @@ jobs: env: BITCOIND_TEST: 1 RUST_BACKTRACE: full - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: shard: ${{ matrix.shard }} package: 'stacks-node' @@ -217,7 +217,7 @@ jobs: steps: - name: Run mutants on diffs - uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/pr-differences@main with: package: 'stacks-signer' @@ -242,7 +242,7 @@ jobs: steps: - name: Output Mutants - uses: stacks-network/actions/stacks-core/mutation-testing/output-pr-mutants@feat/mutation-testing + uses: stacks-network/actions/stacks-core/mutation-testing/output-pr-mutants@main with: stackslib_package: ${{ needs.check-big-packages-and-shards.outputs.run_stackslib }} shards_for_stackslib_package: ${{ needs.check-big-packages-and-shards.outputs.stackslib_with_shards }} From c3183204830c061da1681e48a287ad3f391cb644 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 20:49:16 +0300 Subject: [PATCH 0804/1400] test with team i am not part of --- .github/workflows/pr-differences-mutants.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index 12d891297b..920f0f0701 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -36,7 +36,7 @@ jobs: uses: stacks-network/actions/team-membership@main with: username: ${{ github.actor }} - team: 'blockchain-team' + team: 'devops' GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - name: Fail if the user does not have the right permissions From abaf606585a65a8fb5ee91418254a30fddbc8168 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 20:51:27 +0300 Subject: [PATCH 0805/1400] fix true type on equal --- .github/workflows/pr-differences-mutants.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index 920f0f0701..3c560cdbbc 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -40,7 +40,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - name: Fail if the user does not have the right permissions - if: ${{ inputs.ignore_timeout == true && steps.check_right_permissions.outputs.is_team_member != 'true' }} + if: ${{ inputs.ignore_timeout == 'true' && steps.check_right_permissions.outputs.is_team_member != 'true' }} run: exit 1 # Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards From bca227e7c0ef2b88ffbee124de0932f51ebe313c Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 20:53:57 +0300 Subject: [PATCH 0806/1400] update the mutants team to blockchain-team --- .github/workflows/pr-differences-mutants.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index 3c560cdbbc..edafb42bf1 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -36,7 +36,7 @@ jobs: uses: stacks-network/actions/team-membership@main with: username: ${{ github.actor }} - team: 'devops' + team: 'blockchain-team' GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - name: Fail if the user does not have the right permissions From a52464e88c19f89bc2f79301aa7261e2a04d11e4 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Fri, 16 Aug 2024 20:56:23 +0300 Subject: [PATCH 0807/1400] removed extra files for testing the mutants membership dispatch --- stackslib/src/net/download/nakamoto/mod.rs | 6 - .../nakamoto/tenure_downloader_copy.rs | 693 -------------- .../nakamoto/tenure_downloader_opy.rs | 693 -------------- .../nakamoto/tenure_downloader_set_copy.rs | 660 ------------- .../nakamoto/tenure_downloader_set_opy.rs | 660 ------------- .../tenure_downloader_unconfirmed_copy.rs | 867 ------------------ .../tenure_downloader_unconfirmed_opy.rs | 867 ------------------ 7 files changed, 4446 deletions(-) delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index 7643c54ff7..dd440ac110 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -161,14 +161,8 @@ use crate::util_lib::db::{DBConn, Error as DBError}; mod download_state_machine; mod tenure; mod tenure_downloader; -mod tenure_downloader_copy; -mod tenure_downloader_opy; mod tenure_downloader_set; -mod tenure_downloader_set_copy; -mod tenure_downloader_set_opy; mod tenure_downloader_unconfirmed; -mod tenure_downloader_unconfirmed_copy; -mod tenure_downloader_unconfirmed_opy; pub use crate::net::download::nakamoto::download_state_machine::{ NakamotoDownloadState, NakamotoDownloadStateMachine, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs deleted file mode 100644 index f7fb970bb6..0000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for an historic tenure. This is a tenure for which we know the hashes of the -/// start and end block. This includes all tenures except for the two most recent ones. -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoTenureDownloadState { - /// Getting the tenure-start block (the given StacksBlockId is it's block ID). - GetTenureStartBlock(StacksBlockId), - /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not - /// always) handled by the execution of another NakamotoTenureDownloader. The only - /// exceptions are as follows: - /// - /// * if this tenure contains the anchor block, and it's the last tenure in the - /// reward cycle. In this case, the end-block must be directly fetched, since there will be no - /// follow-on NakamotTenureDownloader in the same reward cycle who can provide this. - /// - /// * if this tenure is the highest complete tenure, and we just learned the start-block of the - /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block - /// already known. This step will be skipped because the end-block is already present in the - /// state machine. - /// - /// * if the deadline (second parameter) is exceeded, the state machine transitions to - /// GetTenureEndBlock. - /// - /// The two fields here are: - /// * the block ID of the last block in the tenure (which happens to be the block ID of the - /// start block of the next tenure) - /// * the deadline by which this state machine needs to have obtained the tenure end-block - /// before transitioning to `GetTenureEndBlock`. - WaitForTenureEndBlock(StacksBlockId, Instant), - /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks - /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in - /// which we cannot quickly get the tenure-end block. - /// - /// The field here is the block ID of the tenure end block. - GetTenureEndBlock(StacksBlockId), - /// Receiving tenure blocks. - /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This - /// is because a tenure is fetched in order from highest block to lowest block. - GetTenureBlocks(StacksBlockId), - /// We have gotten all the blocks for this tenure - Done, -} - -pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; - -impl fmt::Display for NakamotoTenureDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs -/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent -/// tenures). -/// -/// This state machine works as follows: -/// -/// 1. Fetch the first block in the given tenure -/// 2. Obtain the last block in the given tenure, via one of the following means: -/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this -/// machine's tenure, and can be copied into this machine. -/// b. This machine is configured to directly fetch the end-block. This only happens if this -/// tenure both contains the anchor block for the next reward cycle and happens to be the last -/// tenure in the current reward cycle. -/// c. This machine is given the end-block on instantiation. This only happens when the machine -/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); -/// in this case, the end-block is the start-block of the ongoing tenure. -/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse -/// order. As blocks are found, their signer signatures will be validated against the signer -/// public keys for this tenure; their hash-chain continuity will be validated against the start -/// and end block hashes; their quantity will be validated against the tenure-change transaction -/// in the end-block. -/// -/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto -/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of -/// whether or not it straddles a reward cycle boundary). -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoTenureDownloader { - /// Consensus hash that identifies this tenure - pub tenure_id_consensus_hash: ConsensusHash, - /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and - /// sortition DB. - pub tenure_start_block_id: StacksBlockId, - /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID - /// for some other tenure). Learned from the inventory state machine and sortition DB. - pub tenure_end_block_id: StacksBlockId, - /// Address of who we're asking for blocks - pub naddr: NeighborAddress, - /// Signer public keys that signed the start-block of this tenure, in reward cycle order - pub start_signer_keys: RewardSet, - /// Signer public keys that signed the end-block of this tenure - pub end_signer_keys: RewardSet, - /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with - /// this state machine. - pub idle: bool, - - /// What state we're in for downloading this tenure - pub state: NakamotoTenureDownloadState, - /// Tenure-start block - pub tenure_start_block: Option, - /// Pre-stored tenure end block (used by the unconfirmed block downloader). - /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once - /// the start-block for the current tenure is downloaded. This is that start-block, which is - /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. - pub tenure_end_block: Option, - /// Tenure-end block header and TenureChange - pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, - /// Tenure blocks - pub tenure_blocks: Option>, -} - -impl NakamotoTenureDownloader { - pub fn new( - tenure_id_consensus_hash: ConsensusHash, - tenure_start_block_id: StacksBlockId, - tenure_end_block_id: StacksBlockId, - naddr: NeighborAddress, - start_signer_keys: RewardSet, - end_signer_keys: RewardSet, - ) -> Self { - debug!( - "Instantiate downloader to {} for tenure {}: {}-{}", - &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, - ); - Self { - tenure_id_consensus_hash, - tenure_start_block_id, - tenure_end_block_id, - naddr, - start_signer_keys, - end_signer_keys, - idle: false, - state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), - tenure_start_block: None, - tenure_end_header: None, - tenure_end_block: None, - tenure_blocks: None, - } - } - - /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed - /// tenure. This supplies the tenure end-block if known in advance. - pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { - self.tenure_end_block = Some(tenure_end_block); - self - } - - /// Is this downloader waiting for the tenure-end block data from some other downloader? Per - /// the struct documentation, this is case 2(a). - pub fn is_waiting(&self) -> bool { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { - return true; - } else { - return false; - } - } - - /// Validate and accept a given tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the start-block is valid. - /// Returns Err(..) if it is not valid. - pub fn try_accept_tenure_start_block( - &mut self, - tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { - // not the right state for this - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if self.tenure_start_block_id != tenure_start_block.header.block_id() { - // not the block we were expecting - warn!("Invalid tenure-start block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_start_block" => %self.tenure_start_block_id, - "tenure_start_block ID" => %tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_start_block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - // signature verification failed - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-start block for tenure {} block={}", - &self.tenure_id_consensus_hash, - &tenure_start_block.block_id() - ); - self.tenure_start_block = Some(tenure_start_block); - - if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { - // tenure_end_header supplied externally - self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); - } else if let Some(tenure_end_block) = self.tenure_end_block.take() { - // we already have the tenure-end block, so immediately proceed to accept it. - debug!( - "Preemptively process tenure-end block {} for tenure {}", - tenure_end_block.block_id(), - &self.tenure_id_consensus_hash - ); - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - tenure_end_block.block_id(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - self.try_accept_tenure_end_block(&tenure_end_block)?; - } else { - // need to get tenure_end_header. By default, assume that another - // NakamotoTenureDownloader will provide this block, and allow the - // NakamotoTenureDownloaderSet instance that manages a collection of these - // state-machines make the call to require this one to fetch the block directly. - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - self.tenure_end_block_id.clone(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - } - Ok(()) - } - - /// Transition this state-machine from waiting for its tenure-end block from another - /// state-machine to directly fetching it. This only needs to happen if the tenure this state - /// machine is downloading contains the PoX anchor block, and it's also the last confirmed - /// tenurein this reward cycle. - /// - /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and - /// runs a set of these machines based on the peers' inventory vectors. But because we don't - /// know if this is the PoX anchor block tenure (or even the last tenure) until we have - /// inventory vectors for this tenure's reward cycle, this state-transition must be driven - /// after this machine's instantiation. - pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state - else { - return Err(NetError::InvalidState); - }; - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - Ok(()) - } - - /// Transition to fetching the tenure-end block directly if waiting has taken too long. - pub fn transition_to_fetch_end_block_on_timeout(&mut self) { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = - self.state - { - if wait_deadline < Instant::now() { - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - } - } - } - - /// Validate and accept a tenure-end block. If accepted, then advance the state. - /// Once accepted, this function extracts the tenure-change transaction and block header from - /// this block (it does not need the entire block). - /// - /// Returns Ok(()) if the block was valid - /// Returns Err(..) if the block was invalid - pub fn try_accept_tenure_end_block( - &mut self, - tenure_end_block: &NakamotoBlock, - ) -> Result<(), NetError> { - if !matches!( - &self.state, - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) - | NakamotoTenureDownloadState::GetTenureEndBlock(_) - ) { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - warn!("Invalid state -- tenure_start_block is not set"); - return Err(NetError::InvalidState); - }; - - if self.tenure_end_block_id != tenure_end_block.header.block_id() { - // not the block we asked for - warn!("Invalid tenure-end block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_end_block" => %self.tenure_end_block_id, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_end_block - .header - .verify_signer_signatures(&self.end_signer_keys) - { - // bad signature - warn!("Invalid tenure-end block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // extract the needful -- need the tenure-change payload (which proves that the tenure-end - // block is the tenure-start block for the next tenure) and the parent block ID (which is - // the next block to download). - let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-end block: failed to validate tenure-start"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - if !valid { - warn!("Invalid tenure-end block: not a well-formed tenure-start block"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - } - - let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { - warn!("Invalid tenure-end block: no tenure-change transaction"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - // tc_payload must point to the tenure-start block's header - if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { - warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; - "start_block_id" => %tenure_start_block.block_id(), - "end_block_id" => %tenure_end_block.block_id(), - "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, - "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-end header for tenure {} block={}; expect {} blocks", - &self.tenure_id_consensus_hash, - &tenure_end_block.block_id(), - tc_payload.previous_tenure_blocks - ); - self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); - self.state = NakamotoTenureDownloadState::GetTenureBlocks( - tenure_end_block.header.parent_block_id.clone(), - ); - Ok(()) - } - - /// Determine how many blocks must be in this tenure. - /// Returns None if we don't have the start and end blocks yet. - pub fn tenure_length(&self) -> Option { - self.tenure_end_header - .as_ref() - .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) - } - - /// Add downloaded tenure blocks to this machine. - /// If we have collected all tenure blocks, then return them and transition to the Done state. - /// - /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in - /// ascending order by height, and will include the tenure-start block but exclude the - /// tenure-end block. - /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to - /// the next block to fetch (stored in self.state) will be updated. - /// Returns Err(..) if the blocks were invalid. - pub fn try_accept_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest - let mut expected_block_id = block_cursor; - let mut count = 0; - for block in tenure_blocks.iter() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - expected_block_id = &block.header.parent_block_id; - count += 1; - if self - .tenure_blocks - .as_ref() - .map(|blocks| blocks.len()) - .unwrap_or(0) - .saturating_add(count) - > self.tenure_length().unwrap_or(0) as usize - { - // there are more blocks downloaded than indicated by the end-blocks tenure-change - // transaction. - warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); - "tenure_id" => %self.tenure_id_consensus_hash, - "count" => %count, - "tenure_length" => self.tenure_length().unwrap_or(0), - "num_blocks" => tenure_blocks.len()); - return Err(NetError::InvalidMessage); - } - } - - if let Some(blocks) = self.tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.tenure_blocks = Some(tenure_blocks); - } - - // did we reach the tenure start block? - let Some(blocks) = self.tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got None)"); - return Err(NetError::InvalidState); - }; - - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no tenure-start block (infallible)"); - return Err(NetError::InvalidState); - }; - - debug!( - "Accepted tenure blocks for tenure {} cursor={} ({})", - &self.tenure_id_consensus_hash, &block_cursor, count - ); - if earliest_block.block_id() != tenure_start_block.block_id() { - // still have more blocks to download - let next_block_id = earliest_block.header.parent_block_id.clone(); - debug!( - "Need more blocks for tenure {} (went from {} to {}, next is {})", - &self.tenure_id_consensus_hash, - &block_cursor, - &earliest_block.block_id(), - &next_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); - return Ok(None); - } - - // finished! - self.state = NakamotoTenureDownloadState::Done; - Ok(self - .tenure_blocks - .take() - .map(|blocks| blocks.into_iter().rev().collect())) - } - - /// Produce the next HTTP request that, when successfully executed, will fetch the data needed - /// to advance this state machine. - /// Not all states require an HTTP request for advanceement. - /// - /// Returns Ok(Some(request)) if a request is needed - /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's - /// state) - /// Returns Err(()) if we're done. - pub fn make_next_download_request( - &self, - peerhost: PeerHost, - ) -> Result, ()> { - let request = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { - debug!("Request tenure-start block {}", &start_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { - // we're waiting for some other downloader's block-fetch to complete - debug!( - "Waiting for tenure-end block {} until {:?}", - &_block_id, _deadline - ); - return Ok(None); - } - NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { - debug!("Request tenure-end block {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) - } - NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { - debug!("Downloading tenure ending at {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) - } - NakamotoTenureDownloadState::Done => { - // nothing more to do - return Err(()); - } - }; - Ok(Some(request)) - } - - /// Begin the next download request for this state machine. The request will be sent to the - /// data URL corresponding to self.naddr. - /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. - /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) - /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to - /// resolve its data URL to a socket address. - pub fn send_next_download_request( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(true); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let request = match self.make_next_download_request(peerhost) { - Ok(Some(request)) => request, - Ok(None) => { - return Ok(true); - } - Err(_) => { - return Ok(false); - } - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - self.idle = false; - Ok(true) - } - - /// Handle a received StacksHttpResponse and advance the state machine. - /// If we get the full tenure's blocks, then return them. - /// Returns Ok(Some([blocks])) if we successfully complete the state machine. - /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done - /// yet. The caller should now call `send_next_download_request()` - /// Returns Err(..) on failure to process the response. - pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - ) -> Result>, NetError> { - let handle_result = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { - debug!( - "Got download response for tenure-start block {}", - &_block_id - ); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e - })?; - self.try_accept_tenure_start_block(block)?; - Ok(None) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { - debug!("Invalid state -- Got download response for WaitForTenureBlock"); - Err(NetError::InvalidState) - } - NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { - debug!("Got download response to tenure-end block {}", &_block_id); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e - })?; - self.try_accept_tenure_end_block(&block)?; - Ok(None) - } - NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { - debug!( - "Got download response for tenure blocks ending at {}", - &_end_block_id - ); - let blocks = response.decode_nakamoto_tenure().map_err(|e| { - warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); - e - })?; - let blocks_opt = self.try_accept_tenure_blocks(blocks)?; - Ok(blocks_opt) - } - NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), - }; - self.idle = true; - handle_result - } - - pub fn is_done(&self) -> bool { - self.state == NakamotoTenureDownloadState::Done - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs deleted file mode 100644 index f7fb970bb6..0000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for an historic tenure. This is a tenure for which we know the hashes of the -/// start and end block. This includes all tenures except for the two most recent ones. -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoTenureDownloadState { - /// Getting the tenure-start block (the given StacksBlockId is it's block ID). - GetTenureStartBlock(StacksBlockId), - /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not - /// always) handled by the execution of another NakamotoTenureDownloader. The only - /// exceptions are as follows: - /// - /// * if this tenure contains the anchor block, and it's the last tenure in the - /// reward cycle. In this case, the end-block must be directly fetched, since there will be no - /// follow-on NakamotTenureDownloader in the same reward cycle who can provide this. - /// - /// * if this tenure is the highest complete tenure, and we just learned the start-block of the - /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block - /// already known. This step will be skipped because the end-block is already present in the - /// state machine. - /// - /// * if the deadline (second parameter) is exceeded, the state machine transitions to - /// GetTenureEndBlock. - /// - /// The two fields here are: - /// * the block ID of the last block in the tenure (which happens to be the block ID of the - /// start block of the next tenure) - /// * the deadline by which this state machine needs to have obtained the tenure end-block - /// before transitioning to `GetTenureEndBlock`. - WaitForTenureEndBlock(StacksBlockId, Instant), - /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks - /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in - /// which we cannot quickly get the tenure-end block. - /// - /// The field here is the block ID of the tenure end block. - GetTenureEndBlock(StacksBlockId), - /// Receiving tenure blocks. - /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This - /// is because a tenure is fetched in order from highest block to lowest block. - GetTenureBlocks(StacksBlockId), - /// We have gotten all the blocks for this tenure - Done, -} - -pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; - -impl fmt::Display for NakamotoTenureDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs -/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent -/// tenures). -/// -/// This state machine works as follows: -/// -/// 1. Fetch the first block in the given tenure -/// 2. Obtain the last block in the given tenure, via one of the following means: -/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this -/// machine's tenure, and can be copied into this machine. -/// b. This machine is configured to directly fetch the end-block. This only happens if this -/// tenure both contains the anchor block for the next reward cycle and happens to be the last -/// tenure in the current reward cycle. -/// c. This machine is given the end-block on instantiation. This only happens when the machine -/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); -/// in this case, the end-block is the start-block of the ongoing tenure. -/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse -/// order. As blocks are found, their signer signatures will be validated against the signer -/// public keys for this tenure; their hash-chain continuity will be validated against the start -/// and end block hashes; their quantity will be validated against the tenure-change transaction -/// in the end-block. -/// -/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto -/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of -/// whether or not it straddles a reward cycle boundary). -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoTenureDownloader { - /// Consensus hash that identifies this tenure - pub tenure_id_consensus_hash: ConsensusHash, - /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and - /// sortition DB. - pub tenure_start_block_id: StacksBlockId, - /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID - /// for some other tenure). Learned from the inventory state machine and sortition DB. - pub tenure_end_block_id: StacksBlockId, - /// Address of who we're asking for blocks - pub naddr: NeighborAddress, - /// Signer public keys that signed the start-block of this tenure, in reward cycle order - pub start_signer_keys: RewardSet, - /// Signer public keys that signed the end-block of this tenure - pub end_signer_keys: RewardSet, - /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with - /// this state machine. - pub idle: bool, - - /// What state we're in for downloading this tenure - pub state: NakamotoTenureDownloadState, - /// Tenure-start block - pub tenure_start_block: Option, - /// Pre-stored tenure end block (used by the unconfirmed block downloader). - /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once - /// the start-block for the current tenure is downloaded. This is that start-block, which is - /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. - pub tenure_end_block: Option, - /// Tenure-end block header and TenureChange - pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, - /// Tenure blocks - pub tenure_blocks: Option>, -} - -impl NakamotoTenureDownloader { - pub fn new( - tenure_id_consensus_hash: ConsensusHash, - tenure_start_block_id: StacksBlockId, - tenure_end_block_id: StacksBlockId, - naddr: NeighborAddress, - start_signer_keys: RewardSet, - end_signer_keys: RewardSet, - ) -> Self { - debug!( - "Instantiate downloader to {} for tenure {}: {}-{}", - &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, - ); - Self { - tenure_id_consensus_hash, - tenure_start_block_id, - tenure_end_block_id, - naddr, - start_signer_keys, - end_signer_keys, - idle: false, - state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), - tenure_start_block: None, - tenure_end_header: None, - tenure_end_block: None, - tenure_blocks: None, - } - } - - /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed - /// tenure. This supplies the tenure end-block if known in advance. - pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { - self.tenure_end_block = Some(tenure_end_block); - self - } - - /// Is this downloader waiting for the tenure-end block data from some other downloader? Per - /// the struct documentation, this is case 2(a). - pub fn is_waiting(&self) -> bool { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { - return true; - } else { - return false; - } - } - - /// Validate and accept a given tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the start-block is valid. - /// Returns Err(..) if it is not valid. - pub fn try_accept_tenure_start_block( - &mut self, - tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { - // not the right state for this - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if self.tenure_start_block_id != tenure_start_block.header.block_id() { - // not the block we were expecting - warn!("Invalid tenure-start block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_start_block" => %self.tenure_start_block_id, - "tenure_start_block ID" => %tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_start_block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - // signature verification failed - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-start block for tenure {} block={}", - &self.tenure_id_consensus_hash, - &tenure_start_block.block_id() - ); - self.tenure_start_block = Some(tenure_start_block); - - if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { - // tenure_end_header supplied externally - self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); - } else if let Some(tenure_end_block) = self.tenure_end_block.take() { - // we already have the tenure-end block, so immediately proceed to accept it. - debug!( - "Preemptively process tenure-end block {} for tenure {}", - tenure_end_block.block_id(), - &self.tenure_id_consensus_hash - ); - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - tenure_end_block.block_id(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - self.try_accept_tenure_end_block(&tenure_end_block)?; - } else { - // need to get tenure_end_header. By default, assume that another - // NakamotoTenureDownloader will provide this block, and allow the - // NakamotoTenureDownloaderSet instance that manages a collection of these - // state-machines make the call to require this one to fetch the block directly. - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - self.tenure_end_block_id.clone(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - } - Ok(()) - } - - /// Transition this state-machine from waiting for its tenure-end block from another - /// state-machine to directly fetching it. This only needs to happen if the tenure this state - /// machine is downloading contains the PoX anchor block, and it's also the last confirmed - /// tenurein this reward cycle. - /// - /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and - /// runs a set of these machines based on the peers' inventory vectors. But because we don't - /// know if this is the PoX anchor block tenure (or even the last tenure) until we have - /// inventory vectors for this tenure's reward cycle, this state-transition must be driven - /// after this machine's instantiation. - pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state - else { - return Err(NetError::InvalidState); - }; - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - Ok(()) - } - - /// Transition to fetching the tenure-end block directly if waiting has taken too long. - pub fn transition_to_fetch_end_block_on_timeout(&mut self) { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = - self.state - { - if wait_deadline < Instant::now() { - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - } - } - } - - /// Validate and accept a tenure-end block. If accepted, then advance the state. - /// Once accepted, this function extracts the tenure-change transaction and block header from - /// this block (it does not need the entire block). - /// - /// Returns Ok(()) if the block was valid - /// Returns Err(..) if the block was invalid - pub fn try_accept_tenure_end_block( - &mut self, - tenure_end_block: &NakamotoBlock, - ) -> Result<(), NetError> { - if !matches!( - &self.state, - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) - | NakamotoTenureDownloadState::GetTenureEndBlock(_) - ) { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - warn!("Invalid state -- tenure_start_block is not set"); - return Err(NetError::InvalidState); - }; - - if self.tenure_end_block_id != tenure_end_block.header.block_id() { - // not the block we asked for - warn!("Invalid tenure-end block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_end_block" => %self.tenure_end_block_id, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_end_block - .header - .verify_signer_signatures(&self.end_signer_keys) - { - // bad signature - warn!("Invalid tenure-end block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // extract the needful -- need the tenure-change payload (which proves that the tenure-end - // block is the tenure-start block for the next tenure) and the parent block ID (which is - // the next block to download). - let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-end block: failed to validate tenure-start"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - if !valid { - warn!("Invalid tenure-end block: not a well-formed tenure-start block"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - } - - let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { - warn!("Invalid tenure-end block: no tenure-change transaction"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - // tc_payload must point to the tenure-start block's header - if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { - warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; - "start_block_id" => %tenure_start_block.block_id(), - "end_block_id" => %tenure_end_block.block_id(), - "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, - "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-end header for tenure {} block={}; expect {} blocks", - &self.tenure_id_consensus_hash, - &tenure_end_block.block_id(), - tc_payload.previous_tenure_blocks - ); - self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); - self.state = NakamotoTenureDownloadState::GetTenureBlocks( - tenure_end_block.header.parent_block_id.clone(), - ); - Ok(()) - } - - /// Determine how many blocks must be in this tenure. - /// Returns None if we don't have the start and end blocks yet. - pub fn tenure_length(&self) -> Option { - self.tenure_end_header - .as_ref() - .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) - } - - /// Add downloaded tenure blocks to this machine. - /// If we have collected all tenure blocks, then return them and transition to the Done state. - /// - /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in - /// ascending order by height, and will include the tenure-start block but exclude the - /// tenure-end block. - /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to - /// the next block to fetch (stored in self.state) will be updated. - /// Returns Err(..) if the blocks were invalid. - pub fn try_accept_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest - let mut expected_block_id = block_cursor; - let mut count = 0; - for block in tenure_blocks.iter() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - expected_block_id = &block.header.parent_block_id; - count += 1; - if self - .tenure_blocks - .as_ref() - .map(|blocks| blocks.len()) - .unwrap_or(0) - .saturating_add(count) - > self.tenure_length().unwrap_or(0) as usize - { - // there are more blocks downloaded than indicated by the end-blocks tenure-change - // transaction. - warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); - "tenure_id" => %self.tenure_id_consensus_hash, - "count" => %count, - "tenure_length" => self.tenure_length().unwrap_or(0), - "num_blocks" => tenure_blocks.len()); - return Err(NetError::InvalidMessage); - } - } - - if let Some(blocks) = self.tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.tenure_blocks = Some(tenure_blocks); - } - - // did we reach the tenure start block? - let Some(blocks) = self.tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got None)"); - return Err(NetError::InvalidState); - }; - - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no tenure-start block (infallible)"); - return Err(NetError::InvalidState); - }; - - debug!( - "Accepted tenure blocks for tenure {} cursor={} ({})", - &self.tenure_id_consensus_hash, &block_cursor, count - ); - if earliest_block.block_id() != tenure_start_block.block_id() { - // still have more blocks to download - let next_block_id = earliest_block.header.parent_block_id.clone(); - debug!( - "Need more blocks for tenure {} (went from {} to {}, next is {})", - &self.tenure_id_consensus_hash, - &block_cursor, - &earliest_block.block_id(), - &next_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); - return Ok(None); - } - - // finished! - self.state = NakamotoTenureDownloadState::Done; - Ok(self - .tenure_blocks - .take() - .map(|blocks| blocks.into_iter().rev().collect())) - } - - /// Produce the next HTTP request that, when successfully executed, will fetch the data needed - /// to advance this state machine. - /// Not all states require an HTTP request for advanceement. - /// - /// Returns Ok(Some(request)) if a request is needed - /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's - /// state) - /// Returns Err(()) if we're done. - pub fn make_next_download_request( - &self, - peerhost: PeerHost, - ) -> Result, ()> { - let request = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { - debug!("Request tenure-start block {}", &start_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { - // we're waiting for some other downloader's block-fetch to complete - debug!( - "Waiting for tenure-end block {} until {:?}", - &_block_id, _deadline - ); - return Ok(None); - } - NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { - debug!("Request tenure-end block {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) - } - NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { - debug!("Downloading tenure ending at {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) - } - NakamotoTenureDownloadState::Done => { - // nothing more to do - return Err(()); - } - }; - Ok(Some(request)) - } - - /// Begin the next download request for this state machine. The request will be sent to the - /// data URL corresponding to self.naddr. - /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. - /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) - /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to - /// resolve its data URL to a socket address. - pub fn send_next_download_request( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(true); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let request = match self.make_next_download_request(peerhost) { - Ok(Some(request)) => request, - Ok(None) => { - return Ok(true); - } - Err(_) => { - return Ok(false); - } - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - self.idle = false; - Ok(true) - } - - /// Handle a received StacksHttpResponse and advance the state machine. - /// If we get the full tenure's blocks, then return them. - /// Returns Ok(Some([blocks])) if we successfully complete the state machine. - /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done - /// yet. The caller should now call `send_next_download_request()` - /// Returns Err(..) on failure to process the response. - pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - ) -> Result>, NetError> { - let handle_result = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { - debug!( - "Got download response for tenure-start block {}", - &_block_id - ); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e - })?; - self.try_accept_tenure_start_block(block)?; - Ok(None) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { - debug!("Invalid state -- Got download response for WaitForTenureBlock"); - Err(NetError::InvalidState) - } - NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { - debug!("Got download response to tenure-end block {}", &_block_id); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e - })?; - self.try_accept_tenure_end_block(&block)?; - Ok(None) - } - NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { - debug!( - "Got download response for tenure blocks ending at {}", - &_end_block_id - ); - let blocks = response.decode_nakamoto_tenure().map_err(|e| { - warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); - e - })?; - let blocks_opt = self.try_accept_tenure_blocks(blocks)?; - Ok(blocks_opt) - } - NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), - }; - self.idle = true; - handle_result - } - - pub fn is_done(&self) -> bool { - self.state == NakamotoTenureDownloadState::Done - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs deleted file mode 100644 index 28a40e7eb5..0000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, - NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// A set of confirmed downloader state machines assigned to one or more neighbors. The block -/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure -/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer -/// connections to downloader state machines, such that each peer is assigned to at most one -/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at -/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine -/// can make progress even if there is only one available peer (in which case, that peer will get -/// scheduled across multiple machines to drive their progress in the right sequence such that -/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). -pub struct NakamotoTenureDownloaderSet { - /// A list of instantiated downloaders that are in progress - pub(crate) downloaders: Vec>, - /// An assignment of peers to downloader machines in the `downloaders` list. - pub(crate) peers: HashMap, - /// The set of tenures that have been successfully downloaded (but possibly not yet stored or - /// processed) - pub(crate) completed_tenures: HashSet, -} - -impl NakamotoTenureDownloaderSet { - pub fn new() -> Self { - Self { - downloaders: vec![], - peers: HashMap::new(), - completed_tenures: HashSet::new(), - } - } - - /// Assign the given peer to the given downloader state machine. Allocate a slot for it if - /// needed. - fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { - debug!( - "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, &naddr - ); - if let Some(idx) = self.peers.get(&naddr) { - self.downloaders[*idx] = Some(downloader); - } else { - self.downloaders.push(Some(downloader)); - self.peers.insert(naddr, self.downloaders.len() - 1); - } - } - - /// Does the given neighbor have an assigned downloader state machine? - pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { - let Some(idx) = self.peers.get(naddr) else { - return false; - }; - let Some(downloader_opt) = self.downloaders.get(*idx) else { - return false; - }; - downloader_opt.is_some() - } - - /// Drop the downloader associated with the given neighbor, if any. - pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { - let Some(index) = self.peers.remove(naddr) else { - return; - }; - self.downloaders[index] = None; - } - - /// How many downloaders are there? - pub fn num_downloaders(&self) -> usize { - self.downloaders - .iter() - .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) - } - - /// How many downloaders are there, which are scheduled? - pub fn num_scheduled_downloaders(&self) -> usize { - let mut cnt = 0; - for (_, idx) in self.peers.iter() { - if let Some(Some(_)) = self.downloaders.get(*idx) { - cnt += 1; - } - } - cnt - } - - /// Add a sequence of (address, downloader) pairs to this downloader set. - pub(crate) fn add_downloaders( - &mut self, - iter: impl IntoIterator, - ) { - for (naddr, downloader) in iter { - if self.has_downloader(&naddr) { - debug!("Already have downloader for {}", &naddr); - continue; - } - self.add_downloader(naddr, downloader); - } - } - - /// Count up the number of in-flight messages, based on the states of each instantiated - /// downloader. - pub fn inflight(&self) -> usize { - let mut cnt = 0; - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.is_done() { - continue; - } - cnt += 1; - } - cnt - } - - /// Determine whether or not there exists a downloader for the given tenure, identified by its - /// consensus hash. - pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { - self.downloaders - .iter() - .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) - .is_some() - } - - /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. - pub fn is_empty(&self) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - continue; - } - debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); - return false; - } - true - } - - /// Try to resume processing a download state machine with a given peer. Since a peer is - /// detached from the machine after a single RPC call, this call is needed to re-attach it to a - /// (potentially different, unblocked) machine for the next RPC call to this peer. - /// - /// Returns true if the peer gets scheduled. - /// Returns false if not. - pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { - debug!("Try resume {}", &naddr); - if let Some(idx) = self.peers.get(&naddr) { - let Some(Some(_downloader)) = self.downloaders.get(*idx) else { - return false; - }; - - debug!( - "Peer {} already bound to downloader for {}", - &naddr, &_downloader.tenure_id_consensus_hash - ); - return true; - } - for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { - let Some(downloader) = downloader_opt else { - continue; - }; - if !downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.naddr != naddr { - continue; - } - debug!( - "Assign peer {} to work on downloader for {} in state {}", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - self.peers.insert(naddr, i); - return true; - } - return false; - } - - /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to - /// blocked downloaders. - pub fn clear_available_peers(&mut self) { - let mut idled: Vec = vec![]; - for (naddr, i) in self.peers.iter() { - let Some(downloader_opt) = self.downloaders.get(*i) else { - // should be unreachable - idled.push(naddr.clone()); - continue; - }; - let Some(downloader) = downloader_opt else { - debug!("Remove peer {} for null download {}", &naddr, i); - idled.push(naddr.clone()); - continue; - }; - if downloader.idle || downloader.is_waiting() { - debug!( - "Remove idled peer {} for tenure download {}", - &naddr, &downloader.tenure_id_consensus_hash - ); - idled.push(naddr.clone()); - } - } - for naddr in idled.into_iter() { - self.peers.remove(&naddr); - } - } - - /// Clear out downloaders (but not their peers) that have finished. The caller should follow - /// this up with a call to `clear_available_peers()`. - pub fn clear_finished_downloaders(&mut self) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - *downloader_opt = None; - } - } - } - - /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These - /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. - pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap { - let mut ret = HashMap::new(); - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - let Some(block) = downloader.tenure_start_block.as_ref() else { - continue; - }; - ret.insert(block.block_id(), block.clone()); - } - ret - } - - /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their - /// tenure-end blocks. - /// Return a list of peers driving downloaders with failing `tenure_start_blocks` - pub(crate) fn handle_tenure_end_blocks( - &mut self, - tenure_start_blocks: &HashMap, - ) -> Vec { - debug!( - "handle tenure-end blocks: {:?}", - &tenure_start_blocks.keys().collect::>() - ); - let mut dead = vec![]; - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = - &downloader.state - else { - continue; - }; - let Some(end_block) = tenure_start_blocks.get(end_block_id) else { - continue; - }; - if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { - warn!( - "Failed to accept tenure end-block {} for tenure {}: {:?}", - &end_block.block_id(), - &downloader.tenure_id_consensus_hash, - &e - ); - dead.push(downloader.naddr.clone()); - } - } - dead - } - - /// Does there exist a downloader (possibly unscheduled) for the given tenure? - pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if &downloader.tenure_id_consensus_hash == tenure_id { - debug!( - "Have downloader for tenure {} already (idle={}, waiting={}, state={})", - tenure_id, - downloader.idle, - downloader.is_waiting(), - &downloader.state - ); - return true; - } - } - false - } - - /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor - /// block, we need to go and directly fetch its end block instead of waiting for another - /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method - /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. - pub(crate) fn try_transition_fetch_tenure_end_blocks( - &mut self, - tenure_block_ids: &HashMap, - ) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - downloader.transition_to_fetch_end_block_on_timeout(); - } - - // find tenures in which we need to fetch the tenure-end block directly. - let mut last_available_tenures: HashSet = HashSet::new(); - for (_, all_available) in tenure_block_ids.iter() { - for (_, available) in all_available.iter() { - if available.fetch_end_block { - last_available_tenures.insert(available.end_block_id.clone()); - } - } - } - - // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to - // fetching - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - if !downloader.idle { - continue; - } - if !downloader.is_waiting() { - continue; - } - if !last_available_tenures.contains(&downloader.tenure_end_block_id) { - continue; - } - debug!( - "Transition downloader for {} from waiting to fetching", - &downloader.tenure_id_consensus_hash - ); - if let Err(e) = downloader.transition_to_fetch_end_block() { - warn!( - "Downloader for {} failed to transition to fetch end block: {:?}", - &downloader.tenure_id_consensus_hash, &e - ); - } - } - } - - /// Create a given number of downloads from a schedule and availability set. - /// Removes items from the schedule, and neighbors from the availability set. - /// A neighbor will be issued at most one request. - pub(crate) fn make_tenure_downloaders( - &mut self, - schedule: &mut VecDeque, - available: &mut HashMap>, - tenure_block_ids: &HashMap, - count: usize, - current_reward_cycles: &BTreeMap, - ) { - debug!("make_tenure_downloaders"; - "schedule" => ?schedule, - "available" => ?available, - "tenure_block_ids" => ?tenure_block_ids, - "inflight" => %self.inflight(), - "count" => count, - "running" => self.num_downloaders(), - "scheduled" => self.num_scheduled_downloaders()); - - self.clear_finished_downloaders(); - self.clear_available_peers(); - self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); - while self.inflight() < count { - let Some(ch) = schedule.front() else { - break; - }; - if self.completed_tenures.contains(&ch) { - debug!("Already successfully downloaded tenure {}", &ch); - schedule.pop_front(); - continue; - } - let Some(neighbors) = available.get_mut(ch) else { - // not found on any neighbors, so stop trying this tenure - debug!("No neighbors have tenure {}", ch); - schedule.pop_front(); - continue; - }; - if neighbors.is_empty() { - // no more neighbors to try - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - } - let Some(naddr) = neighbors.pop() else { - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - }; - if self.try_resume_peer(naddr.clone()) { - continue; - }; - if self.has_downloader_for_tenure(&ch) { - schedule.pop_front(); - continue; - } - - let Some(available_tenures) = tenure_block_ids.get(&naddr) else { - // this peer doesn't have any known tenures, so try the others - debug!("No tenures available from {}", &naddr); - continue; - }; - let Some(tenure_info) = available_tenures.get(ch) else { - // this peer does not have a tenure start/end block for this tenure, so try the - // others. - debug!("Neighbor {} does not serve tenure {}", &naddr, ch); - continue; - }; - let Some(Some(start_reward_set)) = current_reward_cycles - .get(&tenure_info.start_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", - tenure_info.start_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - let Some(Some(end_reward_set)) = current_reward_cycles - .get(&tenure_info.end_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", - tenure_info.end_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - - debug!( - "Download tenure {} (start={}, end={}) (rc {},{})", - &ch, - &tenure_info.start_block_id, - &tenure_info.end_block_id, - tenure_info.start_reward_cycle, - tenure_info.end_reward_cycle - ); - let tenure_download = NakamotoTenureDownloader::new( - ch.clone(), - tenure_info.start_block_id.clone(), - tenure_info.end_block_id.clone(), - naddr.clone(), - start_reward_set.clone(), - end_reward_set.clone(), - ); - - debug!("Request tenure {} from neighbor {}", ch, &naddr); - self.add_downloader(naddr, tenure_download); - schedule.pop_front(); - } - } - - /// Run all confirmed downloaders. - /// * Identify neighbors for which we do not have an inflight request - /// * Get each such neighbor's downloader, and generate its next HTTP reqeust. Send that - /// request to the neighbor and begin driving the underlying socket I/O. - /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance - /// its state. - /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. - /// - /// Returns the set of downloaded blocks obtained for completed downloaders. These will be - /// full confirmed tenures. - pub fn run( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> HashMap> { - let addrs: Vec<_> = self.peers.keys().cloned().collect(); - let mut finished = vec![]; - let mut finished_tenures = vec![]; - let mut new_blocks = HashMap::new(); - - // send requests - for (naddr, index) in self.peers.iter() { - if neighbor_rpc.has_inflight(&naddr) { - debug!("Peer {} has an inflight request", &naddr); - continue; - } - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - if downloader.is_done() { - debug!("Downloader for {} is done", &naddr); - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - debug!( - "Send request to {} for tenure {} (state {})", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - debug!("Downloader for {} failed; this peer is dead", &naddr); - neighbor_rpc.add_dead(network, naddr); - continue; - }; - if !sent { - // this downloader is dead or broken - finished.push(naddr.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(&naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - // handle responses - for (naddr, response) in neighbor_rpc.collect_replies(network) { - let Some(index) = self.peers.get(&naddr) else { - debug!("No downloader for {}", &naddr); - continue; - }; - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - debug!("Got response from {}", &naddr); - - let Ok(blocks_opt) = downloader - .handle_next_download_response(response) - .map_err(|e| { - debug!("Failed to handle response from {}: {:?}", &naddr, &e); - e - }) - else { - debug!("Failed to handle download response from {}", &naddr); - neighbor_rpc.add_dead(network, &naddr); - continue; - }; - - let Some(blocks) = blocks_opt else { - continue; - }; - - debug!( - "Got {} blocks for tenure {}", - blocks.len(), - &downloader.tenure_id_consensus_hash - ); - new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); - if downloader.is_done() { - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - new_blocks - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs deleted file mode 100644 index 28a40e7eb5..0000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, - NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// A set of confirmed downloader state machines assigned to one or more neighbors. The block -/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure -/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer -/// connections to downloader state machines, such that each peer is assigned to at most one -/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at -/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine -/// can make progress even if there is only one available peer (in which case, that peer will get -/// scheduled across multiple machines to drive their progress in the right sequence such that -/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). -pub struct NakamotoTenureDownloaderSet { - /// A list of instantiated downloaders that are in progress - pub(crate) downloaders: Vec>, - /// An assignment of peers to downloader machines in the `downloaders` list. - pub(crate) peers: HashMap, - /// The set of tenures that have been successfully downloaded (but possibly not yet stored or - /// processed) - pub(crate) completed_tenures: HashSet, -} - -impl NakamotoTenureDownloaderSet { - pub fn new() -> Self { - Self { - downloaders: vec![], - peers: HashMap::new(), - completed_tenures: HashSet::new(), - } - } - - /// Assign the given peer to the given downloader state machine. Allocate a slot for it if - /// needed. - fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { - debug!( - "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, &naddr - ); - if let Some(idx) = self.peers.get(&naddr) { - self.downloaders[*idx] = Some(downloader); - } else { - self.downloaders.push(Some(downloader)); - self.peers.insert(naddr, self.downloaders.len() - 1); - } - } - - /// Does the given neighbor have an assigned downloader state machine? - pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { - let Some(idx) = self.peers.get(naddr) else { - return false; - }; - let Some(downloader_opt) = self.downloaders.get(*idx) else { - return false; - }; - downloader_opt.is_some() - } - - /// Drop the downloader associated with the given neighbor, if any. - pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { - let Some(index) = self.peers.remove(naddr) else { - return; - }; - self.downloaders[index] = None; - } - - /// How many downloaders are there? - pub fn num_downloaders(&self) -> usize { - self.downloaders - .iter() - .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) - } - - /// How many downloaders are there, which are scheduled? - pub fn num_scheduled_downloaders(&self) -> usize { - let mut cnt = 0; - for (_, idx) in self.peers.iter() { - if let Some(Some(_)) = self.downloaders.get(*idx) { - cnt += 1; - } - } - cnt - } - - /// Add a sequence of (address, downloader) pairs to this downloader set. - pub(crate) fn add_downloaders( - &mut self, - iter: impl IntoIterator, - ) { - for (naddr, downloader) in iter { - if self.has_downloader(&naddr) { - debug!("Already have downloader for {}", &naddr); - continue; - } - self.add_downloader(naddr, downloader); - } - } - - /// Count up the number of in-flight messages, based on the states of each instantiated - /// downloader. - pub fn inflight(&self) -> usize { - let mut cnt = 0; - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.is_done() { - continue; - } - cnt += 1; - } - cnt - } - - /// Determine whether or not there exists a downloader for the given tenure, identified by its - /// consensus hash. - pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { - self.downloaders - .iter() - .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) - .is_some() - } - - /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. - pub fn is_empty(&self) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - continue; - } - debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); - return false; - } - true - } - - /// Try to resume processing a download state machine with a given peer. Since a peer is - /// detached from the machine after a single RPC call, this call is needed to re-attach it to a - /// (potentially different, unblocked) machine for the next RPC call to this peer. - /// - /// Returns true if the peer gets scheduled. - /// Returns false if not. - pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { - debug!("Try resume {}", &naddr); - if let Some(idx) = self.peers.get(&naddr) { - let Some(Some(_downloader)) = self.downloaders.get(*idx) else { - return false; - }; - - debug!( - "Peer {} already bound to downloader for {}", - &naddr, &_downloader.tenure_id_consensus_hash - ); - return true; - } - for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { - let Some(downloader) = downloader_opt else { - continue; - }; - if !downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.naddr != naddr { - continue; - } - debug!( - "Assign peer {} to work on downloader for {} in state {}", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - self.peers.insert(naddr, i); - return true; - } - return false; - } - - /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to - /// blocked downloaders. - pub fn clear_available_peers(&mut self) { - let mut idled: Vec = vec![]; - for (naddr, i) in self.peers.iter() { - let Some(downloader_opt) = self.downloaders.get(*i) else { - // should be unreachable - idled.push(naddr.clone()); - continue; - }; - let Some(downloader) = downloader_opt else { - debug!("Remove peer {} for null download {}", &naddr, i); - idled.push(naddr.clone()); - continue; - }; - if downloader.idle || downloader.is_waiting() { - debug!( - "Remove idled peer {} for tenure download {}", - &naddr, &downloader.tenure_id_consensus_hash - ); - idled.push(naddr.clone()); - } - } - for naddr in idled.into_iter() { - self.peers.remove(&naddr); - } - } - - /// Clear out downloaders (but not their peers) that have finished. The caller should follow - /// this up with a call to `clear_available_peers()`. - pub fn clear_finished_downloaders(&mut self) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - *downloader_opt = None; - } - } - } - - /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These - /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. - pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap { - let mut ret = HashMap::new(); - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - let Some(block) = downloader.tenure_start_block.as_ref() else { - continue; - }; - ret.insert(block.block_id(), block.clone()); - } - ret - } - - /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their - /// tenure-end blocks. - /// Return a list of peers driving downloaders with failing `tenure_start_blocks` - pub(crate) fn handle_tenure_end_blocks( - &mut self, - tenure_start_blocks: &HashMap, - ) -> Vec { - debug!( - "handle tenure-end blocks: {:?}", - &tenure_start_blocks.keys().collect::>() - ); - let mut dead = vec![]; - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = - &downloader.state - else { - continue; - }; - let Some(end_block) = tenure_start_blocks.get(end_block_id) else { - continue; - }; - if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { - warn!( - "Failed to accept tenure end-block {} for tenure {}: {:?}", - &end_block.block_id(), - &downloader.tenure_id_consensus_hash, - &e - ); - dead.push(downloader.naddr.clone()); - } - } - dead - } - - /// Does there exist a downloader (possibly unscheduled) for the given tenure? - pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if &downloader.tenure_id_consensus_hash == tenure_id { - debug!( - "Have downloader for tenure {} already (idle={}, waiting={}, state={})", - tenure_id, - downloader.idle, - downloader.is_waiting(), - &downloader.state - ); - return true; - } - } - false - } - - /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor - /// block, we need to go and directly fetch its end block instead of waiting for another - /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method - /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. - pub(crate) fn try_transition_fetch_tenure_end_blocks( - &mut self, - tenure_block_ids: &HashMap, - ) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - downloader.transition_to_fetch_end_block_on_timeout(); - } - - // find tenures in which we need to fetch the tenure-end block directly. - let mut last_available_tenures: HashSet = HashSet::new(); - for (_, all_available) in tenure_block_ids.iter() { - for (_, available) in all_available.iter() { - if available.fetch_end_block { - last_available_tenures.insert(available.end_block_id.clone()); - } - } - } - - // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to - // fetching - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - if !downloader.idle { - continue; - } - if !downloader.is_waiting() { - continue; - } - if !last_available_tenures.contains(&downloader.tenure_end_block_id) { - continue; - } - debug!( - "Transition downloader for {} from waiting to fetching", - &downloader.tenure_id_consensus_hash - ); - if let Err(e) = downloader.transition_to_fetch_end_block() { - warn!( - "Downloader for {} failed to transition to fetch end block: {:?}", - &downloader.tenure_id_consensus_hash, &e - ); - } - } - } - - /// Create a given number of downloads from a schedule and availability set. - /// Removes items from the schedule, and neighbors from the availability set. - /// A neighbor will be issued at most one request. - pub(crate) fn make_tenure_downloaders( - &mut self, - schedule: &mut VecDeque, - available: &mut HashMap>, - tenure_block_ids: &HashMap, - count: usize, - current_reward_cycles: &BTreeMap, - ) { - debug!("make_tenure_downloaders"; - "schedule" => ?schedule, - "available" => ?available, - "tenure_block_ids" => ?tenure_block_ids, - "inflight" => %self.inflight(), - "count" => count, - "running" => self.num_downloaders(), - "scheduled" => self.num_scheduled_downloaders()); - - self.clear_finished_downloaders(); - self.clear_available_peers(); - self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); - while self.inflight() < count { - let Some(ch) = schedule.front() else { - break; - }; - if self.completed_tenures.contains(&ch) { - debug!("Already successfully downloaded tenure {}", &ch); - schedule.pop_front(); - continue; - } - let Some(neighbors) = available.get_mut(ch) else { - // not found on any neighbors, so stop trying this tenure - debug!("No neighbors have tenure {}", ch); - schedule.pop_front(); - continue; - }; - if neighbors.is_empty() { - // no more neighbors to try - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - } - let Some(naddr) = neighbors.pop() else { - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - }; - if self.try_resume_peer(naddr.clone()) { - continue; - }; - if self.has_downloader_for_tenure(&ch) { - schedule.pop_front(); - continue; - } - - let Some(available_tenures) = tenure_block_ids.get(&naddr) else { - // this peer doesn't have any known tenures, so try the others - debug!("No tenures available from {}", &naddr); - continue; - }; - let Some(tenure_info) = available_tenures.get(ch) else { - // this peer does not have a tenure start/end block for this tenure, so try the - // others. - debug!("Neighbor {} does not serve tenure {}", &naddr, ch); - continue; - }; - let Some(Some(start_reward_set)) = current_reward_cycles - .get(&tenure_info.start_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", - tenure_info.start_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - let Some(Some(end_reward_set)) = current_reward_cycles - .get(&tenure_info.end_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", - tenure_info.end_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - - debug!( - "Download tenure {} (start={}, end={}) (rc {},{})", - &ch, - &tenure_info.start_block_id, - &tenure_info.end_block_id, - tenure_info.start_reward_cycle, - tenure_info.end_reward_cycle - ); - let tenure_download = NakamotoTenureDownloader::new( - ch.clone(), - tenure_info.start_block_id.clone(), - tenure_info.end_block_id.clone(), - naddr.clone(), - start_reward_set.clone(), - end_reward_set.clone(), - ); - - debug!("Request tenure {} from neighbor {}", ch, &naddr); - self.add_downloader(naddr, tenure_download); - schedule.pop_front(); - } - } - - /// Run all confirmed downloaders. - /// * Identify neighbors for which we do not have an inflight request - /// * Get each such neighbor's downloader, and generate its next HTTP reqeust. Send that - /// request to the neighbor and begin driving the underlying socket I/O. - /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance - /// its state. - /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. - /// - /// Returns the set of downloaded blocks obtained for completed downloaders. These will be - /// full confirmed tenures. - pub fn run( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> HashMap> { - let addrs: Vec<_> = self.peers.keys().cloned().collect(); - let mut finished = vec![]; - let mut finished_tenures = vec![]; - let mut new_blocks = HashMap::new(); - - // send requests - for (naddr, index) in self.peers.iter() { - if neighbor_rpc.has_inflight(&naddr) { - debug!("Peer {} has an inflight request", &naddr); - continue; - } - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - if downloader.is_done() { - debug!("Downloader for {} is done", &naddr); - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - debug!( - "Send request to {} for tenure {} (state {})", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - debug!("Downloader for {} failed; this peer is dead", &naddr); - neighbor_rpc.add_dead(network, naddr); - continue; - }; - if !sent { - // this downloader is dead or broken - finished.push(naddr.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(&naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - // handle responses - for (naddr, response) in neighbor_rpc.collect_replies(network) { - let Some(index) = self.peers.get(&naddr) else { - debug!("No downloader for {}", &naddr); - continue; - }; - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - debug!("Got response from {}", &naddr); - - let Ok(blocks_opt) = downloader - .handle_next_download_response(response) - .map_err(|e| { - debug!("Failed to handle response from {}: {:?}", &naddr, &e); - e - }) - else { - debug!("Failed to handle download response from {}", &naddr); - neighbor_rpc.add_dead(network, &naddr); - continue; - }; - - let Some(blocks) = blocks_opt else { - continue; - }; - - debug!( - "Got {} blocks for tenure {}", - blocks.len(), - &downloader.tenure_id_consensus_hash - ); - new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); - if downloader.is_done() { - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - new_blocks - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs deleted file mode 100644 index c96f718d2b..0000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs +++ /dev/null @@ -1,867 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::RewardCycleInfo; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, - WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for a unconfirmed tenures. These include the ongoing tenure, as well as the -/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but -/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoUnconfirmedDownloadState { - /// Getting the tenure tip information - GetTenureInfo, - /// Get the tenure start block for the ongoing tenure. - /// The inner value is tenure-start block ID of the ongoing tenure. - GetTenureStartBlock(StacksBlockId), - /// Receiving unconfirmed tenure blocks. - /// The inner value is the block ID of the next block to fetch. - GetUnconfirmedTenureBlocks(StacksBlockId), - /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block - /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). - Done, -} - -impl fmt::Display for NakamotoUnconfirmedDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for the unconfirmed tenures. It operates in the following steps: -/// -/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip -/// 2. Get the tenure-start block for the unconfirmed chain tip -/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the -/// immediate child of the one obtained in (2) -/// -/// Once this state-machine finishes execution, the tenure-start block is used to construct a -/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. -/// -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoUnconfirmedTenureDownloader { - /// state of this machine - pub state: NakamotoUnconfirmedDownloadState, - /// Address of who we're asking - pub naddr: NeighborAddress, - /// reward set of the highest confirmed tenure - pub confirmed_signer_keys: Option, - /// reward set of the unconfirmed (ongoing) tenure - pub unconfirmed_signer_keys: Option, - /// Block ID of this node's highest-processed block. - /// We will not download any blocks lower than this, if it's set. - pub highest_processed_block_id: Option, - /// Highest processed block height (which may not need to be loaded) - pub highest_processed_block_height: Option, - - /// Tenure tip info we obtained for this peer - pub tenure_tip: Option, - /// Tenure start block for the ongoing tip. - /// This is also the tenure-end block for the highest-complete tip. - pub unconfirmed_tenure_start_block: Option, - /// Unconfirmed tenure blocks obtained - pub unconfirmed_tenure_blocks: Option>, -} - -impl NakamotoUnconfirmedTenureDownloader { - /// Make a new downloader which will download blocks from the tip back down to the optional - /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). - pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self { - Self { - state: NakamotoUnconfirmedDownloadState::GetTenureInfo, - naddr, - confirmed_signer_keys: None, - unconfirmed_signer_keys: None, - highest_processed_block_id, - highest_processed_block_height: None, - tenure_tip: None, - unconfirmed_tenure_start_block: None, - unconfirmed_tenure_blocks: None, - } - } - - /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is - /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote - /// node). - pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { - self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) - } - - /// Set the highest-processed block. - /// This can be performed by the downloader itself in order to inform ongoing requests for - /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node - /// has already handled. - pub fn set_highest_processed_block( - &mut self, - highest_processed_block_id: StacksBlockId, - highest_processed_block_height: u64, - ) { - self.highest_processed_block_id = Some(highest_processed_block_id); - self.highest_processed_block_height = Some(highest_processed_block_height); - } - - /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. - /// - /// * tenure_tip.consensus_hash - /// This is the consensus hash of the remote node's ongoing tenure. It may not be the - /// sortition tip, e.g. if the tenure spans multiple sortitions. - /// * tenure_tip.tenure_start_block_id - /// This is the first block ID of the ongoing unconfirmed tenure. - /// * tenure_tip.parent_consensus_hash - /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest - /// complete tenure, for which we know the start and end block IDs. - /// * tenure_tip.parent_tenure_start_block_id - /// This is the tenure start block for the highest complete tenure. It should be equal to - /// the winning Stacks block hash of the snapshot for the ongoing tenure. - /// - /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go - /// fetch it again; just get the new unconfirmed blocks. - pub fn try_accept_tenure_info( - &mut self, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - remote_tenure_tip: RPCGetTenureInfo, - current_reward_sets: &BTreeMap, - ) -> Result<(), NetError> { - if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { - return Err(NetError::InvalidState); - } - if self.tenure_tip.is_some() { - return Err(NetError::InvalidState); - } - - debug!("Got tenure info {:?}", remote_tenure_tip); - debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); - - // authenticate consensus hashes against canonical chain history - let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for tenure {}", - &remote_tenure_tip.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.parent_consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for parent tenure {}", - &remote_tenure_tip.parent_consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - let ih = sortdb.index_handle(&local_sort_tip.sortition_id); - let ancestor_local_tenure_sn = ih - .get_block_snapshot_by_height(local_tenure_sn.block_height)? - .ok_or_else(|| { - debug!( - "No tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { - // .consensus_hash is not on the canonical fork - warn!("Unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash); - return Err(DBError::NotFoundError.into()); - } - let ancestor_parent_local_tenure_sn = ih - .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? - .ok_or_else(|| { - debug!( - "No parent tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError.into()) - })?; - - if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { - // .parent_consensus_hash is not on the canonical fork - warn!("Parent unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(DBError::NotFoundError.into()); - } - - // parent tenure sortition must precede the ongoing tenure sortition - if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { - warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash, - "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(NetError::InvalidMessage); - } - - // parent tenure start block ID must be the winning block hash for the ongoing tenure's - // snapshot - if local_tenure_sn.winning_stacks_block_hash.0 - != remote_tenure_tip.parent_tenure_start_block_id.0 - { - debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; - "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, - "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); - return Err(NetError::StaleView); - } - - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - // we've synchronized this tenure before, so don't get anymore blocks before it. - let highest_processed_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(highest_processed_block_id)? - .ok_or_else(|| { - debug!("No such Nakamoto block {}", &highest_processed_block_id); - NetError::DBError(DBError::NotFoundError) - })? - .0; - - let highest_processed_block_height = highest_processed_block.header.chain_length; - self.highest_processed_block_height = Some(highest_processed_block_height); - - if &remote_tenure_tip.tip_block_id == highest_processed_block_id - || highest_processed_block_height > remote_tenure_tip.tip_height - { - // nothing to do -- we're at or ahead of the remote peer, so finish up. - // If we don't have the tenure-start block for the confirmed tenure that the remote - // peer claims to have, then the remote peer has sent us invalid data and we should - // treat it as such. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or(NetError::InvalidMessage)? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::Done; - } - } - - if self.state == NakamotoUnconfirmedDownloadState::Done { - // only need to remember the tenure tip - self.tenure_tip = Some(remote_tenure_tip); - return Ok(()); - } - - // we're not finished - let tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) - .expect("FATAL: sortition from before system start"); - let parent_tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - parent_local_tenure_sn.block_height, - ) - .expect("FATAL: sortition from before system start"); - - // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions - let Some(Some(confirmed_reward_set)) = current_reward_sets - .get(&parent_tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for confirmed tenure {} (rc {})", - &parent_local_tenure_sn.consensus_hash, parent_tenure_rc - ); - return Err(NetError::InvalidState); - }; - - let Some(Some(unconfirmed_reward_set)) = current_reward_sets - .get(&tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for unconfirmed tenure {} (rc {})", - &local_tenure_sn.consensus_hash, tenure_rc - ); - return Err(NetError::InvalidState); - }; - - if chainstate - .nakamoto_blocks_db() - .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? - { - // proceed to get unconfirmed blocks. We already have the tenure-start block. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or_else(|| { - debug!( - "No such tenure-start Nakamoto block {}", - &remote_tenure_tip.tenure_start_block_id - ); - NetError::DBError(DBError::NotFoundError) - })? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - remote_tenure_tip.tip_block_id.clone(), - ); - } else { - // get the tenure-start block first - self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( - remote_tenure_tip.tenure_start_block_id.clone(), - ); - } - - debug!( - "Will validate unconfirmed blocks with reward sets in ({},{})", - parent_tenure_rc, tenure_rc - ); - self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); - self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); - self.tenure_tip = Some(remote_tenure_tip); - - Ok(()) - } - - /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the unconfirmed tenure start block was valid - /// Returns Err(..) if it was not valid, or if this function was called out of sequence. - pub fn try_accept_unconfirmed_tenure_start_block( - &mut self, - unconfirmed_tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = - &self.state - else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - // stacker signature has to match the current reward set - if let Err(e) = unconfirmed_tenure_start_block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // block has to match the expected hash - if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { - warn!("Invalid tenure-start block"; - "tenure_id_start_block" => %tenure_start_block_id, - "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - // furthermore, the block has to match the expected tenure ID - if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { - warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); - return Err(NetError::InvalidMessage); - } - - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone(), - ); - Ok(()) - } - - /// Add downloaded unconfirmed tenure blocks. - /// If we have collected all tenure blocks, then return them. - /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the - /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come - /// after the highest-processed block (if set). - /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on invalid state or invalid block. - pub fn try_accept_unconfirmed_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = - &self.state - else { - return Err(NetError::InvalidState); - }; - - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - debug!("No tenure blocks obtained"); - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest. - // If there's a tenure-start block, it must be last. - let mut expected_block_id = last_block_id; - let mut finished_download = false; - let mut last_block_index = None; - for (cnt, block) in tenure_blocks.iter().enumerate() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id()); - return Err(NetError::InvalidMessage); - } - if let Err(e) = block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // we may or may not need the tenure-start block for the unconfirmed tenure. But if we - // do, make sure it's valid, and it's the last block we receive. - let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - }; - if is_tenure_start { - // this is the tenure-start block, so make sure it matches our /v3/tenure/info - if block.header.block_id() != tenure_tip.tenure_start_block_id { - warn!("Unexpected tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); - return Err(NetError::InvalidMessage); - } - - if cnt.saturating_add(1) != tenure_blocks.len() { - warn!("Invalid tenure stream -- got tenure-start before end of tenure"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "cnt" => cnt, - "len" => tenure_blocks.len(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - finished_download = true; - last_block_index = Some(cnt); - break; - } - - debug!("Got unconfirmed tenure block {}", &block.header.block_id()); - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - if expected_block_id == highest_processed_block_id { - // got all the blocks we asked for - debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_height) = - self.highest_processed_block_height.as_ref() - { - if &block.header.chain_length <= highest_processed_block_height { - // no need to continue this download - debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - expected_block_id = &block.header.parent_block_id; - last_block_index = Some(cnt); - } - - // blocks after the last_block_index were not processed, so should be dropped - if let Some(last_block_index) = last_block_index { - tenure_blocks.truncate(last_block_index + 1); - } - - if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.unconfirmed_tenure_blocks = Some(tenure_blocks); - } - - if finished_download { - // we have all of the unconfirmed tenure blocks that were requested. - // only return those newer than the highest block. - self.state = NakamotoUnconfirmedDownloadState::Done; - let highest_processed_block_height = - *self.highest_processed_block_height.as_ref().unwrap_or(&0); - - debug!("Finished receiving unconfirmed tenure"); - return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { - blocks - .into_iter() - .filter(|block| block.header.chain_length > highest_processed_block_height) - .rev() - .collect() - })); - } - - let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - // still have more to get - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - let next_block_id = earliest_block.header.parent_block_id.clone(); - - debug!( - "Will resume fetching unconfirmed tenure blocks starting at {}", - &next_block_id - ); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); - Ok(None) - } - - /// Once this machine runs to completion, examine its state to see if we still need to fetch - /// the highest complete tenure. We may not need to, especially if we're just polling for new - /// unconfirmed blocks. - /// - /// Return Ok(true) if we need it still - /// Return Ok(false) if we already have it - /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. - pub fn need_highest_complete_tenure( - &self, - chainstate: &StacksChainState, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() - else { - return Err(NetError::InvalidState); - }; - - // if we've processed the unconfirmed tenure-start block already, then we've necessarily - // downloaded and processed the highest-complete tenure already. - Ok(!NakamotoChainState::has_block_header( - chainstate.db(), - &unconfirmed_tenure_start_block.header.block_id(), - false, - )?) - } - - /// Determine if we can produce a highest-complete tenure request. - /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure - pub fn can_make_highest_complete_tenure_downloader( - &self, - sortdb: &SortitionDB, - ) -> Result { - let Some(tenure_tip) = &self.tenure_tip else { - return Ok(false); - }; - - let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &tenure_tip.parent_consensus_hash, - )? - else { - return Ok(false); - }; - - let Some(tip_sn) = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? - else { - return Ok(false); - }; - - let Some(parent_tenure) = - SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? - else { - return Ok(false); - }; - - let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? - else { - return Ok(false); - }; - - if parent_tenure.epoch_id < StacksEpochId::Epoch30 - || tip_tenure.epoch_id < StacksEpochId::Epoch30 - { - debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; - "start_tenure" => %tenure_tip.parent_consensus_hash, - "end_tenure" => %tenure_tip.consensus_hash, - "start_tenure_epoch" => %parent_tenure.epoch_id, - "end_tenure_epoch" => %tip_tenure.epoch_id - ); - return Ok(false); - } - - Ok(true) - } - - /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the - /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get - /// its tenure-start block. - /// - /// Returns Ok(downloader) on success - /// Returns Err(..) if we call this function out of sequence. - pub fn make_highest_complete_tenure_downloader( - &self, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(tenure_tip) = &self.tenure_tip else { - return Err(NetError::InvalidState); - }; - let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - - debug!( - "Create downloader for highest complete tenure {} known by {}", - &tenure_tip.parent_consensus_hash, &self.naddr, - ); - let ntd = NakamotoTenureDownloader::new( - tenure_tip.parent_consensus_hash.clone(), - tenure_tip.parent_tenure_start_block_id.clone(), - tenure_tip.tenure_start_block_id.clone(), - self.naddr.clone(), - confirmed_signer_keys.clone(), - unconfirmed_signer_keys.clone(), - ); - - Ok(ntd) - } - - /// Produce the next HTTP request that, when successfully executed, will advance this state - /// machine. - /// - /// Returns Some(request) if a request must be sent. - /// Returns None if we're done - pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - // need to get the tenure tip - return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost)); - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_block( - peerhost, - block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_tenure( - peerhost, - tip_block_id.clone(), - self.highest_processed_block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::Done => { - // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed - // tenure downloader using the earliest unconfirmed tenure block. - return None; - } - } - } - - /// Begin the next download request for this state machine. - /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. It's - /// up to the caller to determine when it's appropriate to convert this state machine into a - /// `NakamotoTenureDownloader`. - /// Returns Err(..) if the neighbor is dead or broken. - pub fn send_next_download_request( - &self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result<(), NetError> { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(()); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let Some(request) = self.make_next_download_request(peerhost) else { - // treat this downloader as still in-flight since the overall state machine will need - // to keep it around long enough to convert it into a tenure downloader for the highest - // complete tenure. - return Ok(()); - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - Ok(()) - } - - /// Handle a received StacksHttpResponse and advance this machine's state - /// If we get the full tenure, return it. - /// - /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure - /// Returns Ok(None) if we're still working, in which case the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on unrecoverable failure to advance state - pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - current_reward_sets: &BTreeMap, - ) -> Result>, NetError> { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - debug!("Got tenure-info response"); - let remote_tenure_info = response.decode_nakamoto_tenure_info()?; - debug!("Got tenure-info response: {:?}", &remote_tenure_info); - self.try_accept_tenure_info( - sortdb, - local_sort_tip, - chainstate, - remote_tenure_info, - current_reward_sets, - )?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { - debug!("Got tenure start-block response"); - let block = response.decode_nakamoto_block()?; - self.try_accept_unconfirmed_tenure_start_block(block)?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { - debug!("Got unconfirmed tenure blocks response"); - let blocks = response.decode_nakamoto_tenure()?; - let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; - debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); - Ok(accepted_opt) - } - NakamotoUnconfirmedDownloadState::Done => { - return Err(NetError::InvalidState); - } - } - } - - /// Is this machine finished? - pub fn is_done(&self) -> bool { - self.state == NakamotoUnconfirmedDownloadState::Done - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs deleted file mode 100644 index c96f718d2b..0000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs +++ /dev/null @@ -1,867 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::RewardCycleInfo; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, - WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for a unconfirmed tenures. These include the ongoing tenure, as well as the -/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but -/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoUnconfirmedDownloadState { - /// Getting the tenure tip information - GetTenureInfo, - /// Get the tenure start block for the ongoing tenure. - /// The inner value is tenure-start block ID of the ongoing tenure. - GetTenureStartBlock(StacksBlockId), - /// Receiving unconfirmed tenure blocks. - /// The inner value is the block ID of the next block to fetch. - GetUnconfirmedTenureBlocks(StacksBlockId), - /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block - /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). - Done, -} - -impl fmt::Display for NakamotoUnconfirmedDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for the unconfirmed tenures. It operates in the following steps: -/// -/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip -/// 2. Get the tenure-start block for the unconfirmed chain tip -/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the -/// immediate child of the one obtained in (2) -/// -/// Once this state-machine finishes execution, the tenure-start block is used to construct a -/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. -/// -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoUnconfirmedTenureDownloader { - /// state of this machine - pub state: NakamotoUnconfirmedDownloadState, - /// Address of who we're asking - pub naddr: NeighborAddress, - /// reward set of the highest confirmed tenure - pub confirmed_signer_keys: Option, - /// reward set of the unconfirmed (ongoing) tenure - pub unconfirmed_signer_keys: Option, - /// Block ID of this node's highest-processed block. - /// We will not download any blocks lower than this, if it's set. - pub highest_processed_block_id: Option, - /// Highest processed block height (which may not need to be loaded) - pub highest_processed_block_height: Option, - - /// Tenure tip info we obtained for this peer - pub tenure_tip: Option, - /// Tenure start block for the ongoing tip. - /// This is also the tenure-end block for the highest-complete tip. - pub unconfirmed_tenure_start_block: Option, - /// Unconfirmed tenure blocks obtained - pub unconfirmed_tenure_blocks: Option>, -} - -impl NakamotoUnconfirmedTenureDownloader { - /// Make a new downloader which will download blocks from the tip back down to the optional - /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). - pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self { - Self { - state: NakamotoUnconfirmedDownloadState::GetTenureInfo, - naddr, - confirmed_signer_keys: None, - unconfirmed_signer_keys: None, - highest_processed_block_id, - highest_processed_block_height: None, - tenure_tip: None, - unconfirmed_tenure_start_block: None, - unconfirmed_tenure_blocks: None, - } - } - - /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is - /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote - /// node). - pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { - self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) - } - - /// Set the highest-processed block. - /// This can be performed by the downloader itself in order to inform ongoing requests for - /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node - /// has already handled. - pub fn set_highest_processed_block( - &mut self, - highest_processed_block_id: StacksBlockId, - highest_processed_block_height: u64, - ) { - self.highest_processed_block_id = Some(highest_processed_block_id); - self.highest_processed_block_height = Some(highest_processed_block_height); - } - - /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. - /// - /// * tenure_tip.consensus_hash - /// This is the consensus hash of the remote node's ongoing tenure. It may not be the - /// sortition tip, e.g. if the tenure spans multiple sortitions. - /// * tenure_tip.tenure_start_block_id - /// This is the first block ID of the ongoing unconfirmed tenure. - /// * tenure_tip.parent_consensus_hash - /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest - /// complete tenure, for which we know the start and end block IDs. - /// * tenure_tip.parent_tenure_start_block_id - /// This is the tenure start block for the highest complete tenure. It should be equal to - /// the winning Stacks block hash of the snapshot for the ongoing tenure. - /// - /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go - /// fetch it again; just get the new unconfirmed blocks. - pub fn try_accept_tenure_info( - &mut self, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - remote_tenure_tip: RPCGetTenureInfo, - current_reward_sets: &BTreeMap, - ) -> Result<(), NetError> { - if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { - return Err(NetError::InvalidState); - } - if self.tenure_tip.is_some() { - return Err(NetError::InvalidState); - } - - debug!("Got tenure info {:?}", remote_tenure_tip); - debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); - - // authenticate consensus hashes against canonical chain history - let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for tenure {}", - &remote_tenure_tip.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.parent_consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for parent tenure {}", - &remote_tenure_tip.parent_consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - let ih = sortdb.index_handle(&local_sort_tip.sortition_id); - let ancestor_local_tenure_sn = ih - .get_block_snapshot_by_height(local_tenure_sn.block_height)? - .ok_or_else(|| { - debug!( - "No tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { - // .consensus_hash is not on the canonical fork - warn!("Unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash); - return Err(DBError::NotFoundError.into()); - } - let ancestor_parent_local_tenure_sn = ih - .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? - .ok_or_else(|| { - debug!( - "No parent tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError.into()) - })?; - - if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { - // .parent_consensus_hash is not on the canonical fork - warn!("Parent unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(DBError::NotFoundError.into()); - } - - // parent tenure sortition must precede the ongoing tenure sortition - if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { - warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash, - "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(NetError::InvalidMessage); - } - - // parent tenure start block ID must be the winning block hash for the ongoing tenure's - // snapshot - if local_tenure_sn.winning_stacks_block_hash.0 - != remote_tenure_tip.parent_tenure_start_block_id.0 - { - debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; - "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, - "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); - return Err(NetError::StaleView); - } - - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - // we've synchronized this tenure before, so don't get anymore blocks before it. - let highest_processed_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(highest_processed_block_id)? - .ok_or_else(|| { - debug!("No such Nakamoto block {}", &highest_processed_block_id); - NetError::DBError(DBError::NotFoundError) - })? - .0; - - let highest_processed_block_height = highest_processed_block.header.chain_length; - self.highest_processed_block_height = Some(highest_processed_block_height); - - if &remote_tenure_tip.tip_block_id == highest_processed_block_id - || highest_processed_block_height > remote_tenure_tip.tip_height - { - // nothing to do -- we're at or ahead of the remote peer, so finish up. - // If we don't have the tenure-start block for the confirmed tenure that the remote - // peer claims to have, then the remote peer has sent us invalid data and we should - // treat it as such. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or(NetError::InvalidMessage)? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::Done; - } - } - - if self.state == NakamotoUnconfirmedDownloadState::Done { - // only need to remember the tenure tip - self.tenure_tip = Some(remote_tenure_tip); - return Ok(()); - } - - // we're not finished - let tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) - .expect("FATAL: sortition from before system start"); - let parent_tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - parent_local_tenure_sn.block_height, - ) - .expect("FATAL: sortition from before system start"); - - // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions - let Some(Some(confirmed_reward_set)) = current_reward_sets - .get(&parent_tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for confirmed tenure {} (rc {})", - &parent_local_tenure_sn.consensus_hash, parent_tenure_rc - ); - return Err(NetError::InvalidState); - }; - - let Some(Some(unconfirmed_reward_set)) = current_reward_sets - .get(&tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for unconfirmed tenure {} (rc {})", - &local_tenure_sn.consensus_hash, tenure_rc - ); - return Err(NetError::InvalidState); - }; - - if chainstate - .nakamoto_blocks_db() - .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? - { - // proceed to get unconfirmed blocks. We already have the tenure-start block. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or_else(|| { - debug!( - "No such tenure-start Nakamoto block {}", - &remote_tenure_tip.tenure_start_block_id - ); - NetError::DBError(DBError::NotFoundError) - })? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - remote_tenure_tip.tip_block_id.clone(), - ); - } else { - // get the tenure-start block first - self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( - remote_tenure_tip.tenure_start_block_id.clone(), - ); - } - - debug!( - "Will validate unconfirmed blocks with reward sets in ({},{})", - parent_tenure_rc, tenure_rc - ); - self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); - self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); - self.tenure_tip = Some(remote_tenure_tip); - - Ok(()) - } - - /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the unconfirmed tenure start block was valid - /// Returns Err(..) if it was not valid, or if this function was called out of sequence. - pub fn try_accept_unconfirmed_tenure_start_block( - &mut self, - unconfirmed_tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = - &self.state - else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - // stacker signature has to match the current reward set - if let Err(e) = unconfirmed_tenure_start_block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // block has to match the expected hash - if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { - warn!("Invalid tenure-start block"; - "tenure_id_start_block" => %tenure_start_block_id, - "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - // furthermore, the block has to match the expected tenure ID - if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { - warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); - return Err(NetError::InvalidMessage); - } - - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone(), - ); - Ok(()) - } - - /// Add downloaded unconfirmed tenure blocks. - /// If we have collected all tenure blocks, then return them. - /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the - /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come - /// after the highest-processed block (if set). - /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on invalid state or invalid block. - pub fn try_accept_unconfirmed_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = - &self.state - else { - return Err(NetError::InvalidState); - }; - - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - debug!("No tenure blocks obtained"); - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest. - // If there's a tenure-start block, it must be last. - let mut expected_block_id = last_block_id; - let mut finished_download = false; - let mut last_block_index = None; - for (cnt, block) in tenure_blocks.iter().enumerate() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id()); - return Err(NetError::InvalidMessage); - } - if let Err(e) = block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // we may or may not need the tenure-start block for the unconfirmed tenure. But if we - // do, make sure it's valid, and it's the last block we receive. - let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - }; - if is_tenure_start { - // this is the tenure-start block, so make sure it matches our /v3/tenure/info - if block.header.block_id() != tenure_tip.tenure_start_block_id { - warn!("Unexpected tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); - return Err(NetError::InvalidMessage); - } - - if cnt.saturating_add(1) != tenure_blocks.len() { - warn!("Invalid tenure stream -- got tenure-start before end of tenure"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "cnt" => cnt, - "len" => tenure_blocks.len(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - finished_download = true; - last_block_index = Some(cnt); - break; - } - - debug!("Got unconfirmed tenure block {}", &block.header.block_id()); - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - if expected_block_id == highest_processed_block_id { - // got all the blocks we asked for - debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_height) = - self.highest_processed_block_height.as_ref() - { - if &block.header.chain_length <= highest_processed_block_height { - // no need to continue this download - debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - expected_block_id = &block.header.parent_block_id; - last_block_index = Some(cnt); - } - - // blocks after the last_block_index were not processed, so should be dropped - if let Some(last_block_index) = last_block_index { - tenure_blocks.truncate(last_block_index + 1); - } - - if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.unconfirmed_tenure_blocks = Some(tenure_blocks); - } - - if finished_download { - // we have all of the unconfirmed tenure blocks that were requested. - // only return those newer than the highest block. - self.state = NakamotoUnconfirmedDownloadState::Done; - let highest_processed_block_height = - *self.highest_processed_block_height.as_ref().unwrap_or(&0); - - debug!("Finished receiving unconfirmed tenure"); - return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { - blocks - .into_iter() - .filter(|block| block.header.chain_length > highest_processed_block_height) - .rev() - .collect() - })); - } - - let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - // still have more to get - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - let next_block_id = earliest_block.header.parent_block_id.clone(); - - debug!( - "Will resume fetching unconfirmed tenure blocks starting at {}", - &next_block_id - ); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); - Ok(None) - } - - /// Once this machine runs to completion, examine its state to see if we still need to fetch - /// the highest complete tenure. We may not need to, especially if we're just polling for new - /// unconfirmed blocks. - /// - /// Return Ok(true) if we need it still - /// Return Ok(false) if we already have it - /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. - pub fn need_highest_complete_tenure( - &self, - chainstate: &StacksChainState, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() - else { - return Err(NetError::InvalidState); - }; - - // if we've processed the unconfirmed tenure-start block already, then we've necessarily - // downloaded and processed the highest-complete tenure already. - Ok(!NakamotoChainState::has_block_header( - chainstate.db(), - &unconfirmed_tenure_start_block.header.block_id(), - false, - )?) - } - - /// Determine if we can produce a highest-complete tenure request. - /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure - pub fn can_make_highest_complete_tenure_downloader( - &self, - sortdb: &SortitionDB, - ) -> Result { - let Some(tenure_tip) = &self.tenure_tip else { - return Ok(false); - }; - - let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &tenure_tip.parent_consensus_hash, - )? - else { - return Ok(false); - }; - - let Some(tip_sn) = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? - else { - return Ok(false); - }; - - let Some(parent_tenure) = - SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? - else { - return Ok(false); - }; - - let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? - else { - return Ok(false); - }; - - if parent_tenure.epoch_id < StacksEpochId::Epoch30 - || tip_tenure.epoch_id < StacksEpochId::Epoch30 - { - debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; - "start_tenure" => %tenure_tip.parent_consensus_hash, - "end_tenure" => %tenure_tip.consensus_hash, - "start_tenure_epoch" => %parent_tenure.epoch_id, - "end_tenure_epoch" => %tip_tenure.epoch_id - ); - return Ok(false); - } - - Ok(true) - } - - /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the - /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get - /// its tenure-start block. - /// - /// Returns Ok(downloader) on success - /// Returns Err(..) if we call this function out of sequence. - pub fn make_highest_complete_tenure_downloader( - &self, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(tenure_tip) = &self.tenure_tip else { - return Err(NetError::InvalidState); - }; - let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - - debug!( - "Create downloader for highest complete tenure {} known by {}", - &tenure_tip.parent_consensus_hash, &self.naddr, - ); - let ntd = NakamotoTenureDownloader::new( - tenure_tip.parent_consensus_hash.clone(), - tenure_tip.parent_tenure_start_block_id.clone(), - tenure_tip.tenure_start_block_id.clone(), - self.naddr.clone(), - confirmed_signer_keys.clone(), - unconfirmed_signer_keys.clone(), - ); - - Ok(ntd) - } - - /// Produce the next HTTP request that, when successfully executed, will advance this state - /// machine. - /// - /// Returns Some(request) if a request must be sent. - /// Returns None if we're done - pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - // need to get the tenure tip - return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost)); - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_block( - peerhost, - block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_tenure( - peerhost, - tip_block_id.clone(), - self.highest_processed_block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::Done => { - // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed - // tenure downloader using the earliest unconfirmed tenure block. - return None; - } - } - } - - /// Begin the next download request for this state machine. - /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. It's - /// up to the caller to determine when it's appropriate to convert this state machine into a - /// `NakamotoTenureDownloader`. - /// Returns Err(..) if the neighbor is dead or broken. - pub fn send_next_download_request( - &self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result<(), NetError> { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(()); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let Some(request) = self.make_next_download_request(peerhost) else { - // treat this downloader as still in-flight since the overall state machine will need - // to keep it around long enough to convert it into a tenure downloader for the highest - // complete tenure. - return Ok(()); - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - Ok(()) - } - - /// Handle a received StacksHttpResponse and advance this machine's state - /// If we get the full tenure, return it. - /// - /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure - /// Returns Ok(None) if we're still working, in which case the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on unrecoverable failure to advance state - pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - current_reward_sets: &BTreeMap, - ) -> Result>, NetError> { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - debug!("Got tenure-info response"); - let remote_tenure_info = response.decode_nakamoto_tenure_info()?; - debug!("Got tenure-info response: {:?}", &remote_tenure_info); - self.try_accept_tenure_info( - sortdb, - local_sort_tip, - chainstate, - remote_tenure_info, - current_reward_sets, - )?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { - debug!("Got tenure start-block response"); - let block = response.decode_nakamoto_block()?; - self.try_accept_unconfirmed_tenure_start_block(block)?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { - debug!("Got unconfirmed tenure blocks response"); - let blocks = response.decode_nakamoto_tenure()?; - let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; - debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); - Ok(accepted_opt) - } - NakamotoUnconfirmedDownloadState::Done => { - return Err(NetError::InvalidState); - } - } - } - - /// Is this machine finished? - pub fn is_done(&self) -> bool { - self.state == NakamotoUnconfirmedDownloadState::Done - } -} From 62f5a6a7813ab94999063ba4aecdcc9612fe5cea Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 16 Aug 2024 16:42:36 -0400 Subject: [PATCH 0808/1400] Do not enable pre nakamoto mock signing unless the miner key is set Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/config.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 20b5d07355..30a5990319 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1168,6 +1168,10 @@ impl Config { .validate() .map_err(|e| format!("Atlas config error: {e}"))?; + if miner.mining_key.is_none() && miner.pre_nakamoto_mock_signing { + return Err("Cannot use pre_nakamoto_mock_signing without a mining_key".to_string()); + } + Ok(Config { config_path: config_file.__path, node, @@ -2384,7 +2388,7 @@ impl Default for MinerConfig { max_reorg_depth: 3, // TODO: update to a sane value based on stackerdb benchmarking wait_on_signers: Duration::from_secs(200), - pre_nakamoto_mock_signing: true, + pre_nakamoto_mock_signing: false, // Should only default true if mining key is set } } } @@ -2739,6 +2743,12 @@ pub struct MinerConfigFile { impl MinerConfigFile { fn into_config_default(self, miner_default_config: MinerConfig) -> Result { + let mining_key = self + .mining_key + .as_ref() + .map(|x| Secp256k1PrivateKey::from_hex(x)) + .transpose()?; + let pre_nakamoto_mock_signing = mining_key.is_some(); Ok(MinerConfig { first_attempt_time_ms: self .first_attempt_time_ms @@ -2837,7 +2847,9 @@ impl MinerConfigFile { .wait_on_signers_ms .map(Duration::from_millis) .unwrap_or(miner_default_config.wait_on_signers), - pre_nakamoto_mock_signing: self.pre_nakamoto_mock_signing.unwrap_or(true), + pre_nakamoto_mock_signing: self + .pre_nakamoto_mock_signing + .unwrap_or(pre_nakamoto_mock_signing), // Should only default true if mining key is set }) } } From c8d8743cd01d6cbfed1e1e9444ea2adf6621344e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 16 Aug 2024 16:43:02 -0400 Subject: [PATCH 0809/1400] Remove panic in tests when deserializing the block proposal slot due to mock signing using it for mock proposals Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c370ca53f6..24b7745419 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -391,7 +391,8 @@ pub fn get_latest_block_proposal( let message: SignerMessageV0 = miners_stackerdb.get_latest(miner_slot_id.start).ok()??; let SignerMessageV0::BlockProposal(block_proposal) = message else { - panic!("Expected a signer message block proposal. Got {message:?}"); + warn!("Expected a block proposal. Got {message:?}"); + return None; }; block_proposal.block }; From 8a160dc5d5cfc0ead5da5a9f3c50e97562f7f0ac Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 16 Aug 2024 16:47:11 -0400 Subject: [PATCH 0810/1400] test: add assertion about node 2's chainstate --- testnet/stacks-node/src/tests/signer/v0.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index ce95049744..e03b0d392b 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3139,5 +3139,24 @@ fn partial_tenure_fork() { u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() ); + let sortdb = SortitionDB::open( + &conf_node_2.get_burn_db_file_path(), + false, + conf_node_2.get_burnchain().pox_constants, + ) + .unwrap(); + + let (chainstate, _) = StacksChainState::open( + false, + conf_node_2.burnchain.chain_id, + &conf_node_2.get_chainstate_path_str(), + None, + ) + .unwrap(); + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + assert_eq!(tip.stacks_block_height, ignore_block - 1); + signer_test.shutdown(); } From 1610ce3698c0ab4a5fe12c75a3b72ec05674c8f9 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 16 Aug 2024 17:24:44 -0400 Subject: [PATCH 0811/1400] Set pre nakamoto mock signing to true for mock sign test Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 19 +++++++++---------- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index b767431c60..5f7b82a937 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -964,22 +964,21 @@ mod test { let bad_private_key = StacksPrivateKey::new(); let bad_public_key = StacksPublicKey::from_private(&bad_private_key); - let mut mock_signature = random_mock_proposal(); - mock_signature.sign(&private_key).unwrap(); - assert!(!mock_signature + let mut mock_proposal = random_mock_proposal(); + assert!(!mock_proposal .verify(&public_key) - .expect("Failed to verify MockSignature")); + .expect("Failed to verify MockProposal")); - mock_signature + mock_proposal .sign(&private_key) - .expect("Failed to sign MockSignature"); + .expect("Failed to sign MockProposal"); - assert!(mock_signature + assert!(mock_proposal .verify(&public_key) - .expect("Failed to verify MockSignature")); - assert!(!mock_signature + .expect("Failed to verify MockProposal")); + assert!(!mock_proposal .verify(&bad_public_key) - .expect("Failed to verify MockSignature")); + .expect("Failed to verify MockProposal")); } #[test] diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1d06c429e9..115fce4c83 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2440,6 +2440,7 @@ fn mock_sign_epoch_25() { Some(Duration::from_secs(5)), |_| {}, |node_config| { + node_config.miner.pre_nakamoto_mock_signing = true; let epochs = node_config.burnchain.epochs.as_mut().unwrap(); for epoch in epochs.iter_mut() { if epoch.epoch_id == StacksEpochId::Epoch25 { From 32e2875558ba6f90d5cf43e719f5a0b3b36aa52c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sat, 17 Aug 2024 10:59:30 -0400 Subject: [PATCH 0812/1400] Fix block proposal rejection by enforcing fetch view to be up to date Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/mod.rs | 55 +++++++---- testnet/stacks-node/src/tests/signer/v0.rs | 101 ++++++++++---------- testnet/stacks-node/src/tests/signer/v1.rs | 12 ++- 3 files changed, 96 insertions(+), 72 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 91371578cb..5761372803 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -43,7 +43,9 @@ use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; use stacks::core::StacksEpoch; -use stacks::net::api::postblock_proposal::BlockValidateResponse; +use stacks::net::api::postblock_proposal::{ + BlockValidateOk, BlockValidateReject, BlockValidateResponse, +}; use stacks::types::chainstate::StacksAddress; use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::codec::StacksMessageCodec; @@ -99,6 +101,7 @@ pub struct SignerTest { // The spawned signers and their threads pub spawned_signers: Vec, // The spawned signers and their threads + #[allow(dead_code)] pub signer_configs: Vec, // the private keys of the signers pub signer_stacks_private_keys: Vec, @@ -481,35 +484,47 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest BlockValidateResponse { + fn wait_for_validate_ok_response(&mut self, timeout: Duration) -> BlockValidateOk { // Wait for the block to show up in the test observer let t_start = Instant::now(); - while test_observer::get_proposal_responses().is_empty() { + loop { + let responses = test_observer::get_proposal_responses(); + for response in responses { + let BlockValidateResponse::Ok(validation) = response else { + continue; + }; + return validation; + } assert!( t_start.elapsed() < timeout, - "Timed out while waiting for block proposal response event" + "Timed out while waiting for block proposal ok event" ); thread::sleep(Duration::from_secs(1)); } - test_observer::get_proposal_responses() - .pop() - .expect("No block proposal") - } - - fn wait_for_validate_ok_response(&mut self, timeout: Duration) -> Sha512Trunc256Sum { - let validate_response = self.wait_for_block_validate_response(timeout); - match validate_response { - BlockValidateResponse::Ok(block_validated) => block_validated.signer_signature_hash, - _ => panic!("Unexpected response"), - } } - fn wait_for_validate_reject_response(&mut self, timeout: Duration) -> Sha512Trunc256Sum { + fn wait_for_validate_reject_response( + &mut self, + timeout: Duration, + signer_signature_hash: Sha512Trunc256Sum, + ) -> BlockValidateReject { // Wait for the block to show up in the test observer - let validate_response = self.wait_for_block_validate_response(timeout); - match validate_response { - BlockValidateResponse::Reject(block_rejection) => block_rejection.signer_signature_hash, - _ => panic!("Unexpected response"), + let t_start = Instant::now(); + loop { + let responses = test_observer::get_proposal_responses(); + for response in responses { + let BlockValidateResponse::Reject(rejection) = response else { + continue; + }; + if rejection.signer_signature_hash == signer_signature_hash { + return rejection; + } + } + assert!( + t_start.elapsed() < timeout, + "Timed out while waiting for block proposal reject event" + ); + thread::sleep(Duration::from_secs(1)); } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c2ce878e28..a4c4b935ac 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -23,7 +23,7 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use clarity::vm::StacksEpoch; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, + BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use rand::RngCore; @@ -36,7 +36,7 @@ use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, S use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; -use stacks::net::api::postblock_proposal::TEST_VALIDATE_STALL; +use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::hash::MerkleHashFunc; @@ -51,7 +51,6 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::runloop::State; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -64,9 +63,8 @@ use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, - next_block_and, setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, + wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -298,7 +296,9 @@ impl SignerTest { self.mine_nakamoto_block(timeout); // Verify that the signers accepted the proposed block, sending back a validate ok response - let proposed_signer_signature_hash = self.wait_for_validate_ok_response(timeout); + let proposed_signer_signature_hash = self + .wait_for_validate_ok_response(timeout) + .signer_signature_hash; let message = proposed_signer_signature_hash.0; info!("------------------------- Test Block Signed -------------------------"); @@ -368,7 +368,7 @@ impl SignerTest { } /// Propose an invalid block to the signers - fn propose_block(&mut self, slot_id: u32, version: u32, block: NakamotoBlock) { + fn propose_block(&mut self, block: NakamotoBlock, timeout: Duration) { let miners_contract_id = boot_code_id(MINERS_NAME, false); let mut session = StackerDBSession::new(&self.running_nodes.conf.node.rpc_bind, miners_contract_id); @@ -388,17 +388,26 @@ impl SignerTest { .miner .mining_key .expect("No mining key"); - // Submit the block proposal to the miner's slot - let mut chunk = StackerDBChunkData::new(slot_id, version, message.serialize_to_vec()); - chunk.sign(&miner_sk).expect("Failed to sign message chunk"); - debug!("Produced a signature: {:?}", chunk.sig); - let result = session.put_chunk(&chunk).expect("Failed to put chunk"); - debug!("Test Put Chunk ACK: {result:?}"); - assert!( - result.accepted, - "Failed to submit block proposal to signers" - ); + let mut accepted = false; + let mut version = 0; + let slot_id = MinerSlotID::BlockProposal.to_u8() as u32; + let start = Instant::now(); + debug!("Proposing invalid block to signers"); + while !accepted { + let mut chunk = + StackerDBChunkData::new(slot_id * 2, version, message.serialize_to_vec()); + chunk.sign(&miner_sk).expect("Failed to sign message chunk"); + debug!("Produced a signature: {:?}", chunk.sig); + let result = session.put_chunk(&chunk).expect("Failed to put chunk"); + accepted = result.accepted; + version += 1; + debug!("Test Put Chunk ACK: {result:?}"); + assert!( + start.elapsed() < timeout, + "Timed out waiting for block proposal to be accepted" + ); + } } } @@ -434,12 +443,10 @@ fn block_proposal_rejection() { let short_timeout = Duration::from_secs(30); info!("------------------------- Send Block Proposal To Signers -------------------------"); - let reward_cycle = signer_test.get_current_reward_cycle(); let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), }; - let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), txs: vec![], @@ -448,48 +455,44 @@ fn block_proposal_rejection() { // First propose a block to the signers that does not have the correct consensus hash or BitVec. This should be rejected BEFORE // the block is submitted to the node for validation. let block_signer_signature_hash_1 = block.header.signer_signature_hash(); - signer_test.propose_block(0, 1, block.clone()); + signer_test.propose_block(block.clone(), short_timeout); + + // Wait for the first block to be mined successfully so we have the most up to date sortition view + signer_test.wait_for_validate_ok_response(short_timeout); // Propose a block to the signers that passes initial checks but will be rejected by the stacks node + let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); block.header.pox_treatment = BitVec::ones(1).unwrap(); block.header.consensus_hash = view.cur_sortition.consensus_hash; + block.header.chain_length = 35; // We have mined 35 blocks so far. let block_signer_signature_hash_2 = block.header.signer_signature_hash(); - signer_test.propose_block(0, 2, block); + signer_test.propose_block(block, short_timeout); info!("------------------------- Test Block Proposal Rejected -------------------------"); // Verify the signers rejected the second block via the endpoint - let rejected_block_hash = signer_test.wait_for_validate_reject_response(short_timeout); - assert_eq!(rejected_block_hash, block_signer_signature_hash_2); - - let mut stackerdb = StackerDB::new( - &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // We are just reading so don't care what the key is - false, - reward_cycle, - SignerSlotID(0), // We are just reading so again, don't care about index. - ); - - let signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - assert_eq!(signer_slot_ids.len(), num_signers); + let reject = + signer_test.wait_for_validate_reject_response(short_timeout, block_signer_signature_hash_2); + assert!(matches!( + reject.reason_code, + ValidateRejectCode::UnknownParent + )); let start_polling = Instant::now(); let mut found_signer_signature_hash_1 = false; let mut found_signer_signature_hash_2 = false; while !found_signer_signature_hash_1 && !found_signer_signature_hash_2 { std::thread::sleep(Duration::from_secs(1)); - let messages: Vec = StackerDB::get_messages( - stackerdb - .get_session_mut(&MessageSlotID::BlockResponse) - .expect("Failed to get BlockResponse stackerdb session"), - &signer_slot_ids, - ) - .expect("Failed to get message from stackerdb"); - for message in messages { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks + .into_iter() + .map(|chunk| chunk.modified_slots) + .flatten() + { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { reason: _reason, reason_code, @@ -503,10 +506,10 @@ fn block_proposal_rejection() { found_signer_signature_hash_2 = true; assert!(matches!(reason_code, RejectCode::ValidationFailed(_))); } else { - panic!("Unexpected signer signature hash"); + continue; } } else { - panic!("Unexpected message type"); + continue; } } assert!( diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 44bbc57228..6e9ed71f36 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -893,7 +893,9 @@ fn block_proposal() { info!("------------------------- Test Block Proposal -------------------------"); // Verify that the signers accepted the proposed block, sending back a validate ok response - let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout); + let proposed_signer_signature_hash = signer_test + .wait_for_validate_ok_response(short_timeout) + .signer_signature_hash; info!("------------------------- Test Block Signed -------------------------"); // Verify that the signers signed the proposed block @@ -1115,7 +1117,9 @@ fn sign_after_signer_reboot() { info!("------------------------- Test Mine Block -------------------------"); signer_test.mine_nakamoto_block(timeout); - let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout); + let proposed_signer_signature_hash = signer_test + .wait_for_validate_ok_response(short_timeout) + .signer_signature_hash; let signature = signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); @@ -1136,7 +1140,9 @@ fn sign_after_signer_reboot() { info!("------------------------- Test Mine Block after restart -------------------------"); let last_block = signer_test.mine_nakamoto_block(timeout); - let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout); + let proposed_signer_signature_hash = signer_test + .wait_for_validate_ok_response(short_timeout) + .signer_signature_hash; let frost_signature = signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); From eef4b0e81bb50cfb4b389ab1bdc399983c4d20f9 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Sun, 18 Aug 2024 18:30:56 +0300 Subject: [PATCH 0813/1400] keep workflow dispatch only true as it runs as false from PRs --- .github/workflows/pr-differences-mutants.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index edafb42bf1..c109b69cfe 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -17,7 +17,6 @@ on: type: choice options: - true - # - false default: 'true' concurrency: From 42bfde05a347e9646911c722cadd5ab83f50899e Mon Sep 17 00:00:00 2001 From: shangchengbabaiban Date: Mon, 19 Aug 2024 00:47:07 +0800 Subject: [PATCH 0814/1400] chore: fix some comments Signed-off-by: shangchengbabaiban --- clarity/src/vm/ast/definition_sorter/mod.rs | 2 +- contrib/boot-contracts-unit-tests/tests/misc.test.ts | 2 +- stacks-signer/src/client/stacks_client.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clarity/src/vm/ast/definition_sorter/mod.rs b/clarity/src/vm/ast/definition_sorter/mod.rs index eee6625310..a5a551298c 100644 --- a/clarity/src/vm/ast/definition_sorter/mod.rs +++ b/clarity/src/vm/ast/definition_sorter/mod.rs @@ -173,7 +173,7 @@ impl DefinitionSorter { return Ok(()); } DefineFunctions::Map => { - // Args: [name, key, value]: with key value being potentialy tuples + // Args: [name, key, value]: with key value being potentially tuples if function_args.len() == 3 { self.probe_for_dependencies( function_args[1], diff --git a/contrib/boot-contracts-unit-tests/tests/misc.test.ts b/contrib/boot-contracts-unit-tests/tests/misc.test.ts index d50f2ef6d3..09a6fe1174 100644 --- a/contrib/boot-contracts-unit-tests/tests/misc.test.ts +++ b/contrib/boot-contracts-unit-tests/tests/misc.test.ts @@ -575,7 +575,7 @@ describe("test `get-total-ustx-stacked`", () => { expect(response.result).toBeUint(amount * 3n); }); - it("expires stacking after the stacking duration has finsihed", () => { + it("expires stacking after the stacking duration has finished", () => { const amount = getStackingMinimum() * 2n; stackers.forEach((stacker, i) => { diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 0aeb30bb6e..cd65f7914b 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -556,7 +556,7 @@ impl StacksClient { Ok(stackers_response.stacker_set.signers) } - /// Retreive the current pox data from the stacks node + /// Retrieve the current pox data from the stacks node pub fn get_pox_data(&self) -> Result { debug!("Getting pox data..."); #[cfg(feature = "monitoring_prom")] From 54777d302b7e9a7576ac888da75e9c6dfe7fb0c6 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 19 Aug 2024 13:27:56 -0400 Subject: [PATCH 0815/1400] Wait for a new block commit. Not just 1 commit Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index cb232edd60..c7106a0037 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -63,8 +63,9 @@ use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, - wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_mine_commit, + setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, + POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -277,14 +278,15 @@ impl SignerTest { self.run_until_epoch_3_boundary(); - let commits_submitted = self.running_nodes.commits_submitted.clone(); - info!("Waiting 1 burnchain block for miner VRF key confirmation"); // Wait one block to confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) + let commits_submitted = self.running_nodes.commits_submitted.clone(); + next_block_and_mine_commit( + &mut self.running_nodes.btc_regtest_controller, + 30, + &self.running_nodes.coord_channel, + &commits_submitted, + ) .unwrap(); info!("Ready to mine Nakamoto blocks!"); } From 7a73f92e46c487b2866124869bfb51f68081bb7f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 19 Aug 2024 14:42:30 -0400 Subject: [PATCH 0816/1400] Wait for a new block commit and don't use mine_commit fn Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c7106a0037..2a9c81c4dd 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -63,9 +63,8 @@ use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_mine_commit, - setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, + wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -278,15 +277,14 @@ impl SignerTest { self.run_until_epoch_3_boundary(); + let commits_submitted = self.running_nodes.commits_submitted.clone(); + let commits_before = commits_submitted.load(Ordering::SeqCst); info!("Waiting 1 burnchain block for miner VRF key confirmation"); // Wait one block to confirm the VRF register, wait until a block commit is submitted - let commits_submitted = self.running_nodes.commits_submitted.clone(); - next_block_and_mine_commit( - &mut self.running_nodes.btc_regtest_controller, - 30, - &self.running_nodes.coord_channel, - &commits_submitted, - ) + next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }) .unwrap(); info!("Ready to mine Nakamoto blocks!"); } From 45eda2cbe42ad55e98cbeb06333e1d80ef16e817 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:42:49 -0400 Subject: [PATCH 0817/1400] feat: upgrade CONSENSUS to INFO --- stackslib/src/chainstate/burn/db/processing.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index 70f170a60c..82318bfe37 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -211,7 +211,7 @@ impl<'a> SortitionHandleTx<'a> { "SORTITION-HASH({}): {}", this_block_height, &snapshot.sortition_hash ); - debug!( + info!( "CONSENSUS({}): {}", this_block_height, &snapshot.consensus_hash ); From 33ffeb90f35a43362b1f8f93cbca2a78bd6fadec Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:43:05 -0400 Subject: [PATCH 0818/1400] chore: log advance to new tip _after_ the tx commits in order to get a better understanding of when it becomes readable by other threads --- stackslib/src/chainstate/nakamoto/mod.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 657315e993..80aef0a74a 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2036,6 +2036,12 @@ impl NakamotoChainState { panic!() }); + info!( + "Advanced to new tip! {}/{}", + &receipt.header.consensus_hash, + &receipt.header.anchored_header.block_hash() + ); + // announce the block, if we're connected to an event dispatcher if let Some(dispatcher) = dispatcher_opt { let block_event = ( @@ -2994,7 +3000,6 @@ impl NakamotoChainState { ); let parent_hash = new_tip.parent_block_id.clone(); - let new_block_hash = new_tip.block_hash(); let index_block_hash = new_tip.block_id(); let mut marf_keys = vec![]; @@ -3186,10 +3191,6 @@ impl NakamotoChainState { headers_tx.deref_mut().execute(sql, args)?; } - debug!( - "Advanced to new tip! {}/{}", - &new_tip.consensus_hash, new_block_hash, - ); Ok(new_tip_info) } From dea8b57fc7b03ad4ddd5807b7efb1118b92dcb86 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:43:35 -0400 Subject: [PATCH 0819/1400] chore: advance to new tip is now INFO --- stackslib/src/chainstate/stacks/db/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 356b117b8b..ca8acd2dce 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2675,7 +2675,7 @@ impl StacksChainState { headers_tx.deref_mut().execute(sql, args)?; } - debug!( + info!( "Advanced to new tip! {}/{}", new_consensus_hash, new_tip.block_hash() From 424d7bf2dc3850e7c3df311f8ab1a4c27855c346 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:43:50 -0400 Subject: [PATCH 0820/1400] feat: unconfirmed downloader interval and nakamoto inventory sync burst config --- stackslib/src/net/connection.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 36b1fc18ff..3737f70c0f 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -398,6 +398,12 @@ pub struct ConnectionOptions { /// maximum number of confirmations for a nakamoto block's sortition for which it will be /// pushed pub max_nakamoto_block_relay_age: u64, + /// minimum amount of time between requests to push nakamoto blocks (millis) + pub nakamoto_push_interval_ms: u128, + /// minimum amount of time between requests to push nakamoto blocks (millis) + pub nakamoto_inv_sync_burst_interval_ms: u128, + /// time between unconfirmed downloader runs + pub nakamoto_unconfirmed_downloader_interval_ms: u128, /// The authorization token to enable privileged RPC endpoints pub auth_token: Option, @@ -521,6 +527,9 @@ impl std::default::Default for ConnectionOptions { socket_send_buffer_size: 16384, // Linux default private_neighbors: true, max_nakamoto_block_relay_age: 6, + nakamoto_push_interval_ms: 30_000, // re-send a block no more than once every 30 seconds + nakamoto_inv_sync_burst_interval_ms: 1_000, // wait 1 second after a sortition before running inventory sync + nakamoto_unconfirmed_downloader_interval_ms: 5_000, // run unconfirmed downloader once every 5 seconds auth_token: None, // no faults on by default From bd3e7c5abf1314f8b0e54596ecf5ce715a77f54b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:44:25 -0400 Subject: [PATCH 0821/1400] fix: use a consistent reward-cycle-start index in the downloader, fix wanted tenure and availability calculations at reward cycle boundaries, and fix the transition logic for confirmed/unconfirmed states --- .../nakamoto/download_state_machine.rs | 450 +++++++----------- 1 file changed, 178 insertions(+), 272 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 6e298470e0..2e7be7f977 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -52,10 +52,11 @@ use crate::core::{ }; use crate::net::api::gettenureinfo::RPCGetTenureInfo; use crate::net::chat::ConversationP2P; +use crate::net::connection::ConnectionOptions; use crate::net::db::{LocalPeer, PeerDB}; use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, - NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, + downloader_block_height_to_reward_cycle, AvailableTenures, NakamotoTenureDownloader, + NakamotoTenureDownloaderSet, NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, }; use crate::net::http::HttpRequestContents; use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; @@ -120,6 +121,8 @@ pub struct NakamotoDownloadStateMachine { pub(super) neighbor_rpc: NeighborRPC, /// Nakamoto chain tip nakamoto_tip: StacksBlockId, + /// last time an unconfirmed downloader was run + last_unconfirmed_download_run_ms: u128, } impl NakamotoDownloadStateMachine { @@ -140,6 +143,7 @@ impl NakamotoDownloadStateMachine { tenure_start_blocks: HashMap::new(), neighbor_rpc: NeighborRPC::new(), nakamoto_tip, + last_unconfirmed_download_run_ms: 0, } } @@ -194,8 +198,6 @@ impl NakamotoDownloadStateMachine { ) -> Result<(), NetError> { let highest_tenure_height = wanted_tenures.last().map(|wt| wt.burn_height).unwrap_or(0); - // careful -- need .saturating_sub(1) since this calculation puts the reward cycle start at - // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len let first_block_height = sortdb .pox_constants .reward_cycle_to_block_height(sortdb.first_block_height, cur_rc) @@ -243,18 +245,22 @@ impl NakamotoDownloadStateMachine { sortdb: &SortitionDB, loaded_so_far: &[WantedTenure], ) -> Result, NetError> { - let tip_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, tip.block_height) - .unwrap_or(0); + let tip_rc = downloader_block_height_to_reward_cycle( + &sortdb.pox_constants, + sortdb.first_block_height, + tip.block_height, + ) + .expect("FATAL: tip.block_height before system start"); + // careful -- need .saturating_sub(1) since this calculation puts the reward cycle start at + // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len let first_block_height = if let Some(highest_wanted_tenure) = loaded_so_far.last() { highest_wanted_tenure.burn_height.saturating_add(1) } else if let Some(last_tip) = last_tip.as_ref() { last_tip.block_height.saturating_add(1) } else { // careful -- need .saturating_sub(1) since this calculation puts the reward cycle start at - // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len. + // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len sortdb .pox_constants .reward_cycle_to_block_height(sortdb.first_block_height, tip_rc) @@ -403,50 +409,13 @@ impl NakamotoDownloadStateMachine { ) } - /// Update `self.wanted_tenures` and `self.prev_wanted_tenures` with newly-discovered sortition - /// data. These lists are extended in three possible ways, depending on the sortition tip: - /// - /// * If the sortition tip is in the same reward cycle that the block downloader is tracking, - /// then any newly-available sortitions are loaded via `load_wanted_tenures_at_tip()` and appended - /// to `self.wanted_tenures`. This is what happens most of the time in steady-state. - /// - /// * Otherwise, if the sortition tip is different (i.e. ahead) of the block downloader's - /// tracked reward cycle, _and_ if it's safe to do so (discussed below), then the next reward - /// cycle's sortitions are loaded. `self.prev_wanted_tenures` is populated with all of the - /// wanted tenures from the prior reward cycle, and `self.wanted_tenures` is populated with all - /// of the wanted tenures from the current reward cycle. - /// - /// Due to the way the chains coordinator works, the sortition DB will never be more than one - /// reward cycle ahead of the block downloader. This is because sortitions cannot be processed - /// (and will not be processed) until their corresponding PoX anchor block has been processed. - /// As such, the second case above only occurs at a reward cycle boundary -- specifically, the - /// sortition DB is in the process of being updated by the chains coordinator with the next - /// reward cycle's sortitions. - /// - /// Naturally, processing a new reward cycle is disruptive to the download state machine, which - /// can be in the process of finishing up downloading the prepare phase for a reward cycle at - /// the same time as the sortition DB processing the next reward cycle. To ensure that the - /// downloader doesn't miss anything, this code checks (via `have_unprocessed_tenures()`) that - /// all wanted tenures for which we have inventory data have been downloaded before advancing - /// `self.wanted_tenures` and `self.prev_wanted_tenures.` + /// Update `self.wanted_tenures` with newly-discovered sortition data. fn extend_wanted_tenures( &mut self, network: &PeerNetwork, sortdb: &SortitionDB, ) -> Result<(), NetError> { let sort_tip = &network.burnchain_tip; - let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - debug!("No network inventories"); - return Err(NetError::PeerNotConnected); - }; - - let last_sort_height_opt = self.last_sort_tip.as_ref().map(|sn| sn.block_height); - let last_sort_height = last_sort_height_opt.unwrap_or(sort_tip.block_height); - let sort_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, last_sort_height) - .expect("FATAL: burnchain tip is before system start"); let mut new_wanted_tenures = Self::load_wanted_tenures_at_tip( self.last_sort_tip.as_ref(), @@ -455,76 +424,13 @@ impl NakamotoDownloadStateMachine { &self.wanted_tenures, )?; - let can_advance_wanted_tenures = - if let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() { - !Self::have_unprocessed_tenures( - sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - self.nakamoto_start_height, - ) - .expect("FATAL: first nakamoto block from before system start"), - &self.tenure_downloads.completed_tenures, - prev_wanted_tenures, - &self.tenure_block_ids, - &sortdb.pox_constants, - sortdb.first_block_height, - invs.inventories.values(), - ) - } else { - debug!("No prev_wanted_tenures yet"); - true - }; - - if can_advance_wanted_tenures && self.reward_cycle != sort_rc { - let mut prev_wanted_tenures = vec![]; - let mut cur_wanted_tenures = vec![]; - let prev_wts = self.prev_wanted_tenures.take().unwrap_or(vec![]); - let cur_wts = std::mem::replace(&mut self.wanted_tenures, vec![]); - - for wt in new_wanted_tenures - .into_iter() - .chain(prev_wts.into_iter()) - .chain(cur_wts.into_iter()) - { - debug!("Consider wanted tenure: {:?}", &wt); - let wt_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, wt.burn_height) - .expect("FATAL: height before system start"); - if wt_rc + 1 == sort_rc { - prev_wanted_tenures.push(wt); - } else if wt_rc == sort_rc { - cur_wanted_tenures.push(wt); - } else { - debug!("Drop wanted tenure: {:?}", &wt); - } - } - - prev_wanted_tenures.sort_unstable_by_key(|wt| wt.burn_height); - cur_wanted_tenures.sort_unstable_by_key(|wt| wt.burn_height); - - debug!("prev_wanted_tenures is now {:?}", &prev_wanted_tenures); - debug!("wanted_tenures is now {:?}", &cur_wanted_tenures); - debug!("set self.reward_cycle = {}", sort_rc); - - self.prev_wanted_tenures = if prev_wanted_tenures.is_empty() { - None - } else { - Some(prev_wanted_tenures) - }; - self.wanted_tenures = cur_wanted_tenures; - self.reward_cycle = sort_rc; - } else { - debug!( - "Append {} wanted tenures: {:?}", - new_wanted_tenures.len(), - &new_wanted_tenures - ); - self.wanted_tenures.append(&mut new_wanted_tenures); - debug!("wanted_tenures is now {:?}", &self.wanted_tenures); - } + debug!( + "Append {} wanted tenures: {:?}", + new_wanted_tenures.len(), + &new_wanted_tenures + ); + self.wanted_tenures.append(&mut new_wanted_tenures); + debug!("extended wanted_tenures is now {:?}", &self.wanted_tenures); Ok(()) } @@ -556,15 +462,17 @@ impl NakamotoDownloadStateMachine { .expect("FATAL: usize cannot support reward cycle length") { // this is the first-ever pass, so load up the last full reward cycle - let sort_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height) - .expect("FATAL: burnchain tip is before system start") - .saturating_sub(1); + let prev_sort_rc = downloader_block_height_to_reward_cycle( + &sortdb.pox_constants, + sortdb.first_block_height, + sort_tip.block_height, + ) + .expect("FATAL: burnchain tip is before system start") + .saturating_sub(1); let mut prev_wanted_tenures = vec![]; Self::update_wanted_tenures_for_reward_cycle( - sort_rc, + prev_sort_rc, sort_tip, sortdb, &mut prev_wanted_tenures, @@ -572,16 +480,18 @@ impl NakamotoDownloadStateMachine { debug!( "initial prev_wanted_tenures (rc {}): {:?}", - sort_rc, &prev_wanted_tenures + prev_sort_rc, &prev_wanted_tenures ); self.prev_wanted_tenures = Some(prev_wanted_tenures); } if self.wanted_tenures.is_empty() { // this is the first-ever pass, so load up the current reward cycle - let sort_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height) - .expect("FATAL: burnchain tip is before system start"); + let sort_rc = downloader_block_height_to_reward_cycle( + &sortdb.pox_constants, + sortdb.first_block_height, + sort_tip.block_height, + ) + .expect("FATAL: burnchain tip is before system start"); let mut wanted_tenures = vec![]; Self::update_wanted_tenures_for_reward_cycle( @@ -625,12 +535,12 @@ impl NakamotoDownloadStateMachine { let prev_wanted_rc = prev_wanted_tenures .last() .map(|wt| { - pox_constants - .block_height_to_reward_cycle( - first_burn_height, - wt.burn_height.saturating_sub(1), - ) - .expect("FATAL: wanted tenure before system start") + downloader_block_height_to_reward_cycle( + pox_constants, + first_burn_height, + wt.burn_height, + ) + .expect("FATAL: wanted tenure before system start") }) .unwrap_or(u64::MAX); @@ -763,49 +673,16 @@ impl NakamotoDownloadStateMachine { chainstate: &mut StacksChainState, ) -> Result<(), NetError> { let sort_tip = &network.burnchain_tip; - let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - debug!("No network inventories"); - return Err(NetError::PeerNotConnected); - }; self.initialize_wanted_tenures(sort_tip, sortdb)?; let last_sort_height_opt = self.last_sort_tip.as_ref().map(|sn| sn.block_height); let last_sort_height = last_sort_height_opt.unwrap_or(sort_tip.block_height); - let sort_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - last_sort_height.saturating_add(1), - ) - .expect("FATAL: burnchain tip is before system start"); - - let next_sort_rc = if last_sort_height == sort_tip.block_height { - sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - sort_tip.block_height.saturating_add(2), - ) - .expect("FATAL: burnchain tip is before system start") - } else { - sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - sort_tip.block_height.saturating_add(1), - ) - .expect("FATAL: burnchain tip is before system start") - }; - - debug!( - "last_sort_height = {}, sort_rc = {}, next_sort_rc = {}, self.reward_cycle = {}, sort_tip.block_height = {}", + let sort_rc = downloader_block_height_to_reward_cycle( + &sortdb.pox_constants, + sortdb.first_block_height, last_sort_height, - sort_rc, - next_sort_rc, - self.reward_cycle, - sort_tip.block_height, - ); + ) + .expect("FATAL: burnchain tip is before system start"); if self.reward_cycle == sort_rc { // not at a reward cycle boundary, so just extend self.wanted_tenures @@ -815,35 +692,10 @@ impl NakamotoDownloadStateMachine { return Ok(()); } - let can_advance_wanted_tenures = - if let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() { - !Self::have_unprocessed_tenures( - sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - self.nakamoto_start_height, - ) - .expect("FATAL: nakamoto starts before system start"), - &self.tenure_downloads.completed_tenures, - prev_wanted_tenures, - &self.tenure_block_ids, - &sortdb.pox_constants, - sortdb.first_block_height, - invs.inventories.values(), - ) - } else { - debug!("No prev_wanted_tenures yet"); - true - }; - if !can_advance_wanted_tenures { - return Ok(()); - } - // crossed reward cycle boundary let mut new_wanted_tenures = vec![]; Self::update_wanted_tenures_for_reward_cycle( - sort_rc + 1, + sort_rc, sort_tip, sortdb, &mut new_wanted_tenures, @@ -851,15 +703,20 @@ impl NakamotoDownloadStateMachine { let mut new_prev_wanted_tenures = vec![]; Self::update_wanted_tenures_for_reward_cycle( - sort_rc, + sort_rc.saturating_sub(1), sort_tip, sortdb, &mut new_prev_wanted_tenures, )?; - debug!("new_wanted_tenures is now {:?}", &new_wanted_tenures); debug!( - "new_prev_wanted_tenures is now {:?}", + "new_wanted_tenures is now {} {:?}", + new_wanted_tenures.len(), + &new_wanted_tenures + ); + debug!( + "new_prev_wanted_tenures is now {} {:?}", + new_prev_wanted_tenures.len(), &new_prev_wanted_tenures ); @@ -1200,6 +1057,43 @@ impl NakamotoDownloadStateMachine { ) } + /// Find the two highest tenure IDs that are available for download. + /// These are the ones that must be fetched via the unconfirmed tenure downloader. + /// They are returned in block order -- .0 has a lower block height than .1 + pub(crate) fn find_unconfirmed_tenure_ids( + wanted_tenures: &[WantedTenure], + prev_wanted_tenures: &[WantedTenure], + available: &HashMap>, + ) -> (Option, Option) { + // map each tenure ID to its block height + let tenure_block_heights: BTreeMap<_, _> = wanted_tenures + .iter() + .chain(prev_wanted_tenures.iter()) + .map(|wt| (wt.burn_height, &wt.tenure_id_consensus_hash)) + .collect(); + + debug!("Check availability {:?}", available); + let mut highest_available = Vec::with_capacity(2); + for (_, ch) in tenure_block_heights.iter().rev() { + let available_count = available + .get(ch) + .map(|neighbors| neighbors.len()) + .unwrap_or(0); + + debug!("Check is {} available: {}", ch, available_count); + if available_count == 0 { + continue; + } + highest_available.push((*ch).clone()); + if highest_available.len() == 2 { + break; + } + } + + highest_available.reverse(); + (highest_available.pop(), highest_available.pop()) + } + /// Determine whether or not we can start downloading the highest complete tenure and the /// unconfirmed tenure. Only do this if (1) the sortition DB is at the burnchain tip and (2) /// all of our wanted tenures are marked as either downloaded or complete. @@ -1209,17 +1103,15 @@ impl NakamotoDownloadStateMachine { /// /// This method is static to facilitate testing. pub(crate) fn need_unconfirmed_tenures<'a>( - nakamoto_start_block: u64, burnchain_height: u64, sort_tip: &BlockSnapshot, - completed_tenures: &HashSet, wanted_tenures: &[WantedTenure], prev_wanted_tenures: &[WantedTenure], tenure_block_ids: &HashMap, - pox_constants: &PoxConstants, - first_burn_height: u64, - inventory_iter: impl Iterator, + available_tenures: &HashMap>, ) -> bool { + debug!("Check if we need unconfirmed tenures"); + if sort_tip.block_height < burnchain_height { debug!( "sort_tip {} < burn tip {}", @@ -1238,34 +1130,53 @@ impl NakamotoDownloadStateMachine { return false; } - // there are still confirmed tenures we have to go and get - if Self::have_unprocessed_tenures( - pox_constants - .block_height_to_reward_cycle(first_burn_height, nakamoto_start_block) - .expect("FATAL: nakamoto starts before system start"), - completed_tenures, - prev_wanted_tenures, - tenure_block_ids, - pox_constants, - first_burn_height, - inventory_iter, - ) { - debug!("Still have unprocessed tenures, so we don't need unconfirmed tenures"); + if tenure_block_ids.is_empty() { + debug!("No tenure availability known"); return false; } + let (unconfirmed_tenure_opt, confirmed_tenure_opt) = Self::find_unconfirmed_tenure_ids( + wanted_tenures, + prev_wanted_tenures, + available_tenures, + ); + debug!( + "Check unconfirmed tenures: highest two available tenures are {:?}, {:?}", + &unconfirmed_tenure_opt, &confirmed_tenure_opt + ); + // see if we need any tenures still - for wt in wanted_tenures.iter() { - if completed_tenures.contains(&wt.tenure_id_consensus_hash) { - continue; - } - let is_available = tenure_block_ids - .iter() - .any(|(_, available)| available.contains_key(&wt.tenure_id_consensus_hash)); + for wt in wanted_tenures.iter().chain(prev_wanted_tenures.iter()) { + debug!("Check unconfirmed tenures: check {:?}", &wt); + let is_available_and_processed = tenure_block_ids.iter().any(|(_, available)| { + if let Some(tenure_start_end) = available.get(&wt.tenure_id_consensus_hash) { + tenure_start_end.processed + } else { + true + } + }); + + if !is_available_and_processed { + let is_unconfirmed = unconfirmed_tenure_opt + .as_ref() + .map(|ch| *ch == wt.tenure_id_consensus_hash) + .unwrap_or(false) + || confirmed_tenure_opt + .as_ref() + .map(|ch| *ch == wt.tenure_id_consensus_hash) + .unwrap_or(false); + + if is_unconfirmed { + debug!( + "Tenure {} is only available via the unconfirmed tenure downloader", + &wt.tenure_id_consensus_hash + ); + continue; + } - if is_available && !wt.processed { // a tenure is available but not yet processed, so we can't yet transition to // fetching unconfirmed tenures (we'd have no way to validate them). + // TODO: also check that this cannot be fetched by confirmed downloader debug!( "Tenure {} is available but not yet processed", &wt.tenure_id_consensus_hash @@ -1345,15 +1256,30 @@ impl NakamotoDownloadStateMachine { /// Update our unconfirmed tenure download state machines fn update_unconfirmed_tenure_downloaders( &mut self, + connection_opts: &ConnectionOptions, count: usize, highest_processed_block_id: Option, ) { + if self + .last_unconfirmed_download_run_ms + .saturating_add(connection_opts.nakamoto_unconfirmed_downloader_interval_ms) + > get_epoch_time_ms() + { + debug!( + "Throttle starting new unconfirmed downloaders until {}", + self.last_unconfirmed_download_run_ms + .saturating_add(connection_opts.nakamoto_unconfirmed_downloader_interval_ms) + / 1000 + ); + return; + } Self::make_unconfirmed_tenure_downloaders( &mut self.unconfirmed_tenure_download_schedule, count, &mut self.unconfirmed_tenure_downloads, highest_processed_block_id, ); + self.last_unconfirmed_download_run_ms = get_epoch_time_ms(); } /// Run unconfirmed tenure download state machines. @@ -1579,15 +1505,18 @@ impl NakamotoDownloadStateMachine { /// Run and process all unconfirmed tenure downloads, and highest complete tenure downloads. /// Do the needful bookkeeping to remove dead peers. + /// Returns map of tenure IDs to blocks we fetched, plus whether or not we returned because we + /// were throttled fn download_unconfirmed_tenures( &mut self, network: &mut PeerNetwork, sortdb: &SortitionDB, chainstate: &StacksChainState, highest_processed_block_id: Option, - ) -> HashMap> { + ) -> (HashMap>, bool) { // queue up more downloaders self.update_unconfirmed_tenure_downloaders( + network.get_connection_opts(), usize::try_from(network.get_connection_opts().max_inflight_blocks) .expect("FATAL: max_inflight_blocks exceeds usize::MAX"), highest_processed_block_id, @@ -1650,7 +1579,7 @@ impl NakamotoDownloadStateMachine { } } - coalesced_blocks + let tenure_blocks = coalesced_blocks .into_iter() .map(|(consensus_hash, block_map)| { let mut block_list: Vec<_> = @@ -1658,7 +1587,9 @@ impl NakamotoDownloadStateMachine { block_list.sort_unstable_by_key(|blk| blk.header.chain_length); (consensus_hash, block_list) }) - .collect() + .collect(); + + (tenure_blocks, false) } /// Top-level download state machine execution. @@ -1697,6 +1628,16 @@ impl NakamotoDownloadStateMachine { ibd, ); + // check this now, since we mutate self.available + let need_unconfirmed_tenures = Self::need_unconfirmed_tenures( + burnchain_height, + &network.burnchain_tip, + &self.wanted_tenures, + self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), + &self.tenure_block_ids, + &self.available_tenures, + ); + match self.state { NakamotoDownloadState::Confirmed => { let new_blocks = self.download_confirmed_tenures( @@ -1705,28 +1646,7 @@ impl NakamotoDownloadStateMachine { .expect("FATAL: max_inflight_blocks exceeds usize::MAX"), ); - // keep borrow-checker happy by instantiang this ref again, now that `network` is - // no longer mutably borrowed. - let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - debug!("No network inventories"); - return HashMap::new(); - }; - - if self.tenure_downloads.is_empty() - && Self::need_unconfirmed_tenures( - self.nakamoto_start_height, - burnchain_height, - &network.burnchain_tip, - &self.tenure_downloads.completed_tenures, - &self.wanted_tenures, - self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), - &self.tenure_block_ids, - &sortdb.pox_constants, - sortdb.first_block_height, - invs.inventories.values(), - ) - { + if self.tenure_downloads.is_empty() && need_unconfirmed_tenures { debug!( "Transition from {} to {}", &self.state, @@ -1749,7 +1669,7 @@ impl NakamotoDownloadStateMachine { &network.stacks_tip.block_hash, ); - let new_blocks = self.download_unconfirmed_tenures( + let (new_blocks, throttled) = self.download_unconfirmed_tenures( network, sortdb, chainstate, @@ -1760,13 +1680,10 @@ impl NakamotoDownloadStateMachine { }, ); - // keep borrow-checker happy by instantiang this ref again, now that `network` is - // no longer mutably borrowed. - let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - debug!("No network inventories"); - return HashMap::new(); - }; + if throttled { + // stay in this state + return new_blocks; + } if !self.tenure_downloads.is_empty() { // need to go get this scheduled tenure @@ -1779,18 +1696,7 @@ impl NakamotoDownloadStateMachine { } else if self.unconfirmed_tenure_downloads.is_empty() && self.unconfirmed_tenure_download_schedule.is_empty() { - if Self::need_unconfirmed_tenures( - self.nakamoto_start_height, - burnchain_height, - &network.burnchain_tip, - &self.tenure_downloads.completed_tenures, - &self.wanted_tenures, - self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), - &self.tenure_block_ids, - &sortdb.pox_constants, - sortdb.first_block_height, - invs.inventories.values(), - ) { + if need_unconfirmed_tenures { // do this again self.unconfirmed_tenure_download_schedule = Self::make_unconfirmed_tenure_download_schedule( From 826e57106b91cebdb256ebe688e53006277a0657 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:45:32 -0400 Subject: [PATCH 0822/1400] fix: consistent start block height of a reward cycle --- stackslib/src/net/download/nakamoto/mod.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index dd440ac110..5f03c3811a 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -176,6 +176,14 @@ pub use crate::net::download::nakamoto::tenure_downloader_unconfirmed::{ NakamotoUnconfirmedDownloadState, NakamotoUnconfirmedTenureDownloader, }; +pub fn downloader_block_height_to_reward_cycle( + pox_constants: &PoxConstants, + first_block_height: u64, + block_height: u64, +) -> Option { + pox_constants.block_height_to_reward_cycle(first_block_height, block_height.saturating_sub(1)) +} + impl PeerNetwork { /// Set up the Nakamoto block downloader pub fn init_nakamoto_block_downloader(&mut self) { From d1611024434347a4701c6256b81c81604132a727 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:45:48 -0400 Subject: [PATCH 0823/1400] fix: consistent block height to reward cycle --- stackslib/src/net/download/nakamoto/tenure.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index a2a3b3eddd..4fb050e591 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -51,6 +51,7 @@ use crate::core::{ use crate::net::api::gettenureinfo::RPCGetTenureInfo; use crate::net::chat::ConversationP2P; use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::download::nakamoto::downloader_block_height_to_reward_cycle; use crate::net::http::HttpRequestContents; use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::inv::epoch2x::InvState; @@ -325,9 +326,12 @@ impl TenureStartEnd { wt_start.winning_block_id.clone(), wt_end.winning_block_id.clone(), rc, - pox_constants - .block_height_to_reward_cycle(first_burn_height, wt_start.burn_height) - .expect("FATAL: tenure from before system start"), + downloader_block_height_to_reward_cycle( + pox_constants, + first_burn_height, + wt_start.burn_height, + ) + .expect("FATAL: tenure from before system start"), wt.processed, ); tenure_start_end.fetch_end_block = true; From dd702947cd16956ad266512652c49ef86c821abc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:46:17 -0400 Subject: [PATCH 0824/1400] feat: return the tenure-end block when downloading a tenure so the chainstate can mark the tenure as complete --- .../download/nakamoto/tenure_downloader.rs | 41 +++++++++++-------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index f7fb970bb6..95d97f67d5 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -161,13 +161,11 @@ pub struct NakamotoTenureDownloader { pub state: NakamotoTenureDownloadState, /// Tenure-start block pub tenure_start_block: Option, - /// Pre-stored tenure end block (used by the unconfirmed block downloader). + /// Pre-stored tenure end block. /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once /// the start-block for the current tenure is downloaded. This is that start-block, which is /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. pub tenure_end_block: Option, - /// Tenure-end block header and TenureChange - pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, /// Tenure blocks pub tenure_blocks: Option>, } @@ -195,7 +193,6 @@ impl NakamotoTenureDownloader { idle: false, state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), tenure_start_block: None, - tenure_end_header: None, tenure_end_block: None, tenure_blocks: None, } @@ -262,10 +259,7 @@ impl NakamotoTenureDownloader { ); self.tenure_start_block = Some(tenure_start_block); - if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { - // tenure_end_header supplied externally - self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); - } else if let Some(tenure_end_block) = self.tenure_end_block.take() { + if let Some(tenure_end_block) = self.tenure_end_block.take() { // we already have the tenure-end block, so immediately proceed to accept it. debug!( "Preemptively process tenure-end block {} for tenure {}", @@ -280,7 +274,7 @@ impl NakamotoTenureDownloader { ); self.try_accept_tenure_end_block(&tenure_end_block)?; } else { - // need to get tenure_end_header. By default, assume that another + // need to get tenure_end_block. By default, assume that another // NakamotoTenureDownloader will provide this block, and allow the // NakamotoTenureDownloaderSet instance that manages a collection of these // state-machines make the call to require this one to fetch the block directly. @@ -411,12 +405,12 @@ impl NakamotoTenureDownloader { } debug!( - "Accepted tenure-end header for tenure {} block={}; expect {} blocks", + "Accepted tenure-end block for tenure {} block={}; expect {} blocks", &self.tenure_id_consensus_hash, &tenure_end_block.block_id(), tc_payload.previous_tenure_blocks ); - self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); + self.tenure_end_block = Some(tenure_end_block.clone()); self.state = NakamotoTenureDownloadState::GetTenureBlocks( tenure_end_block.header.parent_block_id.clone(), ); @@ -426,17 +420,27 @@ impl NakamotoTenureDownloader { /// Determine how many blocks must be in this tenure. /// Returns None if we don't have the start and end blocks yet. pub fn tenure_length(&self) -> Option { - self.tenure_end_header + self.tenure_end_block .as_ref() - .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) + .map(|tenure_end_block| { + let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { + return None; + }; + + Some(u64::from(tc_payload.previous_tenure_blocks)) + }) + .flatten() } /// Add downloaded tenure blocks to this machine. /// If we have collected all tenure blocks, then return them and transition to the Done state. /// /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in - /// ascending order by height, and will include the tenure-start block but exclude the - /// tenure-end block. + /// ascending order by height, and will include both the tenure-start block and the tenure-end + /// block. Including the tenure-end block is necessary because processing it will mark this + /// tenure as "complete" in the chainstate, which will allow the downloader to deduce when all + /// confirmed tenures have been completely downloaded. + /// /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to /// the next block to fetch (stored in self.state) will be updated. /// Returns Err(..) if the blocks were invalid. @@ -487,7 +491,8 @@ impl NakamotoTenureDownloader { .map(|blocks| blocks.len()) .unwrap_or(0) .saturating_add(count) - > self.tenure_length().unwrap_or(0) as usize + > self.tenure_length().unwrap_or(0).saturating_add(1) as usize + // + 1 due to the inclusion of the tenure-end block { // there are more blocks downloaded than indicated by the end-blocks tenure-change // transaction. @@ -503,6 +508,10 @@ impl NakamotoTenureDownloader { if let Some(blocks) = self.tenure_blocks.as_mut() { blocks.append(&mut tenure_blocks); } else { + // include tenure-end block + if let Some(tenure_end_block) = self.tenure_end_block.as_ref() { + tenure_blocks.insert(0, tenure_end_block.clone()); + } self.tenure_blocks = Some(tenure_blocks); } From 7d742f8a93aae6ad8e0eac8c1f5f3bebe31c7696 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:46:44 -0400 Subject: [PATCH 0825/1400] fix: consistent block height to reward cycle --- .../nakamoto/tenure_downloader_unconfirmed.rs | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index c96f718d2b..ddfd35fa97 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -53,8 +53,8 @@ use crate::net::api::gettenureinfo::RPCGetTenureInfo; use crate::net::chat::ConversationP2P; use crate::net::db::{LocalPeer, PeerDB}; use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, - WantedTenure, + downloader_block_height_to_reward_cycle, AvailableTenures, NakamotoTenureDownloader, + NakamotoTenureDownloaderSet, TenureStartEnd, WantedTenure, }; use crate::net::http::HttpRequestContents; use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; @@ -319,17 +319,18 @@ impl NakamotoUnconfirmedTenureDownloader { } // we're not finished - let tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) - .expect("FATAL: sortition from before system start"); - let parent_tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - parent_local_tenure_sn.block_height, - ) - .expect("FATAL: sortition from before system start"); + let tenure_rc = downloader_block_height_to_reward_cycle( + &sortdb.pox_constants, + sortdb.first_block_height, + local_tenure_sn.block_height, + ) + .expect("FATAL: sortition from before system start"); + let parent_tenure_rc = downloader_block_height_to_reward_cycle( + &sortdb.pox_constants, + sortdb.first_block_height, + parent_local_tenure_sn.block_height, + ) + .expect("FATAL: sortition from before system start"); // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions let Some(Some(confirmed_reward_set)) = current_reward_sets From 0eac1f89b207bb63c1e7c9fe893168571b0ee3e9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:46:58 -0400 Subject: [PATCH 0826/1400] feat: do a burst of inv syncs at around the start of a new tenure -- i.e. right after a sortition --- stackslib/src/net/inv/nakamoto.rs | 42 ++++++++++++++++++++++++------- 1 file changed, 33 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index d01e8625a1..f24ad1a87c 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -19,7 +19,7 @@ use std::collections::{BTreeMap, HashMap}; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::StacksBlockId; use stacks_common::types::StacksEpochId; -use stacks_common::util::get_epoch_time_secs; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; @@ -515,8 +515,6 @@ impl NakamotoTenureInv { /// Get the burnchain tip reward cycle for purposes of inv sync fn get_current_reward_cycle(tip: &BlockSnapshot, sortdb: &SortitionDB) -> u64 { - // NOTE: reward cycles start when (sortition_height % reward_cycle_len) == 1, not 0, but - // .block_height_to_reward_cycle does not account for this. sortdb .pox_constants .block_height_to_reward_cycle( @@ -537,6 +535,10 @@ pub struct NakamotoInvStateMachine { reward_cycle_consensus_hashes: BTreeMap, /// last observed sortition tip last_sort_tip: Option, + /// deadline to stop inv sync burst + burst_deadline_ms: u128, + /// time we did our last burst + last_burst_ms: u128, } impl NakamotoInvStateMachine { @@ -546,6 +548,8 @@ impl NakamotoInvStateMachine { inventories: HashMap::new(), reward_cycle_consensus_hashes: BTreeMap::new(), last_sort_tip: None, + burst_deadline_ms: get_epoch_time_ms(), + last_burst_ms: get_epoch_time_ms(), } } @@ -805,20 +809,40 @@ impl NakamotoInvStateMachine { Ok((num_msgs, learned)) } + /// Do we need to do an inv sync burst? + /// This happens after `burst_interval` milliseconds have passed since we noticed the sortition + /// changed. + fn need_inv_burst(&self) -> bool { + self.burst_deadline_ms < get_epoch_time_ms() && self.last_burst_ms < self.burst_deadline_ms + } + /// Top-level state machine execution pub fn run(&mut self, network: &mut PeerNetwork, sortdb: &SortitionDB, ibd: bool) -> bool { // if the burnchain tip has changed, then force all communications to reset for the current // reward cycle in order to hasten block download if let Some(last_sort_tip) = self.last_sort_tip.as_ref() { if last_sort_tip.consensus_hash != network.burnchain_tip.consensus_hash { - debug!("Forcibly restarting all Nakamoto inventory comms due to burnchain tip change ({} != {})", &last_sort_tip.consensus_hash, &network.burnchain_tip.consensus_hash); - let tip_rc = - NakamotoTenureInv::get_current_reward_cycle(&network.burnchain_tip, sortdb); - for inv_state in self.inventories.values_mut() { - inv_state.reset_comms(tip_rc.saturating_sub(1)); - } + debug!( + "Sortition tip changed: {} != {}. Configuring inventory burst", + &last_sort_tip.consensus_hash, &network.burnchain_tip.consensus_hash + ); + self.burst_deadline_ms = get_epoch_time_ms() + .saturating_add(network.connection_opts.nakamoto_inv_sync_burst_interval_ms); } } + if self.need_inv_burst() { + debug!("Forcibly restarting all Nakamoto inventory comms due to inventory burst"); + + let tip_rc = + NakamotoTenureInv::get_current_reward_cycle(&network.burnchain_tip, sortdb); + for inv_state in self.inventories.values_mut() { + inv_state.reset_comms(tip_rc.saturating_sub(1)); + } + + self.last_burst_ms = get_epoch_time_ms() + .saturating_add(network.connection_opts.nakamoto_inv_sync_burst_interval_ms) + .max(self.burst_deadline_ms); + } if let Err(e) = self.process_getnakamotoinv_begins(network, sortdb, ibd) { warn!( From 1945b58a2dc73d583fe3172e068743c704e9751d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:47:24 -0400 Subject: [PATCH 0827/1400] fix: explicitly return pushed stackerdb chunks --- stackslib/src/net/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 3ba4292f1c..5cedc4e068 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1484,6 +1484,8 @@ pub struct NetworkResult { pub uploaded_microblocks: Vec, /// chunks we received from the HTTP server pub uploaded_stackerdb_chunks: Vec, + /// chunks we received from p2p push + pub pushed_stackerdb_chunks: Vec, /// Atlas attachments we obtained pub attachments: Vec<(AttachmentInstance, Attachment)>, /// transactions we downloaded via a mempool sync @@ -1533,6 +1535,7 @@ impl NetworkResult { uploaded_blocks: vec![], uploaded_microblocks: vec![], uploaded_stackerdb_chunks: vec![], + pushed_stackerdb_chunks: vec![], attachments: vec![], synced_transactions: vec![], stacker_db_sync_results: vec![], @@ -1576,6 +1579,7 @@ impl NetworkResult { .fold(0, |acc, x| acc + x.chunks_to_store.len()) > 0 || self.uploaded_stackerdb_chunks.len() > 0 + || self.pushed_stackerdb_chunks.len() > 0 } pub fn transactions(&self) -> Vec { @@ -1639,6 +1643,9 @@ impl NetworkResult { .insert(neighbor_key.clone(), vec![(message.relayers, block_data)]); } } + StacksMessageType::StackerDBPushChunk(chunk_data) => { + self.pushed_stackerdb_chunks.push(chunk_data) + } _ => { // forward along if let Some(messages) = self.unhandled_messages.get_mut(&neighbor_key) { From f86b55f4bd71188c9419c942a27782a60e920ae2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:48:46 -0400 Subject: [PATCH 0828/1400] fix: don't connect to ourselves and cause the walk to fail; also, don't abort a walk because we can't find any always-allowed peers (use the seed peers in that case) --- stackslib/src/net/neighbors/db.rs | 17 +++++++++++++---- stackslib/src/net/neighbors/mod.rs | 2 +- stackslib/src/net/neighbors/walk.rs | 22 ++++++++++++++++++---- 3 files changed, 32 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/neighbors/db.rs b/stackslib/src/net/neighbors/db.rs index 5a40ac9677..c0e65a6f85 100644 --- a/stackslib/src/net/neighbors/db.rs +++ b/stackslib/src/net/neighbors/db.rs @@ -176,7 +176,8 @@ pub trait NeighborWalkDB { /// Get the number of peers in a given AS fn get_asn_count(&self, network: &PeerNetwork, asn: u32) -> u64; - /// Pick neighbors with a minimum age for a walk + /// Pick neighbors with a minimum age for a walk. + /// If there are none, then fall back to seed nodes. fn pick_walk_neighbors( network: &PeerNetwork, num_neighbors: u64, @@ -196,10 +197,18 @@ pub trait NeighborWalkDB { if neighbors.len() == 0 { debug!( - "{:?}: No neighbors available in the peer DB!", - network.get_local_peer() + "{:?}: No neighbors available in the peer DB newer than {}!", + network.get_local_peer(), + min_age ); - return Err(net_error::NoSuchNeighbor); + let seed_nodes = PeerDB::get_bootstrap_peers( + &network.peerdb_conn(), + network.get_local_peer().network_id, + )?; + if seed_nodes.len() == 0 { + return Err(net_error::NoSuchNeighbor); + } + return Ok(seed_nodes); } Ok(neighbors) } diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index 7e01a0c448..6447a6ec00 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -215,7 +215,7 @@ impl PeerNetwork { .count_connected_always_allowed_peers() .unwrap_or((0, 0)); - // always ensure we're connected to always-allowed outbound peers + // always ensure we're connected to always-allowed outbound peers other than ourselves let walk_res = if ibd || (num_always_connected == 0 && total_always_connected > 0) { // always connect to bootstrap peers if in IBD, or if we're not connected to an // always-allowed peer already diff --git a/stackslib/src/net/neighbors/walk.rs b/stackslib/src/net/neighbors/walk.rs index 478f5c0e3d..e1207941e0 100644 --- a/stackslib/src/net/neighbors/walk.rs +++ b/stackslib/src/net/neighbors/walk.rs @@ -298,11 +298,25 @@ impl NeighborWalk { network: &PeerNetwork, ibd: bool, ) -> Result, net_error> { - let mut allowed_peers = db.get_initial_walk_neighbors(network, ibd)?; - let allowed_peer = if let Some(peer) = allowed_peers.pop() { + let allowed_peers = db.get_initial_walk_neighbors(network, ibd)?; + let allowed_peer_opt = allowed_peers.into_iter().find_map(|peer| { + if peer.public_key + == Secp256k1PublicKey::from_private(&network.get_local_peer().private_key) + { + None + } else { + Some(peer) + } + }); + + let allowed_peer = if let Some(peer) = allowed_peer_opt { peer } else { - // no allowed peers in DB. Try a different strategy + // no allowed peers in DB that aren't us. Try a different strategy + debug!( + "{:?}: No allowed peers in the DB that aren't us", + network.get_local_peer() + ); return Err(net_error::NotFoundError); }; @@ -401,7 +415,7 @@ impl NeighborWalk { /// Instantiate a neighbor walk, but go straight to the pingback logic (i.e. we don't have any /// immediate neighbors). That is, try to connect and step to a node that connected to us. - /// The returned neighbor walk will be in the PingabckHandshakesBegin state. + /// The returned neighbor walk will be in the PingbackHandshakesBegin state. /// /// Returns the new walk, if we have any pingbacks to connect to. /// Returns NoSuchNeighbor if there are no pingbacks to choose from From c755092753ce5ed6aa268734c2eea830fbaa317c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:49:23 -0400 Subject: [PATCH 0829/1400] fix: allow the relayer to push stackerdb chunks --- stackslib/src/net/p2p.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 4a52945521..b467e80f46 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -1420,6 +1420,9 @@ impl PeerNetwork { } Ok(all_neighbors.into_iter().collect()) } + StacksMessageType::StackerDBPushChunk(ref data) => { + Ok(self.sample_broadcast_peers(&relay_hints, data)?) + } StacksMessageType::Transaction(ref data) => { self.sample_broadcast_peers(&relay_hints, data) } @@ -4289,6 +4292,7 @@ impl PeerNetwork { let ih = sortdb.index_handle(&tip_sn.sortition_id); for rc in [cur_rc, prev_rc, prev_prev_rc] { + debug!("Refresh reward cycle info for cycle {}", rc); let rc_start_height = self.burnchain.reward_cycle_to_block_height(rc); let Some(ancestor_sort_id) = get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? @@ -4313,6 +4317,7 @@ impl PeerNetwork { } } + debug!("Load reward cycle info for cycle {}", rc); let Some((reward_set_info, anchor_block_header)) = load_nakamoto_reward_set( rc, &tip_sn.sortition_id, From 565c806e02ddcc619b93e786708cd1f368f443f2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:49:40 -0400 Subject: [PATCH 0830/1400] fix: push stackerdb chunks that we stored but did not have, and push nakamoto blocks if told to (don't look at the chainstate, since the caller stores the block before relaying it). This latter change fixes nakamoto block push --- stackslib/src/net/relay.rs | 162 ++++++++++++++++++++++++------------- 1 file changed, 106 insertions(+), 56 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 0496909973..5f200f3f9d 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -29,8 +29,8 @@ use stacks_common::address::public_keys_to_address_hash; use stacks_common::codec::MAX_PAYLOAD_LEN; use stacks_common::types::chainstate::{BurnchainHeaderHash, PoxId, SortitionId, StacksBlockId}; use stacks_common::types::{MempoolCollectionBehavior, StacksEpochId}; -use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use crate::burnchains::{Burnchain, BurnchainView}; use crate::chainstate::burn::db::sortdb::{ @@ -77,6 +77,10 @@ pub struct Relayer { connection_opts: ConnectionOptions, /// StackerDB connection stacker_dbs: StackerDBs, + /// Recently-sent Nakamoto blocks, so we don't keep re-sending them. + /// Maps to tenure ID and timestamp, so we can garbage-collect. + /// Timestamp is in milliseconds + recently_sent_nakamoto_blocks: HashMap, } #[derive(Debug)] @@ -199,6 +203,20 @@ impl RelayPayload for StacksTransaction { } } +impl RelayPayload for StackerDBPushChunkData { + fn get_digest(&self) -> Sha512Trunc256Sum { + self.chunk_data.data_hash() + } + fn get_id(&self) -> String { + format!( + "StackerDBPushChunk(id={},ver={},data_hash={})", + &self.chunk_data.slot_id, + self.chunk_data.slot_version, + &self.chunk_data.data_hash() + ) + } +} + impl RelayerStats { pub fn new() -> RelayerStats { RelayerStats { @@ -509,6 +527,7 @@ impl Relayer { p2p: handle, connection_opts, stacker_dbs, + recently_sent_nakamoto_blocks: HashMap::new(), } } @@ -717,7 +736,7 @@ impl Relayer { block: &StacksBlock, download_time: u64, ) -> Result { - debug!( + info!( "Handle incoming block {}/{}", consensus_hash, &block.block_hash() @@ -838,7 +857,7 @@ impl Relayer { obtained_method: NakamotoBlockObtainMethod, force_broadcast: bool, ) -> Result { - debug!( + info!( "Handle incoming Nakamoto block {}/{} obtained via {}", &block.header.consensus_hash, &block.header.block_hash(), @@ -1010,7 +1029,7 @@ impl Relayer { let mut sort_handle = sortdb.index_handle(&tip.sortition_id); for block in blocks { let block_id = block.block_id(); - if let Err(e) = Self::process_new_nakamoto_block( + let accept = match Self::process_new_nakamoto_block( burnchain, sortdb, &mut sort_handle, @@ -1020,8 +1039,13 @@ impl Relayer { coord_comms, NakamotoBlockObtainMethod::Downloaded, ) { - warn!("Failed to process Nakamoto block {}: {:?}", &block_id, &e); - } else { + Ok(x) => x, + Err(e) => { + warn!("Failed to process Nakamoto block {}: {:?}", &block_id, &e); + continue; + } + }; + if accept { accepted.push(block); } } @@ -2314,8 +2338,11 @@ impl Relayer { } /// Process HTTP-uploaded stackerdb chunks. - /// They're already stored by the RPC handler, so just forward events for them. + /// They're already stored by the RPC handler, so all we have to do + /// is forward events for them and rebroadcast them (i.e. the fact that we stored them and got + /// this far at all means that they were novel, and thus potentially novel to our neighbors). pub fn process_uploaded_stackerdb_chunks( + &mut self, uploaded_chunks: Vec, event_observer: Option<&dyn StackerDBEventDispatcher>, ) { @@ -2325,9 +2352,13 @@ impl Relayer { for chunk in uploaded_chunks.into_iter() { debug!("Got uploaded StackerDB chunk"; "stackerdb_contract_id" => &format!("{}", &chunk.contract_id), "slot_id" => chunk.chunk_data.slot_id, "slot_version" => chunk.chunk_data.slot_version); if let Some(events) = all_events.get_mut(&chunk.contract_id) { - events.push(chunk.chunk_data); + events.push(chunk.chunk_data.clone()); } else { - all_events.insert(chunk.contract_id.clone(), vec![chunk.chunk_data]); + all_events.insert(chunk.contract_id.clone(), vec![chunk.chunk_data.clone()]); + } + let msg = StacksMessageType::StackerDBPushChunk(chunk); + if let Err(e) = self.p2p.broadcast_message(vec![], msg) { + warn!("Failed to broadcast Nakamoto blocks: {:?}", &e); } } for (contract_id, new_chunks) in all_events.into_iter() { @@ -2337,8 +2368,11 @@ impl Relayer { } /// Process newly-arrived chunks obtained from a peer stackerdb replica. + /// Chunks that we store will be broadcast, since successful storage implies that they were new + /// to us (and thus might be new to our neighbors) pub fn process_stacker_db_chunks( - stackerdbs: &mut StackerDBs, + &mut self, + rc_consensus_hash: &ConsensusHash, stackerdb_configs: &HashMap, sync_results: Vec, event_observer: Option<&dyn StackerDBEventDispatcher>, @@ -2347,11 +2381,10 @@ impl Relayer { let mut sync_results_map: HashMap> = HashMap::new(); for sync_result in sync_results.into_iter() { - let sc = sync_result.contract_id.clone(); - if let Some(result_list) = sync_results_map.get_mut(&sc) { + if let Some(result_list) = sync_results_map.get_mut(&sync_result.contract_id) { result_list.push(sync_result); } else { - sync_results_map.insert(sc, vec![sync_result]); + sync_results_map.insert(sync_result.contract_id.clone(), vec![sync_result]); } } @@ -2360,7 +2393,7 @@ impl Relayer { for (sc, sync_results) in sync_results_map.into_iter() { if let Some(config) = stackerdb_configs.get(&sc) { - let tx = stackerdbs.tx_begin(config.clone())?; + let tx = self.stacker_dbs.tx_begin(config.clone())?; for sync_result in sync_results.into_iter() { for chunk in sync_result.chunks_to_store.into_iter() { let md = chunk.get_slot_metadata(); @@ -2373,14 +2406,23 @@ impl Relayer { "num_bytes" => chunk.data.len(), "error" => %e ); + continue; } else { debug!("Stored chunk"; "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id), "slot_id" => md.slot_id, "slot_version" => md.slot_version); } if let Some(event_list) = all_events.get_mut(&sync_result.contract_id) { - event_list.push(chunk); + event_list.push(chunk.clone()); } else { - all_events.insert(sync_result.contract_id.clone(), vec![chunk]); + all_events.insert(sync_result.contract_id.clone(), vec![chunk.clone()]); + } + let msg = StacksMessageType::StackerDBPushChunk(StackerDBPushChunkData { + contract_id: sc.clone(), + rc_consensus_hash: rc_consensus_hash.clone(), + chunk_data: chunk, + }); + if let Err(e) = self.p2p.broadcast_message(vec![], msg) { + warn!("Failed to broadcast StackerDB chunk: {:?}", &e); } } } @@ -2401,27 +2443,24 @@ impl Relayer { /// Process StackerDB chunks pushed to us. /// extract all StackerDBPushChunk messages from `unhandled_messages` pub fn process_pushed_stacker_db_chunks( - stackerdbs: &mut StackerDBs, + &mut self, + rc_consensus_hash: &ConsensusHash, stackerdb_configs: &HashMap, - unhandled_messages: &mut HashMap>, + stackerdb_chunks: Vec, event_observer: Option<&dyn StackerDBEventDispatcher>, ) -> Result<(), Error> { // synthesize StackerDBSyncResults from each chunk - let mut sync_results = vec![]; - for (_nk, msgs) in unhandled_messages.iter_mut() { - msgs.retain(|msg| { - if let StacksMessageType::StackerDBPushChunk(data) = &msg.payload { - let sync_result = StackerDBSyncResult::from_pushed_chunk(data.clone()); - sync_results.push(sync_result); - false - } else { - true - } - }); - } + let sync_results = stackerdb_chunks + .into_iter() + .map(|chunk_data| { + debug!("Received pushed StackerDB chunk {:?}", &chunk_data); + let sync_result = StackerDBSyncResult::from_pushed_chunk(chunk_data); + sync_result + }) + .collect(); - Relayer::process_stacker_db_chunks( - stackerdbs, + self.process_stacker_db_chunks( + rc_consensus_hash, stackerdb_configs, sync_results, event_observer, @@ -2581,11 +2620,8 @@ impl Relayer { &mut self, _local_peer: &LocalPeer, sortdb: &SortitionDB, - chainstate: &StacksChainState, accepted_blocks: Vec, - force_send: bool, ) { - // TODO: we don't relay HTTP-uploaded blocks :( debug!( "{:?}: relay {} sets of Nakamoto blocks", _local_peer, @@ -2612,8 +2648,11 @@ impl Relayer { for blocks_and_relayers in accepted_blocks.into_iter() { let AcceptedNakamotoBlocks { relayers, blocks } = blocks_and_relayers; + if blocks.len() == 0 { + continue; + } - let relay_blocks: Vec<_> = blocks + let relay_blocks_set: HashMap<_, _> = blocks .into_iter() .filter(|blk| { // don't relay blocks for non-recent tenures @@ -2625,21 +2664,24 @@ impl Relayer { ); return false; } - // don't relay blocks we already have. - // If we have a DB error in figuring this out, then don't relay by - // default (lest a faulty DB cause the node to spam the network). - if !force_send - && chainstate - .nakamoto_blocks_db() - .has_nakamoto_block_with_index_hash(&blk.block_id()) - .unwrap_or(true) + // don't relay blocks we've recently sent + if let Some((_ch, ts)) = self.recently_sent_nakamoto_blocks.get(&blk.block_id()) { - return false; + if ts + self.connection_opts.nakamoto_push_interval_ms + >= get_epoch_time_ms() + { + // too soon + test_debug!("Sent {} too recently; will not relay", &blk.block_id()); + return false; + } } true }) + .map(|blk| (blk.block_id(), blk)) .collect(); + let relay_blocks: Vec<_> = relay_blocks_set.into_values().collect(); + debug!( "{:?}: Forward {} Nakamoto blocks from {:?}", _local_peer, @@ -2651,12 +2693,16 @@ impl Relayer { continue; } - for _block in relay_blocks.iter() { - test_debug!( + for block in relay_blocks.iter() { + debug!( "{:?}: Forward Nakamoto block {}/{}", _local_peer, - &_block.header.consensus_hash, - &_block.header.block_hash() + &block.header.consensus_hash, + &block.header.block_hash() + ); + self.recently_sent_nakamoto_blocks.insert( + block.block_id(), + (block.header.consensus_hash.clone(), get_epoch_time_ms()), ); } @@ -2667,6 +2713,10 @@ impl Relayer { warn!("Failed to broadcast Nakamoto blocks: {:?}", &e); } } + + // garbage-collect + self.recently_sent_nakamoto_blocks + .retain(|_blk_id, (ch, _ts)| relay_tenures.contains(ch)); } #[cfg_attr(test, mutants::skip)] @@ -2713,7 +2763,7 @@ impl Relayer { // relay if not IBD if !ibd && accepted_blocks.len() > 0 { - self.relay_epoch3_blocks(local_peer, sortdb, chainstate, accepted_blocks, false); + self.relay_epoch3_blocks(local_peer, sortdb, accepted_blocks); } num_new_nakamoto_blocks } @@ -2836,24 +2886,24 @@ impl Relayer { }; // push events for HTTP-uploaded stacker DB chunks - Relayer::process_uploaded_stackerdb_chunks( + self.process_uploaded_stackerdb_chunks( mem::replace(&mut network_result.uploaded_stackerdb_chunks, vec![]), event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), ); // store downloaded stacker DB chunks - Relayer::process_stacker_db_chunks( - &mut self.stacker_dbs, + self.process_stacker_db_chunks( + &network_result.rc_consensus_hash, &network_result.stacker_db_configs, mem::replace(&mut network_result.stacker_db_sync_results, vec![]), event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), )?; // store pushed stacker DB chunks - Relayer::process_pushed_stacker_db_chunks( - &mut self.stacker_dbs, + self.process_pushed_stacker_db_chunks( + &network_result.rc_consensus_hash, &network_result.stacker_db_configs, - &mut network_result.unhandled_messages, + mem::replace(&mut network_result.pushed_stackerdb_chunks, vec![]), event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), )?; From d5e5f8c4055558db12f169f8a4e8b32f4531c953 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:50:23 -0400 Subject: [PATCH 0831/1400] chore: API sync --- stackslib/src/net/tests/download/nakamoto.rs | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 57bd557186..4110cae7ff 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -278,13 +278,7 @@ fn test_nakamoto_tenure_downloader() { next_tenure_start_block.header.parent_block_id.clone() ) ); - assert_eq!( - td.tenure_end_header, - Some(( - next_tenure_start_block.header.clone(), - next_tenure_change_payload.clone() - )) - ); + assert_eq!(td.tenure_end_block, Some(next_tenure_start_block.clone())); assert_eq!(td.tenure_length(), Some(11)); let mut td_one_shot = td.clone(); @@ -314,14 +308,17 @@ fn test_nakamoto_tenure_downloader() { let res = td.try_accept_tenure_blocks(vec![tenure_start_block.clone()]); assert!(res.is_ok()); let res_blocks = res.unwrap().unwrap(); - assert_eq!(res_blocks.len(), blocks.len()); - assert_eq!(res_blocks, blocks); + assert_eq!(res_blocks.len(), blocks.len() + 1); // includes tenure-end block + + let mut all_blocks = blocks.clone(); + all_blocks.push(next_tenure_start_block.clone()); + assert_eq!(res_blocks, all_blocks); assert_eq!(td.state, NakamotoTenureDownloadState::Done); // also works if we give blocks in one shot let res = td_one_shot.try_accept_tenure_blocks(blocks.clone().into_iter().rev().collect()); assert!(res.is_ok()); - assert_eq!(res.unwrap().unwrap(), blocks); + assert_eq!(res.unwrap().unwrap(), all_blocks); assert_eq!(td_one_shot.state, NakamotoTenureDownloadState::Done); // TODO: From 5a9cd84ef04af356858e87a98dfbd0bd3e4b2b9e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:50:32 -0400 Subject: [PATCH 0832/1400] chore: API sync --- stackslib/src/net/tests/relay/nakamoto.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 4df3171474..3ab91c14c2 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -302,12 +302,10 @@ impl SeedNode { peer.relayer.relay_epoch3_blocks( &local_peer, &sortdb, - &stacks_node.chainstate, vec![AcceptedNakamotoBlocks { relayers: vec![], blocks: blocks.clone(), }], - true, ); peer.sortdb = Some(sortdb); From a6b350442ccfef6f0be4b83698ef1e9b565453cc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 00:50:41 -0400 Subject: [PATCH 0833/1400] fix: honor poll_time_secs in config.burnchain --- testnet/stacks-node/src/run_loop/nakamoto.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 44a6c0fba9..a43854cc50 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -33,6 +33,7 @@ use stacks::core::StacksEpochId; use stacks::net::atlas::{AtlasConfig, AtlasDB, Attachment}; use stacks_common::types::PublicKey; use stacks_common::util::hash::Hash160; +use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stx_genesis::GenesisData; use crate::burnchains::make_bitcoin_indexer; @@ -521,6 +522,7 @@ impl RunLoop { ); let mut last_tenure_sortition_height = 0; + let mut poll_deadline = 0; loop { if !globals.keep_running() { @@ -580,6 +582,12 @@ impl RunLoop { break; } + if poll_deadline > get_epoch_time_secs() { + sleep_ms(1_000); + continue; + } + poll_deadline = get_epoch_time_secs() + self.config().burnchain.poll_time_secs; + let (next_burnchain_tip, tip_burnchain_height) = match burnchain.sync(Some(target_burnchain_block_height)) { Ok(x) => x, From 59fd0fd9ad718883aeff34b38219a63e0cde62f8 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 20 Aug 2024 10:21:48 -0400 Subject: [PATCH 0834/1400] fix: Revert change to `BurnchainHeaderHash` serialization --- stacks-common/src/types/chainstate.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index c5208d02f9..47d6c3c499 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -30,11 +30,11 @@ impl_byte_array_serde!(TrieHash); pub const TRIEHASH_ENCODED_SIZE: usize = 32; +#[derive(Serialize, Deserialize)] pub struct BurnchainHeaderHash(pub [u8; 32]); impl_array_newtype!(BurnchainHeaderHash, u8, 32); impl_array_hexstring_fmt!(BurnchainHeaderHash); impl_byte_array_newtype!(BurnchainHeaderHash, u8, 32); -impl_byte_array_serde!(BurnchainHeaderHash); pub struct BlockHeaderHash(pub [u8; 32]); impl_array_newtype!(BlockHeaderHash, u8, 32); From 11de2a31ed901f75ba7979d1aa9bcb17cc5a5600 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 20 Aug 2024 11:39:31 -0400 Subject: [PATCH 0835/1400] Fix signer_set_rollover. Use the old signer set when at a reward cycle boundary Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2a9c81c4dd..fcf88f8a68 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -293,6 +293,8 @@ impl SignerTest { fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { info!("------------------------- Try mining one block -------------------------"); + let old_reward_cycle = self.get_current_reward_cycle(); + self.mine_nakamoto_block(timeout); // Verify that the signers accepted the proposed block, sending back a validate ok response @@ -311,7 +313,16 @@ impl SignerTest { // whenever it has crossed the threshold. assert!(signature.len() >= num_signers * 7 / 10); - let reward_cycle = self.get_current_reward_cycle(); + let new_reward_cycle = self.get_current_reward_cycle(); + let reward_cycle = if new_reward_cycle != old_reward_cycle { + old_reward_cycle + } else { + new_reward_cycle + }; + info!( + "Verifying signatures against signers for reward cycle {:?}", + reward_cycle + ); let signers = self.get_reward_set_signers(reward_cycle); // Verify that the signers signed the proposed block @@ -2784,8 +2795,7 @@ fn signer_set_rollover() { .running_nodes .btc_regtest_controller .get_burnchain() - .reward_cycle_to_block_height(next_reward_cycle) - .saturating_add(1); + .reward_cycle_to_block_height(next_reward_cycle); info!("---- Mining to next reward set calculation -----"); signer_test.run_until_burnchain_height_nakamoto( From 51e46faef150987cc298b730fc836aa8e67e018c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 20 Aug 2024 12:01:20 -0400 Subject: [PATCH 0836/1400] Simplify logic to ensure at reward cycle boundaries, the old reward cycle is used Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index fcf88f8a68..fbea6e8f1e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -293,7 +293,7 @@ impl SignerTest { fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { info!("------------------------- Try mining one block -------------------------"); - let old_reward_cycle = self.get_current_reward_cycle(); + let reward_cycle = self.get_current_reward_cycle(); self.mine_nakamoto_block(timeout); @@ -312,13 +312,6 @@ impl SignerTest { // NOTE: signature.len() does not need to equal signers.len(); the stacks miner can finish the block // whenever it has crossed the threshold. assert!(signature.len() >= num_signers * 7 / 10); - - let new_reward_cycle = self.get_current_reward_cycle(); - let reward_cycle = if new_reward_cycle != old_reward_cycle { - old_reward_cycle - } else { - new_reward_cycle - }; info!( "Verifying signatures against signers for reward cycle {:?}", reward_cycle From 24c425f5922b0c144d4a5cf38aaf166384151026 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 20 Aug 2024 13:34:31 -0400 Subject: [PATCH 0837/1400] Add a config option and wait a minimum number of seconds between mining blocks to prevent signers rejecting a block with same timestamp as its parent Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/config.rs | 12 ++++++++ .../stacks-node/src/nakamoto_node/miner.rs | 28 +++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 30a5990319..2e0d8e963c 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -86,6 +86,7 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; +const DEFAULT_MINIMUM_GAP_SECS: u64 = 1; #[derive(Clone, Deserialize, Default, Debug)] pub struct ConfigFile { @@ -2358,6 +2359,9 @@ pub struct MinerConfig { pub wait_on_signers: Duration, /// Whether to mock sign in Epoch 2.5 through the .miners and .signers contracts. This is used for testing purposes in Epoch 2.5 only. pub pre_nakamoto_mock_signing: bool, + /// The minimum gap to wait between blocks in seconds. The value must be greater than or equal to 1 second because if a block is mined + /// within the same second as its parent, it will be rejected by the signers. + pub min_block_time_gap_secs: u64, } impl Default for MinerConfig { @@ -2389,6 +2393,7 @@ impl Default for MinerConfig { // TODO: update to a sane value based on stackerdb benchmarking wait_on_signers: Duration::from_secs(200), pre_nakamoto_mock_signing: false, // Should only default true if mining key is set + min_block_time_gap_secs: DEFAULT_MINIMUM_GAP_SECS, } } } @@ -2739,6 +2744,7 @@ pub struct MinerConfigFile { pub max_reorg_depth: Option, pub wait_on_signers_ms: Option, pub pre_nakamoto_mock_signing: Option, + pub min_block_time_gap_secs: Option, } impl MinerConfigFile { @@ -2850,6 +2856,12 @@ impl MinerConfigFile { pre_nakamoto_mock_signing: self .pre_nakamoto_mock_signing .unwrap_or(pre_nakamoto_mock_signing), // Should only default true if mining key is set + min_block_time_gap_secs: self.min_block_time_gap_secs.map(|secs| if secs < DEFAULT_MINIMUM_GAP_SECS { + warn!("miner.min_block_time_gap_secs is less than the minimum allowed value of {DEFAULT_MINIMUM_GAP_SECS} secs. Using the default value instead."); + DEFAULT_MINIMUM_GAP_SECS + } else { + secs + }).unwrap_or(miner_default_config.min_block_time_gap_secs), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 8036389d53..775aa46672 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -45,6 +45,7 @@ use stacks::chainstate::stacks::{ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; +use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -317,6 +318,8 @@ impl BlockMinerThread { } } } + self.wait_min_time_between_blocks()?; + match self.mine_block(&stackerdbs) { Ok(x) => break Some(x), Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { @@ -1037,6 +1040,31 @@ impl BlockMinerThread { Some(vrf_proof) } + /// Wait the minimum time between blocks before mining a new block (if necessary) + /// This is to ensure that the signers do not reject the block due to the block being mined within the same second as the parent block. + fn wait_min_time_between_blocks(&self) -> Result<(), NakamotoNodeError> { + let burn_db_path = self.config.get_burn_db_file_path(); + let mut burn_db = + SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + let parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; + let time_since_parent_secs = get_epoch_time_secs() + .saturating_sub(parent_block_info.stacks_parent_header.burn_header_timestamp); + if time_since_parent_secs < self.config.miner.min_block_time_gap_secs { + let wait_secs = self + .config + .miner + .min_block_time_gap_secs + .saturating_sub(time_since_parent_secs); + info!("Waiting {wait_secs} seconds before mining a new block."); + std::thread::sleep(Duration::from_secs(wait_secs)); + } + Ok(()) + } + // TODO: add tests from mutation testing results #4869 #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a From 71deeab452986eb80edf4d9a344d207725febe03 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 20 Aug 2024 14:11:40 -0400 Subject: [PATCH 0838/1400] CRC: use ms precision for min_block_time config option Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/config.rs | 20 +++++++++---------- .../stacks-node/src/nakamoto_node/miner.rs | 19 +++++++++--------- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 2e0d8e963c..3f4e804f26 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -86,7 +86,7 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; -const DEFAULT_MINIMUM_GAP_SECS: u64 = 1; +const DEFAULT_MINIMUM_GAP_MS: u64 = 1000; #[derive(Clone, Deserialize, Default, Debug)] pub struct ConfigFile { @@ -2359,9 +2359,9 @@ pub struct MinerConfig { pub wait_on_signers: Duration, /// Whether to mock sign in Epoch 2.5 through the .miners and .signers contracts. This is used for testing purposes in Epoch 2.5 only. pub pre_nakamoto_mock_signing: bool, - /// The minimum gap to wait between blocks in seconds. The value must be greater than or equal to 1 second because if a block is mined + /// The minimum gap to wait between blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined /// within the same second as its parent, it will be rejected by the signers. - pub min_block_time_gap_secs: u64, + pub min_block_time_gap_ms: u64, } impl Default for MinerConfig { @@ -2393,7 +2393,7 @@ impl Default for MinerConfig { // TODO: update to a sane value based on stackerdb benchmarking wait_on_signers: Duration::from_secs(200), pre_nakamoto_mock_signing: false, // Should only default true if mining key is set - min_block_time_gap_secs: DEFAULT_MINIMUM_GAP_SECS, + min_block_time_gap_ms: DEFAULT_MINIMUM_GAP_MS, } } } @@ -2744,7 +2744,7 @@ pub struct MinerConfigFile { pub max_reorg_depth: Option, pub wait_on_signers_ms: Option, pub pre_nakamoto_mock_signing: Option, - pub min_block_time_gap_secs: Option, + pub min_block_time_gap_ms: Option, } impl MinerConfigFile { @@ -2856,12 +2856,12 @@ impl MinerConfigFile { pre_nakamoto_mock_signing: self .pre_nakamoto_mock_signing .unwrap_or(pre_nakamoto_mock_signing), // Should only default true if mining key is set - min_block_time_gap_secs: self.min_block_time_gap_secs.map(|secs| if secs < DEFAULT_MINIMUM_GAP_SECS { - warn!("miner.min_block_time_gap_secs is less than the minimum allowed value of {DEFAULT_MINIMUM_GAP_SECS} secs. Using the default value instead."); - DEFAULT_MINIMUM_GAP_SECS + min_block_time_gap_ms: self.min_block_time_gap_ms.map(|ms| if ms < DEFAULT_MINIMUM_GAP_MS { + warn!("miner.min_block_time_gap_ms is less than the minimum allowed value of {DEFAULT_MINIMUM_GAP_MS} ms. Using the default value instead."); + DEFAULT_MINIMUM_GAP_MS } else { - secs - }).unwrap_or(miner_default_config.min_block_time_gap_secs), + ms + }).unwrap_or(miner_default_config.min_block_time_gap_ms), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 775aa46672..9ed3412a1a 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -45,8 +45,8 @@ use stacks::chainstate::stacks::{ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; -use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; +use stacks::util::{get_epoch_time_secs, sleep_ms}; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; @@ -1051,16 +1051,17 @@ impl BlockMinerThread { let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); let parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; - let time_since_parent_secs = get_epoch_time_secs() - .saturating_sub(parent_block_info.stacks_parent_header.burn_header_timestamp); - if time_since_parent_secs < self.config.miner.min_block_time_gap_secs { - let wait_secs = self + let time_since_parent_ms = get_epoch_time_secs() + .saturating_sub(parent_block_info.stacks_parent_header.burn_header_timestamp) + / 1000; + if time_since_parent_ms < self.config.miner.min_block_time_gap_ms { + let wait_ms = self .config .miner - .min_block_time_gap_secs - .saturating_sub(time_since_parent_secs); - info!("Waiting {wait_secs} seconds before mining a new block."); - std::thread::sleep(Duration::from_secs(wait_secs)); + .min_block_time_gap_ms + .saturating_sub(time_since_parent_ms); + info!("Waiting {wait_ms} ms before mining a new block."); + sleep_ms(wait_ms); } Ok(()) } From 4d3236fe08ec35d93b5c4184d47931f6c3a6bd52 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 20 Aug 2024 16:58:15 -0400 Subject: [PATCH 0839/1400] Add a test to check if min_time_between_blocks_ms config option works Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/config.rs | 18 +-- .../stacks-node/src/nakamoto_node/miner.rs | 10 +- testnet/stacks-node/src/tests/signer/v0.rs | 117 +++++++++++++++++- 4 files changed, 133 insertions(+), 13 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 901b9fc040..27e76a646d 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -98,6 +98,7 @@ jobs: - tests::signer::v0::miner_forking - tests::signer::v0::reloads_signer_set_in - tests::signer::v0::signers_broadcast_signed_blocks + - tests::signer::v0::min_gap_between_blocks - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 3f4e804f26..d1b115d9cf 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -86,7 +86,7 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; -const DEFAULT_MINIMUM_GAP_MS: u64 = 1000; +const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1000; #[derive(Clone, Deserialize, Default, Debug)] pub struct ConfigFile { @@ -2359,9 +2359,9 @@ pub struct MinerConfig { pub wait_on_signers: Duration, /// Whether to mock sign in Epoch 2.5 through the .miners and .signers contracts. This is used for testing purposes in Epoch 2.5 only. pub pre_nakamoto_mock_signing: bool, - /// The minimum gap to wait between blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined + /// The minimum time to wait between mining blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined /// within the same second as its parent, it will be rejected by the signers. - pub min_block_time_gap_ms: u64, + pub min_time_between_blocks_ms: u64, } impl Default for MinerConfig { @@ -2393,7 +2393,7 @@ impl Default for MinerConfig { // TODO: update to a sane value based on stackerdb benchmarking wait_on_signers: Duration::from_secs(200), pre_nakamoto_mock_signing: false, // Should only default true if mining key is set - min_block_time_gap_ms: DEFAULT_MINIMUM_GAP_MS, + min_time_between_blocks_ms: DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS, } } } @@ -2744,7 +2744,7 @@ pub struct MinerConfigFile { pub max_reorg_depth: Option, pub wait_on_signers_ms: Option, pub pre_nakamoto_mock_signing: Option, - pub min_block_time_gap_ms: Option, + pub min_time_between_blocks_ms: Option, } impl MinerConfigFile { @@ -2856,12 +2856,12 @@ impl MinerConfigFile { pre_nakamoto_mock_signing: self .pre_nakamoto_mock_signing .unwrap_or(pre_nakamoto_mock_signing), // Should only default true if mining key is set - min_block_time_gap_ms: self.min_block_time_gap_ms.map(|ms| if ms < DEFAULT_MINIMUM_GAP_MS { - warn!("miner.min_block_time_gap_ms is less than the minimum allowed value of {DEFAULT_MINIMUM_GAP_MS} ms. Using the default value instead."); - DEFAULT_MINIMUM_GAP_MS + min_time_between_blocks_ms: self.min_time_between_blocks_ms.map(|ms| if ms < DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS { + warn!("miner.min_time_between_blocks_ms is less than the minimum allowed value of {DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS} ms. Using the default value instead."); + DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS } else { ms - }).unwrap_or(miner_default_config.min_block_time_gap_ms), + }).unwrap_or(miner_default_config.min_time_between_blocks_ms), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 9ed3412a1a..dc57ca16de 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1054,13 +1054,17 @@ impl BlockMinerThread { let time_since_parent_ms = get_epoch_time_secs() .saturating_sub(parent_block_info.stacks_parent_header.burn_header_timestamp) / 1000; - if time_since_parent_ms < self.config.miner.min_block_time_gap_ms { + if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms { let wait_ms = self .config .miner - .min_block_time_gap_ms + .min_time_between_blocks_ms .saturating_sub(time_since_parent_ms); - info!("Waiting {wait_ms} ms before mining a new block."); + info!("Parent block mined {} ms ago, waiting {} ms before mining a new block", time_since_parent_ms, wait_ms; + "parent_block_id" => %parent_block_info.stacks_parent_header.index_block_hash(), + "parent_block_height" => parent_block_info.stacks_parent_header.stacks_block_height, + "parent_block_timestamp" => parent_block_info.stacks_parent_header.burn_header_timestamp, + ); sleep_ms(wait_ms); } Ok(()) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index fbea6e8f1e..049dcad379 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2788,7 +2788,8 @@ fn signer_set_rollover() { .running_nodes .btc_regtest_controller .get_burnchain() - .reward_cycle_to_block_height(next_reward_cycle); + .reward_cycle_to_block_height(next_reward_cycle) + .saturating_add(1); info!("---- Mining to next reward set calculation -----"); signer_test.run_until_burnchain_height_nakamoto( @@ -2847,3 +2848,117 @@ fn signer_set_rollover() { assert!(signer.stop().is_none()); } } + +#[test] +#[ignore] +/// This test checks that the signers will broadcast a block once they receive enough signatures. +fn min_gap_between_blocks() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let time_between_blocks_ms = 10_000; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |_config| {}, + |config| { + config.miner.min_time_between_blocks_ms = time_between_blocks_ms; + }, + &[], + ); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + let proposals_before = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + let info_before = get_chain_info(&signer_test.running_nodes.conf); + + // submit a tx so that the miner will mine a block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal. Ensure it does not arrive before the gap is exceeded"); + let start_time = Instant::now(); + while start_time.elapsed().as_millis() < (time_between_blocks_ms - 1000).into() { + let blocks_proposed = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + assert_eq!( + blocks_proposed, proposals_before, + "Block proposed before gap was exceeded" + ); + std::thread::sleep(Duration::from_millis(100)); + } + + let start_time = Instant::now(); + loop { + let blocks_proposed = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + if blocks_proposed > proposals_before { + break; + } + assert!( + start_time.elapsed().as_secs() < 30, + "Block not proposed after gap was exceeded within timeout" + ); + std::thread::sleep(Duration::from_millis(100)); + } + + debug!("Ensure that the block is mined after the gap is exceeded"); + + let start = Instant::now(); + let duration = 30; + loop { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + let info = get_chain_info(&signer_test.running_nodes.conf); + if blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height { + break; + } + + debug!( + "blocks_mined: {},{}, stacks_tip_height: {},{}", + blocks_mined, blocks_before, info_before.stacks_tip_height, info.stacks_tip_height + ); + + std::thread::sleep(Duration::from_millis(100)); + assert!( + start.elapsed() < Duration::from_secs(duration), + "Block not mined within timeout" + ); + } + + signer_test.shutdown(); +} From 79005738bd018dc191116dc24c13e62c554b4798 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 20 Aug 2024 17:45:24 -0400 Subject: [PATCH 0840/1400] ci: Fix `mock_miner_replay()` --- testnet/stacks-node/src/tests/neon_integrations.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index b876257bf2..0905fb1f60 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12784,7 +12784,6 @@ fn mock_miner_replay() { // ---------- Test finished, clean up ---------- - btcd_controller.stop_bitcoind().unwrap(); miner_channel.stop_chains_coordinator(); follower_channel.stop_chains_coordinator(); } From 869b2e020cb6d697cb392de83dc1508064d5e7f5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 20 Aug 2024 23:43:55 -0400 Subject: [PATCH 0841/1400] fix: when building a Bitcoin transaction that consumes multiple UTXOs, the partially-signed transaction needs to have all of the inputs present before we can begin signing them (since the sighash commits to the number of inputs). Fix this and add a unit test. --- .../burnchains/bitcoin_regtest_controller.rs | 197 +++++++++++++++++- testnet/stacks-node/src/keychain.rs | 1 - 2 files changed, 191 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 39ef40490b..8977cd2923 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1373,14 +1373,36 @@ impl BitcoinRegtestController { previous_fees: Option, previous_txids: &Vec, ) -> Option { - let mut estimated_fees = match previous_fees { + let _ = self.sortdb_mut(); + let burn_chain_tip = self.burnchain_db.as_ref()?.get_canonical_chain_tip().ok()?; + let estimated_fees = match previous_fees { Some(fees) => fees.fees_from_previous_tx(&payload, &self.config), None => LeaderBlockCommitFees::estimated_fees_from_payload(&payload, &self.config), }; - let _ = self.sortdb_mut(); - let burn_chain_tip = self.burnchain_db.as_ref()?.get_canonical_chain_tip().ok()?; + self.send_block_commit_operation_at_burnchain_height( + epoch_id, + payload, + signer, + utxos_to_include, + utxos_to_exclude, + estimated_fees, + previous_txids, + burn_chain_tip.block_height, + ) + } + fn send_block_commit_operation_at_burnchain_height( + &mut self, + epoch_id: StacksEpochId, + payload: LeaderBlockCommitOp, + signer: &mut BurnchainOpSigner, + utxos_to_include: Option, + utxos_to_exclude: Option, + mut estimated_fees: LeaderBlockCommitFees, + previous_txids: &Vec, + burnchain_block_height: u64, + ) -> Option { let public_key = signer.get_public_key(); let (mut tx, mut utxos) = self.prepare_tx( epoch_id, @@ -1388,7 +1410,7 @@ impl BitcoinRegtestController { estimated_fees.estimated_amount_required(), utxos_to_include, utxos_to_exclude, - burn_chain_tip.block_height, + burnchain_block_height, )?; // Serialize the payload @@ -1817,7 +1839,7 @@ impl BitcoinRegtestController { debug!("Not enough change to clear dust limit. Not adding change address."); } - for (i, utxo) in utxos_set.utxos.iter().enumerate() { + for (_i, utxo) in utxos_set.utxos.iter().enumerate() { let input = TxIn { previous_output: OutPoint { txid: utxo.txid, @@ -1828,7 +1850,8 @@ impl BitcoinRegtestController { witness: vec![], }; tx.input.push(input); - + } + for (i, utxo) in utxos_set.utxos.iter().enumerate() { let script_pub_key = utxo.script_pub_key.clone(); let sig_hash_all = 0x01; @@ -2805,6 +2828,12 @@ mod tests { use std::fs::File; use std::io::Write; + use stacks::burnchains::BurnchainSigner; + use stacks_common::deps_common::bitcoin::blockdata::script::Builder; + use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, VRFSeed}; + use stacks_common::util::hash::to_hex; + use stacks_common::util::secp256k1::Secp256k1PrivateKey; + use super::*; use crate::config::DEFAULT_SATS_PER_VB; @@ -2825,4 +2854,160 @@ mod tests { assert_eq!(get_satoshis_per_byte(&config), 51); } + + /// Verify that we can build a valid Bitcoin transaction with multiple UTXOs. + /// Taken from production data. + /// Tests `serialize_tx()` and `send_block_commit_operation_at_burnchain_height()` + #[test] + fn test_multiple_inputs() { + let spend_utxos = vec![ + UTXO { + txid: Sha256dHash::from_hex( + "d3eafb3aba3cec925473550ed2e4d00bcb0d00744bb3212e4a8e72878909daee", + ) + .unwrap(), + vout: 3, + script_pub_key: Builder::from( + hex_bytes("76a9141dc27eba0247f8cc9575e7d45e50a0bc7e72427d88ac").unwrap(), + ) + .into_script(), + amount: 42051, + confirmations: 1421, + }, + UTXO { + txid: Sha256dHash::from_hex( + "01132f2d4a98cc715624e033214c8d841098a1ee15b30188ab89589a320b3b24", + ) + .unwrap(), + vout: 0, + script_pub_key: Builder::from( + hex_bytes("76a9141dc27eba0247f8cc9575e7d45e50a0bc7e72427d88ac").unwrap(), + ) + .into_script(), + amount: 326456, + confirmations: 1421, + }, + ]; + + // test serialize_tx() + let mut config = Config::default(); + config.burnchain.magic_bytes = "T3".as_bytes().into(); + + let mut btc_controller = BitcoinRegtestController::new(config, None); + let mut utxo_set = UTXOSet { + bhh: BurnchainHeaderHash([0x01; 32]), + utxos: spend_utxos.clone(), + }; + let mut transaction = Transaction { + input: vec![], + output: vec![ + TxOut { + value: 0, + script_pubkey: Builder::from(hex_bytes("6a4c5054335be88c3d30cb59a142f83de3b27f897a43bbb0f13316911bb98a3229973dae32afd5b9f21bc1f40f24e2c101ecd13c55b8619e5e03dad81de2c62a1cc1d8c1b375000008a300010000059800015a").unwrap()).into_script(), + }, + TxOut { + value: 10000, + script_pubkey: Builder::from(hex_bytes("76a914000000000000000000000000000000000000000088ac").unwrap()).into_script(), + }, + TxOut { + value: 10000, + script_pubkey: Builder::from(hex_bytes("76a914000000000000000000000000000000000000000088ac").unwrap()).into_script(), + }, + ], + version: 1, + lock_time: 0, + }; + + let mut signer = BurnchainOpSigner::new( + Secp256k1PrivateKey::from_hex( + "9e446f6b0c6a96cf2190e54bcd5a8569c3e386f091605499464389b8d4e0bfc201", + ) + .unwrap(), + false, + ); + assert!(btc_controller.serialize_tx( + StacksEpochId::Epoch25, + &mut transaction, + 44950, + &mut utxo_set, + &mut signer, + true + )); + assert_eq!(transaction.output[3].value, 323557); + + // test send_block_commit_operation_at_burn_height() + let utxo_set = UTXOSet { + bhh: BurnchainHeaderHash([0x01; 32]), + utxos: spend_utxos.clone(), + }; + + let commit_op = LeaderBlockCommitOp { + block_header_hash: BlockHeaderHash::from_hex( + "e88c3d30cb59a142f83de3b27f897a43bbb0f13316911bb98a3229973dae32af", + ) + .unwrap(), + new_seed: VRFSeed::from_hex( + "d5b9f21bc1f40f24e2c101ecd13c55b8619e5e03dad81de2c62a1cc1d8c1b375", + ) + .unwrap(), + parent_block_ptr: 2211, // 0x000008a3 + parent_vtxindex: 1, // 0x0001 + key_block_ptr: 1432, // 0x00000598 + key_vtxindex: 1, // 0x0001 + memo: vec![11], // 0x5a >> 3 + + burn_fee: 0, + input: (Txid([0x00; 32]), 0), + burn_parent_modulus: 2, // 0x5a & 0b111 + + apparent_sender: BurnchainSigner("mgbpit8FvkVJ9kuXY8QSM5P7eibnhcEMBk".to_string()), + commit_outs: vec![ + PoxAddress::Standard(StacksAddress::burn_address(false), None), + PoxAddress::Standard(StacksAddress::burn_address(false), None), + ], + + treatment: vec![], + sunset_burn: 0, + + txid: Txid([0x00; 32]), + vtxindex: 0, + block_height: 2212, + burn_header_hash: BurnchainHeaderHash([0x01; 32]), + }; + + assert_eq!(to_hex(&commit_op.serialize_to_vec()), "5be88c3d30cb59a142f83de3b27f897a43bbb0f13316911bb98a3229973dae32afd5b9f21bc1f40f24e2c101ecd13c55b8619e5e03dad81de2c62a1cc1d8c1b375000008a300010000059800015a".to_string()); + + let leader_fees = LeaderBlockCommitFees { + sunset_fee: 0, + fee_rate: 50, + sortition_fee: 20000, + outputs_len: 2, + default_tx_size: 380, + spent_in_attempts: 0, + is_rbf_enabled: false, + final_size: 498, + }; + + assert_eq!(leader_fees.amount_per_output(), 10000); + assert_eq!(leader_fees.total_spent(), 44900); + + let block_commit = btc_controller + .send_block_commit_operation_at_burnchain_height( + StacksEpochId::Epoch30, + commit_op, + &mut signer, + Some(utxo_set), + None, + leader_fees, + &vec![], + 2212, + ) + .unwrap(); + + debug!("send_block_commit_operation:\n{:#?}", &block_commit); + debug!("{}", &SerializedTx::new(block_commit.clone()).to_hex()); + assert_eq!(block_commit.output[3].value, 323507); + + assert_eq!(&SerializedTx::new(block_commit.clone()).to_hex(), "0100000002eeda098987728e4a2e21b34b74000dcb0bd0e4d20e55735492ec3cba3afbead3030000006a4730440220558286e20e10ce31537f0625dae5cc62fac7961b9d2cf272c990de96323d7e2502202255adbea3d2e0509b80c5d8a3a4fe6397a87bcf18da1852740d5267d89a0cb20121035379aa40c02890d253cfa577964116eb5295570ae9f7287cbae5f2585f5b2c7cfdffffff243b0b329a5889ab8801b315eea19810848d4c2133e0245671cc984a2d2f1301000000006a47304402206d9f8de107f9e1eb15aafac66c2bb34331a7523260b30e18779257e367048d34022013c7dabb32a5c281aa00d405e2ccbd00f34f03a65b2336553a4acd6c52c251ef0121035379aa40c02890d253cfa577964116eb5295570ae9f7287cbae5f2585f5b2c7cfdffffff040000000000000000536a4c5054335be88c3d30cb59a142f83de3b27f897a43bbb0f13316911bb98a3229973dae32afd5b9f21bc1f40f24e2c101ecd13c55b8619e5e03dad81de2c62a1cc1d8c1b375000008a300010000059800015a10270000000000001976a914000000000000000000000000000000000000000088ac10270000000000001976a914000000000000000000000000000000000000000088acb3ef0400000000001976a9141dc27eba0247f8cc9575e7d45e50a0bc7e72427d88ac00000000"); + } } diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index c9ed722a9e..b6df8549c4 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -206,7 +206,6 @@ impl Keychain { } /// Create a BurnchainOpSigner representation of this keychain - /// (this is going to be removed in 2.1) pub fn generate_op_signer(&self) -> BurnchainOpSigner { BurnchainOpSigner::new(self.get_secret_key(), false) } From 6ada69da3e6e5c983526856c8512235823b7f496 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 21 Aug 2024 13:32:31 +0300 Subject: [PATCH 0842/1400] remove input mutants dispatch and automatically proceed from the action's context --- .github/workflows/pr-differences-mutants.yml | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index c109b69cfe..ca2bac5081 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -10,14 +10,6 @@ on: paths: - '**.rs' workflow_dispatch: - inputs: - ignore_timeout: - description: "Ignore mutants timeout limit" - required: false - type: choice - options: - - true - default: 'true' concurrency: group: pr-differences-${{ github.head_ref || github.ref || github.run_id }} @@ -38,9 +30,8 @@ jobs: team: 'blockchain-team' GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - - name: Fail if the user does not have the right permissions - if: ${{ inputs.ignore_timeout == 'true' && steps.check_right_permissions.outputs.is_team_member != 'true' }} - run: exit 1 + outputs: + ignore_timeout: ${{ steps.check_right_permissions.outputs.is_team_member == 'true' }} # Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards check-big-packages-and-shards: @@ -63,7 +54,7 @@ jobs: - id: check_packages_and_shards uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main with: - ignore_timeout: ${{ inputs.ignore_timeout }} + ignore_timeout: ${{ needs.check-right-permissions.outputs.ignore_timeout }} # Mutation testing - Execute on PR on small packages that have functions modified (normal run, no shards) pr-differences-mutants-small-normal: From 18ed8d0116ce1422f314a8cfb09a48665ca78a47 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 21 Aug 2024 13:35:54 +0300 Subject: [PATCH 0843/1400] add files for testing --- stackslib/src/net/download/nakamoto/mod.rs | 6 + .../nakamoto/tenure_downloader_copy.rs | 693 ++++++++++++++ .../nakamoto/tenure_downloader_opy.rs | 693 ++++++++++++++ .../nakamoto/tenure_downloader_set_copy.rs | 660 +++++++++++++ .../nakamoto/tenure_downloader_set_opy.rs | 660 +++++++++++++ .../tenure_downloader_unconfirmed_copy.rs | 867 ++++++++++++++++++ .../tenure_downloader_unconfirmed_opy.rs | 867 ++++++++++++++++++ 7 files changed, 4446 insertions(+) create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs create mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index dd440ac110..7643c54ff7 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -161,8 +161,14 @@ use crate::util_lib::db::{DBConn, Error as DBError}; mod download_state_machine; mod tenure; mod tenure_downloader; +mod tenure_downloader_copy; +mod tenure_downloader_opy; mod tenure_downloader_set; +mod tenure_downloader_set_copy; +mod tenure_downloader_set_opy; mod tenure_downloader_unconfirmed; +mod tenure_downloader_unconfirmed_copy; +mod tenure_downloader_unconfirmed_opy; pub use crate::net::download::nakamoto::download_state_machine::{ NakamotoDownloadState, NakamotoDownloadStateMachine, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs new file mode 100644 index 0000000000..f7fb970bb6 --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs @@ -0,0 +1,693 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for an historic tenure. This is a tenure for which we know the hashes of the +/// start and end block. This includes all tenures except for the two most recent ones. +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoTenureDownloadState { + /// Getting the tenure-start block (the given StacksBlockId is it's block ID). + GetTenureStartBlock(StacksBlockId), + /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not + /// always) handled by the execution of another NakamotoTenureDownloader. The only + /// exceptions are as follows: + /// + /// * if this tenure contains the anchor block, and it's the last tenure in the + /// reward cycle. In this case, the end-block must be directly fetched, since there will be no + /// follow-on NakamotTenureDownloader in the same reward cycle who can provide this. + /// + /// * if this tenure is the highest complete tenure, and we just learned the start-block of the + /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block + /// already known. This step will be skipped because the end-block is already present in the + /// state machine. + /// + /// * if the deadline (second parameter) is exceeded, the state machine transitions to + /// GetTenureEndBlock. + /// + /// The two fields here are: + /// * the block ID of the last block in the tenure (which happens to be the block ID of the + /// start block of the next tenure) + /// * the deadline by which this state machine needs to have obtained the tenure end-block + /// before transitioning to `GetTenureEndBlock`. + WaitForTenureEndBlock(StacksBlockId, Instant), + /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks + /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in + /// which we cannot quickly get the tenure-end block. + /// + /// The field here is the block ID of the tenure end block. + GetTenureEndBlock(StacksBlockId), + /// Receiving tenure blocks. + /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This + /// is because a tenure is fetched in order from highest block to lowest block. + GetTenureBlocks(StacksBlockId), + /// We have gotten all the blocks for this tenure + Done, +} + +pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; + +impl fmt::Display for NakamotoTenureDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs +/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent +/// tenures). +/// +/// This state machine works as follows: +/// +/// 1. Fetch the first block in the given tenure +/// 2. Obtain the last block in the given tenure, via one of the following means: +/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this +/// machine's tenure, and can be copied into this machine. +/// b. This machine is configured to directly fetch the end-block. This only happens if this +/// tenure both contains the anchor block for the next reward cycle and happens to be the last +/// tenure in the current reward cycle. +/// c. This machine is given the end-block on instantiation. This only happens when the machine +/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); +/// in this case, the end-block is the start-block of the ongoing tenure. +/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse +/// order. As blocks are found, their signer signatures will be validated against the signer +/// public keys for this tenure; their hash-chain continuity will be validated against the start +/// and end block hashes; their quantity will be validated against the tenure-change transaction +/// in the end-block. +/// +/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto +/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of +/// whether or not it straddles a reward cycle boundary). +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoTenureDownloader { + /// Consensus hash that identifies this tenure + pub tenure_id_consensus_hash: ConsensusHash, + /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and + /// sortition DB. + pub tenure_start_block_id: StacksBlockId, + /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID + /// for some other tenure). Learned from the inventory state machine and sortition DB. + pub tenure_end_block_id: StacksBlockId, + /// Address of who we're asking for blocks + pub naddr: NeighborAddress, + /// Signer public keys that signed the start-block of this tenure, in reward cycle order + pub start_signer_keys: RewardSet, + /// Signer public keys that signed the end-block of this tenure + pub end_signer_keys: RewardSet, + /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with + /// this state machine. + pub idle: bool, + + /// What state we're in for downloading this tenure + pub state: NakamotoTenureDownloadState, + /// Tenure-start block + pub tenure_start_block: Option, + /// Pre-stored tenure end block (used by the unconfirmed block downloader). + /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once + /// the start-block for the current tenure is downloaded. This is that start-block, which is + /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. + pub tenure_end_block: Option, + /// Tenure-end block header and TenureChange + pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, + /// Tenure blocks + pub tenure_blocks: Option>, +} + +impl NakamotoTenureDownloader { + pub fn new( + tenure_id_consensus_hash: ConsensusHash, + tenure_start_block_id: StacksBlockId, + tenure_end_block_id: StacksBlockId, + naddr: NeighborAddress, + start_signer_keys: RewardSet, + end_signer_keys: RewardSet, + ) -> Self { + debug!( + "Instantiate downloader to {} for tenure {}: {}-{}", + &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, + ); + Self { + tenure_id_consensus_hash, + tenure_start_block_id, + tenure_end_block_id, + naddr, + start_signer_keys, + end_signer_keys, + idle: false, + state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), + tenure_start_block: None, + tenure_end_header: None, + tenure_end_block: None, + tenure_blocks: None, + } + } + + /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed + /// tenure. This supplies the tenure end-block if known in advance. + pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { + self.tenure_end_block = Some(tenure_end_block); + self + } + + /// Is this downloader waiting for the tenure-end block data from some other downloader? Per + /// the struct documentation, this is case 2(a). + pub fn is_waiting(&self) -> bool { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { + return true; + } else { + return false; + } + } + + /// Validate and accept a given tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the start-block is valid. + /// Returns Err(..) if it is not valid. + pub fn try_accept_tenure_start_block( + &mut self, + tenure_start_block: NakamotoBlock, + ) -> Result<(), NetError> { + let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { + // not the right state for this + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if self.tenure_start_block_id != tenure_start_block.header.block_id() { + // not the block we were expecting + warn!("Invalid tenure-start block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_start_block" => %self.tenure_start_block_id, + "tenure_start_block ID" => %tenure_start_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_start_block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + // signature verification failed + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-start block for tenure {} block={}", + &self.tenure_id_consensus_hash, + &tenure_start_block.block_id() + ); + self.tenure_start_block = Some(tenure_start_block); + + if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { + // tenure_end_header supplied externally + self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); + } else if let Some(tenure_end_block) = self.tenure_end_block.take() { + // we already have the tenure-end block, so immediately proceed to accept it. + debug!( + "Preemptively process tenure-end block {} for tenure {}", + tenure_end_block.block_id(), + &self.tenure_id_consensus_hash + ); + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + tenure_end_block.block_id(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + self.try_accept_tenure_end_block(&tenure_end_block)?; + } else { + // need to get tenure_end_header. By default, assume that another + // NakamotoTenureDownloader will provide this block, and allow the + // NakamotoTenureDownloaderSet instance that manages a collection of these + // state-machines make the call to require this one to fetch the block directly. + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + self.tenure_end_block_id.clone(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + } + Ok(()) + } + + /// Transition this state-machine from waiting for its tenure-end block from another + /// state-machine to directly fetching it. This only needs to happen if the tenure this state + /// machine is downloading contains the PoX anchor block, and it's also the last confirmed + /// tenurein this reward cycle. + /// + /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and + /// runs a set of these machines based on the peers' inventory vectors. But because we don't + /// know if this is the PoX anchor block tenure (or even the last tenure) until we have + /// inventory vectors for this tenure's reward cycle, this state-transition must be driven + /// after this machine's instantiation. + pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { + let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state + else { + return Err(NetError::InvalidState); + }; + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + Ok(()) + } + + /// Transition to fetching the tenure-end block directly if waiting has taken too long. + pub fn transition_to_fetch_end_block_on_timeout(&mut self) { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = + self.state + { + if wait_deadline < Instant::now() { + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + } + } + } + + /// Validate and accept a tenure-end block. If accepted, then advance the state. + /// Once accepted, this function extracts the tenure-change transaction and block header from + /// this block (it does not need the entire block). + /// + /// Returns Ok(()) if the block was valid + /// Returns Err(..) if the block was invalid + pub fn try_accept_tenure_end_block( + &mut self, + tenure_end_block: &NakamotoBlock, + ) -> Result<(), NetError> { + if !matches!( + &self.state, + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) + | NakamotoTenureDownloadState::GetTenureEndBlock(_) + ) { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + warn!("Invalid state -- tenure_start_block is not set"); + return Err(NetError::InvalidState); + }; + + if self.tenure_end_block_id != tenure_end_block.header.block_id() { + // not the block we asked for + warn!("Invalid tenure-end block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_end_block" => %self.tenure_end_block_id, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_end_block + .header + .verify_signer_signatures(&self.end_signer_keys) + { + // bad signature + warn!("Invalid tenure-end block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // extract the needful -- need the tenure-change payload (which proves that the tenure-end + // block is the tenure-start block for the next tenure) and the parent block ID (which is + // the next block to download). + let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { + warn!("Invalid tenure-end block: failed to validate tenure-start"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + }; + + if !valid { + warn!("Invalid tenure-end block: not a well-formed tenure-start block"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + } + + let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { + warn!("Invalid tenure-end block: no tenure-change transaction"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + }; + + // tc_payload must point to the tenure-start block's header + if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { + warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; + "start_block_id" => %tenure_start_block.block_id(), + "end_block_id" => %tenure_end_block.block_id(), + "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, + "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-end header for tenure {} block={}; expect {} blocks", + &self.tenure_id_consensus_hash, + &tenure_end_block.block_id(), + tc_payload.previous_tenure_blocks + ); + self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); + self.state = NakamotoTenureDownloadState::GetTenureBlocks( + tenure_end_block.header.parent_block_id.clone(), + ); + Ok(()) + } + + /// Determine how many blocks must be in this tenure. + /// Returns None if we don't have the start and end blocks yet. + pub fn tenure_length(&self) -> Option { + self.tenure_end_header + .as_ref() + .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) + } + + /// Add downloaded tenure blocks to this machine. + /// If we have collected all tenure blocks, then return them and transition to the Done state. + /// + /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in + /// ascending order by height, and will include the tenure-start block but exclude the + /// tenure-end block. + /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to + /// the next block to fetch (stored in self.state) will be updated. + /// Returns Err(..) if the blocks were invalid. + pub fn try_accept_tenure_blocks( + &mut self, + mut tenure_blocks: Vec, + ) -> Result>, NetError> { + let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if tenure_blocks.is_empty() { + // nothing to do + return Ok(None); + } + + // blocks must be contiguous and in order from highest to lowest + let mut expected_block_id = block_cursor; + let mut count = 0; + for block in tenure_blocks.iter() { + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- not part of tenure"; + "expected_block_id" => %expected_block_id, + "block_id" => %block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + expected_block_id = &block.header.parent_block_id; + count += 1; + if self + .tenure_blocks + .as_ref() + .map(|blocks| blocks.len()) + .unwrap_or(0) + .saturating_add(count) + > self.tenure_length().unwrap_or(0) as usize + { + // there are more blocks downloaded than indicated by the end-blocks tenure-change + // transaction. + warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); + "tenure_id" => %self.tenure_id_consensus_hash, + "count" => %count, + "tenure_length" => self.tenure_length().unwrap_or(0), + "num_blocks" => tenure_blocks.len()); + return Err(NetError::InvalidMessage); + } + } + + if let Some(blocks) = self.tenure_blocks.as_mut() { + blocks.append(&mut tenure_blocks); + } else { + self.tenure_blocks = Some(tenure_blocks); + } + + // did we reach the tenure start block? + let Some(blocks) = self.tenure_blocks.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got None)"); + return Err(NetError::InvalidState); + }; + + let Some(earliest_block) = blocks.last() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no tenure-start block (infallible)"); + return Err(NetError::InvalidState); + }; + + debug!( + "Accepted tenure blocks for tenure {} cursor={} ({})", + &self.tenure_id_consensus_hash, &block_cursor, count + ); + if earliest_block.block_id() != tenure_start_block.block_id() { + // still have more blocks to download + let next_block_id = earliest_block.header.parent_block_id.clone(); + debug!( + "Need more blocks for tenure {} (went from {} to {}, next is {})", + &self.tenure_id_consensus_hash, + &block_cursor, + &earliest_block.block_id(), + &next_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); + return Ok(None); + } + + // finished! + self.state = NakamotoTenureDownloadState::Done; + Ok(self + .tenure_blocks + .take() + .map(|blocks| blocks.into_iter().rev().collect())) + } + + /// Produce the next HTTP request that, when successfully executed, will fetch the data needed + /// to advance this state machine. + /// Not all states require an HTTP request for advanceement. + /// + /// Returns Ok(Some(request)) if a request is needed + /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's + /// state) + /// Returns Err(()) if we're done. + pub fn make_next_download_request( + &self, + peerhost: PeerHost, + ) -> Result, ()> { + let request = match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { + debug!("Request tenure-start block {}", &start_block_id); + StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) + } + NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { + // we're waiting for some other downloader's block-fetch to complete + debug!( + "Waiting for tenure-end block {} until {:?}", + &_block_id, _deadline + ); + return Ok(None); + } + NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { + debug!("Request tenure-end block {}", &end_block_id); + StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) + } + NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { + debug!("Downloading tenure ending at {}", &end_block_id); + StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) + } + NakamotoTenureDownloadState::Done => { + // nothing more to do + return Err(()); + } + }; + Ok(Some(request)) + } + + /// Begin the next download request for this state machine. The request will be sent to the + /// data URL corresponding to self.naddr. + /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The + /// caller should try this again until it gets one of the other possible return values. + /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) + /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to + /// resolve its data URL to a socket address. + pub fn send_next_download_request( + &mut self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> Result { + if neighbor_rpc.has_inflight(&self.naddr) { + debug!("Peer {} has an inflight request", &self.naddr); + return Ok(true); + } + if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { + return Err(NetError::PeerNotConnected); + } + + let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { + // no conversation open to this neighbor + neighbor_rpc.add_dead(network, &self.naddr); + return Err(NetError::PeerNotConnected); + }; + + let request = match self.make_next_download_request(peerhost) { + Ok(Some(request)) => request, + Ok(None) => { + return Ok(true); + } + Err(_) => { + return Ok(false); + } + }; + + neighbor_rpc.send_request(network, self.naddr.clone(), request)?; + self.idle = false; + Ok(true) + } + + /// Handle a received StacksHttpResponse and advance the state machine. + /// If we get the full tenure's blocks, then return them. + /// Returns Ok(Some([blocks])) if we successfully complete the state machine. + /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done + /// yet. The caller should now call `send_next_download_request()` + /// Returns Err(..) on failure to process the response. + pub fn handle_next_download_response( + &mut self, + response: StacksHttpResponse, + ) -> Result>, NetError> { + let handle_result = match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { + debug!( + "Got download response for tenure-start block {}", + &_block_id + ); + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; + self.try_accept_tenure_start_block(block)?; + Ok(None) + } + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { + debug!("Invalid state -- Got download response for WaitForTenureBlock"); + Err(NetError::InvalidState) + } + NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { + debug!("Got download response to tenure-end block {}", &_block_id); + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; + self.try_accept_tenure_end_block(&block)?; + Ok(None) + } + NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { + debug!( + "Got download response for tenure blocks ending at {}", + &_end_block_id + ); + let blocks = response.decode_nakamoto_tenure().map_err(|e| { + warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); + e + })?; + let blocks_opt = self.try_accept_tenure_blocks(blocks)?; + Ok(blocks_opt) + } + NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), + }; + self.idle = true; + handle_result + } + + pub fn is_done(&self) -> bool { + self.state == NakamotoTenureDownloadState::Done + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs new file mode 100644 index 0000000000..f7fb970bb6 --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs @@ -0,0 +1,693 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for an historic tenure. This is a tenure for which we know the hashes of the +/// start and end block. This includes all tenures except for the two most recent ones. +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoTenureDownloadState { + /// Getting the tenure-start block (the given StacksBlockId is it's block ID). + GetTenureStartBlock(StacksBlockId), + /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not + /// always) handled by the execution of another NakamotoTenureDownloader. The only + /// exceptions are as follows: + /// + /// * if this tenure contains the anchor block, and it's the last tenure in the + /// reward cycle. In this case, the end-block must be directly fetched, since there will be no + /// follow-on NakamotTenureDownloader in the same reward cycle who can provide this. + /// + /// * if this tenure is the highest complete tenure, and we just learned the start-block of the + /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block + /// already known. This step will be skipped because the end-block is already present in the + /// state machine. + /// + /// * if the deadline (second parameter) is exceeded, the state machine transitions to + /// GetTenureEndBlock. + /// + /// The two fields here are: + /// * the block ID of the last block in the tenure (which happens to be the block ID of the + /// start block of the next tenure) + /// * the deadline by which this state machine needs to have obtained the tenure end-block + /// before transitioning to `GetTenureEndBlock`. + WaitForTenureEndBlock(StacksBlockId, Instant), + /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks + /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in + /// which we cannot quickly get the tenure-end block. + /// + /// The field here is the block ID of the tenure end block. + GetTenureEndBlock(StacksBlockId), + /// Receiving tenure blocks. + /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This + /// is because a tenure is fetched in order from highest block to lowest block. + GetTenureBlocks(StacksBlockId), + /// We have gotten all the blocks for this tenure + Done, +} + +pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; + +impl fmt::Display for NakamotoTenureDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs +/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent +/// tenures). +/// +/// This state machine works as follows: +/// +/// 1. Fetch the first block in the given tenure +/// 2. Obtain the last block in the given tenure, via one of the following means: +/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this +/// machine's tenure, and can be copied into this machine. +/// b. This machine is configured to directly fetch the end-block. This only happens if this +/// tenure both contains the anchor block for the next reward cycle and happens to be the last +/// tenure in the current reward cycle. +/// c. This machine is given the end-block on instantiation. This only happens when the machine +/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); +/// in this case, the end-block is the start-block of the ongoing tenure. +/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse +/// order. As blocks are found, their signer signatures will be validated against the signer +/// public keys for this tenure; their hash-chain continuity will be validated against the start +/// and end block hashes; their quantity will be validated against the tenure-change transaction +/// in the end-block. +/// +/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto +/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of +/// whether or not it straddles a reward cycle boundary). +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoTenureDownloader { + /// Consensus hash that identifies this tenure + pub tenure_id_consensus_hash: ConsensusHash, + /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and + /// sortition DB. + pub tenure_start_block_id: StacksBlockId, + /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID + /// for some other tenure). Learned from the inventory state machine and sortition DB. + pub tenure_end_block_id: StacksBlockId, + /// Address of who we're asking for blocks + pub naddr: NeighborAddress, + /// Signer public keys that signed the start-block of this tenure, in reward cycle order + pub start_signer_keys: RewardSet, + /// Signer public keys that signed the end-block of this tenure + pub end_signer_keys: RewardSet, + /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with + /// this state machine. + pub idle: bool, + + /// What state we're in for downloading this tenure + pub state: NakamotoTenureDownloadState, + /// Tenure-start block + pub tenure_start_block: Option, + /// Pre-stored tenure end block (used by the unconfirmed block downloader). + /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once + /// the start-block for the current tenure is downloaded. This is that start-block, which is + /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. + pub tenure_end_block: Option, + /// Tenure-end block header and TenureChange + pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, + /// Tenure blocks + pub tenure_blocks: Option>, +} + +impl NakamotoTenureDownloader { + pub fn new( + tenure_id_consensus_hash: ConsensusHash, + tenure_start_block_id: StacksBlockId, + tenure_end_block_id: StacksBlockId, + naddr: NeighborAddress, + start_signer_keys: RewardSet, + end_signer_keys: RewardSet, + ) -> Self { + debug!( + "Instantiate downloader to {} for tenure {}: {}-{}", + &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, + ); + Self { + tenure_id_consensus_hash, + tenure_start_block_id, + tenure_end_block_id, + naddr, + start_signer_keys, + end_signer_keys, + idle: false, + state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), + tenure_start_block: None, + tenure_end_header: None, + tenure_end_block: None, + tenure_blocks: None, + } + } + + /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed + /// tenure. This supplies the tenure end-block if known in advance. + pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { + self.tenure_end_block = Some(tenure_end_block); + self + } + + /// Is this downloader waiting for the tenure-end block data from some other downloader? Per + /// the struct documentation, this is case 2(a). + pub fn is_waiting(&self) -> bool { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { + return true; + } else { + return false; + } + } + + /// Validate and accept a given tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the start-block is valid. + /// Returns Err(..) if it is not valid. + pub fn try_accept_tenure_start_block( + &mut self, + tenure_start_block: NakamotoBlock, + ) -> Result<(), NetError> { + let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { + // not the right state for this + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if self.tenure_start_block_id != tenure_start_block.header.block_id() { + // not the block we were expecting + warn!("Invalid tenure-start block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_start_block" => %self.tenure_start_block_id, + "tenure_start_block ID" => %tenure_start_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_start_block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + // signature verification failed + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-start block for tenure {} block={}", + &self.tenure_id_consensus_hash, + &tenure_start_block.block_id() + ); + self.tenure_start_block = Some(tenure_start_block); + + if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { + // tenure_end_header supplied externally + self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); + } else if let Some(tenure_end_block) = self.tenure_end_block.take() { + // we already have the tenure-end block, so immediately proceed to accept it. + debug!( + "Preemptively process tenure-end block {} for tenure {}", + tenure_end_block.block_id(), + &self.tenure_id_consensus_hash + ); + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + tenure_end_block.block_id(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + self.try_accept_tenure_end_block(&tenure_end_block)?; + } else { + // need to get tenure_end_header. By default, assume that another + // NakamotoTenureDownloader will provide this block, and allow the + // NakamotoTenureDownloaderSet instance that manages a collection of these + // state-machines make the call to require this one to fetch the block directly. + self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( + self.tenure_end_block_id.clone(), + Instant::now() + .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) + .ok_or(NetError::OverflowError("Deadline is too big".into()))?, + ); + } + Ok(()) + } + + /// Transition this state-machine from waiting for its tenure-end block from another + /// state-machine to directly fetching it. This only needs to happen if the tenure this state + /// machine is downloading contains the PoX anchor block, and it's also the last confirmed + /// tenurein this reward cycle. + /// + /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and + /// runs a set of these machines based on the peers' inventory vectors. But because we don't + /// know if this is the PoX anchor block tenure (or even the last tenure) until we have + /// inventory vectors for this tenure's reward cycle, this state-transition must be driven + /// after this machine's instantiation. + pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { + let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state + else { + return Err(NetError::InvalidState); + }; + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + Ok(()) + } + + /// Transition to fetching the tenure-end block directly if waiting has taken too long. + pub fn transition_to_fetch_end_block_on_timeout(&mut self) { + if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = + self.state + { + if wait_deadline < Instant::now() { + debug!( + "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", + &self.naddr, &end_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); + } + } + } + + /// Validate and accept a tenure-end block. If accepted, then advance the state. + /// Once accepted, this function extracts the tenure-change transaction and block header from + /// this block (it does not need the entire block). + /// + /// Returns Ok(()) if the block was valid + /// Returns Err(..) if the block was invalid + pub fn try_accept_tenure_end_block( + &mut self, + tenure_end_block: &NakamotoBlock, + ) -> Result<(), NetError> { + if !matches!( + &self.state, + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) + | NakamotoTenureDownloadState::GetTenureEndBlock(_) + ) { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + warn!("Invalid state -- tenure_start_block is not set"); + return Err(NetError::InvalidState); + }; + + if self.tenure_end_block_id != tenure_end_block.header.block_id() { + // not the block we asked for + warn!("Invalid tenure-end block: unexpected"; + "tenure_id" => %self.tenure_id_consensus_hash, + "tenure_id_end_block" => %self.tenure_end_block_id, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = tenure_end_block + .header + .verify_signer_signatures(&self.end_signer_keys) + { + // bad signature + warn!("Invalid tenure-end block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %tenure_end_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // extract the needful -- need the tenure-change payload (which proves that the tenure-end + // block is the tenure-start block for the next tenure) and the parent block ID (which is + // the next block to download). + let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { + warn!("Invalid tenure-end block: failed to validate tenure-start"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + }; + + if !valid { + warn!("Invalid tenure-end block: not a well-formed tenure-start block"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + } + + let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { + warn!("Invalid tenure-end block: no tenure-change transaction"; + "block_id" => %tenure_end_block.block_id()); + return Err(NetError::InvalidMessage); + }; + + // tc_payload must point to the tenure-start block's header + if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { + warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; + "start_block_id" => %tenure_start_block.block_id(), + "end_block_id" => %tenure_end_block.block_id(), + "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, + "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); + return Err(NetError::InvalidMessage); + } + + debug!( + "Accepted tenure-end header for tenure {} block={}; expect {} blocks", + &self.tenure_id_consensus_hash, + &tenure_end_block.block_id(), + tc_payload.previous_tenure_blocks + ); + self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); + self.state = NakamotoTenureDownloadState::GetTenureBlocks( + tenure_end_block.header.parent_block_id.clone(), + ); + Ok(()) + } + + /// Determine how many blocks must be in this tenure. + /// Returns None if we don't have the start and end blocks yet. + pub fn tenure_length(&self) -> Option { + self.tenure_end_header + .as_ref() + .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) + } + + /// Add downloaded tenure blocks to this machine. + /// If we have collected all tenure blocks, then return them and transition to the Done state. + /// + /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in + /// ascending order by height, and will include the tenure-start block but exclude the + /// tenure-end block. + /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to + /// the next block to fetch (stored in self.state) will be updated. + /// Returns Err(..) if the blocks were invalid. + pub fn try_accept_tenure_blocks( + &mut self, + mut tenure_blocks: Vec, + ) -> Result>, NetError> { + let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + + if tenure_blocks.is_empty() { + // nothing to do + return Ok(None); + } + + // blocks must be contiguous and in order from highest to lowest + let mut expected_block_id = block_cursor; + let mut count = 0; + for block in tenure_blocks.iter() { + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- not part of tenure"; + "expected_block_id" => %expected_block_id, + "block_id" => %block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if let Err(e) = block + .header + .verify_signer_signatures(&self.start_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %self.tenure_id_consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + expected_block_id = &block.header.parent_block_id; + count += 1; + if self + .tenure_blocks + .as_ref() + .map(|blocks| blocks.len()) + .unwrap_or(0) + .saturating_add(count) + > self.tenure_length().unwrap_or(0) as usize + { + // there are more blocks downloaded than indicated by the end-blocks tenure-change + // transaction. + warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); + "tenure_id" => %self.tenure_id_consensus_hash, + "count" => %count, + "tenure_length" => self.tenure_length().unwrap_or(0), + "num_blocks" => tenure_blocks.len()); + return Err(NetError::InvalidMessage); + } + } + + if let Some(blocks) = self.tenure_blocks.as_mut() { + blocks.append(&mut tenure_blocks); + } else { + self.tenure_blocks = Some(tenure_blocks); + } + + // did we reach the tenure start block? + let Some(blocks) = self.tenure_blocks.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got None)"); + return Err(NetError::InvalidState); + }; + + let Some(earliest_block) = blocks.last() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + + let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no tenure-start block (infallible)"); + return Err(NetError::InvalidState); + }; + + debug!( + "Accepted tenure blocks for tenure {} cursor={} ({})", + &self.tenure_id_consensus_hash, &block_cursor, count + ); + if earliest_block.block_id() != tenure_start_block.block_id() { + // still have more blocks to download + let next_block_id = earliest_block.header.parent_block_id.clone(); + debug!( + "Need more blocks for tenure {} (went from {} to {}, next is {})", + &self.tenure_id_consensus_hash, + &block_cursor, + &earliest_block.block_id(), + &next_block_id + ); + self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); + return Ok(None); + } + + // finished! + self.state = NakamotoTenureDownloadState::Done; + Ok(self + .tenure_blocks + .take() + .map(|blocks| blocks.into_iter().rev().collect())) + } + + /// Produce the next HTTP request that, when successfully executed, will fetch the data needed + /// to advance this state machine. + /// Not all states require an HTTP request for advanceement. + /// + /// Returns Ok(Some(request)) if a request is needed + /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's + /// state) + /// Returns Err(()) if we're done. + pub fn make_next_download_request( + &self, + peerhost: PeerHost, + ) -> Result, ()> { + let request = match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { + debug!("Request tenure-start block {}", &start_block_id); + StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) + } + NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { + // we're waiting for some other downloader's block-fetch to complete + debug!( + "Waiting for tenure-end block {} until {:?}", + &_block_id, _deadline + ); + return Ok(None); + } + NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { + debug!("Request tenure-end block {}", &end_block_id); + StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) + } + NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { + debug!("Downloading tenure ending at {}", &end_block_id); + StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) + } + NakamotoTenureDownloadState::Done => { + // nothing more to do + return Err(()); + } + }; + Ok(Some(request)) + } + + /// Begin the next download request for this state machine. The request will be sent to the + /// data URL corresponding to self.naddr. + /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The + /// caller should try this again until it gets one of the other possible return values. + /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) + /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to + /// resolve its data URL to a socket address. + pub fn send_next_download_request( + &mut self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> Result { + if neighbor_rpc.has_inflight(&self.naddr) { + debug!("Peer {} has an inflight request", &self.naddr); + return Ok(true); + } + if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { + return Err(NetError::PeerNotConnected); + } + + let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { + // no conversation open to this neighbor + neighbor_rpc.add_dead(network, &self.naddr); + return Err(NetError::PeerNotConnected); + }; + + let request = match self.make_next_download_request(peerhost) { + Ok(Some(request)) => request, + Ok(None) => { + return Ok(true); + } + Err(_) => { + return Ok(false); + } + }; + + neighbor_rpc.send_request(network, self.naddr.clone(), request)?; + self.idle = false; + Ok(true) + } + + /// Handle a received StacksHttpResponse and advance the state machine. + /// If we get the full tenure's blocks, then return them. + /// Returns Ok(Some([blocks])) if we successfully complete the state machine. + /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done + /// yet. The caller should now call `send_next_download_request()` + /// Returns Err(..) on failure to process the response. + pub fn handle_next_download_response( + &mut self, + response: StacksHttpResponse, + ) -> Result>, NetError> { + let handle_result = match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { + debug!( + "Got download response for tenure-start block {}", + &_block_id + ); + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; + self.try_accept_tenure_start_block(block)?; + Ok(None) + } + NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { + debug!("Invalid state -- Got download response for WaitForTenureBlock"); + Err(NetError::InvalidState) + } + NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { + debug!("Got download response to tenure-end block {}", &_block_id); + let block = response.decode_nakamoto_block().map_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {:?}", &e); + e + })?; + self.try_accept_tenure_end_block(&block)?; + Ok(None) + } + NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { + debug!( + "Got download response for tenure blocks ending at {}", + &_end_block_id + ); + let blocks = response.decode_nakamoto_tenure().map_err(|e| { + warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); + e + })?; + let blocks_opt = self.try_accept_tenure_blocks(blocks)?; + Ok(blocks_opt) + } + NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), + }; + self.idle = true; + handle_result + } + + pub fn is_done(&self) -> bool { + self.state == NakamotoTenureDownloadState::Done + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs new file mode 100644 index 0000000000..28a40e7eb5 --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs @@ -0,0 +1,660 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::download::nakamoto::{ + AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, + NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, +}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// A set of confirmed downloader state machines assigned to one or more neighbors. The block +/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure +/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer +/// connections to downloader state machines, such that each peer is assigned to at most one +/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at +/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine +/// can make progress even if there is only one available peer (in which case, that peer will get +/// scheduled across multiple machines to drive their progress in the right sequence such that +/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). +pub struct NakamotoTenureDownloaderSet { + /// A list of instantiated downloaders that are in progress + pub(crate) downloaders: Vec>, + /// An assignment of peers to downloader machines in the `downloaders` list. + pub(crate) peers: HashMap, + /// The set of tenures that have been successfully downloaded (but possibly not yet stored or + /// processed) + pub(crate) completed_tenures: HashSet, +} + +impl NakamotoTenureDownloaderSet { + pub fn new() -> Self { + Self { + downloaders: vec![], + peers: HashMap::new(), + completed_tenures: HashSet::new(), + } + } + + /// Assign the given peer to the given downloader state machine. Allocate a slot for it if + /// needed. + fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { + debug!( + "Add downloader for tenure {} driven by {}", + &downloader.tenure_id_consensus_hash, &naddr + ); + if let Some(idx) = self.peers.get(&naddr) { + self.downloaders[*idx] = Some(downloader); + } else { + self.downloaders.push(Some(downloader)); + self.peers.insert(naddr, self.downloaders.len() - 1); + } + } + + /// Does the given neighbor have an assigned downloader state machine? + pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { + let Some(idx) = self.peers.get(naddr) else { + return false; + }; + let Some(downloader_opt) = self.downloaders.get(*idx) else { + return false; + }; + downloader_opt.is_some() + } + + /// Drop the downloader associated with the given neighbor, if any. + pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { + let Some(index) = self.peers.remove(naddr) else { + return; + }; + self.downloaders[index] = None; + } + + /// How many downloaders are there? + pub fn num_downloaders(&self) -> usize { + self.downloaders + .iter() + .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) + } + + /// How many downloaders are there, which are scheduled? + pub fn num_scheduled_downloaders(&self) -> usize { + let mut cnt = 0; + for (_, idx) in self.peers.iter() { + if let Some(Some(_)) = self.downloaders.get(*idx) { + cnt += 1; + } + } + cnt + } + + /// Add a sequence of (address, downloader) pairs to this downloader set. + pub(crate) fn add_downloaders( + &mut self, + iter: impl IntoIterator, + ) { + for (naddr, downloader) in iter { + if self.has_downloader(&naddr) { + debug!("Already have downloader for {}", &naddr); + continue; + } + self.add_downloader(naddr, downloader); + } + } + + /// Count up the number of in-flight messages, based on the states of each instantiated + /// downloader. + pub fn inflight(&self) -> usize { + let mut cnt = 0; + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.idle { + continue; + } + if downloader.is_waiting() { + continue; + } + if downloader.is_done() { + continue; + } + cnt += 1; + } + cnt + } + + /// Determine whether or not there exists a downloader for the given tenure, identified by its + /// consensus hash. + pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { + self.downloaders + .iter() + .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) + .is_some() + } + + /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. + pub fn is_empty(&self) -> bool { + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.is_done() { + continue; + } + debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); + return false; + } + true + } + + /// Try to resume processing a download state machine with a given peer. Since a peer is + /// detached from the machine after a single RPC call, this call is needed to re-attach it to a + /// (potentially different, unblocked) machine for the next RPC call to this peer. + /// + /// Returns true if the peer gets scheduled. + /// Returns false if not. + pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { + debug!("Try resume {}", &naddr); + if let Some(idx) = self.peers.get(&naddr) { + let Some(Some(_downloader)) = self.downloaders.get(*idx) else { + return false; + }; + + debug!( + "Peer {} already bound to downloader for {}", + &naddr, &_downloader.tenure_id_consensus_hash + ); + return true; + } + for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { + let Some(downloader) = downloader_opt else { + continue; + }; + if !downloader.idle { + continue; + } + if downloader.is_waiting() { + continue; + } + if downloader.naddr != naddr { + continue; + } + debug!( + "Assign peer {} to work on downloader for {} in state {}", + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + ); + self.peers.insert(naddr, i); + return true; + } + return false; + } + + /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to + /// blocked downloaders. + pub fn clear_available_peers(&mut self) { + let mut idled: Vec = vec![]; + for (naddr, i) in self.peers.iter() { + let Some(downloader_opt) = self.downloaders.get(*i) else { + // should be unreachable + idled.push(naddr.clone()); + continue; + }; + let Some(downloader) = downloader_opt else { + debug!("Remove peer {} for null download {}", &naddr, i); + idled.push(naddr.clone()); + continue; + }; + if downloader.idle || downloader.is_waiting() { + debug!( + "Remove idled peer {} for tenure download {}", + &naddr, &downloader.tenure_id_consensus_hash + ); + idled.push(naddr.clone()); + } + } + for naddr in idled.into_iter() { + self.peers.remove(&naddr); + } + } + + /// Clear out downloaders (but not their peers) that have finished. The caller should follow + /// this up with a call to `clear_available_peers()`. + pub fn clear_finished_downloaders(&mut self) { + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.is_done() { + *downloader_opt = None; + } + } + } + + /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These + /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. + pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap { + let mut ret = HashMap::new(); + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + let Some(block) = downloader.tenure_start_block.as_ref() else { + continue; + }; + ret.insert(block.block_id(), block.clone()); + } + ret + } + + /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their + /// tenure-end blocks. + /// Return a list of peers driving downloaders with failing `tenure_start_blocks` + pub(crate) fn handle_tenure_end_blocks( + &mut self, + tenure_start_blocks: &HashMap, + ) -> Vec { + debug!( + "handle tenure-end blocks: {:?}", + &tenure_start_blocks.keys().collect::>() + ); + let mut dead = vec![]; + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt else { + continue; + }; + let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = + &downloader.state + else { + continue; + }; + let Some(end_block) = tenure_start_blocks.get(end_block_id) else { + continue; + }; + if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { + warn!( + "Failed to accept tenure end-block {} for tenure {}: {:?}", + &end_block.block_id(), + &downloader.tenure_id_consensus_hash, + &e + ); + dead.push(downloader.naddr.clone()); + } + } + dead + } + + /// Does there exist a downloader (possibly unscheduled) for the given tenure? + pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if &downloader.tenure_id_consensus_hash == tenure_id { + debug!( + "Have downloader for tenure {} already (idle={}, waiting={}, state={})", + tenure_id, + downloader.idle, + downloader.is_waiting(), + &downloader.state + ); + return true; + } + } + false + } + + /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor + /// block, we need to go and directly fetch its end block instead of waiting for another + /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method + /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. + pub(crate) fn try_transition_fetch_tenure_end_blocks( + &mut self, + tenure_block_ids: &HashMap, + ) { + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt.as_mut() else { + continue; + }; + downloader.transition_to_fetch_end_block_on_timeout(); + } + + // find tenures in which we need to fetch the tenure-end block directly. + let mut last_available_tenures: HashSet = HashSet::new(); + for (_, all_available) in tenure_block_ids.iter() { + for (_, available) in all_available.iter() { + if available.fetch_end_block { + last_available_tenures.insert(available.end_block_id.clone()); + } + } + } + + // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to + // fetching + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt.as_mut() else { + continue; + }; + if !downloader.idle { + continue; + } + if !downloader.is_waiting() { + continue; + } + if !last_available_tenures.contains(&downloader.tenure_end_block_id) { + continue; + } + debug!( + "Transition downloader for {} from waiting to fetching", + &downloader.tenure_id_consensus_hash + ); + if let Err(e) = downloader.transition_to_fetch_end_block() { + warn!( + "Downloader for {} failed to transition to fetch end block: {:?}", + &downloader.tenure_id_consensus_hash, &e + ); + } + } + } + + /// Create a given number of downloads from a schedule and availability set. + /// Removes items from the schedule, and neighbors from the availability set. + /// A neighbor will be issued at most one request. + pub(crate) fn make_tenure_downloaders( + &mut self, + schedule: &mut VecDeque, + available: &mut HashMap>, + tenure_block_ids: &HashMap, + count: usize, + current_reward_cycles: &BTreeMap, + ) { + debug!("make_tenure_downloaders"; + "schedule" => ?schedule, + "available" => ?available, + "tenure_block_ids" => ?tenure_block_ids, + "inflight" => %self.inflight(), + "count" => count, + "running" => self.num_downloaders(), + "scheduled" => self.num_scheduled_downloaders()); + + self.clear_finished_downloaders(); + self.clear_available_peers(); + self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); + while self.inflight() < count { + let Some(ch) = schedule.front() else { + break; + }; + if self.completed_tenures.contains(&ch) { + debug!("Already successfully downloaded tenure {}", &ch); + schedule.pop_front(); + continue; + } + let Some(neighbors) = available.get_mut(ch) else { + // not found on any neighbors, so stop trying this tenure + debug!("No neighbors have tenure {}", ch); + schedule.pop_front(); + continue; + }; + if neighbors.is_empty() { + // no more neighbors to try + debug!("No more neighbors can serve tenure {}", ch); + schedule.pop_front(); + continue; + } + let Some(naddr) = neighbors.pop() else { + debug!("No more neighbors can serve tenure {}", ch); + schedule.pop_front(); + continue; + }; + if self.try_resume_peer(naddr.clone()) { + continue; + }; + if self.has_downloader_for_tenure(&ch) { + schedule.pop_front(); + continue; + } + + let Some(available_tenures) = tenure_block_ids.get(&naddr) else { + // this peer doesn't have any known tenures, so try the others + debug!("No tenures available from {}", &naddr); + continue; + }; + let Some(tenure_info) = available_tenures.get(ch) else { + // this peer does not have a tenure start/end block for this tenure, so try the + // others. + debug!("Neighbor {} does not serve tenure {}", &naddr, ch); + continue; + }; + let Some(Some(start_reward_set)) = current_reward_cycles + .get(&tenure_info.start_reward_cycle) + .map(|cycle_info| cycle_info.reward_set()) + else { + debug!( + "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", + tenure_info.start_reward_cycle, + &tenure_info + ); + schedule.pop_front(); + continue; + }; + let Some(Some(end_reward_set)) = current_reward_cycles + .get(&tenure_info.end_reward_cycle) + .map(|cycle_info| cycle_info.reward_set()) + else { + debug!( + "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", + tenure_info.end_reward_cycle, + &tenure_info + ); + schedule.pop_front(); + continue; + }; + + debug!( + "Download tenure {} (start={}, end={}) (rc {},{})", + &ch, + &tenure_info.start_block_id, + &tenure_info.end_block_id, + tenure_info.start_reward_cycle, + tenure_info.end_reward_cycle + ); + let tenure_download = NakamotoTenureDownloader::new( + ch.clone(), + tenure_info.start_block_id.clone(), + tenure_info.end_block_id.clone(), + naddr.clone(), + start_reward_set.clone(), + end_reward_set.clone(), + ); + + debug!("Request tenure {} from neighbor {}", ch, &naddr); + self.add_downloader(naddr, tenure_download); + schedule.pop_front(); + } + } + + /// Run all confirmed downloaders. + /// * Identify neighbors for which we do not have an inflight request + /// * Get each such neighbor's downloader, and generate its next HTTP reqeust. Send that + /// request to the neighbor and begin driving the underlying socket I/O. + /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance + /// its state. + /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. + /// + /// Returns the set of downloaded blocks obtained for completed downloaders. These will be + /// full confirmed tenures. + pub fn run( + &mut self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> HashMap> { + let addrs: Vec<_> = self.peers.keys().cloned().collect(); + let mut finished = vec![]; + let mut finished_tenures = vec![]; + let mut new_blocks = HashMap::new(); + + // send requests + for (naddr, index) in self.peers.iter() { + if neighbor_rpc.has_inflight(&naddr) { + debug!("Peer {} has an inflight request", &naddr); + continue; + } + let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { + debug!("No downloader for {}", &naddr); + continue; + }; + if downloader.is_done() { + debug!("Downloader for {} is done", &naddr); + finished.push(naddr.clone()); + finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + continue; + } + debug!( + "Send request to {} for tenure {} (state {})", + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + ); + let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { + debug!("Downloader for {} failed; this peer is dead", &naddr); + neighbor_rpc.add_dead(network, naddr); + continue; + }; + if !sent { + // this downloader is dead or broken + finished.push(naddr.clone()); + continue; + } + } + + // clear dead, broken, and done + for naddr in addrs.iter() { + if neighbor_rpc.is_dead_or_broken(network, naddr) { + debug!("Remove dead/broken downloader for {}", &naddr); + self.clear_downloader(&naddr); + } + } + for done_naddr in finished.drain(..) { + debug!("Remove finished downloader for {}", &done_naddr); + self.clear_downloader(&done_naddr); + } + for done_tenure in finished_tenures.drain(..) { + self.completed_tenures.insert(done_tenure); + } + + // handle responses + for (naddr, response) in neighbor_rpc.collect_replies(network) { + let Some(index) = self.peers.get(&naddr) else { + debug!("No downloader for {}", &naddr); + continue; + }; + let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { + debug!("No downloader for {}", &naddr); + continue; + }; + debug!("Got response from {}", &naddr); + + let Ok(blocks_opt) = downloader + .handle_next_download_response(response) + .map_err(|e| { + debug!("Failed to handle response from {}: {:?}", &naddr, &e); + e + }) + else { + debug!("Failed to handle download response from {}", &naddr); + neighbor_rpc.add_dead(network, &naddr); + continue; + }; + + let Some(blocks) = blocks_opt else { + continue; + }; + + debug!( + "Got {} blocks for tenure {}", + blocks.len(), + &downloader.tenure_id_consensus_hash + ); + new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); + if downloader.is_done() { + finished.push(naddr.clone()); + finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + continue; + } + } + + // clear dead, broken, and done + for naddr in addrs.iter() { + if neighbor_rpc.is_dead_or_broken(network, naddr) { + debug!("Remove dead/broken downloader for {}", &naddr); + self.clear_downloader(naddr); + } + } + for done_naddr in finished.drain(..) { + debug!("Remove finished downloader for {}", &done_naddr); + self.clear_downloader(&done_naddr); + } + for done_tenure in finished_tenures.drain(..) { + self.completed_tenures.insert(done_tenure); + } + + new_blocks + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs new file mode 100644 index 0000000000..28a40e7eb5 --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs @@ -0,0 +1,660 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::to_hex; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::download::nakamoto::{ + AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, + NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, +}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// A set of confirmed downloader state machines assigned to one or more neighbors. The block +/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure +/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer +/// connections to downloader state machines, such that each peer is assigned to at most one +/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at +/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine +/// can make progress even if there is only one available peer (in which case, that peer will get +/// scheduled across multiple machines to drive their progress in the right sequence such that +/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). +pub struct NakamotoTenureDownloaderSet { + /// A list of instantiated downloaders that are in progress + pub(crate) downloaders: Vec>, + /// An assignment of peers to downloader machines in the `downloaders` list. + pub(crate) peers: HashMap, + /// The set of tenures that have been successfully downloaded (but possibly not yet stored or + /// processed) + pub(crate) completed_tenures: HashSet, +} + +impl NakamotoTenureDownloaderSet { + pub fn new() -> Self { + Self { + downloaders: vec![], + peers: HashMap::new(), + completed_tenures: HashSet::new(), + } + } + + /// Assign the given peer to the given downloader state machine. Allocate a slot for it if + /// needed. + fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { + debug!( + "Add downloader for tenure {} driven by {}", + &downloader.tenure_id_consensus_hash, &naddr + ); + if let Some(idx) = self.peers.get(&naddr) { + self.downloaders[*idx] = Some(downloader); + } else { + self.downloaders.push(Some(downloader)); + self.peers.insert(naddr, self.downloaders.len() - 1); + } + } + + /// Does the given neighbor have an assigned downloader state machine? + pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { + let Some(idx) = self.peers.get(naddr) else { + return false; + }; + let Some(downloader_opt) = self.downloaders.get(*idx) else { + return false; + }; + downloader_opt.is_some() + } + + /// Drop the downloader associated with the given neighbor, if any. + pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { + let Some(index) = self.peers.remove(naddr) else { + return; + }; + self.downloaders[index] = None; + } + + /// How many downloaders are there? + pub fn num_downloaders(&self) -> usize { + self.downloaders + .iter() + .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) + } + + /// How many downloaders are there, which are scheduled? + pub fn num_scheduled_downloaders(&self) -> usize { + let mut cnt = 0; + for (_, idx) in self.peers.iter() { + if let Some(Some(_)) = self.downloaders.get(*idx) { + cnt += 1; + } + } + cnt + } + + /// Add a sequence of (address, downloader) pairs to this downloader set. + pub(crate) fn add_downloaders( + &mut self, + iter: impl IntoIterator, + ) { + for (naddr, downloader) in iter { + if self.has_downloader(&naddr) { + debug!("Already have downloader for {}", &naddr); + continue; + } + self.add_downloader(naddr, downloader); + } + } + + /// Count up the number of in-flight messages, based on the states of each instantiated + /// downloader. + pub fn inflight(&self) -> usize { + let mut cnt = 0; + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.idle { + continue; + } + if downloader.is_waiting() { + continue; + } + if downloader.is_done() { + continue; + } + cnt += 1; + } + cnt + } + + /// Determine whether or not there exists a downloader for the given tenure, identified by its + /// consensus hash. + pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { + self.downloaders + .iter() + .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) + .is_some() + } + + /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. + pub fn is_empty(&self) -> bool { + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.is_done() { + continue; + } + debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); + return false; + } + true + } + + /// Try to resume processing a download state machine with a given peer. Since a peer is + /// detached from the machine after a single RPC call, this call is needed to re-attach it to a + /// (potentially different, unblocked) machine for the next RPC call to this peer. + /// + /// Returns true if the peer gets scheduled. + /// Returns false if not. + pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { + debug!("Try resume {}", &naddr); + if let Some(idx) = self.peers.get(&naddr) { + let Some(Some(_downloader)) = self.downloaders.get(*idx) else { + return false; + }; + + debug!( + "Peer {} already bound to downloader for {}", + &naddr, &_downloader.tenure_id_consensus_hash + ); + return true; + } + for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { + let Some(downloader) = downloader_opt else { + continue; + }; + if !downloader.idle { + continue; + } + if downloader.is_waiting() { + continue; + } + if downloader.naddr != naddr { + continue; + } + debug!( + "Assign peer {} to work on downloader for {} in state {}", + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + ); + self.peers.insert(naddr, i); + return true; + } + return false; + } + + /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to + /// blocked downloaders. + pub fn clear_available_peers(&mut self) { + let mut idled: Vec = vec![]; + for (naddr, i) in self.peers.iter() { + let Some(downloader_opt) = self.downloaders.get(*i) else { + // should be unreachable + idled.push(naddr.clone()); + continue; + }; + let Some(downloader) = downloader_opt else { + debug!("Remove peer {} for null download {}", &naddr, i); + idled.push(naddr.clone()); + continue; + }; + if downloader.idle || downloader.is_waiting() { + debug!( + "Remove idled peer {} for tenure download {}", + &naddr, &downloader.tenure_id_consensus_hash + ); + idled.push(naddr.clone()); + } + } + for naddr in idled.into_iter() { + self.peers.remove(&naddr); + } + } + + /// Clear out downloaders (but not their peers) that have finished. The caller should follow + /// this up with a call to `clear_available_peers()`. + pub fn clear_finished_downloaders(&mut self) { + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt else { + continue; + }; + if downloader.is_done() { + *downloader_opt = None; + } + } + } + + /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These + /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. + pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap { + let mut ret = HashMap::new(); + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + let Some(block) = downloader.tenure_start_block.as_ref() else { + continue; + }; + ret.insert(block.block_id(), block.clone()); + } + ret + } + + /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their + /// tenure-end blocks. + /// Return a list of peers driving downloaders with failing `tenure_start_blocks` + pub(crate) fn handle_tenure_end_blocks( + &mut self, + tenure_start_blocks: &HashMap, + ) -> Vec { + debug!( + "handle tenure-end blocks: {:?}", + &tenure_start_blocks.keys().collect::>() + ); + let mut dead = vec![]; + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt else { + continue; + }; + let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = + &downloader.state + else { + continue; + }; + let Some(end_block) = tenure_start_blocks.get(end_block_id) else { + continue; + }; + if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { + warn!( + "Failed to accept tenure end-block {} for tenure {}: {:?}", + &end_block.block_id(), + &downloader.tenure_id_consensus_hash, + &e + ); + dead.push(downloader.naddr.clone()); + } + } + dead + } + + /// Does there exist a downloader (possibly unscheduled) for the given tenure? + pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { + for downloader_opt in self.downloaders.iter() { + let Some(downloader) = downloader_opt else { + continue; + }; + if &downloader.tenure_id_consensus_hash == tenure_id { + debug!( + "Have downloader for tenure {} already (idle={}, waiting={}, state={})", + tenure_id, + downloader.idle, + downloader.is_waiting(), + &downloader.state + ); + return true; + } + } + false + } + + /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor + /// block, we need to go and directly fetch its end block instead of waiting for another + /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method + /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. + pub(crate) fn try_transition_fetch_tenure_end_blocks( + &mut self, + tenure_block_ids: &HashMap, + ) { + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt.as_mut() else { + continue; + }; + downloader.transition_to_fetch_end_block_on_timeout(); + } + + // find tenures in which we need to fetch the tenure-end block directly. + let mut last_available_tenures: HashSet = HashSet::new(); + for (_, all_available) in tenure_block_ids.iter() { + for (_, available) in all_available.iter() { + if available.fetch_end_block { + last_available_tenures.insert(available.end_block_id.clone()); + } + } + } + + // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to + // fetching + for downloader_opt in self.downloaders.iter_mut() { + let Some(downloader) = downloader_opt.as_mut() else { + continue; + }; + if !downloader.idle { + continue; + } + if !downloader.is_waiting() { + continue; + } + if !last_available_tenures.contains(&downloader.tenure_end_block_id) { + continue; + } + debug!( + "Transition downloader for {} from waiting to fetching", + &downloader.tenure_id_consensus_hash + ); + if let Err(e) = downloader.transition_to_fetch_end_block() { + warn!( + "Downloader for {} failed to transition to fetch end block: {:?}", + &downloader.tenure_id_consensus_hash, &e + ); + } + } + } + + /// Create a given number of downloads from a schedule and availability set. + /// Removes items from the schedule, and neighbors from the availability set. + /// A neighbor will be issued at most one request. + pub(crate) fn make_tenure_downloaders( + &mut self, + schedule: &mut VecDeque, + available: &mut HashMap>, + tenure_block_ids: &HashMap, + count: usize, + current_reward_cycles: &BTreeMap, + ) { + debug!("make_tenure_downloaders"; + "schedule" => ?schedule, + "available" => ?available, + "tenure_block_ids" => ?tenure_block_ids, + "inflight" => %self.inflight(), + "count" => count, + "running" => self.num_downloaders(), + "scheduled" => self.num_scheduled_downloaders()); + + self.clear_finished_downloaders(); + self.clear_available_peers(); + self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); + while self.inflight() < count { + let Some(ch) = schedule.front() else { + break; + }; + if self.completed_tenures.contains(&ch) { + debug!("Already successfully downloaded tenure {}", &ch); + schedule.pop_front(); + continue; + } + let Some(neighbors) = available.get_mut(ch) else { + // not found on any neighbors, so stop trying this tenure + debug!("No neighbors have tenure {}", ch); + schedule.pop_front(); + continue; + }; + if neighbors.is_empty() { + // no more neighbors to try + debug!("No more neighbors can serve tenure {}", ch); + schedule.pop_front(); + continue; + } + let Some(naddr) = neighbors.pop() else { + debug!("No more neighbors can serve tenure {}", ch); + schedule.pop_front(); + continue; + }; + if self.try_resume_peer(naddr.clone()) { + continue; + }; + if self.has_downloader_for_tenure(&ch) { + schedule.pop_front(); + continue; + } + + let Some(available_tenures) = tenure_block_ids.get(&naddr) else { + // this peer doesn't have any known tenures, so try the others + debug!("No tenures available from {}", &naddr); + continue; + }; + let Some(tenure_info) = available_tenures.get(ch) else { + // this peer does not have a tenure start/end block for this tenure, so try the + // others. + debug!("Neighbor {} does not serve tenure {}", &naddr, ch); + continue; + }; + let Some(Some(start_reward_set)) = current_reward_cycles + .get(&tenure_info.start_reward_cycle) + .map(|cycle_info| cycle_info.reward_set()) + else { + debug!( + "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", + tenure_info.start_reward_cycle, + &tenure_info + ); + schedule.pop_front(); + continue; + }; + let Some(Some(end_reward_set)) = current_reward_cycles + .get(&tenure_info.end_reward_cycle) + .map(|cycle_info| cycle_info.reward_set()) + else { + debug!( + "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", + tenure_info.end_reward_cycle, + &tenure_info + ); + schedule.pop_front(); + continue; + }; + + debug!( + "Download tenure {} (start={}, end={}) (rc {},{})", + &ch, + &tenure_info.start_block_id, + &tenure_info.end_block_id, + tenure_info.start_reward_cycle, + tenure_info.end_reward_cycle + ); + let tenure_download = NakamotoTenureDownloader::new( + ch.clone(), + tenure_info.start_block_id.clone(), + tenure_info.end_block_id.clone(), + naddr.clone(), + start_reward_set.clone(), + end_reward_set.clone(), + ); + + debug!("Request tenure {} from neighbor {}", ch, &naddr); + self.add_downloader(naddr, tenure_download); + schedule.pop_front(); + } + } + + /// Run all confirmed downloaders. + /// * Identify neighbors for which we do not have an inflight request + /// * Get each such neighbor's downloader, and generate its next HTTP reqeust. Send that + /// request to the neighbor and begin driving the underlying socket I/O. + /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance + /// its state. + /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. + /// + /// Returns the set of downloaded blocks obtained for completed downloaders. These will be + /// full confirmed tenures. + pub fn run( + &mut self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> HashMap> { + let addrs: Vec<_> = self.peers.keys().cloned().collect(); + let mut finished = vec![]; + let mut finished_tenures = vec![]; + let mut new_blocks = HashMap::new(); + + // send requests + for (naddr, index) in self.peers.iter() { + if neighbor_rpc.has_inflight(&naddr) { + debug!("Peer {} has an inflight request", &naddr); + continue; + } + let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { + debug!("No downloader for {}", &naddr); + continue; + }; + if downloader.is_done() { + debug!("Downloader for {} is done", &naddr); + finished.push(naddr.clone()); + finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + continue; + } + debug!( + "Send request to {} for tenure {} (state {})", + &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + ); + let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { + debug!("Downloader for {} failed; this peer is dead", &naddr); + neighbor_rpc.add_dead(network, naddr); + continue; + }; + if !sent { + // this downloader is dead or broken + finished.push(naddr.clone()); + continue; + } + } + + // clear dead, broken, and done + for naddr in addrs.iter() { + if neighbor_rpc.is_dead_or_broken(network, naddr) { + debug!("Remove dead/broken downloader for {}", &naddr); + self.clear_downloader(&naddr); + } + } + for done_naddr in finished.drain(..) { + debug!("Remove finished downloader for {}", &done_naddr); + self.clear_downloader(&done_naddr); + } + for done_tenure in finished_tenures.drain(..) { + self.completed_tenures.insert(done_tenure); + } + + // handle responses + for (naddr, response) in neighbor_rpc.collect_replies(network) { + let Some(index) = self.peers.get(&naddr) else { + debug!("No downloader for {}", &naddr); + continue; + }; + let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { + debug!("No downloader for {}", &naddr); + continue; + }; + debug!("Got response from {}", &naddr); + + let Ok(blocks_opt) = downloader + .handle_next_download_response(response) + .map_err(|e| { + debug!("Failed to handle response from {}: {:?}", &naddr, &e); + e + }) + else { + debug!("Failed to handle download response from {}", &naddr); + neighbor_rpc.add_dead(network, &naddr); + continue; + }; + + let Some(blocks) = blocks_opt else { + continue; + }; + + debug!( + "Got {} blocks for tenure {}", + blocks.len(), + &downloader.tenure_id_consensus_hash + ); + new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); + if downloader.is_done() { + finished.push(naddr.clone()); + finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + continue; + } + } + + // clear dead, broken, and done + for naddr in addrs.iter() { + if neighbor_rpc.is_dead_or_broken(network, naddr) { + debug!("Remove dead/broken downloader for {}", &naddr); + self.clear_downloader(naddr); + } + } + for done_naddr in finished.drain(..) { + debug!("Remove finished downloader for {}", &done_naddr); + self.clear_downloader(&done_naddr); + } + for done_tenure in finished_tenures.drain(..) { + self.completed_tenures.insert(done_tenure); + } + + new_blocks + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs new file mode 100644 index 0000000000..c96f718d2b --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs @@ -0,0 +1,867 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::RewardCycleInfo; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::download::nakamoto::{ + AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, + WantedTenure, +}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for a unconfirmed tenures. These include the ongoing tenure, as well as the +/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but +/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoUnconfirmedDownloadState { + /// Getting the tenure tip information + GetTenureInfo, + /// Get the tenure start block for the ongoing tenure. + /// The inner value is tenure-start block ID of the ongoing tenure. + GetTenureStartBlock(StacksBlockId), + /// Receiving unconfirmed tenure blocks. + /// The inner value is the block ID of the next block to fetch. + GetUnconfirmedTenureBlocks(StacksBlockId), + /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block + /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). + Done, +} + +impl fmt::Display for NakamotoUnconfirmedDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for the unconfirmed tenures. It operates in the following steps: +/// +/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip +/// 2. Get the tenure-start block for the unconfirmed chain tip +/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the +/// immediate child of the one obtained in (2) +/// +/// Once this state-machine finishes execution, the tenure-start block is used to construct a +/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. +/// +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoUnconfirmedTenureDownloader { + /// state of this machine + pub state: NakamotoUnconfirmedDownloadState, + /// Address of who we're asking + pub naddr: NeighborAddress, + /// reward set of the highest confirmed tenure + pub confirmed_signer_keys: Option, + /// reward set of the unconfirmed (ongoing) tenure + pub unconfirmed_signer_keys: Option, + /// Block ID of this node's highest-processed block. + /// We will not download any blocks lower than this, if it's set. + pub highest_processed_block_id: Option, + /// Highest processed block height (which may not need to be loaded) + pub highest_processed_block_height: Option, + + /// Tenure tip info we obtained for this peer + pub tenure_tip: Option, + /// Tenure start block for the ongoing tip. + /// This is also the tenure-end block for the highest-complete tip. + pub unconfirmed_tenure_start_block: Option, + /// Unconfirmed tenure blocks obtained + pub unconfirmed_tenure_blocks: Option>, +} + +impl NakamotoUnconfirmedTenureDownloader { + /// Make a new downloader which will download blocks from the tip back down to the optional + /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). + pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self { + Self { + state: NakamotoUnconfirmedDownloadState::GetTenureInfo, + naddr, + confirmed_signer_keys: None, + unconfirmed_signer_keys: None, + highest_processed_block_id, + highest_processed_block_height: None, + tenure_tip: None, + unconfirmed_tenure_start_block: None, + unconfirmed_tenure_blocks: None, + } + } + + /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is + /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote + /// node). + pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { + self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) + } + + /// Set the highest-processed block. + /// This can be performed by the downloader itself in order to inform ongoing requests for + /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node + /// has already handled. + pub fn set_highest_processed_block( + &mut self, + highest_processed_block_id: StacksBlockId, + highest_processed_block_height: u64, + ) { + self.highest_processed_block_id = Some(highest_processed_block_id); + self.highest_processed_block_height = Some(highest_processed_block_height); + } + + /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. + /// + /// * tenure_tip.consensus_hash + /// This is the consensus hash of the remote node's ongoing tenure. It may not be the + /// sortition tip, e.g. if the tenure spans multiple sortitions. + /// * tenure_tip.tenure_start_block_id + /// This is the first block ID of the ongoing unconfirmed tenure. + /// * tenure_tip.parent_consensus_hash + /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest + /// complete tenure, for which we know the start and end block IDs. + /// * tenure_tip.parent_tenure_start_block_id + /// This is the tenure start block for the highest complete tenure. It should be equal to + /// the winning Stacks block hash of the snapshot for the ongoing tenure. + /// + /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go + /// fetch it again; just get the new unconfirmed blocks. + pub fn try_accept_tenure_info( + &mut self, + sortdb: &SortitionDB, + local_sort_tip: &BlockSnapshot, + chainstate: &StacksChainState, + remote_tenure_tip: RPCGetTenureInfo, + current_reward_sets: &BTreeMap, + ) -> Result<(), NetError> { + if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { + return Err(NetError::InvalidState); + } + if self.tenure_tip.is_some() { + return Err(NetError::InvalidState); + } + + debug!("Got tenure info {:?}", remote_tenure_tip); + debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); + + // authenticate consensus hashes against canonical chain history + let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &remote_tenure_tip.consensus_hash, + )? + .ok_or_else(|| { + debug!( + "No snapshot for tenure {}", + &remote_tenure_tip.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &remote_tenure_tip.parent_consensus_hash, + )? + .ok_or_else(|| { + debug!( + "No snapshot for parent tenure {}", + &remote_tenure_tip.parent_consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + + let ih = sortdb.index_handle(&local_sort_tip.sortition_id); + let ancestor_local_tenure_sn = ih + .get_block_snapshot_by_height(local_tenure_sn.block_height)? + .ok_or_else(|| { + debug!( + "No tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + + if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { + // .consensus_hash is not on the canonical fork + warn!("Unconfirmed tenure consensus hash is not canonical"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.consensus_hash); + return Err(DBError::NotFoundError.into()); + } + let ancestor_parent_local_tenure_sn = ih + .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? + .ok_or_else(|| { + debug!( + "No parent tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError.into()) + })?; + + if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { + // .parent_consensus_hash is not on the canonical fork + warn!("Parent unconfirmed tenure consensus hash is not canonical"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); + return Err(DBError::NotFoundError.into()); + } + + // parent tenure sortition must precede the ongoing tenure sortition + if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { + warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.consensus_hash, + "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); + return Err(NetError::InvalidMessage); + } + + // parent tenure start block ID must be the winning block hash for the ongoing tenure's + // snapshot + if local_tenure_sn.winning_stacks_block_hash.0 + != remote_tenure_tip.parent_tenure_start_block_id.0 + { + debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; + "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, + "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); + return Err(NetError::StaleView); + } + + if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { + // we've synchronized this tenure before, so don't get anymore blocks before it. + let highest_processed_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(highest_processed_block_id)? + .ok_or_else(|| { + debug!("No such Nakamoto block {}", &highest_processed_block_id); + NetError::DBError(DBError::NotFoundError) + })? + .0; + + let highest_processed_block_height = highest_processed_block.header.chain_length; + self.highest_processed_block_height = Some(highest_processed_block_height); + + if &remote_tenure_tip.tip_block_id == highest_processed_block_id + || highest_processed_block_height > remote_tenure_tip.tip_height + { + // nothing to do -- we're at or ahead of the remote peer, so finish up. + // If we don't have the tenure-start block for the confirmed tenure that the remote + // peer claims to have, then the remote peer has sent us invalid data and we should + // treat it as such. + let unconfirmed_tenure_start_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? + .ok_or(NetError::InvalidMessage)? + .0; + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::Done; + } + } + + if self.state == NakamotoUnconfirmedDownloadState::Done { + // only need to remember the tenure tip + self.tenure_tip = Some(remote_tenure_tip); + return Ok(()); + } + + // we're not finished + let tenure_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) + .expect("FATAL: sortition from before system start"); + let parent_tenure_rc = sortdb + .pox_constants + .block_height_to_reward_cycle( + sortdb.first_block_height, + parent_local_tenure_sn.block_height, + ) + .expect("FATAL: sortition from before system start"); + + // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions + let Some(Some(confirmed_reward_set)) = current_reward_sets + .get(&parent_tenure_rc) + .map(|cycle_info| cycle_info.reward_set()) + else { + warn!( + "No signer public keys for confirmed tenure {} (rc {})", + &parent_local_tenure_sn.consensus_hash, parent_tenure_rc + ); + return Err(NetError::InvalidState); + }; + + let Some(Some(unconfirmed_reward_set)) = current_reward_sets + .get(&tenure_rc) + .map(|cycle_info| cycle_info.reward_set()) + else { + warn!( + "No signer public keys for unconfirmed tenure {} (rc {})", + &local_tenure_sn.consensus_hash, tenure_rc + ); + return Err(NetError::InvalidState); + }; + + if chainstate + .nakamoto_blocks_db() + .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? + { + // proceed to get unconfirmed blocks. We already have the tenure-start block. + let unconfirmed_tenure_start_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? + .ok_or_else(|| { + debug!( + "No such tenure-start Nakamoto block {}", + &remote_tenure_tip.tenure_start_block_id + ); + NetError::DBError(DBError::NotFoundError) + })? + .0; + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( + remote_tenure_tip.tip_block_id.clone(), + ); + } else { + // get the tenure-start block first + self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( + remote_tenure_tip.tenure_start_block_id.clone(), + ); + } + + debug!( + "Will validate unconfirmed blocks with reward sets in ({},{})", + parent_tenure_rc, tenure_rc + ); + self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); + self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); + self.tenure_tip = Some(remote_tenure_tip); + + Ok(()) + } + + /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the unconfirmed tenure start block was valid + /// Returns Err(..) if it was not valid, or if this function was called out of sequence. + pub fn try_accept_unconfirmed_tenure_start_block( + &mut self, + unconfirmed_tenure_start_block: NakamotoBlock, + ) -> Result<(), NetError> { + let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = + &self.state + else { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); + return Err(NetError::InvalidState); + }; + + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); + return Err(NetError::InvalidState); + }; + + // stacker signature has to match the current reward set + if let Err(e) = unconfirmed_tenure_start_block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // block has to match the expected hash + if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { + warn!("Invalid tenure-start block"; + "tenure_id_start_block" => %tenure_start_block_id, + "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + // furthermore, the block has to match the expected tenure ID + if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { + warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; + "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); + return Err(NetError::InvalidMessage); + } + + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( + tenure_tip.tip_block_id.clone(), + ); + Ok(()) + } + + /// Add downloaded unconfirmed tenure blocks. + /// If we have collected all tenure blocks, then return them. + /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the + /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come + /// after the highest-processed block (if set). + /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call + /// `send_next_download_request()` + /// Returns Err(..) on invalid state or invalid block. + pub fn try_accept_unconfirmed_tenure_blocks( + &mut self, + mut tenure_blocks: Vec, + ) -> Result>, NetError> { + let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = + &self.state + else { + return Err(NetError::InvalidState); + }; + + let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); + return Err(NetError::InvalidState); + }; + + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); + return Err(NetError::InvalidState); + }; + + if tenure_blocks.is_empty() { + // nothing to do + debug!("No tenure blocks obtained"); + return Ok(None); + } + + // blocks must be contiguous and in order from highest to lowest. + // If there's a tenure-start block, it must be last. + let mut expected_block_id = last_block_id; + let mut finished_download = false; + let mut last_block_index = None; + for (cnt, block) in tenure_blocks.iter().enumerate() { + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- not part of tenure"; + "expected_block_id" => %expected_block_id, + "block_id" => %block.header.block_id()); + return Err(NetError::InvalidMessage); + } + if let Err(e) = block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // we may or may not need the tenure-start block for the unconfirmed tenure. But if we + // do, make sure it's valid, and it's the last block we receive. + let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { + warn!("Invalid tenure-start block"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + }; + if is_tenure_start { + // this is the tenure-start block, so make sure it matches our /v3/tenure/info + if block.header.block_id() != tenure_tip.tenure_start_block_id { + warn!("Unexpected tenure-start block"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); + return Err(NetError::InvalidMessage); + } + + if cnt.saturating_add(1) != tenure_blocks.len() { + warn!("Invalid tenure stream -- got tenure-start before end of tenure"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "cnt" => cnt, + "len" => tenure_blocks.len(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + finished_download = true; + last_block_index = Some(cnt); + break; + } + + debug!("Got unconfirmed tenure block {}", &block.header.block_id()); + + // NOTE: this field can get updated by the downloader while this state-machine is in + // this state. + if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { + if expected_block_id == highest_processed_block_id { + // got all the blocks we asked for + debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); + finished_download = true; + last_block_index = Some(cnt); + break; + } + } + + // NOTE: this field can get updated by the downloader while this state-machine is in + // this state. + if let Some(highest_processed_block_height) = + self.highest_processed_block_height.as_ref() + { + if &block.header.chain_length <= highest_processed_block_height { + // no need to continue this download + debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); + finished_download = true; + last_block_index = Some(cnt); + break; + } + } + + expected_block_id = &block.header.parent_block_id; + last_block_index = Some(cnt); + } + + // blocks after the last_block_index were not processed, so should be dropped + if let Some(last_block_index) = last_block_index { + tenure_blocks.truncate(last_block_index + 1); + } + + if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { + blocks.append(&mut tenure_blocks); + } else { + self.unconfirmed_tenure_blocks = Some(tenure_blocks); + } + + if finished_download { + // we have all of the unconfirmed tenure blocks that were requested. + // only return those newer than the highest block. + self.state = NakamotoUnconfirmedDownloadState::Done; + let highest_processed_block_height = + *self.highest_processed_block_height.as_ref().unwrap_or(&0); + + debug!("Finished receiving unconfirmed tenure"); + return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { + blocks + .into_iter() + .filter(|block| block.header.chain_length > highest_processed_block_height) + .rev() + .collect() + })); + } + + let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + + // still have more to get + let Some(earliest_block) = blocks.last() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + let next_block_id = earliest_block.header.parent_block_id.clone(); + + debug!( + "Will resume fetching unconfirmed tenure blocks starting at {}", + &next_block_id + ); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); + Ok(None) + } + + /// Once this machine runs to completion, examine its state to see if we still need to fetch + /// the highest complete tenure. We may not need to, especially if we're just polling for new + /// unconfirmed blocks. + /// + /// Return Ok(true) if we need it still + /// Return Ok(false) if we already have it + /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. + pub fn need_highest_complete_tenure( + &self, + chainstate: &StacksChainState, + ) -> Result { + if self.state != NakamotoUnconfirmedDownloadState::Done { + return Err(NetError::InvalidState); + } + let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() + else { + return Err(NetError::InvalidState); + }; + + // if we've processed the unconfirmed tenure-start block already, then we've necessarily + // downloaded and processed the highest-complete tenure already. + Ok(!NakamotoChainState::has_block_header( + chainstate.db(), + &unconfirmed_tenure_start_block.header.block_id(), + false, + )?) + } + + /// Determine if we can produce a highest-complete tenure request. + /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure + pub fn can_make_highest_complete_tenure_downloader( + &self, + sortdb: &SortitionDB, + ) -> Result { + let Some(tenure_tip) = &self.tenure_tip else { + return Ok(false); + }; + + let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &tenure_tip.parent_consensus_hash, + )? + else { + return Ok(false); + }; + + let Some(tip_sn) = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? + else { + return Ok(false); + }; + + let Some(parent_tenure) = + SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? + else { + return Ok(false); + }; + + let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? + else { + return Ok(false); + }; + + if parent_tenure.epoch_id < StacksEpochId::Epoch30 + || tip_tenure.epoch_id < StacksEpochId::Epoch30 + { + debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; + "start_tenure" => %tenure_tip.parent_consensus_hash, + "end_tenure" => %tenure_tip.consensus_hash, + "start_tenure_epoch" => %parent_tenure.epoch_id, + "end_tenure_epoch" => %tip_tenure.epoch_id + ); + return Ok(false); + } + + Ok(true) + } + + /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the + /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get + /// its tenure-start block. + /// + /// Returns Ok(downloader) on success + /// Returns Err(..) if we call this function out of sequence. + pub fn make_highest_complete_tenure_downloader( + &self, + ) -> Result { + if self.state != NakamotoUnconfirmedDownloadState::Done { + return Err(NetError::InvalidState); + } + let Some(tenure_tip) = &self.tenure_tip else { + return Err(NetError::InvalidState); + }; + let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + + debug!( + "Create downloader for highest complete tenure {} known by {}", + &tenure_tip.parent_consensus_hash, &self.naddr, + ); + let ntd = NakamotoTenureDownloader::new( + tenure_tip.parent_consensus_hash.clone(), + tenure_tip.parent_tenure_start_block_id.clone(), + tenure_tip.tenure_start_block_id.clone(), + self.naddr.clone(), + confirmed_signer_keys.clone(), + unconfirmed_signer_keys.clone(), + ); + + Ok(ntd) + } + + /// Produce the next HTTP request that, when successfully executed, will advance this state + /// machine. + /// + /// Returns Some(request) if a request must be sent. + /// Returns None if we're done + pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option { + match &self.state { + NakamotoUnconfirmedDownloadState::GetTenureInfo => { + // need to get the tenure tip + return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost)); + } + NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => { + return Some(StacksHttpRequest::new_get_nakamoto_block( + peerhost, + block_id.clone(), + )); + } + NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => { + return Some(StacksHttpRequest::new_get_nakamoto_tenure( + peerhost, + tip_block_id.clone(), + self.highest_processed_block_id.clone(), + )); + } + NakamotoUnconfirmedDownloadState::Done => { + // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed + // tenure downloader using the earliest unconfirmed tenure block. + return None; + } + } + } + + /// Begin the next download request for this state machine. + /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The + /// caller should try this again until it gets one of the other possible return values. It's + /// up to the caller to determine when it's appropriate to convert this state machine into a + /// `NakamotoTenureDownloader`. + /// Returns Err(..) if the neighbor is dead or broken. + pub fn send_next_download_request( + &self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> Result<(), NetError> { + if neighbor_rpc.has_inflight(&self.naddr) { + debug!("Peer {} has an inflight request", &self.naddr); + return Ok(()); + } + if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { + return Err(NetError::PeerNotConnected); + } + + let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { + // no conversation open to this neighbor + neighbor_rpc.add_dead(network, &self.naddr); + return Err(NetError::PeerNotConnected); + }; + + let Some(request) = self.make_next_download_request(peerhost) else { + // treat this downloader as still in-flight since the overall state machine will need + // to keep it around long enough to convert it into a tenure downloader for the highest + // complete tenure. + return Ok(()); + }; + + neighbor_rpc.send_request(network, self.naddr.clone(), request)?; + Ok(()) + } + + /// Handle a received StacksHttpResponse and advance this machine's state + /// If we get the full tenure, return it. + /// + /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure + /// Returns Ok(None) if we're still working, in which case the caller should call + /// `send_next_download_request()` + /// Returns Err(..) on unrecoverable failure to advance state + pub fn handle_next_download_response( + &mut self, + response: StacksHttpResponse, + sortdb: &SortitionDB, + local_sort_tip: &BlockSnapshot, + chainstate: &StacksChainState, + current_reward_sets: &BTreeMap, + ) -> Result>, NetError> { + match &self.state { + NakamotoUnconfirmedDownloadState::GetTenureInfo => { + debug!("Got tenure-info response"); + let remote_tenure_info = response.decode_nakamoto_tenure_info()?; + debug!("Got tenure-info response: {:?}", &remote_tenure_info); + self.try_accept_tenure_info( + sortdb, + local_sort_tip, + chainstate, + remote_tenure_info, + current_reward_sets, + )?; + Ok(None) + } + NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { + debug!("Got tenure start-block response"); + let block = response.decode_nakamoto_block()?; + self.try_accept_unconfirmed_tenure_start_block(block)?; + Ok(None) + } + NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { + debug!("Got unconfirmed tenure blocks response"); + let blocks = response.decode_nakamoto_tenure()?; + let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; + debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); + Ok(accepted_opt) + } + NakamotoUnconfirmedDownloadState::Done => { + return Err(NetError::InvalidState); + } + } + } + + /// Is this machine finished? + pub fn is_done(&self) -> bool { + self.state == NakamotoUnconfirmedDownloadState::Done + } +} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs new file mode 100644 index 0000000000..c96f718d2b --- /dev/null +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs @@ -0,0 +1,867 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::io::{Read, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::time::{Duration, Instant}; + +use rand::seq::SliceRandom; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, +}; +use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; + +use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; +use crate::chainstate::burn::db::sortdb::{ + BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, +}; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::RewardCycleInfo; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, +}; +use crate::core::{ + EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, +}; +use crate::net::api::gettenureinfo::RPCGetTenureInfo; +use crate::net::chat::ConversationP2P; +use crate::net::db::{LocalPeer, PeerDB}; +use crate::net::download::nakamoto::{ + AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, + WantedTenure, +}; +use crate::net::http::HttpRequestContents; +use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; +use crate::net::inv::epoch2x::InvState; +use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; +use crate::net::neighbors::rpc::NeighborRPC; +use crate::net::neighbors::NeighborComms; +use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; +use crate::net::server::HttpPeer; +use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +/// Download states for a unconfirmed tenures. These include the ongoing tenure, as well as the +/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but +/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). +#[derive(Debug, Clone, PartialEq)] +pub enum NakamotoUnconfirmedDownloadState { + /// Getting the tenure tip information + GetTenureInfo, + /// Get the tenure start block for the ongoing tenure. + /// The inner value is tenure-start block ID of the ongoing tenure. + GetTenureStartBlock(StacksBlockId), + /// Receiving unconfirmed tenure blocks. + /// The inner value is the block ID of the next block to fetch. + GetUnconfirmedTenureBlocks(StacksBlockId), + /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block + /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). + Done, +} + +impl fmt::Display for NakamotoUnconfirmedDownloadState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Download state machine for the unconfirmed tenures. It operates in the following steps: +/// +/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip +/// 2. Get the tenure-start block for the unconfirmed chain tip +/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the +/// immediate child of the one obtained in (2) +/// +/// Once this state-machine finishes execution, the tenure-start block is used to construct a +/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. +/// +#[derive(Debug, Clone, PartialEq)] +pub struct NakamotoUnconfirmedTenureDownloader { + /// state of this machine + pub state: NakamotoUnconfirmedDownloadState, + /// Address of who we're asking + pub naddr: NeighborAddress, + /// reward set of the highest confirmed tenure + pub confirmed_signer_keys: Option, + /// reward set of the unconfirmed (ongoing) tenure + pub unconfirmed_signer_keys: Option, + /// Block ID of this node's highest-processed block. + /// We will not download any blocks lower than this, if it's set. + pub highest_processed_block_id: Option, + /// Highest processed block height (which may not need to be loaded) + pub highest_processed_block_height: Option, + + /// Tenure tip info we obtained for this peer + pub tenure_tip: Option, + /// Tenure start block for the ongoing tip. + /// This is also the tenure-end block for the highest-complete tip. + pub unconfirmed_tenure_start_block: Option, + /// Unconfirmed tenure blocks obtained + pub unconfirmed_tenure_blocks: Option>, +} + +impl NakamotoUnconfirmedTenureDownloader { + /// Make a new downloader which will download blocks from the tip back down to the optional + /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). + pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self { + Self { + state: NakamotoUnconfirmedDownloadState::GetTenureInfo, + naddr, + confirmed_signer_keys: None, + unconfirmed_signer_keys: None, + highest_processed_block_id, + highest_processed_block_height: None, + tenure_tip: None, + unconfirmed_tenure_start_block: None, + unconfirmed_tenure_blocks: None, + } + } + + /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is + /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote + /// node). + pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { + self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) + } + + /// Set the highest-processed block. + /// This can be performed by the downloader itself in order to inform ongoing requests for + /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node + /// has already handled. + pub fn set_highest_processed_block( + &mut self, + highest_processed_block_id: StacksBlockId, + highest_processed_block_height: u64, + ) { + self.highest_processed_block_id = Some(highest_processed_block_id); + self.highest_processed_block_height = Some(highest_processed_block_height); + } + + /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. + /// + /// * tenure_tip.consensus_hash + /// This is the consensus hash of the remote node's ongoing tenure. It may not be the + /// sortition tip, e.g. if the tenure spans multiple sortitions. + /// * tenure_tip.tenure_start_block_id + /// This is the first block ID of the ongoing unconfirmed tenure. + /// * tenure_tip.parent_consensus_hash + /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest + /// complete tenure, for which we know the start and end block IDs. + /// * tenure_tip.parent_tenure_start_block_id + /// This is the tenure start block for the highest complete tenure. It should be equal to + /// the winning Stacks block hash of the snapshot for the ongoing tenure. + /// + /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go + /// fetch it again; just get the new unconfirmed blocks. + pub fn try_accept_tenure_info( + &mut self, + sortdb: &SortitionDB, + local_sort_tip: &BlockSnapshot, + chainstate: &StacksChainState, + remote_tenure_tip: RPCGetTenureInfo, + current_reward_sets: &BTreeMap, + ) -> Result<(), NetError> { + if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { + return Err(NetError::InvalidState); + } + if self.tenure_tip.is_some() { + return Err(NetError::InvalidState); + } + + debug!("Got tenure info {:?}", remote_tenure_tip); + debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); + + // authenticate consensus hashes against canonical chain history + let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &remote_tenure_tip.consensus_hash, + )? + .ok_or_else(|| { + debug!( + "No snapshot for tenure {}", + &remote_tenure_tip.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &remote_tenure_tip.parent_consensus_hash, + )? + .ok_or_else(|| { + debug!( + "No snapshot for parent tenure {}", + &remote_tenure_tip.parent_consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + + let ih = sortdb.index_handle(&local_sort_tip.sortition_id); + let ancestor_local_tenure_sn = ih + .get_block_snapshot_by_height(local_tenure_sn.block_height)? + .ok_or_else(|| { + debug!( + "No tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError) + })?; + + if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { + // .consensus_hash is not on the canonical fork + warn!("Unconfirmed tenure consensus hash is not canonical"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.consensus_hash); + return Err(DBError::NotFoundError.into()); + } + let ancestor_parent_local_tenure_sn = ih + .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? + .ok_or_else(|| { + debug!( + "No parent tenure snapshot at burn block height {} off of sortition {} ({})", + local_tenure_sn.block_height, + &local_tenure_sn.sortition_id, + &local_tenure_sn.consensus_hash + ); + NetError::DBError(DBError::NotFoundError.into()) + })?; + + if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { + // .parent_consensus_hash is not on the canonical fork + warn!("Parent unconfirmed tenure consensus hash is not canonical"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); + return Err(DBError::NotFoundError.into()); + } + + // parent tenure sortition must precede the ongoing tenure sortition + if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { + warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; + "peer" => %self.naddr, + "consensus_hash" => %remote_tenure_tip.consensus_hash, + "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); + return Err(NetError::InvalidMessage); + } + + // parent tenure start block ID must be the winning block hash for the ongoing tenure's + // snapshot + if local_tenure_sn.winning_stacks_block_hash.0 + != remote_tenure_tip.parent_tenure_start_block_id.0 + { + debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; + "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, + "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); + return Err(NetError::StaleView); + } + + if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { + // we've synchronized this tenure before, so don't get anymore blocks before it. + let highest_processed_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(highest_processed_block_id)? + .ok_or_else(|| { + debug!("No such Nakamoto block {}", &highest_processed_block_id); + NetError::DBError(DBError::NotFoundError) + })? + .0; + + let highest_processed_block_height = highest_processed_block.header.chain_length; + self.highest_processed_block_height = Some(highest_processed_block_height); + + if &remote_tenure_tip.tip_block_id == highest_processed_block_id + || highest_processed_block_height > remote_tenure_tip.tip_height + { + // nothing to do -- we're at or ahead of the remote peer, so finish up. + // If we don't have the tenure-start block for the confirmed tenure that the remote + // peer claims to have, then the remote peer has sent us invalid data and we should + // treat it as such. + let unconfirmed_tenure_start_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? + .ok_or(NetError::InvalidMessage)? + .0; + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::Done; + } + } + + if self.state == NakamotoUnconfirmedDownloadState::Done { + // only need to remember the tenure tip + self.tenure_tip = Some(remote_tenure_tip); + return Ok(()); + } + + // we're not finished + let tenure_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) + .expect("FATAL: sortition from before system start"); + let parent_tenure_rc = sortdb + .pox_constants + .block_height_to_reward_cycle( + sortdb.first_block_height, + parent_local_tenure_sn.block_height, + ) + .expect("FATAL: sortition from before system start"); + + // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions + let Some(Some(confirmed_reward_set)) = current_reward_sets + .get(&parent_tenure_rc) + .map(|cycle_info| cycle_info.reward_set()) + else { + warn!( + "No signer public keys for confirmed tenure {} (rc {})", + &parent_local_tenure_sn.consensus_hash, parent_tenure_rc + ); + return Err(NetError::InvalidState); + }; + + let Some(Some(unconfirmed_reward_set)) = current_reward_sets + .get(&tenure_rc) + .map(|cycle_info| cycle_info.reward_set()) + else { + warn!( + "No signer public keys for unconfirmed tenure {} (rc {})", + &local_tenure_sn.consensus_hash, tenure_rc + ); + return Err(NetError::InvalidState); + }; + + if chainstate + .nakamoto_blocks_db() + .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? + { + // proceed to get unconfirmed blocks. We already have the tenure-start block. + let unconfirmed_tenure_start_block = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? + .ok_or_else(|| { + debug!( + "No such tenure-start Nakamoto block {}", + &remote_tenure_tip.tenure_start_block_id + ); + NetError::DBError(DBError::NotFoundError) + })? + .0; + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( + remote_tenure_tip.tip_block_id.clone(), + ); + } else { + // get the tenure-start block first + self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( + remote_tenure_tip.tenure_start_block_id.clone(), + ); + } + + debug!( + "Will validate unconfirmed blocks with reward sets in ({},{})", + parent_tenure_rc, tenure_rc + ); + self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); + self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); + self.tenure_tip = Some(remote_tenure_tip); + + Ok(()) + } + + /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. + /// Returns Ok(()) if the unconfirmed tenure start block was valid + /// Returns Err(..) if it was not valid, or if this function was called out of sequence. + pub fn try_accept_unconfirmed_tenure_start_block( + &mut self, + unconfirmed_tenure_start_block: NakamotoBlock, + ) -> Result<(), NetError> { + let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = + &self.state + else { + warn!("Invalid state for this method"; + "state" => %self.state); + return Err(NetError::InvalidState); + }; + let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); + return Err(NetError::InvalidState); + }; + + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); + return Err(NetError::InvalidState); + }; + + // stacker signature has to match the current reward set + if let Err(e) = unconfirmed_tenure_start_block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid tenure-start block: bad signer signature"; + "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // block has to match the expected hash + if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { + warn!("Invalid tenure-start block"; + "tenure_id_start_block" => %tenure_start_block_id, + "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + // furthermore, the block has to match the expected tenure ID + if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { + warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; + "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, + "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); + return Err(NetError::InvalidMessage); + } + + self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( + tenure_tip.tip_block_id.clone(), + ); + Ok(()) + } + + /// Add downloaded unconfirmed tenure blocks. + /// If we have collected all tenure blocks, then return them. + /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the + /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come + /// after the highest-processed block (if set). + /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call + /// `send_next_download_request()` + /// Returns Err(..) on invalid state or invalid block. + pub fn try_accept_unconfirmed_tenure_blocks( + &mut self, + mut tenure_blocks: Vec, + ) -> Result>, NetError> { + let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = + &self.state + else { + return Err(NetError::InvalidState); + }; + + let Some(tenure_tip) = self.tenure_tip.as_ref() else { + warn!("tenure_tip is not set"); + return Err(NetError::InvalidState); + }; + + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + warn!("unconfirmed_signer_keys is not set"); + return Err(NetError::InvalidState); + }; + + if tenure_blocks.is_empty() { + // nothing to do + debug!("No tenure blocks obtained"); + return Ok(None); + } + + // blocks must be contiguous and in order from highest to lowest. + // If there's a tenure-start block, it must be last. + let mut expected_block_id = last_block_id; + let mut finished_download = false; + let mut last_block_index = None; + for (cnt, block) in tenure_blocks.iter().enumerate() { + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- not part of tenure"; + "expected_block_id" => %expected_block_id, + "block_id" => %block.header.block_id()); + return Err(NetError::InvalidMessage); + } + if let Err(e) = block + .header + .verify_signer_signatures(unconfirmed_signer_keys) + { + warn!("Invalid block: bad signer signature"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state, + "error" => %e); + return Err(NetError::InvalidMessage); + } + + // we may or may not need the tenure-start block for the unconfirmed tenure. But if we + // do, make sure it's valid, and it's the last block we receive. + let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { + warn!("Invalid tenure-start block"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + }; + if is_tenure_start { + // this is the tenure-start block, so make sure it matches our /v3/tenure/info + if block.header.block_id() != tenure_tip.tenure_start_block_id { + warn!("Unexpected tenure-start block"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); + return Err(NetError::InvalidMessage); + } + + if cnt.saturating_add(1) != tenure_blocks.len() { + warn!("Invalid tenure stream -- got tenure-start before end of tenure"; + "tenure_id" => %tenure_tip.consensus_hash, + "block.header.block_id" => %block.header.block_id(), + "cnt" => cnt, + "len" => tenure_blocks.len(), + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + finished_download = true; + last_block_index = Some(cnt); + break; + } + + debug!("Got unconfirmed tenure block {}", &block.header.block_id()); + + // NOTE: this field can get updated by the downloader while this state-machine is in + // this state. + if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { + if expected_block_id == highest_processed_block_id { + // got all the blocks we asked for + debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); + finished_download = true; + last_block_index = Some(cnt); + break; + } + } + + // NOTE: this field can get updated by the downloader while this state-machine is in + // this state. + if let Some(highest_processed_block_height) = + self.highest_processed_block_height.as_ref() + { + if &block.header.chain_length <= highest_processed_block_height { + // no need to continue this download + debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); + finished_download = true; + last_block_index = Some(cnt); + break; + } + } + + expected_block_id = &block.header.parent_block_id; + last_block_index = Some(cnt); + } + + // blocks after the last_block_index were not processed, so should be dropped + if let Some(last_block_index) = last_block_index { + tenure_blocks.truncate(last_block_index + 1); + } + + if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { + blocks.append(&mut tenure_blocks); + } else { + self.unconfirmed_tenure_blocks = Some(tenure_blocks); + } + + if finished_download { + // we have all of the unconfirmed tenure blocks that were requested. + // only return those newer than the highest block. + self.state = NakamotoUnconfirmedDownloadState::Done; + let highest_processed_block_height = + *self.highest_processed_block_height.as_ref().unwrap_or(&0); + + debug!("Finished receiving unconfirmed tenure"); + return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { + blocks + .into_iter() + .filter(|block| block.header.chain_length > highest_processed_block_height) + .rev() + .collect() + })); + } + + let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + + // still have more to get + let Some(earliest_block) = blocks.last() else { + // unreachable but be defensive + warn!("Invalid state: no blocks (infallible -- got empty vec)"); + return Err(NetError::InvalidState); + }; + let next_block_id = earliest_block.header.parent_block_id.clone(); + + debug!( + "Will resume fetching unconfirmed tenure blocks starting at {}", + &next_block_id + ); + self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); + Ok(None) + } + + /// Once this machine runs to completion, examine its state to see if we still need to fetch + /// the highest complete tenure. We may not need to, especially if we're just polling for new + /// unconfirmed blocks. + /// + /// Return Ok(true) if we need it still + /// Return Ok(false) if we already have it + /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. + pub fn need_highest_complete_tenure( + &self, + chainstate: &StacksChainState, + ) -> Result { + if self.state != NakamotoUnconfirmedDownloadState::Done { + return Err(NetError::InvalidState); + } + let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() + else { + return Err(NetError::InvalidState); + }; + + // if we've processed the unconfirmed tenure-start block already, then we've necessarily + // downloaded and processed the highest-complete tenure already. + Ok(!NakamotoChainState::has_block_header( + chainstate.db(), + &unconfirmed_tenure_start_block.header.block_id(), + false, + )?) + } + + /// Determine if we can produce a highest-complete tenure request. + /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure + pub fn can_make_highest_complete_tenure_downloader( + &self, + sortdb: &SortitionDB, + ) -> Result { + let Some(tenure_tip) = &self.tenure_tip else { + return Ok(false); + }; + + let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &tenure_tip.parent_consensus_hash, + )? + else { + return Ok(false); + }; + + let Some(tip_sn) = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? + else { + return Ok(false); + }; + + let Some(parent_tenure) = + SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? + else { + return Ok(false); + }; + + let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? + else { + return Ok(false); + }; + + if parent_tenure.epoch_id < StacksEpochId::Epoch30 + || tip_tenure.epoch_id < StacksEpochId::Epoch30 + { + debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; + "start_tenure" => %tenure_tip.parent_consensus_hash, + "end_tenure" => %tenure_tip.consensus_hash, + "start_tenure_epoch" => %parent_tenure.epoch_id, + "end_tenure_epoch" => %tip_tenure.epoch_id + ); + return Ok(false); + } + + Ok(true) + } + + /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the + /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get + /// its tenure-start block. + /// + /// Returns Ok(downloader) on success + /// Returns Err(..) if we call this function out of sequence. + pub fn make_highest_complete_tenure_downloader( + &self, + ) -> Result { + if self.state != NakamotoUnconfirmedDownloadState::Done { + return Err(NetError::InvalidState); + } + let Some(tenure_tip) = &self.tenure_tip else { + return Err(NetError::InvalidState); + }; + let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { + return Err(NetError::InvalidState); + }; + + debug!( + "Create downloader for highest complete tenure {} known by {}", + &tenure_tip.parent_consensus_hash, &self.naddr, + ); + let ntd = NakamotoTenureDownloader::new( + tenure_tip.parent_consensus_hash.clone(), + tenure_tip.parent_tenure_start_block_id.clone(), + tenure_tip.tenure_start_block_id.clone(), + self.naddr.clone(), + confirmed_signer_keys.clone(), + unconfirmed_signer_keys.clone(), + ); + + Ok(ntd) + } + + /// Produce the next HTTP request that, when successfully executed, will advance this state + /// machine. + /// + /// Returns Some(request) if a request must be sent. + /// Returns None if we're done + pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option { + match &self.state { + NakamotoUnconfirmedDownloadState::GetTenureInfo => { + // need to get the tenure tip + return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost)); + } + NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => { + return Some(StacksHttpRequest::new_get_nakamoto_block( + peerhost, + block_id.clone(), + )); + } + NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => { + return Some(StacksHttpRequest::new_get_nakamoto_tenure( + peerhost, + tip_block_id.clone(), + self.highest_processed_block_id.clone(), + )); + } + NakamotoUnconfirmedDownloadState::Done => { + // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed + // tenure downloader using the earliest unconfirmed tenure block. + return None; + } + } + } + + /// Begin the next download request for this state machine. + /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The + /// caller should try this again until it gets one of the other possible return values. It's + /// up to the caller to determine when it's appropriate to convert this state machine into a + /// `NakamotoTenureDownloader`. + /// Returns Err(..) if the neighbor is dead or broken. + pub fn send_next_download_request( + &self, + network: &mut PeerNetwork, + neighbor_rpc: &mut NeighborRPC, + ) -> Result<(), NetError> { + if neighbor_rpc.has_inflight(&self.naddr) { + debug!("Peer {} has an inflight request", &self.naddr); + return Ok(()); + } + if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { + return Err(NetError::PeerNotConnected); + } + + let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { + // no conversation open to this neighbor + neighbor_rpc.add_dead(network, &self.naddr); + return Err(NetError::PeerNotConnected); + }; + + let Some(request) = self.make_next_download_request(peerhost) else { + // treat this downloader as still in-flight since the overall state machine will need + // to keep it around long enough to convert it into a tenure downloader for the highest + // complete tenure. + return Ok(()); + }; + + neighbor_rpc.send_request(network, self.naddr.clone(), request)?; + Ok(()) + } + + /// Handle a received StacksHttpResponse and advance this machine's state + /// If we get the full tenure, return it. + /// + /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure + /// Returns Ok(None) if we're still working, in which case the caller should call + /// `send_next_download_request()` + /// Returns Err(..) on unrecoverable failure to advance state + pub fn handle_next_download_response( + &mut self, + response: StacksHttpResponse, + sortdb: &SortitionDB, + local_sort_tip: &BlockSnapshot, + chainstate: &StacksChainState, + current_reward_sets: &BTreeMap, + ) -> Result>, NetError> { + match &self.state { + NakamotoUnconfirmedDownloadState::GetTenureInfo => { + debug!("Got tenure-info response"); + let remote_tenure_info = response.decode_nakamoto_tenure_info()?; + debug!("Got tenure-info response: {:?}", &remote_tenure_info); + self.try_accept_tenure_info( + sortdb, + local_sort_tip, + chainstate, + remote_tenure_info, + current_reward_sets, + )?; + Ok(None) + } + NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { + debug!("Got tenure start-block response"); + let block = response.decode_nakamoto_block()?; + self.try_accept_unconfirmed_tenure_start_block(block)?; + Ok(None) + } + NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { + debug!("Got unconfirmed tenure blocks response"); + let blocks = response.decode_nakamoto_tenure()?; + let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; + debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); + Ok(accepted_opt) + } + NakamotoUnconfirmedDownloadState::Done => { + return Err(NetError::InvalidState); + } + } + } + + /// Is this machine finished? + pub fn is_done(&self) -> bool { + self.state == NakamotoUnconfirmedDownloadState::Done + } +} From 70afa9fd654eb74a58ea549b5de653eadb6e98d8 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 21 Aug 2024 17:35:02 +0300 Subject: [PATCH 0844/1400] update ignore timeout only on workflow dispatch by team member && update the job name --- .github/workflows/pr-differences-mutants.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index ca2bac5081..d53e2ca661 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -17,13 +17,13 @@ concurrency: cancel-in-progress: true jobs: - check-right-permissions: - name: Check Right Permissions + check-access-permissions: + name: Check Access Permissions runs-on: ubuntu-latest steps: - - name: Check Right Permissions To Trigger This - id: check_right_permissions + - name: Check Access Permissions To Trigger This + id: check_access_permissions uses: stacks-network/actions/team-membership@main with: username: ${{ github.actor }} @@ -31,12 +31,12 @@ jobs: GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} outputs: - ignore_timeout: ${{ steps.check_right_permissions.outputs.is_team_member == 'true' }} + ignore_timeout: ${{ steps.check_access_permissions.outputs.is_team_member == 'true' && github.event_name == 'workflow_dispatch' }} # Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards check-big-packages-and-shards: name: Check Packages and Shards - needs: check-right-permissions + needs: check-access-permissions runs-on: ubuntu-latest @@ -54,7 +54,7 @@ jobs: - id: check_packages_and_shards uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main with: - ignore_timeout: ${{ needs.check-right-permissions.outputs.ignore_timeout }} + ignore_timeout: ${{ needs.check-access-permissions.outputs.ignore_timeout }} # Mutation testing - Execute on PR on small packages that have functions modified (normal run, no shards) pr-differences-mutants-small-normal: From 238367c0504e6077bb5e9b3b5c66c382b7db95f9 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 21 Aug 2024 17:39:48 +0300 Subject: [PATCH 0845/1400] modify files to trigger testing --- stackslib/src/net/download/nakamoto/mod.rs | 12 ++++++------ ...ownloader_copy.rs => tenure_downloader_copy_2.rs} | 0 ..._downloader_opy.rs => tenure_downloader_opy_2.rs} | 0 ...r_set_copy.rs => tenure_downloader_set_copy_2.rs} | 0 ...der_set_opy.rs => tenure_downloader_set_opy_2.rs} | 0 ...py.rs => tenure_downloader_unconfirmed_copy_2.rs} | 0 ...opy.rs => tenure_downloader_unconfirmed_opy_2.rs} | 0 7 files changed, 6 insertions(+), 6 deletions(-) rename stackslib/src/net/download/nakamoto/{tenure_downloader_copy.rs => tenure_downloader_copy_2.rs} (100%) rename stackslib/src/net/download/nakamoto/{tenure_downloader_opy.rs => tenure_downloader_opy_2.rs} (100%) rename stackslib/src/net/download/nakamoto/{tenure_downloader_set_copy.rs => tenure_downloader_set_copy_2.rs} (100%) rename stackslib/src/net/download/nakamoto/{tenure_downloader_set_opy.rs => tenure_downloader_set_opy_2.rs} (100%) rename stackslib/src/net/download/nakamoto/{tenure_downloader_unconfirmed_copy.rs => tenure_downloader_unconfirmed_copy_2.rs} (100%) rename stackslib/src/net/download/nakamoto/{tenure_downloader_unconfirmed_opy.rs => tenure_downloader_unconfirmed_opy_2.rs} (100%) diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index 7643c54ff7..d97eecafe2 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -161,14 +161,14 @@ use crate::util_lib::db::{DBConn, Error as DBError}; mod download_state_machine; mod tenure; mod tenure_downloader; -mod tenure_downloader_copy; -mod tenure_downloader_opy; +mod tenure_downloader_copy_2; +mod tenure_downloader_opy_2; mod tenure_downloader_set; -mod tenure_downloader_set_copy; -mod tenure_downloader_set_opy; +mod tenure_downloader_set_copy_2; +mod tenure_downloader_set_opy_2; mod tenure_downloader_unconfirmed; -mod tenure_downloader_unconfirmed_copy; -mod tenure_downloader_unconfirmed_opy; +mod tenure_downloader_unconfirmed_copy_2; +mod tenure_downloader_unconfirmed_opy_2; pub use crate::net::download::nakamoto::download_state_machine::{ NakamotoDownloadState, NakamotoDownloadStateMachine, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_copy_2.rs similarity index 100% rename from stackslib/src/net/download/nakamoto/tenure_downloader_copy.rs rename to stackslib/src/net/download/nakamoto/tenure_downloader_copy_2.rs diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_opy_2.rs similarity index 100% rename from stackslib/src/net/download/nakamoto/tenure_downloader_opy.rs rename to stackslib/src/net/download/nakamoto/tenure_downloader_opy_2.rs diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy_2.rs similarity index 100% rename from stackslib/src/net/download/nakamoto/tenure_downloader_set_copy.rs rename to stackslib/src/net/download/nakamoto/tenure_downloader_set_copy_2.rs diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy_2.rs similarity index 100% rename from stackslib/src/net/download/nakamoto/tenure_downloader_set_opy.rs rename to stackslib/src/net/download/nakamoto/tenure_downloader_set_opy_2.rs diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy_2.rs similarity index 100% rename from stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy.rs rename to stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy_2.rs diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy_2.rs similarity index 100% rename from stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy.rs rename to stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy_2.rs From 3aed5cb499c90a3234b530062110bffd67d0d1e3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 21 Aug 2024 10:52:24 -0400 Subject: [PATCH 0846/1400] fix: fix compile issues in tests --- stackslib/src/net/stackerdb/tests/sync.rs | 310 ++++++++++++---------- 1 file changed, 173 insertions(+), 137 deletions(-) diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index d1ac5e58be..69bdad93d9 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -258,40 +258,48 @@ fn test_stackerdb_replica_2_neighbors_1_chunk() { test_reconnect(&mut peer_1.network); test_reconnect(&mut peer_2.network); - if let Ok(mut res) = res_1 { + if let Ok(res) = res_1 { check_sync_results(&res); - Relayer::process_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_1 + .relayer + .process_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_1 + .relayer + .process_pushed_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } - if let Ok(mut res) = res_2 { + if let Ok(res) = res_2 { check_sync_results(&res); - Relayer::process_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_2 + .relayer + .process_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_2 + .relayer + .process_pushed_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } let db1 = load_stackerdb(&peer_1, idx_1); @@ -379,7 +387,7 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { let res_1 = peer_1.step_with_ibd(false); let res_2 = peer_2.step_with_ibd(false); - if let Ok(mut res) = res_1 { + if let Ok(res) = res_1 { check_sync_results(&res); for sync_res in res.stacker_db_sync_results.iter() { assert_eq!(sync_res.chunks_to_store.len(), 0); @@ -387,23 +395,28 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { peer_1_stale = true; } } - Relayer::process_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + + peer_1 + .relayer + .process_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_1 + .relayer + .process_pushed_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } - if let Ok(mut res) = res_2 { + if let Ok(res) = res_2 { check_sync_results(&res); for sync_res in res.stacker_db_sync_results.iter() { assert_eq!(sync_res.chunks_to_store.len(), 0); @@ -411,20 +424,24 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { peer_2_stale = true; } } - Relayer::process_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_2 + .relayer + .process_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_2 + .relayer + .process_pushed_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } if peer_1_stale && peer_2_stale { @@ -455,40 +472,48 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { let res_1 = peer_1.step_with_ibd(false); let res_2 = peer_2.step_with_ibd(false); - if let Ok(mut res) = res_1 { + if let Ok(res) = res_1 { check_sync_results(&res); - Relayer::process_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_1 + .relayer + .process_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_1 + .relayer + .process_pushed_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } - if let Ok(mut res) = res_2 { + if let Ok(res) = res_2 { check_sync_results(&res); - Relayer::process_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_2 + .relayer + .process_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_2 + .relayer + .process_pushed_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } let db1 = load_stackerdb(&peer_1, idx_1); @@ -579,40 +604,48 @@ fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port let res_1 = peer_1.step_with_ibd(false); let res_2 = peer_2.step_with_ibd(false); - if let Ok(mut res) = res_1 { + if let Ok(res) = res_1 { check_sync_results(&res); - Relayer::process_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_1 + .relayer + .process_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_1 + .relayer + .process_pushed_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } - if let Ok(mut res) = res_2 { + if let Ok(res) = res_2 { check_sync_results(&res); - Relayer::process_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); + peer_2 + .relayer + .process_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_2 + .relayer + .process_pushed_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } let db1 = load_stackerdb(&peer_1, idx_1); @@ -719,24 +752,27 @@ fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, peers[i].network.stacker_db_configs = peer_db_configs[i].clone(); let res = peers[i].step_with_ibd(false); - if let Ok(mut res) = res { + if let Ok(res) = res { check_sync_results(&res); - let rc_consensus_hash = - peers[i].network.get_chain_view().rc_consensus_hash.clone(); - Relayer::process_stacker_db_chunks( - &mut peers[i].network.stackerdbs, - &peer_db_configs[i], - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peers[i].network.stackerdbs, - &peer_db_configs[i], - &mut res.unhandled_messages, - None, - ) - .unwrap(); + let rc_ch = peers[i].network.get_chain_view().rc_consensus_hash.clone(); + peers[i] + .relayer + .process_stacker_db_chunks( + &rc_ch, + &peer_db_configs[i], + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peers[i] + .relayer + .process_pushed_stacker_db_chunks( + &rc_ch, + &peer_db_configs[i], + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); } } From abf5544f2191e8d86ff3e659c26215295d00107d Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 21 Aug 2024 17:52:53 +0300 Subject: [PATCH 0847/1400] remove extra copied files --- stackslib/src/net/download/nakamoto/mod.rs | 6 - .../nakamoto/tenure_downloader_copy_2.rs | 693 -------------- .../nakamoto/tenure_downloader_opy_2.rs | 693 -------------- .../nakamoto/tenure_downloader_set_copy_2.rs | 660 ------------- .../nakamoto/tenure_downloader_set_opy_2.rs | 660 ------------- .../tenure_downloader_unconfirmed_copy_2.rs | 867 ------------------ .../tenure_downloader_unconfirmed_opy_2.rs | 867 ------------------ 7 files changed, 4446 deletions(-) delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_copy_2.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_opy_2.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_copy_2.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_set_opy_2.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy_2.rs delete mode 100644 stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy_2.rs diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index d97eecafe2..dd440ac110 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -161,14 +161,8 @@ use crate::util_lib::db::{DBConn, Error as DBError}; mod download_state_machine; mod tenure; mod tenure_downloader; -mod tenure_downloader_copy_2; -mod tenure_downloader_opy_2; mod tenure_downloader_set; -mod tenure_downloader_set_copy_2; -mod tenure_downloader_set_opy_2; mod tenure_downloader_unconfirmed; -mod tenure_downloader_unconfirmed_copy_2; -mod tenure_downloader_unconfirmed_opy_2; pub use crate::net::download::nakamoto::download_state_machine::{ NakamotoDownloadState, NakamotoDownloadStateMachine, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_copy_2.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_copy_2.rs deleted file mode 100644 index f7fb970bb6..0000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_copy_2.rs +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for an historic tenure. This is a tenure for which we know the hashes of the -/// start and end block. This includes all tenures except for the two most recent ones. -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoTenureDownloadState { - /// Getting the tenure-start block (the given StacksBlockId is it's block ID). - GetTenureStartBlock(StacksBlockId), - /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not - /// always) handled by the execution of another NakamotoTenureDownloader. The only - /// exceptions are as follows: - /// - /// * if this tenure contains the anchor block, and it's the last tenure in the - /// reward cycle. In this case, the end-block must be directly fetched, since there will be no - /// follow-on NakamotTenureDownloader in the same reward cycle who can provide this. - /// - /// * if this tenure is the highest complete tenure, and we just learned the start-block of the - /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block - /// already known. This step will be skipped because the end-block is already present in the - /// state machine. - /// - /// * if the deadline (second parameter) is exceeded, the state machine transitions to - /// GetTenureEndBlock. - /// - /// The two fields here are: - /// * the block ID of the last block in the tenure (which happens to be the block ID of the - /// start block of the next tenure) - /// * the deadline by which this state machine needs to have obtained the tenure end-block - /// before transitioning to `GetTenureEndBlock`. - WaitForTenureEndBlock(StacksBlockId, Instant), - /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks - /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in - /// which we cannot quickly get the tenure-end block. - /// - /// The field here is the block ID of the tenure end block. - GetTenureEndBlock(StacksBlockId), - /// Receiving tenure blocks. - /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This - /// is because a tenure is fetched in order from highest block to lowest block. - GetTenureBlocks(StacksBlockId), - /// We have gotten all the blocks for this tenure - Done, -} - -pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; - -impl fmt::Display for NakamotoTenureDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs -/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent -/// tenures). -/// -/// This state machine works as follows: -/// -/// 1. Fetch the first block in the given tenure -/// 2. Obtain the last block in the given tenure, via one of the following means: -/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this -/// machine's tenure, and can be copied into this machine. -/// b. This machine is configured to directly fetch the end-block. This only happens if this -/// tenure both contains the anchor block for the next reward cycle and happens to be the last -/// tenure in the current reward cycle. -/// c. This machine is given the end-block on instantiation. This only happens when the machine -/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); -/// in this case, the end-block is the start-block of the ongoing tenure. -/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse -/// order. As blocks are found, their signer signatures will be validated against the signer -/// public keys for this tenure; their hash-chain continuity will be validated against the start -/// and end block hashes; their quantity will be validated against the tenure-change transaction -/// in the end-block. -/// -/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto -/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of -/// whether or not it straddles a reward cycle boundary). -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoTenureDownloader { - /// Consensus hash that identifies this tenure - pub tenure_id_consensus_hash: ConsensusHash, - /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and - /// sortition DB. - pub tenure_start_block_id: StacksBlockId, - /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID - /// for some other tenure). Learned from the inventory state machine and sortition DB. - pub tenure_end_block_id: StacksBlockId, - /// Address of who we're asking for blocks - pub naddr: NeighborAddress, - /// Signer public keys that signed the start-block of this tenure, in reward cycle order - pub start_signer_keys: RewardSet, - /// Signer public keys that signed the end-block of this tenure - pub end_signer_keys: RewardSet, - /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with - /// this state machine. - pub idle: bool, - - /// What state we're in for downloading this tenure - pub state: NakamotoTenureDownloadState, - /// Tenure-start block - pub tenure_start_block: Option, - /// Pre-stored tenure end block (used by the unconfirmed block downloader). - /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once - /// the start-block for the current tenure is downloaded. This is that start-block, which is - /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. - pub tenure_end_block: Option, - /// Tenure-end block header and TenureChange - pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, - /// Tenure blocks - pub tenure_blocks: Option>, -} - -impl NakamotoTenureDownloader { - pub fn new( - tenure_id_consensus_hash: ConsensusHash, - tenure_start_block_id: StacksBlockId, - tenure_end_block_id: StacksBlockId, - naddr: NeighborAddress, - start_signer_keys: RewardSet, - end_signer_keys: RewardSet, - ) -> Self { - debug!( - "Instantiate downloader to {} for tenure {}: {}-{}", - &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, - ); - Self { - tenure_id_consensus_hash, - tenure_start_block_id, - tenure_end_block_id, - naddr, - start_signer_keys, - end_signer_keys, - idle: false, - state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), - tenure_start_block: None, - tenure_end_header: None, - tenure_end_block: None, - tenure_blocks: None, - } - } - - /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed - /// tenure. This supplies the tenure end-block if known in advance. - pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { - self.tenure_end_block = Some(tenure_end_block); - self - } - - /// Is this downloader waiting for the tenure-end block data from some other downloader? Per - /// the struct documentation, this is case 2(a). - pub fn is_waiting(&self) -> bool { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { - return true; - } else { - return false; - } - } - - /// Validate and accept a given tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the start-block is valid. - /// Returns Err(..) if it is not valid. - pub fn try_accept_tenure_start_block( - &mut self, - tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { - // not the right state for this - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if self.tenure_start_block_id != tenure_start_block.header.block_id() { - // not the block we were expecting - warn!("Invalid tenure-start block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_start_block" => %self.tenure_start_block_id, - "tenure_start_block ID" => %tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_start_block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - // signature verification failed - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-start block for tenure {} block={}", - &self.tenure_id_consensus_hash, - &tenure_start_block.block_id() - ); - self.tenure_start_block = Some(tenure_start_block); - - if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { - // tenure_end_header supplied externally - self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); - } else if let Some(tenure_end_block) = self.tenure_end_block.take() { - // we already have the tenure-end block, so immediately proceed to accept it. - debug!( - "Preemptively process tenure-end block {} for tenure {}", - tenure_end_block.block_id(), - &self.tenure_id_consensus_hash - ); - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - tenure_end_block.block_id(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - self.try_accept_tenure_end_block(&tenure_end_block)?; - } else { - // need to get tenure_end_header. By default, assume that another - // NakamotoTenureDownloader will provide this block, and allow the - // NakamotoTenureDownloaderSet instance that manages a collection of these - // state-machines make the call to require this one to fetch the block directly. - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - self.tenure_end_block_id.clone(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - } - Ok(()) - } - - /// Transition this state-machine from waiting for its tenure-end block from another - /// state-machine to directly fetching it. This only needs to happen if the tenure this state - /// machine is downloading contains the PoX anchor block, and it's also the last confirmed - /// tenurein this reward cycle. - /// - /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and - /// runs a set of these machines based on the peers' inventory vectors. But because we don't - /// know if this is the PoX anchor block tenure (or even the last tenure) until we have - /// inventory vectors for this tenure's reward cycle, this state-transition must be driven - /// after this machine's instantiation. - pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state - else { - return Err(NetError::InvalidState); - }; - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - Ok(()) - } - - /// Transition to fetching the tenure-end block directly if waiting has taken too long. - pub fn transition_to_fetch_end_block_on_timeout(&mut self) { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = - self.state - { - if wait_deadline < Instant::now() { - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - } - } - } - - /// Validate and accept a tenure-end block. If accepted, then advance the state. - /// Once accepted, this function extracts the tenure-change transaction and block header from - /// this block (it does not need the entire block). - /// - /// Returns Ok(()) if the block was valid - /// Returns Err(..) if the block was invalid - pub fn try_accept_tenure_end_block( - &mut self, - tenure_end_block: &NakamotoBlock, - ) -> Result<(), NetError> { - if !matches!( - &self.state, - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) - | NakamotoTenureDownloadState::GetTenureEndBlock(_) - ) { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - warn!("Invalid state -- tenure_start_block is not set"); - return Err(NetError::InvalidState); - }; - - if self.tenure_end_block_id != tenure_end_block.header.block_id() { - // not the block we asked for - warn!("Invalid tenure-end block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_end_block" => %self.tenure_end_block_id, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_end_block - .header - .verify_signer_signatures(&self.end_signer_keys) - { - // bad signature - warn!("Invalid tenure-end block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // extract the needful -- need the tenure-change payload (which proves that the tenure-end - // block is the tenure-start block for the next tenure) and the parent block ID (which is - // the next block to download). - let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-end block: failed to validate tenure-start"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - if !valid { - warn!("Invalid tenure-end block: not a well-formed tenure-start block"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - } - - let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { - warn!("Invalid tenure-end block: no tenure-change transaction"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - // tc_payload must point to the tenure-start block's header - if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { - warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; - "start_block_id" => %tenure_start_block.block_id(), - "end_block_id" => %tenure_end_block.block_id(), - "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, - "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-end header for tenure {} block={}; expect {} blocks", - &self.tenure_id_consensus_hash, - &tenure_end_block.block_id(), - tc_payload.previous_tenure_blocks - ); - self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); - self.state = NakamotoTenureDownloadState::GetTenureBlocks( - tenure_end_block.header.parent_block_id.clone(), - ); - Ok(()) - } - - /// Determine how many blocks must be in this tenure. - /// Returns None if we don't have the start and end blocks yet. - pub fn tenure_length(&self) -> Option { - self.tenure_end_header - .as_ref() - .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) - } - - /// Add downloaded tenure blocks to this machine. - /// If we have collected all tenure blocks, then return them and transition to the Done state. - /// - /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in - /// ascending order by height, and will include the tenure-start block but exclude the - /// tenure-end block. - /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to - /// the next block to fetch (stored in self.state) will be updated. - /// Returns Err(..) if the blocks were invalid. - pub fn try_accept_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest - let mut expected_block_id = block_cursor; - let mut count = 0; - for block in tenure_blocks.iter() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - expected_block_id = &block.header.parent_block_id; - count += 1; - if self - .tenure_blocks - .as_ref() - .map(|blocks| blocks.len()) - .unwrap_or(0) - .saturating_add(count) - > self.tenure_length().unwrap_or(0) as usize - { - // there are more blocks downloaded than indicated by the end-blocks tenure-change - // transaction. - warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); - "tenure_id" => %self.tenure_id_consensus_hash, - "count" => %count, - "tenure_length" => self.tenure_length().unwrap_or(0), - "num_blocks" => tenure_blocks.len()); - return Err(NetError::InvalidMessage); - } - } - - if let Some(blocks) = self.tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.tenure_blocks = Some(tenure_blocks); - } - - // did we reach the tenure start block? - let Some(blocks) = self.tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got None)"); - return Err(NetError::InvalidState); - }; - - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no tenure-start block (infallible)"); - return Err(NetError::InvalidState); - }; - - debug!( - "Accepted tenure blocks for tenure {} cursor={} ({})", - &self.tenure_id_consensus_hash, &block_cursor, count - ); - if earliest_block.block_id() != tenure_start_block.block_id() { - // still have more blocks to download - let next_block_id = earliest_block.header.parent_block_id.clone(); - debug!( - "Need more blocks for tenure {} (went from {} to {}, next is {})", - &self.tenure_id_consensus_hash, - &block_cursor, - &earliest_block.block_id(), - &next_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); - return Ok(None); - } - - // finished! - self.state = NakamotoTenureDownloadState::Done; - Ok(self - .tenure_blocks - .take() - .map(|blocks| blocks.into_iter().rev().collect())) - } - - /// Produce the next HTTP request that, when successfully executed, will fetch the data needed - /// to advance this state machine. - /// Not all states require an HTTP request for advanceement. - /// - /// Returns Ok(Some(request)) if a request is needed - /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's - /// state) - /// Returns Err(()) if we're done. - pub fn make_next_download_request( - &self, - peerhost: PeerHost, - ) -> Result, ()> { - let request = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { - debug!("Request tenure-start block {}", &start_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { - // we're waiting for some other downloader's block-fetch to complete - debug!( - "Waiting for tenure-end block {} until {:?}", - &_block_id, _deadline - ); - return Ok(None); - } - NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { - debug!("Request tenure-end block {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) - } - NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { - debug!("Downloading tenure ending at {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) - } - NakamotoTenureDownloadState::Done => { - // nothing more to do - return Err(()); - } - }; - Ok(Some(request)) - } - - /// Begin the next download request for this state machine. The request will be sent to the - /// data URL corresponding to self.naddr. - /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. - /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) - /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to - /// resolve its data URL to a socket address. - pub fn send_next_download_request( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(true); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let request = match self.make_next_download_request(peerhost) { - Ok(Some(request)) => request, - Ok(None) => { - return Ok(true); - } - Err(_) => { - return Ok(false); - } - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - self.idle = false; - Ok(true) - } - - /// Handle a received StacksHttpResponse and advance the state machine. - /// If we get the full tenure's blocks, then return them. - /// Returns Ok(Some([blocks])) if we successfully complete the state machine. - /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done - /// yet. The caller should now call `send_next_download_request()` - /// Returns Err(..) on failure to process the response. - pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - ) -> Result>, NetError> { - let handle_result = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { - debug!( - "Got download response for tenure-start block {}", - &_block_id - ); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e - })?; - self.try_accept_tenure_start_block(block)?; - Ok(None) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { - debug!("Invalid state -- Got download response for WaitForTenureBlock"); - Err(NetError::InvalidState) - } - NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { - debug!("Got download response to tenure-end block {}", &_block_id); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e - })?; - self.try_accept_tenure_end_block(&block)?; - Ok(None) - } - NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { - debug!( - "Got download response for tenure blocks ending at {}", - &_end_block_id - ); - let blocks = response.decode_nakamoto_tenure().map_err(|e| { - warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); - e - })?; - let blocks_opt = self.try_accept_tenure_blocks(blocks)?; - Ok(blocks_opt) - } - NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), - }; - self.idle = true; - handle_result - } - - pub fn is_done(&self) -> bool { - self.state == NakamotoTenureDownloadState::Done - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_opy_2.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_opy_2.rs deleted file mode 100644 index f7fb970bb6..0000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_opy_2.rs +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for an historic tenure. This is a tenure for which we know the hashes of the -/// start and end block. This includes all tenures except for the two most recent ones. -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoTenureDownloadState { - /// Getting the tenure-start block (the given StacksBlockId is it's block ID). - GetTenureStartBlock(StacksBlockId), - /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not - /// always) handled by the execution of another NakamotoTenureDownloader. The only - /// exceptions are as follows: - /// - /// * if this tenure contains the anchor block, and it's the last tenure in the - /// reward cycle. In this case, the end-block must be directly fetched, since there will be no - /// follow-on NakamotTenureDownloader in the same reward cycle who can provide this. - /// - /// * if this tenure is the highest complete tenure, and we just learned the start-block of the - /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block - /// already known. This step will be skipped because the end-block is already present in the - /// state machine. - /// - /// * if the deadline (second parameter) is exceeded, the state machine transitions to - /// GetTenureEndBlock. - /// - /// The two fields here are: - /// * the block ID of the last block in the tenure (which happens to be the block ID of the - /// start block of the next tenure) - /// * the deadline by which this state machine needs to have obtained the tenure end-block - /// before transitioning to `GetTenureEndBlock`. - WaitForTenureEndBlock(StacksBlockId, Instant), - /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks - /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in - /// which we cannot quickly get the tenure-end block. - /// - /// The field here is the block ID of the tenure end block. - GetTenureEndBlock(StacksBlockId), - /// Receiving tenure blocks. - /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This - /// is because a tenure is fetched in order from highest block to lowest block. - GetTenureBlocks(StacksBlockId), - /// We have gotten all the blocks for this tenure - Done, -} - -pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; - -impl fmt::Display for NakamotoTenureDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs -/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent -/// tenures). -/// -/// This state machine works as follows: -/// -/// 1. Fetch the first block in the given tenure -/// 2. Obtain the last block in the given tenure, via one of the following means: -/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this -/// machine's tenure, and can be copied into this machine. -/// b. This machine is configured to directly fetch the end-block. This only happens if this -/// tenure both contains the anchor block for the next reward cycle and happens to be the last -/// tenure in the current reward cycle. -/// c. This machine is given the end-block on instantiation. This only happens when the machine -/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); -/// in this case, the end-block is the start-block of the ongoing tenure. -/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse -/// order. As blocks are found, their signer signatures will be validated against the signer -/// public keys for this tenure; their hash-chain continuity will be validated against the start -/// and end block hashes; their quantity will be validated against the tenure-change transaction -/// in the end-block. -/// -/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto -/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of -/// whether or not it straddles a reward cycle boundary). -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoTenureDownloader { - /// Consensus hash that identifies this tenure - pub tenure_id_consensus_hash: ConsensusHash, - /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and - /// sortition DB. - pub tenure_start_block_id: StacksBlockId, - /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID - /// for some other tenure). Learned from the inventory state machine and sortition DB. - pub tenure_end_block_id: StacksBlockId, - /// Address of who we're asking for blocks - pub naddr: NeighborAddress, - /// Signer public keys that signed the start-block of this tenure, in reward cycle order - pub start_signer_keys: RewardSet, - /// Signer public keys that signed the end-block of this tenure - pub end_signer_keys: RewardSet, - /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with - /// this state machine. - pub idle: bool, - - /// What state we're in for downloading this tenure - pub state: NakamotoTenureDownloadState, - /// Tenure-start block - pub tenure_start_block: Option, - /// Pre-stored tenure end block (used by the unconfirmed block downloader). - /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once - /// the start-block for the current tenure is downloaded. This is that start-block, which is - /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. - pub tenure_end_block: Option, - /// Tenure-end block header and TenureChange - pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, - /// Tenure blocks - pub tenure_blocks: Option>, -} - -impl NakamotoTenureDownloader { - pub fn new( - tenure_id_consensus_hash: ConsensusHash, - tenure_start_block_id: StacksBlockId, - tenure_end_block_id: StacksBlockId, - naddr: NeighborAddress, - start_signer_keys: RewardSet, - end_signer_keys: RewardSet, - ) -> Self { - debug!( - "Instantiate downloader to {} for tenure {}: {}-{}", - &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, - ); - Self { - tenure_id_consensus_hash, - tenure_start_block_id, - tenure_end_block_id, - naddr, - start_signer_keys, - end_signer_keys, - idle: false, - state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), - tenure_start_block: None, - tenure_end_header: None, - tenure_end_block: None, - tenure_blocks: None, - } - } - - /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed - /// tenure. This supplies the tenure end-block if known in advance. - pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { - self.tenure_end_block = Some(tenure_end_block); - self - } - - /// Is this downloader waiting for the tenure-end block data from some other downloader? Per - /// the struct documentation, this is case 2(a). - pub fn is_waiting(&self) -> bool { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { - return true; - } else { - return false; - } - } - - /// Validate and accept a given tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the start-block is valid. - /// Returns Err(..) if it is not valid. - pub fn try_accept_tenure_start_block( - &mut self, - tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { - // not the right state for this - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if self.tenure_start_block_id != tenure_start_block.header.block_id() { - // not the block we were expecting - warn!("Invalid tenure-start block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_start_block" => %self.tenure_start_block_id, - "tenure_start_block ID" => %tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_start_block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - // signature verification failed - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-start block for tenure {} block={}", - &self.tenure_id_consensus_hash, - &tenure_start_block.block_id() - ); - self.tenure_start_block = Some(tenure_start_block); - - if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { - // tenure_end_header supplied externally - self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); - } else if let Some(tenure_end_block) = self.tenure_end_block.take() { - // we already have the tenure-end block, so immediately proceed to accept it. - debug!( - "Preemptively process tenure-end block {} for tenure {}", - tenure_end_block.block_id(), - &self.tenure_id_consensus_hash - ); - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - tenure_end_block.block_id(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - self.try_accept_tenure_end_block(&tenure_end_block)?; - } else { - // need to get tenure_end_header. By default, assume that another - // NakamotoTenureDownloader will provide this block, and allow the - // NakamotoTenureDownloaderSet instance that manages a collection of these - // state-machines make the call to require this one to fetch the block directly. - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - self.tenure_end_block_id.clone(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - } - Ok(()) - } - - /// Transition this state-machine from waiting for its tenure-end block from another - /// state-machine to directly fetching it. This only needs to happen if the tenure this state - /// machine is downloading contains the PoX anchor block, and it's also the last confirmed - /// tenurein this reward cycle. - /// - /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and - /// runs a set of these machines based on the peers' inventory vectors. But because we don't - /// know if this is the PoX anchor block tenure (or even the last tenure) until we have - /// inventory vectors for this tenure's reward cycle, this state-transition must be driven - /// after this machine's instantiation. - pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state - else { - return Err(NetError::InvalidState); - }; - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - Ok(()) - } - - /// Transition to fetching the tenure-end block directly if waiting has taken too long. - pub fn transition_to_fetch_end_block_on_timeout(&mut self) { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = - self.state - { - if wait_deadline < Instant::now() { - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - } - } - } - - /// Validate and accept a tenure-end block. If accepted, then advance the state. - /// Once accepted, this function extracts the tenure-change transaction and block header from - /// this block (it does not need the entire block). - /// - /// Returns Ok(()) if the block was valid - /// Returns Err(..) if the block was invalid - pub fn try_accept_tenure_end_block( - &mut self, - tenure_end_block: &NakamotoBlock, - ) -> Result<(), NetError> { - if !matches!( - &self.state, - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) - | NakamotoTenureDownloadState::GetTenureEndBlock(_) - ) { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - warn!("Invalid state -- tenure_start_block is not set"); - return Err(NetError::InvalidState); - }; - - if self.tenure_end_block_id != tenure_end_block.header.block_id() { - // not the block we asked for - warn!("Invalid tenure-end block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_end_block" => %self.tenure_end_block_id, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = tenure_end_block - .header - .verify_signer_signatures(&self.end_signer_keys) - { - // bad signature - warn!("Invalid tenure-end block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // extract the needful -- need the tenure-change payload (which proves that the tenure-end - // block is the tenure-start block for the next tenure) and the parent block ID (which is - // the next block to download). - let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-end block: failed to validate tenure-start"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - if !valid { - warn!("Invalid tenure-end block: not a well-formed tenure-start block"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - } - - let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { - warn!("Invalid tenure-end block: no tenure-change transaction"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - // tc_payload must point to the tenure-start block's header - if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { - warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; - "start_block_id" => %tenure_start_block.block_id(), - "end_block_id" => %tenure_end_block.block_id(), - "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, - "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-end header for tenure {} block={}; expect {} blocks", - &self.tenure_id_consensus_hash, - &tenure_end_block.block_id(), - tc_payload.previous_tenure_blocks - ); - self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); - self.state = NakamotoTenureDownloadState::GetTenureBlocks( - tenure_end_block.header.parent_block_id.clone(), - ); - Ok(()) - } - - /// Determine how many blocks must be in this tenure. - /// Returns None if we don't have the start and end blocks yet. - pub fn tenure_length(&self) -> Option { - self.tenure_end_header - .as_ref() - .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) - } - - /// Add downloaded tenure blocks to this machine. - /// If we have collected all tenure blocks, then return them and transition to the Done state. - /// - /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in - /// ascending order by height, and will include the tenure-start block but exclude the - /// tenure-end block. - /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to - /// the next block to fetch (stored in self.state) will be updated. - /// Returns Err(..) if the blocks were invalid. - pub fn try_accept_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest - let mut expected_block_id = block_cursor; - let mut count = 0; - for block in tenure_blocks.iter() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if let Err(e) = block - .header - .verify_signer_signatures(&self.start_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - expected_block_id = &block.header.parent_block_id; - count += 1; - if self - .tenure_blocks - .as_ref() - .map(|blocks| blocks.len()) - .unwrap_or(0) - .saturating_add(count) - > self.tenure_length().unwrap_or(0) as usize - { - // there are more blocks downloaded than indicated by the end-blocks tenure-change - // transaction. - warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); - "tenure_id" => %self.tenure_id_consensus_hash, - "count" => %count, - "tenure_length" => self.tenure_length().unwrap_or(0), - "num_blocks" => tenure_blocks.len()); - return Err(NetError::InvalidMessage); - } - } - - if let Some(blocks) = self.tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.tenure_blocks = Some(tenure_blocks); - } - - // did we reach the tenure start block? - let Some(blocks) = self.tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got None)"); - return Err(NetError::InvalidState); - }; - - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no tenure-start block (infallible)"); - return Err(NetError::InvalidState); - }; - - debug!( - "Accepted tenure blocks for tenure {} cursor={} ({})", - &self.tenure_id_consensus_hash, &block_cursor, count - ); - if earliest_block.block_id() != tenure_start_block.block_id() { - // still have more blocks to download - let next_block_id = earliest_block.header.parent_block_id.clone(); - debug!( - "Need more blocks for tenure {} (went from {} to {}, next is {})", - &self.tenure_id_consensus_hash, - &block_cursor, - &earliest_block.block_id(), - &next_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); - return Ok(None); - } - - // finished! - self.state = NakamotoTenureDownloadState::Done; - Ok(self - .tenure_blocks - .take() - .map(|blocks| blocks.into_iter().rev().collect())) - } - - /// Produce the next HTTP request that, when successfully executed, will fetch the data needed - /// to advance this state machine. - /// Not all states require an HTTP request for advanceement. - /// - /// Returns Ok(Some(request)) if a request is needed - /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's - /// state) - /// Returns Err(()) if we're done. - pub fn make_next_download_request( - &self, - peerhost: PeerHost, - ) -> Result, ()> { - let request = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { - debug!("Request tenure-start block {}", &start_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { - // we're waiting for some other downloader's block-fetch to complete - debug!( - "Waiting for tenure-end block {} until {:?}", - &_block_id, _deadline - ); - return Ok(None); - } - NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { - debug!("Request tenure-end block {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) - } - NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { - debug!("Downloading tenure ending at {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) - } - NakamotoTenureDownloadState::Done => { - // nothing more to do - return Err(()); - } - }; - Ok(Some(request)) - } - - /// Begin the next download request for this state machine. The request will be sent to the - /// data URL corresponding to self.naddr. - /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. - /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) - /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to - /// resolve its data URL to a socket address. - pub fn send_next_download_request( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(true); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let request = match self.make_next_download_request(peerhost) { - Ok(Some(request)) => request, - Ok(None) => { - return Ok(true); - } - Err(_) => { - return Ok(false); - } - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - self.idle = false; - Ok(true) - } - - /// Handle a received StacksHttpResponse and advance the state machine. - /// If we get the full tenure's blocks, then return them. - /// Returns Ok(Some([blocks])) if we successfully complete the state machine. - /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done - /// yet. The caller should now call `send_next_download_request()` - /// Returns Err(..) on failure to process the response. - pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - ) -> Result>, NetError> { - let handle_result = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { - debug!( - "Got download response for tenure-start block {}", - &_block_id - ); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e - })?; - self.try_accept_tenure_start_block(block)?; - Ok(None) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { - debug!("Invalid state -- Got download response for WaitForTenureBlock"); - Err(NetError::InvalidState) - } - NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { - debug!("Got download response to tenure-end block {}", &_block_id); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e - })?; - self.try_accept_tenure_end_block(&block)?; - Ok(None) - } - NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { - debug!( - "Got download response for tenure blocks ending at {}", - &_end_block_id - ); - let blocks = response.decode_nakamoto_tenure().map_err(|e| { - warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); - e - })?; - let blocks_opt = self.try_accept_tenure_blocks(blocks)?; - Ok(blocks_opt) - } - NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), - }; - self.idle = true; - handle_result - } - - pub fn is_done(&self) -> bool { - self.state == NakamotoTenureDownloadState::Done - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy_2.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy_2.rs deleted file mode 100644 index 28a40e7eb5..0000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set_copy_2.rs +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, - NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// A set of confirmed downloader state machines assigned to one or more neighbors. The block -/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure -/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer -/// connections to downloader state machines, such that each peer is assigned to at most one -/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at -/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine -/// can make progress even if there is only one available peer (in which case, that peer will get -/// scheduled across multiple machines to drive their progress in the right sequence such that -/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). -pub struct NakamotoTenureDownloaderSet { - /// A list of instantiated downloaders that are in progress - pub(crate) downloaders: Vec>, - /// An assignment of peers to downloader machines in the `downloaders` list. - pub(crate) peers: HashMap, - /// The set of tenures that have been successfully downloaded (but possibly not yet stored or - /// processed) - pub(crate) completed_tenures: HashSet, -} - -impl NakamotoTenureDownloaderSet { - pub fn new() -> Self { - Self { - downloaders: vec![], - peers: HashMap::new(), - completed_tenures: HashSet::new(), - } - } - - /// Assign the given peer to the given downloader state machine. Allocate a slot for it if - /// needed. - fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { - debug!( - "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, &naddr - ); - if let Some(idx) = self.peers.get(&naddr) { - self.downloaders[*idx] = Some(downloader); - } else { - self.downloaders.push(Some(downloader)); - self.peers.insert(naddr, self.downloaders.len() - 1); - } - } - - /// Does the given neighbor have an assigned downloader state machine? - pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { - let Some(idx) = self.peers.get(naddr) else { - return false; - }; - let Some(downloader_opt) = self.downloaders.get(*idx) else { - return false; - }; - downloader_opt.is_some() - } - - /// Drop the downloader associated with the given neighbor, if any. - pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { - let Some(index) = self.peers.remove(naddr) else { - return; - }; - self.downloaders[index] = None; - } - - /// How many downloaders are there? - pub fn num_downloaders(&self) -> usize { - self.downloaders - .iter() - .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) - } - - /// How many downloaders are there, which are scheduled? - pub fn num_scheduled_downloaders(&self) -> usize { - let mut cnt = 0; - for (_, idx) in self.peers.iter() { - if let Some(Some(_)) = self.downloaders.get(*idx) { - cnt += 1; - } - } - cnt - } - - /// Add a sequence of (address, downloader) pairs to this downloader set. - pub(crate) fn add_downloaders( - &mut self, - iter: impl IntoIterator, - ) { - for (naddr, downloader) in iter { - if self.has_downloader(&naddr) { - debug!("Already have downloader for {}", &naddr); - continue; - } - self.add_downloader(naddr, downloader); - } - } - - /// Count up the number of in-flight messages, based on the states of each instantiated - /// downloader. - pub fn inflight(&self) -> usize { - let mut cnt = 0; - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.is_done() { - continue; - } - cnt += 1; - } - cnt - } - - /// Determine whether or not there exists a downloader for the given tenure, identified by its - /// consensus hash. - pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { - self.downloaders - .iter() - .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) - .is_some() - } - - /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. - pub fn is_empty(&self) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - continue; - } - debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); - return false; - } - true - } - - /// Try to resume processing a download state machine with a given peer. Since a peer is - /// detached from the machine after a single RPC call, this call is needed to re-attach it to a - /// (potentially different, unblocked) machine for the next RPC call to this peer. - /// - /// Returns true if the peer gets scheduled. - /// Returns false if not. - pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { - debug!("Try resume {}", &naddr); - if let Some(idx) = self.peers.get(&naddr) { - let Some(Some(_downloader)) = self.downloaders.get(*idx) else { - return false; - }; - - debug!( - "Peer {} already bound to downloader for {}", - &naddr, &_downloader.tenure_id_consensus_hash - ); - return true; - } - for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { - let Some(downloader) = downloader_opt else { - continue; - }; - if !downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.naddr != naddr { - continue; - } - debug!( - "Assign peer {} to work on downloader for {} in state {}", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - self.peers.insert(naddr, i); - return true; - } - return false; - } - - /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to - /// blocked downloaders. - pub fn clear_available_peers(&mut self) { - let mut idled: Vec = vec![]; - for (naddr, i) in self.peers.iter() { - let Some(downloader_opt) = self.downloaders.get(*i) else { - // should be unreachable - idled.push(naddr.clone()); - continue; - }; - let Some(downloader) = downloader_opt else { - debug!("Remove peer {} for null download {}", &naddr, i); - idled.push(naddr.clone()); - continue; - }; - if downloader.idle || downloader.is_waiting() { - debug!( - "Remove idled peer {} for tenure download {}", - &naddr, &downloader.tenure_id_consensus_hash - ); - idled.push(naddr.clone()); - } - } - for naddr in idled.into_iter() { - self.peers.remove(&naddr); - } - } - - /// Clear out downloaders (but not their peers) that have finished. The caller should follow - /// this up with a call to `clear_available_peers()`. - pub fn clear_finished_downloaders(&mut self) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - *downloader_opt = None; - } - } - } - - /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These - /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. - pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap { - let mut ret = HashMap::new(); - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - let Some(block) = downloader.tenure_start_block.as_ref() else { - continue; - }; - ret.insert(block.block_id(), block.clone()); - } - ret - } - - /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their - /// tenure-end blocks. - /// Return a list of peers driving downloaders with failing `tenure_start_blocks` - pub(crate) fn handle_tenure_end_blocks( - &mut self, - tenure_start_blocks: &HashMap, - ) -> Vec { - debug!( - "handle tenure-end blocks: {:?}", - &tenure_start_blocks.keys().collect::>() - ); - let mut dead = vec![]; - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = - &downloader.state - else { - continue; - }; - let Some(end_block) = tenure_start_blocks.get(end_block_id) else { - continue; - }; - if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { - warn!( - "Failed to accept tenure end-block {} for tenure {}: {:?}", - &end_block.block_id(), - &downloader.tenure_id_consensus_hash, - &e - ); - dead.push(downloader.naddr.clone()); - } - } - dead - } - - /// Does there exist a downloader (possibly unscheduled) for the given tenure? - pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if &downloader.tenure_id_consensus_hash == tenure_id { - debug!( - "Have downloader for tenure {} already (idle={}, waiting={}, state={})", - tenure_id, - downloader.idle, - downloader.is_waiting(), - &downloader.state - ); - return true; - } - } - false - } - - /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor - /// block, we need to go and directly fetch its end block instead of waiting for another - /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method - /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. - pub(crate) fn try_transition_fetch_tenure_end_blocks( - &mut self, - tenure_block_ids: &HashMap, - ) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - downloader.transition_to_fetch_end_block_on_timeout(); - } - - // find tenures in which we need to fetch the tenure-end block directly. - let mut last_available_tenures: HashSet = HashSet::new(); - for (_, all_available) in tenure_block_ids.iter() { - for (_, available) in all_available.iter() { - if available.fetch_end_block { - last_available_tenures.insert(available.end_block_id.clone()); - } - } - } - - // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to - // fetching - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - if !downloader.idle { - continue; - } - if !downloader.is_waiting() { - continue; - } - if !last_available_tenures.contains(&downloader.tenure_end_block_id) { - continue; - } - debug!( - "Transition downloader for {} from waiting to fetching", - &downloader.tenure_id_consensus_hash - ); - if let Err(e) = downloader.transition_to_fetch_end_block() { - warn!( - "Downloader for {} failed to transition to fetch end block: {:?}", - &downloader.tenure_id_consensus_hash, &e - ); - } - } - } - - /// Create a given number of downloads from a schedule and availability set. - /// Removes items from the schedule, and neighbors from the availability set. - /// A neighbor will be issued at most one request. - pub(crate) fn make_tenure_downloaders( - &mut self, - schedule: &mut VecDeque, - available: &mut HashMap>, - tenure_block_ids: &HashMap, - count: usize, - current_reward_cycles: &BTreeMap, - ) { - debug!("make_tenure_downloaders"; - "schedule" => ?schedule, - "available" => ?available, - "tenure_block_ids" => ?tenure_block_ids, - "inflight" => %self.inflight(), - "count" => count, - "running" => self.num_downloaders(), - "scheduled" => self.num_scheduled_downloaders()); - - self.clear_finished_downloaders(); - self.clear_available_peers(); - self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); - while self.inflight() < count { - let Some(ch) = schedule.front() else { - break; - }; - if self.completed_tenures.contains(&ch) { - debug!("Already successfully downloaded tenure {}", &ch); - schedule.pop_front(); - continue; - } - let Some(neighbors) = available.get_mut(ch) else { - // not found on any neighbors, so stop trying this tenure - debug!("No neighbors have tenure {}", ch); - schedule.pop_front(); - continue; - }; - if neighbors.is_empty() { - // no more neighbors to try - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - } - let Some(naddr) = neighbors.pop() else { - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - }; - if self.try_resume_peer(naddr.clone()) { - continue; - }; - if self.has_downloader_for_tenure(&ch) { - schedule.pop_front(); - continue; - } - - let Some(available_tenures) = tenure_block_ids.get(&naddr) else { - // this peer doesn't have any known tenures, so try the others - debug!("No tenures available from {}", &naddr); - continue; - }; - let Some(tenure_info) = available_tenures.get(ch) else { - // this peer does not have a tenure start/end block for this tenure, so try the - // others. - debug!("Neighbor {} does not serve tenure {}", &naddr, ch); - continue; - }; - let Some(Some(start_reward_set)) = current_reward_cycles - .get(&tenure_info.start_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", - tenure_info.start_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - let Some(Some(end_reward_set)) = current_reward_cycles - .get(&tenure_info.end_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", - tenure_info.end_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - - debug!( - "Download tenure {} (start={}, end={}) (rc {},{})", - &ch, - &tenure_info.start_block_id, - &tenure_info.end_block_id, - tenure_info.start_reward_cycle, - tenure_info.end_reward_cycle - ); - let tenure_download = NakamotoTenureDownloader::new( - ch.clone(), - tenure_info.start_block_id.clone(), - tenure_info.end_block_id.clone(), - naddr.clone(), - start_reward_set.clone(), - end_reward_set.clone(), - ); - - debug!("Request tenure {} from neighbor {}", ch, &naddr); - self.add_downloader(naddr, tenure_download); - schedule.pop_front(); - } - } - - /// Run all confirmed downloaders. - /// * Identify neighbors for which we do not have an inflight request - /// * Get each such neighbor's downloader, and generate its next HTTP reqeust. Send that - /// request to the neighbor and begin driving the underlying socket I/O. - /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance - /// its state. - /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. - /// - /// Returns the set of downloaded blocks obtained for completed downloaders. These will be - /// full confirmed tenures. - pub fn run( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> HashMap> { - let addrs: Vec<_> = self.peers.keys().cloned().collect(); - let mut finished = vec![]; - let mut finished_tenures = vec![]; - let mut new_blocks = HashMap::new(); - - // send requests - for (naddr, index) in self.peers.iter() { - if neighbor_rpc.has_inflight(&naddr) { - debug!("Peer {} has an inflight request", &naddr); - continue; - } - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - if downloader.is_done() { - debug!("Downloader for {} is done", &naddr); - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - debug!( - "Send request to {} for tenure {} (state {})", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - debug!("Downloader for {} failed; this peer is dead", &naddr); - neighbor_rpc.add_dead(network, naddr); - continue; - }; - if !sent { - // this downloader is dead or broken - finished.push(naddr.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(&naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - // handle responses - for (naddr, response) in neighbor_rpc.collect_replies(network) { - let Some(index) = self.peers.get(&naddr) else { - debug!("No downloader for {}", &naddr); - continue; - }; - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - debug!("Got response from {}", &naddr); - - let Ok(blocks_opt) = downloader - .handle_next_download_response(response) - .map_err(|e| { - debug!("Failed to handle response from {}: {:?}", &naddr, &e); - e - }) - else { - debug!("Failed to handle download response from {}", &naddr); - neighbor_rpc.add_dead(network, &naddr); - continue; - }; - - let Some(blocks) = blocks_opt else { - continue; - }; - - debug!( - "Got {} blocks for tenure {}", - blocks.len(), - &downloader.tenure_id_consensus_hash - ); - new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); - if downloader.is_done() { - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - new_blocks - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy_2.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy_2.rs deleted file mode 100644 index 28a40e7eb5..0000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set_opy_2.rs +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::{PoxAnchorBlockStatus, RewardCycleInfo}; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, - NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// A set of confirmed downloader state machines assigned to one or more neighbors. The block -/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure -/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer -/// connections to downloader state machines, such that each peer is assigned to at most one -/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at -/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine -/// can make progress even if there is only one available peer (in which case, that peer will get -/// scheduled across multiple machines to drive their progress in the right sequence such that -/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). -pub struct NakamotoTenureDownloaderSet { - /// A list of instantiated downloaders that are in progress - pub(crate) downloaders: Vec>, - /// An assignment of peers to downloader machines in the `downloaders` list. - pub(crate) peers: HashMap, - /// The set of tenures that have been successfully downloaded (but possibly not yet stored or - /// processed) - pub(crate) completed_tenures: HashSet, -} - -impl NakamotoTenureDownloaderSet { - pub fn new() -> Self { - Self { - downloaders: vec![], - peers: HashMap::new(), - completed_tenures: HashSet::new(), - } - } - - /// Assign the given peer to the given downloader state machine. Allocate a slot for it if - /// needed. - fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { - debug!( - "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, &naddr - ); - if let Some(idx) = self.peers.get(&naddr) { - self.downloaders[*idx] = Some(downloader); - } else { - self.downloaders.push(Some(downloader)); - self.peers.insert(naddr, self.downloaders.len() - 1); - } - } - - /// Does the given neighbor have an assigned downloader state machine? - pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { - let Some(idx) = self.peers.get(naddr) else { - return false; - }; - let Some(downloader_opt) = self.downloaders.get(*idx) else { - return false; - }; - downloader_opt.is_some() - } - - /// Drop the downloader associated with the given neighbor, if any. - pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { - let Some(index) = self.peers.remove(naddr) else { - return; - }; - self.downloaders[index] = None; - } - - /// How many downloaders are there? - pub fn num_downloaders(&self) -> usize { - self.downloaders - .iter() - .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) - } - - /// How many downloaders are there, which are scheduled? - pub fn num_scheduled_downloaders(&self) -> usize { - let mut cnt = 0; - for (_, idx) in self.peers.iter() { - if let Some(Some(_)) = self.downloaders.get(*idx) { - cnt += 1; - } - } - cnt - } - - /// Add a sequence of (address, downloader) pairs to this downloader set. - pub(crate) fn add_downloaders( - &mut self, - iter: impl IntoIterator, - ) { - for (naddr, downloader) in iter { - if self.has_downloader(&naddr) { - debug!("Already have downloader for {}", &naddr); - continue; - } - self.add_downloader(naddr, downloader); - } - } - - /// Count up the number of in-flight messages, based on the states of each instantiated - /// downloader. - pub fn inflight(&self) -> usize { - let mut cnt = 0; - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.is_done() { - continue; - } - cnt += 1; - } - cnt - } - - /// Determine whether or not there exists a downloader for the given tenure, identified by its - /// consensus hash. - pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { - self.downloaders - .iter() - .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) - .is_some() - } - - /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. - pub fn is_empty(&self) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - continue; - } - debug!("TenureDownloadSet::is_empty(): have downloader for tenure {:?} assigned to {} in state {}", &downloader.tenure_id_consensus_hash, &downloader.naddr, &downloader.state); - return false; - } - true - } - - /// Try to resume processing a download state machine with a given peer. Since a peer is - /// detached from the machine after a single RPC call, this call is needed to re-attach it to a - /// (potentially different, unblocked) machine for the next RPC call to this peer. - /// - /// Returns true if the peer gets scheduled. - /// Returns false if not. - pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { - debug!("Try resume {}", &naddr); - if let Some(idx) = self.peers.get(&naddr) { - let Some(Some(_downloader)) = self.downloaders.get(*idx) else { - return false; - }; - - debug!( - "Peer {} already bound to downloader for {}", - &naddr, &_downloader.tenure_id_consensus_hash - ); - return true; - } - for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { - let Some(downloader) = downloader_opt else { - continue; - }; - if !downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.naddr != naddr { - continue; - } - debug!( - "Assign peer {} to work on downloader for {} in state {}", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - self.peers.insert(naddr, i); - return true; - } - return false; - } - - /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to - /// blocked downloaders. - pub fn clear_available_peers(&mut self) { - let mut idled: Vec = vec![]; - for (naddr, i) in self.peers.iter() { - let Some(downloader_opt) = self.downloaders.get(*i) else { - // should be unreachable - idled.push(naddr.clone()); - continue; - }; - let Some(downloader) = downloader_opt else { - debug!("Remove peer {} for null download {}", &naddr, i); - idled.push(naddr.clone()); - continue; - }; - if downloader.idle || downloader.is_waiting() { - debug!( - "Remove idled peer {} for tenure download {}", - &naddr, &downloader.tenure_id_consensus_hash - ); - idled.push(naddr.clone()); - } - } - for naddr in idled.into_iter() { - self.peers.remove(&naddr); - } - } - - /// Clear out downloaders (but not their peers) that have finished. The caller should follow - /// this up with a call to `clear_available_peers()`. - pub fn clear_finished_downloaders(&mut self) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - *downloader_opt = None; - } - } - } - - /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These - /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. - pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap { - let mut ret = HashMap::new(); - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - let Some(block) = downloader.tenure_start_block.as_ref() else { - continue; - }; - ret.insert(block.block_id(), block.clone()); - } - ret - } - - /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their - /// tenure-end blocks. - /// Return a list of peers driving downloaders with failing `tenure_start_blocks` - pub(crate) fn handle_tenure_end_blocks( - &mut self, - tenure_start_blocks: &HashMap, - ) -> Vec { - debug!( - "handle tenure-end blocks: {:?}", - &tenure_start_blocks.keys().collect::>() - ); - let mut dead = vec![]; - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = - &downloader.state - else { - continue; - }; - let Some(end_block) = tenure_start_blocks.get(end_block_id) else { - continue; - }; - if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { - warn!( - "Failed to accept tenure end-block {} for tenure {}: {:?}", - &end_block.block_id(), - &downloader.tenure_id_consensus_hash, - &e - ); - dead.push(downloader.naddr.clone()); - } - } - dead - } - - /// Does there exist a downloader (possibly unscheduled) for the given tenure? - pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if &downloader.tenure_id_consensus_hash == tenure_id { - debug!( - "Have downloader for tenure {} already (idle={}, waiting={}, state={})", - tenure_id, - downloader.idle, - downloader.is_waiting(), - &downloader.state - ); - return true; - } - } - false - } - - /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor - /// block, we need to go and directly fetch its end block instead of waiting for another - /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method - /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. - pub(crate) fn try_transition_fetch_tenure_end_blocks( - &mut self, - tenure_block_ids: &HashMap, - ) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - downloader.transition_to_fetch_end_block_on_timeout(); - } - - // find tenures in which we need to fetch the tenure-end block directly. - let mut last_available_tenures: HashSet = HashSet::new(); - for (_, all_available) in tenure_block_ids.iter() { - for (_, available) in all_available.iter() { - if available.fetch_end_block { - last_available_tenures.insert(available.end_block_id.clone()); - } - } - } - - // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to - // fetching - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - if !downloader.idle { - continue; - } - if !downloader.is_waiting() { - continue; - } - if !last_available_tenures.contains(&downloader.tenure_end_block_id) { - continue; - } - debug!( - "Transition downloader for {} from waiting to fetching", - &downloader.tenure_id_consensus_hash - ); - if let Err(e) = downloader.transition_to_fetch_end_block() { - warn!( - "Downloader for {} failed to transition to fetch end block: {:?}", - &downloader.tenure_id_consensus_hash, &e - ); - } - } - } - - /// Create a given number of downloads from a schedule and availability set. - /// Removes items from the schedule, and neighbors from the availability set. - /// A neighbor will be issued at most one request. - pub(crate) fn make_tenure_downloaders( - &mut self, - schedule: &mut VecDeque, - available: &mut HashMap>, - tenure_block_ids: &HashMap, - count: usize, - current_reward_cycles: &BTreeMap, - ) { - debug!("make_tenure_downloaders"; - "schedule" => ?schedule, - "available" => ?available, - "tenure_block_ids" => ?tenure_block_ids, - "inflight" => %self.inflight(), - "count" => count, - "running" => self.num_downloaders(), - "scheduled" => self.num_scheduled_downloaders()); - - self.clear_finished_downloaders(); - self.clear_available_peers(); - self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); - while self.inflight() < count { - let Some(ch) = schedule.front() else { - break; - }; - if self.completed_tenures.contains(&ch) { - debug!("Already successfully downloaded tenure {}", &ch); - schedule.pop_front(); - continue; - } - let Some(neighbors) = available.get_mut(ch) else { - // not found on any neighbors, so stop trying this tenure - debug!("No neighbors have tenure {}", ch); - schedule.pop_front(); - continue; - }; - if neighbors.is_empty() { - // no more neighbors to try - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - } - let Some(naddr) = neighbors.pop() else { - debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - }; - if self.try_resume_peer(naddr.clone()) { - continue; - }; - if self.has_downloader_for_tenure(&ch) { - schedule.pop_front(); - continue; - } - - let Some(available_tenures) = tenure_block_ids.get(&naddr) else { - // this peer doesn't have any known tenures, so try the others - debug!("No tenures available from {}", &naddr); - continue; - }; - let Some(tenure_info) = available_tenures.get(ch) else { - // this peer does not have a tenure start/end block for this tenure, so try the - // others. - debug!("Neighbor {} does not serve tenure {}", &naddr, ch); - continue; - }; - let Some(Some(start_reward_set)) = current_reward_cycles - .get(&tenure_info.start_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", - tenure_info.start_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - let Some(Some(end_reward_set)) = current_reward_cycles - .get(&tenure_info.end_reward_cycle) - .map(|cycle_info| cycle_info.reward_set()) - else { - debug!( - "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", - tenure_info.end_reward_cycle, - &tenure_info - ); - schedule.pop_front(); - continue; - }; - - debug!( - "Download tenure {} (start={}, end={}) (rc {},{})", - &ch, - &tenure_info.start_block_id, - &tenure_info.end_block_id, - tenure_info.start_reward_cycle, - tenure_info.end_reward_cycle - ); - let tenure_download = NakamotoTenureDownloader::new( - ch.clone(), - tenure_info.start_block_id.clone(), - tenure_info.end_block_id.clone(), - naddr.clone(), - start_reward_set.clone(), - end_reward_set.clone(), - ); - - debug!("Request tenure {} from neighbor {}", ch, &naddr); - self.add_downloader(naddr, tenure_download); - schedule.pop_front(); - } - } - - /// Run all confirmed downloaders. - /// * Identify neighbors for which we do not have an inflight request - /// * Get each such neighbor's downloader, and generate its next HTTP reqeust. Send that - /// request to the neighbor and begin driving the underlying socket I/O. - /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance - /// its state. - /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. - /// - /// Returns the set of downloaded blocks obtained for completed downloaders. These will be - /// full confirmed tenures. - pub fn run( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> HashMap> { - let addrs: Vec<_> = self.peers.keys().cloned().collect(); - let mut finished = vec![]; - let mut finished_tenures = vec![]; - let mut new_blocks = HashMap::new(); - - // send requests - for (naddr, index) in self.peers.iter() { - if neighbor_rpc.has_inflight(&naddr) { - debug!("Peer {} has an inflight request", &naddr); - continue; - } - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - if downloader.is_done() { - debug!("Downloader for {} is done", &naddr); - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - debug!( - "Send request to {} for tenure {} (state {})", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state - ); - let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - debug!("Downloader for {} failed; this peer is dead", &naddr); - neighbor_rpc.add_dead(network, naddr); - continue; - }; - if !sent { - // this downloader is dead or broken - finished.push(naddr.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(&naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - // handle responses - for (naddr, response) in neighbor_rpc.collect_replies(network) { - let Some(index) = self.peers.get(&naddr) else { - debug!("No downloader for {}", &naddr); - continue; - }; - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); - continue; - }; - debug!("Got response from {}", &naddr); - - let Ok(blocks_opt) = downloader - .handle_next_download_response(response) - .map_err(|e| { - debug!("Failed to handle response from {}: {:?}", &naddr, &e); - e - }) - else { - debug!("Failed to handle download response from {}", &naddr); - neighbor_rpc.add_dead(network, &naddr); - continue; - }; - - let Some(blocks) = blocks_opt else { - continue; - }; - - debug!( - "Got {} blocks for tenure {}", - blocks.len(), - &downloader.tenure_id_consensus_hash - ); - new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); - if downloader.is_done() { - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(naddr); - } - } - for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - new_blocks - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy_2.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy_2.rs deleted file mode 100644 index c96f718d2b..0000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_copy_2.rs +++ /dev/null @@ -1,867 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::RewardCycleInfo; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, - WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for a unconfirmed tenures. These include the ongoing tenure, as well as the -/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but -/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoUnconfirmedDownloadState { - /// Getting the tenure tip information - GetTenureInfo, - /// Get the tenure start block for the ongoing tenure. - /// The inner value is tenure-start block ID of the ongoing tenure. - GetTenureStartBlock(StacksBlockId), - /// Receiving unconfirmed tenure blocks. - /// The inner value is the block ID of the next block to fetch. - GetUnconfirmedTenureBlocks(StacksBlockId), - /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block - /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). - Done, -} - -impl fmt::Display for NakamotoUnconfirmedDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for the unconfirmed tenures. It operates in the following steps: -/// -/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip -/// 2. Get the tenure-start block for the unconfirmed chain tip -/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the -/// immediate child of the one obtained in (2) -/// -/// Once this state-machine finishes execution, the tenure-start block is used to construct a -/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. -/// -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoUnconfirmedTenureDownloader { - /// state of this machine - pub state: NakamotoUnconfirmedDownloadState, - /// Address of who we're asking - pub naddr: NeighborAddress, - /// reward set of the highest confirmed tenure - pub confirmed_signer_keys: Option, - /// reward set of the unconfirmed (ongoing) tenure - pub unconfirmed_signer_keys: Option, - /// Block ID of this node's highest-processed block. - /// We will not download any blocks lower than this, if it's set. - pub highest_processed_block_id: Option, - /// Highest processed block height (which may not need to be loaded) - pub highest_processed_block_height: Option, - - /// Tenure tip info we obtained for this peer - pub tenure_tip: Option, - /// Tenure start block for the ongoing tip. - /// This is also the tenure-end block for the highest-complete tip. - pub unconfirmed_tenure_start_block: Option, - /// Unconfirmed tenure blocks obtained - pub unconfirmed_tenure_blocks: Option>, -} - -impl NakamotoUnconfirmedTenureDownloader { - /// Make a new downloader which will download blocks from the tip back down to the optional - /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). - pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self { - Self { - state: NakamotoUnconfirmedDownloadState::GetTenureInfo, - naddr, - confirmed_signer_keys: None, - unconfirmed_signer_keys: None, - highest_processed_block_id, - highest_processed_block_height: None, - tenure_tip: None, - unconfirmed_tenure_start_block: None, - unconfirmed_tenure_blocks: None, - } - } - - /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is - /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote - /// node). - pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { - self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) - } - - /// Set the highest-processed block. - /// This can be performed by the downloader itself in order to inform ongoing requests for - /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node - /// has already handled. - pub fn set_highest_processed_block( - &mut self, - highest_processed_block_id: StacksBlockId, - highest_processed_block_height: u64, - ) { - self.highest_processed_block_id = Some(highest_processed_block_id); - self.highest_processed_block_height = Some(highest_processed_block_height); - } - - /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. - /// - /// * tenure_tip.consensus_hash - /// This is the consensus hash of the remote node's ongoing tenure. It may not be the - /// sortition tip, e.g. if the tenure spans multiple sortitions. - /// * tenure_tip.tenure_start_block_id - /// This is the first block ID of the ongoing unconfirmed tenure. - /// * tenure_tip.parent_consensus_hash - /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest - /// complete tenure, for which we know the start and end block IDs. - /// * tenure_tip.parent_tenure_start_block_id - /// This is the tenure start block for the highest complete tenure. It should be equal to - /// the winning Stacks block hash of the snapshot for the ongoing tenure. - /// - /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go - /// fetch it again; just get the new unconfirmed blocks. - pub fn try_accept_tenure_info( - &mut self, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - remote_tenure_tip: RPCGetTenureInfo, - current_reward_sets: &BTreeMap, - ) -> Result<(), NetError> { - if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { - return Err(NetError::InvalidState); - } - if self.tenure_tip.is_some() { - return Err(NetError::InvalidState); - } - - debug!("Got tenure info {:?}", remote_tenure_tip); - debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); - - // authenticate consensus hashes against canonical chain history - let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for tenure {}", - &remote_tenure_tip.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.parent_consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for parent tenure {}", - &remote_tenure_tip.parent_consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - let ih = sortdb.index_handle(&local_sort_tip.sortition_id); - let ancestor_local_tenure_sn = ih - .get_block_snapshot_by_height(local_tenure_sn.block_height)? - .ok_or_else(|| { - debug!( - "No tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { - // .consensus_hash is not on the canonical fork - warn!("Unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash); - return Err(DBError::NotFoundError.into()); - } - let ancestor_parent_local_tenure_sn = ih - .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? - .ok_or_else(|| { - debug!( - "No parent tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError.into()) - })?; - - if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { - // .parent_consensus_hash is not on the canonical fork - warn!("Parent unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(DBError::NotFoundError.into()); - } - - // parent tenure sortition must precede the ongoing tenure sortition - if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { - warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash, - "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(NetError::InvalidMessage); - } - - // parent tenure start block ID must be the winning block hash for the ongoing tenure's - // snapshot - if local_tenure_sn.winning_stacks_block_hash.0 - != remote_tenure_tip.parent_tenure_start_block_id.0 - { - debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; - "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, - "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); - return Err(NetError::StaleView); - } - - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - // we've synchronized this tenure before, so don't get anymore blocks before it. - let highest_processed_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(highest_processed_block_id)? - .ok_or_else(|| { - debug!("No such Nakamoto block {}", &highest_processed_block_id); - NetError::DBError(DBError::NotFoundError) - })? - .0; - - let highest_processed_block_height = highest_processed_block.header.chain_length; - self.highest_processed_block_height = Some(highest_processed_block_height); - - if &remote_tenure_tip.tip_block_id == highest_processed_block_id - || highest_processed_block_height > remote_tenure_tip.tip_height - { - // nothing to do -- we're at or ahead of the remote peer, so finish up. - // If we don't have the tenure-start block for the confirmed tenure that the remote - // peer claims to have, then the remote peer has sent us invalid data and we should - // treat it as such. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or(NetError::InvalidMessage)? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::Done; - } - } - - if self.state == NakamotoUnconfirmedDownloadState::Done { - // only need to remember the tenure tip - self.tenure_tip = Some(remote_tenure_tip); - return Ok(()); - } - - // we're not finished - let tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) - .expect("FATAL: sortition from before system start"); - let parent_tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - parent_local_tenure_sn.block_height, - ) - .expect("FATAL: sortition from before system start"); - - // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions - let Some(Some(confirmed_reward_set)) = current_reward_sets - .get(&parent_tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for confirmed tenure {} (rc {})", - &parent_local_tenure_sn.consensus_hash, parent_tenure_rc - ); - return Err(NetError::InvalidState); - }; - - let Some(Some(unconfirmed_reward_set)) = current_reward_sets - .get(&tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for unconfirmed tenure {} (rc {})", - &local_tenure_sn.consensus_hash, tenure_rc - ); - return Err(NetError::InvalidState); - }; - - if chainstate - .nakamoto_blocks_db() - .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? - { - // proceed to get unconfirmed blocks. We already have the tenure-start block. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or_else(|| { - debug!( - "No such tenure-start Nakamoto block {}", - &remote_tenure_tip.tenure_start_block_id - ); - NetError::DBError(DBError::NotFoundError) - })? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - remote_tenure_tip.tip_block_id.clone(), - ); - } else { - // get the tenure-start block first - self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( - remote_tenure_tip.tenure_start_block_id.clone(), - ); - } - - debug!( - "Will validate unconfirmed blocks with reward sets in ({},{})", - parent_tenure_rc, tenure_rc - ); - self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); - self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); - self.tenure_tip = Some(remote_tenure_tip); - - Ok(()) - } - - /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the unconfirmed tenure start block was valid - /// Returns Err(..) if it was not valid, or if this function was called out of sequence. - pub fn try_accept_unconfirmed_tenure_start_block( - &mut self, - unconfirmed_tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = - &self.state - else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - // stacker signature has to match the current reward set - if let Err(e) = unconfirmed_tenure_start_block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // block has to match the expected hash - if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { - warn!("Invalid tenure-start block"; - "tenure_id_start_block" => %tenure_start_block_id, - "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - // furthermore, the block has to match the expected tenure ID - if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { - warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); - return Err(NetError::InvalidMessage); - } - - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone(), - ); - Ok(()) - } - - /// Add downloaded unconfirmed tenure blocks. - /// If we have collected all tenure blocks, then return them. - /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the - /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come - /// after the highest-processed block (if set). - /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on invalid state or invalid block. - pub fn try_accept_unconfirmed_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = - &self.state - else { - return Err(NetError::InvalidState); - }; - - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - debug!("No tenure blocks obtained"); - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest. - // If there's a tenure-start block, it must be last. - let mut expected_block_id = last_block_id; - let mut finished_download = false; - let mut last_block_index = None; - for (cnt, block) in tenure_blocks.iter().enumerate() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id()); - return Err(NetError::InvalidMessage); - } - if let Err(e) = block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // we may or may not need the tenure-start block for the unconfirmed tenure. But if we - // do, make sure it's valid, and it's the last block we receive. - let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - }; - if is_tenure_start { - // this is the tenure-start block, so make sure it matches our /v3/tenure/info - if block.header.block_id() != tenure_tip.tenure_start_block_id { - warn!("Unexpected tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); - return Err(NetError::InvalidMessage); - } - - if cnt.saturating_add(1) != tenure_blocks.len() { - warn!("Invalid tenure stream -- got tenure-start before end of tenure"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "cnt" => cnt, - "len" => tenure_blocks.len(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - finished_download = true; - last_block_index = Some(cnt); - break; - } - - debug!("Got unconfirmed tenure block {}", &block.header.block_id()); - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - if expected_block_id == highest_processed_block_id { - // got all the blocks we asked for - debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_height) = - self.highest_processed_block_height.as_ref() - { - if &block.header.chain_length <= highest_processed_block_height { - // no need to continue this download - debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - expected_block_id = &block.header.parent_block_id; - last_block_index = Some(cnt); - } - - // blocks after the last_block_index were not processed, so should be dropped - if let Some(last_block_index) = last_block_index { - tenure_blocks.truncate(last_block_index + 1); - } - - if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.unconfirmed_tenure_blocks = Some(tenure_blocks); - } - - if finished_download { - // we have all of the unconfirmed tenure blocks that were requested. - // only return those newer than the highest block. - self.state = NakamotoUnconfirmedDownloadState::Done; - let highest_processed_block_height = - *self.highest_processed_block_height.as_ref().unwrap_or(&0); - - debug!("Finished receiving unconfirmed tenure"); - return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { - blocks - .into_iter() - .filter(|block| block.header.chain_length > highest_processed_block_height) - .rev() - .collect() - })); - } - - let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - // still have more to get - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - let next_block_id = earliest_block.header.parent_block_id.clone(); - - debug!( - "Will resume fetching unconfirmed tenure blocks starting at {}", - &next_block_id - ); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); - Ok(None) - } - - /// Once this machine runs to completion, examine its state to see if we still need to fetch - /// the highest complete tenure. We may not need to, especially if we're just polling for new - /// unconfirmed blocks. - /// - /// Return Ok(true) if we need it still - /// Return Ok(false) if we already have it - /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. - pub fn need_highest_complete_tenure( - &self, - chainstate: &StacksChainState, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() - else { - return Err(NetError::InvalidState); - }; - - // if we've processed the unconfirmed tenure-start block already, then we've necessarily - // downloaded and processed the highest-complete tenure already. - Ok(!NakamotoChainState::has_block_header( - chainstate.db(), - &unconfirmed_tenure_start_block.header.block_id(), - false, - )?) - } - - /// Determine if we can produce a highest-complete tenure request. - /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure - pub fn can_make_highest_complete_tenure_downloader( - &self, - sortdb: &SortitionDB, - ) -> Result { - let Some(tenure_tip) = &self.tenure_tip else { - return Ok(false); - }; - - let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &tenure_tip.parent_consensus_hash, - )? - else { - return Ok(false); - }; - - let Some(tip_sn) = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? - else { - return Ok(false); - }; - - let Some(parent_tenure) = - SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? - else { - return Ok(false); - }; - - let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? - else { - return Ok(false); - }; - - if parent_tenure.epoch_id < StacksEpochId::Epoch30 - || tip_tenure.epoch_id < StacksEpochId::Epoch30 - { - debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; - "start_tenure" => %tenure_tip.parent_consensus_hash, - "end_tenure" => %tenure_tip.consensus_hash, - "start_tenure_epoch" => %parent_tenure.epoch_id, - "end_tenure_epoch" => %tip_tenure.epoch_id - ); - return Ok(false); - } - - Ok(true) - } - - /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the - /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get - /// its tenure-start block. - /// - /// Returns Ok(downloader) on success - /// Returns Err(..) if we call this function out of sequence. - pub fn make_highest_complete_tenure_downloader( - &self, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(tenure_tip) = &self.tenure_tip else { - return Err(NetError::InvalidState); - }; - let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - - debug!( - "Create downloader for highest complete tenure {} known by {}", - &tenure_tip.parent_consensus_hash, &self.naddr, - ); - let ntd = NakamotoTenureDownloader::new( - tenure_tip.parent_consensus_hash.clone(), - tenure_tip.parent_tenure_start_block_id.clone(), - tenure_tip.tenure_start_block_id.clone(), - self.naddr.clone(), - confirmed_signer_keys.clone(), - unconfirmed_signer_keys.clone(), - ); - - Ok(ntd) - } - - /// Produce the next HTTP request that, when successfully executed, will advance this state - /// machine. - /// - /// Returns Some(request) if a request must be sent. - /// Returns None if we're done - pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - // need to get the tenure tip - return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost)); - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_block( - peerhost, - block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_tenure( - peerhost, - tip_block_id.clone(), - self.highest_processed_block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::Done => { - // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed - // tenure downloader using the earliest unconfirmed tenure block. - return None; - } - } - } - - /// Begin the next download request for this state machine. - /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. It's - /// up to the caller to determine when it's appropriate to convert this state machine into a - /// `NakamotoTenureDownloader`. - /// Returns Err(..) if the neighbor is dead or broken. - pub fn send_next_download_request( - &self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result<(), NetError> { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(()); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let Some(request) = self.make_next_download_request(peerhost) else { - // treat this downloader as still in-flight since the overall state machine will need - // to keep it around long enough to convert it into a tenure downloader for the highest - // complete tenure. - return Ok(()); - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - Ok(()) - } - - /// Handle a received StacksHttpResponse and advance this machine's state - /// If we get the full tenure, return it. - /// - /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure - /// Returns Ok(None) if we're still working, in which case the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on unrecoverable failure to advance state - pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - current_reward_sets: &BTreeMap, - ) -> Result>, NetError> { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - debug!("Got tenure-info response"); - let remote_tenure_info = response.decode_nakamoto_tenure_info()?; - debug!("Got tenure-info response: {:?}", &remote_tenure_info); - self.try_accept_tenure_info( - sortdb, - local_sort_tip, - chainstate, - remote_tenure_info, - current_reward_sets, - )?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { - debug!("Got tenure start-block response"); - let block = response.decode_nakamoto_block()?; - self.try_accept_unconfirmed_tenure_start_block(block)?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { - debug!("Got unconfirmed tenure blocks response"); - let blocks = response.decode_nakamoto_tenure()?; - let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; - debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); - Ok(accepted_opt) - } - NakamotoUnconfirmedDownloadState::Done => { - return Err(NetError::InvalidState); - } - } - } - - /// Is this machine finished? - pub fn is_done(&self) -> bool { - self.state == NakamotoUnconfirmedDownloadState::Done - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy_2.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy_2.rs deleted file mode 100644 index c96f718d2b..0000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed_opy_2.rs +++ /dev/null @@ -1,867 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::coordinator::RewardCycleInfo; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::boot::RewardSet; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, - WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::{CurrentRewardSet, PeerNetwork}; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for a unconfirmed tenures. These include the ongoing tenure, as well as the -/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but -/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoUnconfirmedDownloadState { - /// Getting the tenure tip information - GetTenureInfo, - /// Get the tenure start block for the ongoing tenure. - /// The inner value is tenure-start block ID of the ongoing tenure. - GetTenureStartBlock(StacksBlockId), - /// Receiving unconfirmed tenure blocks. - /// The inner value is the block ID of the next block to fetch. - GetUnconfirmedTenureBlocks(StacksBlockId), - /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block - /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). - Done, -} - -impl fmt::Display for NakamotoUnconfirmedDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for the unconfirmed tenures. It operates in the following steps: -/// -/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip -/// 2. Get the tenure-start block for the unconfirmed chain tip -/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the -/// immediate child of the one obtained in (2) -/// -/// Once this state-machine finishes execution, the tenure-start block is used to construct a -/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. -/// -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoUnconfirmedTenureDownloader { - /// state of this machine - pub state: NakamotoUnconfirmedDownloadState, - /// Address of who we're asking - pub naddr: NeighborAddress, - /// reward set of the highest confirmed tenure - pub confirmed_signer_keys: Option, - /// reward set of the unconfirmed (ongoing) tenure - pub unconfirmed_signer_keys: Option, - /// Block ID of this node's highest-processed block. - /// We will not download any blocks lower than this, if it's set. - pub highest_processed_block_id: Option, - /// Highest processed block height (which may not need to be loaded) - pub highest_processed_block_height: Option, - - /// Tenure tip info we obtained for this peer - pub tenure_tip: Option, - /// Tenure start block for the ongoing tip. - /// This is also the tenure-end block for the highest-complete tip. - pub unconfirmed_tenure_start_block: Option, - /// Unconfirmed tenure blocks obtained - pub unconfirmed_tenure_blocks: Option>, -} - -impl NakamotoUnconfirmedTenureDownloader { - /// Make a new downloader which will download blocks from the tip back down to the optional - /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). - pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option) -> Self { - Self { - state: NakamotoUnconfirmedDownloadState::GetTenureInfo, - naddr, - confirmed_signer_keys: None, - unconfirmed_signer_keys: None, - highest_processed_block_id, - highest_processed_block_height: None, - tenure_tip: None, - unconfirmed_tenure_start_block: None, - unconfirmed_tenure_blocks: None, - } - } - - /// What's the tenure ID of the ongoing tenure? This is learned from /v3/tenure/info, which is - /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote - /// node). - pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { - self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) - } - - /// Set the highest-processed block. - /// This can be performed by the downloader itself in order to inform ongoing requests for - /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node - /// has already handled. - pub fn set_highest_processed_block( - &mut self, - highest_processed_block_id: StacksBlockId, - highest_processed_block_height: u64, - ) { - self.highest_processed_block_id = Some(highest_processed_block_id); - self.highest_processed_block_height = Some(highest_processed_block_height); - } - - /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. - /// - /// * tenure_tip.consensus_hash - /// This is the consensus hash of the remote node's ongoing tenure. It may not be the - /// sortition tip, e.g. if the tenure spans multiple sortitions. - /// * tenure_tip.tenure_start_block_id - /// This is the first block ID of the ongoing unconfirmed tenure. - /// * tenure_tip.parent_consensus_hash - /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest - /// complete tenure, for which we know the start and end block IDs. - /// * tenure_tip.parent_tenure_start_block_id - /// This is the tenure start block for the highest complete tenure. It should be equal to - /// the winning Stacks block hash of the snapshot for the ongoing tenure. - /// - /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go - /// fetch it again; just get the new unconfirmed blocks. - pub fn try_accept_tenure_info( - &mut self, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - remote_tenure_tip: RPCGetTenureInfo, - current_reward_sets: &BTreeMap, - ) -> Result<(), NetError> { - if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { - return Err(NetError::InvalidState); - } - if self.tenure_tip.is_some() { - return Err(NetError::InvalidState); - } - - debug!("Got tenure info {:?}", remote_tenure_tip); - debug!("Local sortition tip is {}", &local_sort_tip.consensus_hash); - - // authenticate consensus hashes against canonical chain history - let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for tenure {}", - &remote_tenure_tip.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.parent_consensus_hash, - )? - .ok_or_else(|| { - debug!( - "No snapshot for parent tenure {}", - &remote_tenure_tip.parent_consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - let ih = sortdb.index_handle(&local_sort_tip.sortition_id); - let ancestor_local_tenure_sn = ih - .get_block_snapshot_by_height(local_tenure_sn.block_height)? - .ok_or_else(|| { - debug!( - "No tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError) - })?; - - if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { - // .consensus_hash is not on the canonical fork - warn!("Unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash); - return Err(DBError::NotFoundError.into()); - } - let ancestor_parent_local_tenure_sn = ih - .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)? - .ok_or_else(|| { - debug!( - "No parent tenure snapshot at burn block height {} off of sortition {} ({})", - local_tenure_sn.block_height, - &local_tenure_sn.sortition_id, - &local_tenure_sn.consensus_hash - ); - NetError::DBError(DBError::NotFoundError.into()) - })?; - - if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { - // .parent_consensus_hash is not on the canonical fork - warn!("Parent unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(DBError::NotFoundError.into()); - } - - // parent tenure sortition must precede the ongoing tenure sortition - if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { - warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash, - "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(NetError::InvalidMessage); - } - - // parent tenure start block ID must be the winning block hash for the ongoing tenure's - // snapshot - if local_tenure_sn.winning_stacks_block_hash.0 - != remote_tenure_tip.parent_tenure_start_block_id.0 - { - debug!("Ongoing tenure does not commit to highest complete tenure's start block. Treating remote peer {} as stale.", &self.naddr; - "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.parent_tenure_start_block_id, - "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); - return Err(NetError::StaleView); - } - - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - // we've synchronized this tenure before, so don't get anymore blocks before it. - let highest_processed_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(highest_processed_block_id)? - .ok_or_else(|| { - debug!("No such Nakamoto block {}", &highest_processed_block_id); - NetError::DBError(DBError::NotFoundError) - })? - .0; - - let highest_processed_block_height = highest_processed_block.header.chain_length; - self.highest_processed_block_height = Some(highest_processed_block_height); - - if &remote_tenure_tip.tip_block_id == highest_processed_block_id - || highest_processed_block_height > remote_tenure_tip.tip_height - { - // nothing to do -- we're at or ahead of the remote peer, so finish up. - // If we don't have the tenure-start block for the confirmed tenure that the remote - // peer claims to have, then the remote peer has sent us invalid data and we should - // treat it as such. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or(NetError::InvalidMessage)? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::Done; - } - } - - if self.state == NakamotoUnconfirmedDownloadState::Done { - // only need to remember the tenure tip - self.tenure_tip = Some(remote_tenure_tip); - return Ok(()); - } - - // we're not finished - let tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) - .expect("FATAL: sortition from before system start"); - let parent_tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - parent_local_tenure_sn.block_height, - ) - .expect("FATAL: sortition from before system start"); - - // get reward set info for the unconfirmed tenure and highest-complete tenure sortitions - let Some(Some(confirmed_reward_set)) = current_reward_sets - .get(&parent_tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for confirmed tenure {} (rc {})", - &parent_local_tenure_sn.consensus_hash, parent_tenure_rc - ); - return Err(NetError::InvalidState); - }; - - let Some(Some(unconfirmed_reward_set)) = current_reward_sets - .get(&tenure_rc) - .map(|cycle_info| cycle_info.reward_set()) - else { - warn!( - "No signer public keys for unconfirmed tenure {} (rc {})", - &local_tenure_sn.consensus_hash, tenure_rc - ); - return Err(NetError::InvalidState); - }; - - if chainstate - .nakamoto_blocks_db() - .has_nakamoto_block_with_index_hash(&remote_tenure_tip.tenure_start_block_id.clone())? - { - // proceed to get unconfirmed blocks. We already have the tenure-start block. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or_else(|| { - debug!( - "No such tenure-start Nakamoto block {}", - &remote_tenure_tip.tenure_start_block_id - ); - NetError::DBError(DBError::NotFoundError) - })? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - remote_tenure_tip.tip_block_id.clone(), - ); - } else { - // get the tenure-start block first - self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( - remote_tenure_tip.tenure_start_block_id.clone(), - ); - } - - debug!( - "Will validate unconfirmed blocks with reward sets in ({},{})", - parent_tenure_rc, tenure_rc - ); - self.confirmed_signer_keys = Some(confirmed_reward_set.clone()); - self.unconfirmed_signer_keys = Some(unconfirmed_reward_set.clone()); - self.tenure_tip = Some(remote_tenure_tip); - - Ok(()) - } - - /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the unconfirmed tenure start block was valid - /// Returns Err(..) if it was not valid, or if this function was called out of sequence. - pub fn try_accept_unconfirmed_tenure_start_block( - &mut self, - unconfirmed_tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = - &self.state - else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - // stacker signature has to match the current reward set - if let Err(e) = unconfirmed_tenure_start_block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // block has to match the expected hash - if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { - warn!("Invalid tenure-start block"; - "tenure_id_start_block" => %tenure_start_block_id, - "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - // furthermore, the block has to match the expected tenure ID - if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { - warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); - return Err(NetError::InvalidMessage); - } - - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone(), - ); - Ok(()) - } - - /// Add downloaded unconfirmed tenure blocks. - /// If we have collected all tenure blocks, then return them. - /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the - /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come - /// after the highest-processed block (if set). - /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on invalid state or invalid block. - pub fn try_accept_unconfirmed_tenure_blocks( - &mut self, - mut tenure_blocks: Vec, - ) -> Result>, NetError> { - let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = - &self.state - else { - return Err(NetError::InvalidState); - }; - - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - warn!("tenure_tip is not set"); - return Err(NetError::InvalidState); - }; - - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - warn!("unconfirmed_signer_keys is not set"); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - debug!("No tenure blocks obtained"); - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest. - // If there's a tenure-start block, it must be last. - let mut expected_block_id = last_block_id; - let mut finished_download = false; - let mut last_block_index = None; - for (cnt, block) in tenure_blocks.iter().enumerate() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id()); - return Err(NetError::InvalidMessage); - } - if let Err(e) = block - .header - .verify_signer_signatures(unconfirmed_signer_keys) - { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state, - "error" => %e); - return Err(NetError::InvalidMessage); - } - - // we may or may not need the tenure-start block for the unconfirmed tenure. But if we - // do, make sure it's valid, and it's the last block we receive. - let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - }; - if is_tenure_start { - // this is the tenure-start block, so make sure it matches our /v3/tenure/info - if block.header.block_id() != tenure_tip.tenure_start_block_id { - warn!("Unexpected tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); - return Err(NetError::InvalidMessage); - } - - if cnt.saturating_add(1) != tenure_blocks.len() { - warn!("Invalid tenure stream -- got tenure-start before end of tenure"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "cnt" => cnt, - "len" => tenure_blocks.len(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - finished_download = true; - last_block_index = Some(cnt); - break; - } - - debug!("Got unconfirmed tenure block {}", &block.header.block_id()); - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - if expected_block_id == highest_processed_block_id { - // got all the blocks we asked for - debug!("Cancelling unconfirmed tenure download to {}: have processed block up to block {} already", &self.naddr, highest_processed_block_id); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_height) = - self.highest_processed_block_height.as_ref() - { - if &block.header.chain_length <= highest_processed_block_height { - // no need to continue this download - debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); - finished_download = true; - last_block_index = Some(cnt); - break; - } - } - - expected_block_id = &block.header.parent_block_id; - last_block_index = Some(cnt); - } - - // blocks after the last_block_index were not processed, so should be dropped - if let Some(last_block_index) = last_block_index { - tenure_blocks.truncate(last_block_index + 1); - } - - if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.unconfirmed_tenure_blocks = Some(tenure_blocks); - } - - if finished_download { - // we have all of the unconfirmed tenure blocks that were requested. - // only return those newer than the highest block. - self.state = NakamotoUnconfirmedDownloadState::Done; - let highest_processed_block_height = - *self.highest_processed_block_height.as_ref().unwrap_or(&0); - - debug!("Finished receiving unconfirmed tenure"); - return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| { - blocks - .into_iter() - .filter(|block| block.header.chain_length > highest_processed_block_height) - .rev() - .collect() - })); - } - - let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - // still have more to get - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - let next_block_id = earliest_block.header.parent_block_id.clone(); - - debug!( - "Will resume fetching unconfirmed tenure blocks starting at {}", - &next_block_id - ); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id); - Ok(None) - } - - /// Once this machine runs to completion, examine its state to see if we still need to fetch - /// the highest complete tenure. We may not need to, especially if we're just polling for new - /// unconfirmed blocks. - /// - /// Return Ok(true) if we need it still - /// Return Ok(false) if we already have it - /// Return Err(..) if we encounter a DB error or if this function was called out of sequence. - pub fn need_highest_complete_tenure( - &self, - chainstate: &StacksChainState, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref() - else { - return Err(NetError::InvalidState); - }; - - // if we've processed the unconfirmed tenure-start block already, then we've necessarily - // downloaded and processed the highest-complete tenure already. - Ok(!NakamotoChainState::has_block_header( - chainstate.db(), - &unconfirmed_tenure_start_block.header.block_id(), - false, - )?) - } - - /// Determine if we can produce a highest-complete tenure request. - /// This can be false if the tenure tip isn't present, or it doesn't point to a Nakamoto tenure - pub fn can_make_highest_complete_tenure_downloader( - &self, - sortdb: &SortitionDB, - ) -> Result { - let Some(tenure_tip) = &self.tenure_tip else { - return Ok(false); - }; - - let Some(parent_sn) = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &tenure_tip.parent_consensus_hash, - )? - else { - return Ok(false); - }; - - let Some(tip_sn) = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_tip.consensus_hash)? - else { - return Ok(false); - }; - - let Some(parent_tenure) = - SortitionDB::get_stacks_epoch(sortdb.conn(), parent_sn.block_height)? - else { - return Ok(false); - }; - - let Some(tip_tenure) = SortitionDB::get_stacks_epoch(sortdb.conn(), tip_sn.block_height)? - else { - return Ok(false); - }; - - if parent_tenure.epoch_id < StacksEpochId::Epoch30 - || tip_tenure.epoch_id < StacksEpochId::Epoch30 - { - debug!("Cannot make highest complete tenure: start and/or end block is not a Nakamoto block"; - "start_tenure" => %tenure_tip.parent_consensus_hash, - "end_tenure" => %tenure_tip.consensus_hash, - "start_tenure_epoch" => %parent_tenure.epoch_id, - "end_tenure_epoch" => %tip_tenure.epoch_id - ); - return Ok(false); - } - - Ok(true) - } - - /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the - /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get - /// its tenure-start block. - /// - /// Returns Ok(downloader) on success - /// Returns Err(..) if we call this function out of sequence. - pub fn make_highest_complete_tenure_downloader( - &self, - ) -> Result { - if self.state != NakamotoUnconfirmedDownloadState::Done { - return Err(NetError::InvalidState); - } - let Some(tenure_tip) = &self.tenure_tip else { - return Err(NetError::InvalidState); - }; - let Some(confirmed_signer_keys) = self.confirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - let Some(unconfirmed_signer_keys) = self.unconfirmed_signer_keys.as_ref() else { - return Err(NetError::InvalidState); - }; - - debug!( - "Create downloader for highest complete tenure {} known by {}", - &tenure_tip.parent_consensus_hash, &self.naddr, - ); - let ntd = NakamotoTenureDownloader::new( - tenure_tip.parent_consensus_hash.clone(), - tenure_tip.parent_tenure_start_block_id.clone(), - tenure_tip.tenure_start_block_id.clone(), - self.naddr.clone(), - confirmed_signer_keys.clone(), - unconfirmed_signer_keys.clone(), - ); - - Ok(ntd) - } - - /// Produce the next HTTP request that, when successfully executed, will advance this state - /// machine. - /// - /// Returns Some(request) if a request must be sent. - /// Returns None if we're done - pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - // need to get the tenure tip - return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost)); - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_block( - peerhost, - block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => { - return Some(StacksHttpRequest::new_get_nakamoto_tenure( - peerhost, - tip_block_id.clone(), - self.highest_processed_block_id.clone(), - )); - } - NakamotoUnconfirmedDownloadState::Done => { - // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed - // tenure downloader using the earliest unconfirmed tenure block. - return None; - } - } - } - - /// Begin the next download request for this state machine. - /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. It's - /// up to the caller to determine when it's appropriate to convert this state machine into a - /// `NakamotoTenureDownloader`. - /// Returns Err(..) if the neighbor is dead or broken. - pub fn send_next_download_request( - &self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result<(), NetError> { - if neighbor_rpc.has_inflight(&self.naddr) { - debug!("Peer {} has an inflight request", &self.naddr); - return Ok(()); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let Some(request) = self.make_next_download_request(peerhost) else { - // treat this downloader as still in-flight since the overall state machine will need - // to keep it around long enough to convert it into a tenure downloader for the highest - // complete tenure. - return Ok(()); - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - Ok(()) - } - - /// Handle a received StacksHttpResponse and advance this machine's state - /// If we get the full tenure, return it. - /// - /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure - /// Returns Ok(None) if we're still working, in which case the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on unrecoverable failure to advance state - pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - current_reward_sets: &BTreeMap, - ) -> Result>, NetError> { - match &self.state { - NakamotoUnconfirmedDownloadState::GetTenureInfo => { - debug!("Got tenure-info response"); - let remote_tenure_info = response.decode_nakamoto_tenure_info()?; - debug!("Got tenure-info response: {:?}", &remote_tenure_info); - self.try_accept_tenure_info( - sortdb, - local_sort_tip, - chainstate, - remote_tenure_info, - current_reward_sets, - )?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => { - debug!("Got tenure start-block response"); - let block = response.decode_nakamoto_block()?; - self.try_accept_unconfirmed_tenure_start_block(block)?; - Ok(None) - } - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { - debug!("Got unconfirmed tenure blocks response"); - let blocks = response.decode_nakamoto_tenure()?; - let accepted_opt = self.try_accept_unconfirmed_tenure_blocks(blocks)?; - debug!("Got unconfirmed tenure blocks"; "complete" => accepted_opt.is_some()); - Ok(accepted_opt) - } - NakamotoUnconfirmedDownloadState::Done => { - return Err(NetError::InvalidState); - } - } - } - - /// Is this machine finished? - pub fn is_done(&self) -> bool { - self.state == NakamotoUnconfirmedDownloadState::Done - } -} From 387dbc349dc056b988c711d38dbb9ceb76866689 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 21 Aug 2024 11:02:44 -0400 Subject: [PATCH 0848/1400] fix: replace async code in event dispatcher The reason behind this is that with the async code we had, we were unable to successfully implement a timeout, so when there was a network glitch, it would get stuck at `TcpStream::connect`, and crash after 60 minutes. This new implementation ensures that all parts of the event dispatcher networking code are able to fail with a timeout. --- testnet/stacks-node/src/event_dispatcher.rs | 357 +++++++++++++++++--- 1 file changed, 308 insertions(+), 49 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index ad0b70a2f1..fcfc084969 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -16,20 +16,18 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; +use std::io::{Read, Write}; +use std::net::{TcpStream, ToSocketAddrs}; use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::Mutex; use std::thread::sleep; -use std::time::Duration; +use std::time::{Duration, Instant}; -use async_h1::client; -use async_std::future::timeout; -use async_std::net::TcpStream; -use async_std::task; use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; -use http_types::{Method, Request, Url}; +use http_types::Url; use serde_json::json; use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::burn::operations::BlockstackOperationType; @@ -313,12 +311,112 @@ impl RewardSetEventPayload { } } +fn send_request( + host: &str, + port: u16, + body: &[u8], + url: &Url, + timeout: Duration, +) -> Result { + let addr = format!("{}:{}", host, port) + .to_socket_addrs()? + .next() + .ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::NotFound, "No valid address found") + })?; + let mut stream = TcpStream::connect_timeout(&addr, timeout)?; + stream.set_read_timeout(Some(timeout))?; + stream.set_write_timeout(Some(timeout))?; + + let request = format!( + "POST {} HTTP/1.1\r\n\ + Host: {}\r\n\ + Content-Type: application/json\r\n\ + Content-Length: {}\r\n\ + Connection: close\r\n\ + \r\n", + url.path(), + host, + body.len(), + ); + debug!("Event dispatcher: Sending request"; "request" => &request); + + stream.write_all(request.as_bytes())?; + stream.write_all(body)?; + stream.flush()?; + debug!("Event dispatcher: Request sent"); + + let mut response = Vec::new(); + let mut buffer = [0; 512]; + let mut headers_parsed = false; + let mut content_length = None; + let mut total_read = 0; + + let start_time = Instant::now(); + + while total_read < content_length.unwrap_or(usize::MAX) { + if start_time.elapsed() >= timeout { + return Err(std::io::Error::new( + std::io::ErrorKind::TimedOut, + "Response reading timed out", + )); + } + + let bytes_read = stream.read(&mut buffer)?; + if bytes_read == 0 { + // Connection closed + break; + } + + response.extend_from_slice(&buffer[..bytes_read]); + + // Parse headers if not already done + if !headers_parsed { + if let Some(headers_end) = response.windows(4).position(|window| window == b"\r\n\r\n") + { + headers_parsed = true; + // Parse Content-Length header + let headers = &response[..headers_end]; + let headers_str = String::from_utf8_lossy(headers); + if let Some(content_length_line) = headers_str + .lines() + .find(|line| line.to_lowercase().starts_with("content-length:")) + { + let length_str = content_length_line + .split(":") + .nth(1) + // This is safe because we already know the line starts with "Content-Length:" + .expect("unreachable"); + match length_str.trim().parse::() { + Ok(len) => content_length = Some(len), + Err(_) => { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Invalid Content-Length header", + )) + } + } + } + total_read = response[headers_end + 4..].len(); + } + } else { + total_read += bytes_read; + } + } + + let response_str = String::from_utf8_lossy(&response).to_string(); + debug!("Event dispatcher: Response received"; "response" => &response_str); + + Ok(response_str) +} + impl EventObserver { pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { debug!( "Event dispatcher: Sending payload"; "url" => %path, "payload" => ?payload ); - let body = match serde_json::to_vec(&payload) { + + let body = match serde_json::to_vec(payload) { Ok(body) => body, Err(err) => { error!("Event dispatcher: serialization failed - {:?}", err); @@ -327,57 +425,37 @@ impl EventObserver { }; let url = { - let joined_components = match path.starts_with('/') { - true => format!("{}{}", &self.endpoint, path), - false => format!("{}/{}", &self.endpoint, path), + let joined_components = if path.starts_with('/') { + format!("{}{}", &self.endpoint, path) + } else { + format!("{}/{}", &self.endpoint, path) }; let url = format!("http://{}", joined_components); Url::parse(&url) .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {} as a URL", url)) }; - let backoff = Duration::from_millis((1.0 * 1_000.0) as u64); - let connection_timeout = Duration::from_secs(5); + let host = url.host_str().expect("Invalid URL: missing host"); + let port = url.port_or_known_default().unwrap_or(80); - loop { - let body = body.clone(); - let mut req = Request::new(Method::Post, url.clone()); - req.append_header("Content-Type", "application/json"); - req.set_body(body); - - let response = task::block_on(async { - let stream = - match timeout(connection_timeout, TcpStream::connect(&self.endpoint)).await { - Ok(Ok(stream)) => stream, - Ok(Err(err)) => { - warn!("Event dispatcher: connection failed - {:?}", err); - return None; - } - Err(_) => { - error!("Event dispatcher: connection attempt timed out"); - return None; - } - }; + let backoff = Duration::from_millis(1000); // 1 second - match client::connect(stream, req).await { - Ok(response) => Some(response), - Err(err) => { - warn!("Event dispatcher: rpc invocation failed - {:?}", err); - None + loop { + match send_request(host, port, &body, &url, backoff) { + Ok(response) => { + if response.starts_with("HTTP/1.1 200") { + debug!( + "Event dispatcher: Successful POST"; "url" => %url + ); + break; + } else { + error!( + "Event dispatcher: Failed POST"; "url" => %url, "response" => ?response + ); } } - }); - - if let Some(response) = response { - if response.status().is_success() { - debug!( - "Event dispatcher: Successful POST"; "url" => %url - ); - break; - } else { - error!( - "Event dispatcher: Failed POST"; "url" => %url, "err" => ?response - ); + Err(err) => { + warn!("Event dispatcher: connection or request failed - {:?}", err); } } sleep(backoff); @@ -1483,6 +1561,10 @@ impl EventDispatcher { #[cfg(test)] mod test { + use std::net::TcpListener; + use std::thread; + use std::time::Instant; + use clarity::vm::costs::ExecutionCost; use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; @@ -1494,7 +1576,7 @@ mod test { use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksBlockId}; - use crate::event_dispatcher::EventObserver; + use super::*; #[test] fn build_block_processed_event() { @@ -1615,4 +1697,181 @@ mod test { .expect("Unable to deserialize array of MessageSignature"); assert_eq!(event_signer_signature, signer_signature); } + + #[test] + fn test_send_request_connect_timeout() { + let timeout_duration = Duration::from_secs(3); + + // Start measuring time + let start_time = Instant::now(); + + // Attempt to send a request with a timeout + let result = send_request( + "10.255.255.1", // Non-routable IP for timeout + 80, // HTTP port + b"{}", // Example empty JSON body + &Url::parse("http://10.255.255.1/").expect("Failed to parse URL"), + timeout_duration, + ); + + // Measure the elapsed time + let elapsed_time = start_time.elapsed(); + + // Assert that the connection attempt timed out + assert!( + result.is_err(), + "Expected a timeout error, but got {:?}", + result + ); + assert_eq!( + result.unwrap_err().kind(), + std::io::ErrorKind::TimedOut, + "Expected a TimedOut error" + ); + + // Assert that the elapsed time is within an acceptable range + assert!( + elapsed_time >= timeout_duration, + "Timeout occurred too quickly" + ); + assert!( + elapsed_time < timeout_duration + Duration::from_secs(1), + "Timeout took too long" + ); + } + + #[test] + fn test_send_request_timeout() { + // Set up a TcpListener that accepts a connection but delays response + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind test listener"); + let addr = listener.local_addr().unwrap(); + + // Spawn a thread that will accept the connection and do nothing, simulating a long delay + thread::spawn(move || { + let (stream, _addr) = listener.accept().unwrap(); + // Hold the connection open to simulate a delay + thread::sleep(Duration::from_secs(10)); + drop(stream); // Close the stream + }); + + // Set a timeout shorter than the sleep duration to force a timeout + let connection_timeout = Duration::from_secs(2); + + // Attempt to connect, expecting a timeout error + let result = send_request( + "127.0.0.1", + addr.port(), + b"{}", + &Url::parse("http://127.0.0.1/").unwrap(), + connection_timeout, + ); + + // Assert that the result is an error, specifically a timeout + assert!( + result.is_err(), + "Expected a timeout error, got: {:?}", + result + ); + + if let Err(err) = result { + assert_eq!( + err.kind(), + std::io::ErrorKind::WouldBlock, + "Expected TimedOut error, got: {:?}", + err + ); + } + } + + fn start_mock_server(response: &str, client_done_signal: Receiver<()>) -> String { + // Bind to an available port on localhost + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind server"); + let addr = listener.local_addr().unwrap(); + + debug!("Mock server listening on {}", addr); + + // Start the server in a new thread + let response = response.to_string(); + thread::spawn(move || { + for stream in listener.incoming() { + debug!("Mock server accepted connection"); + let mut stream = stream.expect("Failed to accept connection"); + + // Read the client's request (even if we don't do anything with it) + let mut buffer = [0; 512]; + let _ = stream.read(&mut buffer); + debug!("Mock server received request"); + + // Simulate a basic HTTP response + stream + .write_all(response.as_bytes()) + .expect("Failed to write response"); + stream.flush().expect("Failed to flush stream"); + debug!("Mock server sent response"); + + // Wait for the client to signal that it's done reading + client_done_signal + .recv() + .expect("Failed to receive client done signal"); + + debug!("Mock server closing connection"); + + // Explicitly drop the stream after signaling to ensure the client finishes + drop(stream); + break; // Close after the first request + } + }); + + // Return the address of the mock server + format!("{}:{}", addr.ip(), addr.port()) + } + + fn parse_http_response(response: &str) -> &str { + let parts: Vec<&str> = response.split("\r\n\r\n").collect(); + if parts.len() == 2 { + parts[1] // The body is after the second \r\n\r\n + } else { + "" + } + } + + #[test] + fn test_send_request_success() { + // Prepare the mock server to return a successful HTTP response + let mock_response = "HTTP/1.1 200 OK\r\nContent-Length: 13\r\n\r\nHello, world!"; + + // Create a channel to signal when the client is done reading + let (tx_client_done, rx_client_done) = channel(); + let server_addr = start_mock_server(mock_response, rx_client_done); + let timeout_duration = Duration::from_secs(5); + + // Attempt to send a request to the mock server + let result = send_request( + &server_addr.split(':').collect::>()[0], // Host part + server_addr.split(':').collect::>()[1] + .parse() + .unwrap(), // Port part + b"{}", // Example JSON body + &Url::parse(&format!("http://{}/", server_addr)).expect("Failed to parse URL"), + timeout_duration, + ); + debug!("Got result: {:?}", result); + + // Ensure the server only closes after the client has finished processing + if let Ok(response) = &result { + let body = parse_http_response(response); + assert_eq!(body, "Hello, world!", "Unexpected response body: {}", body); + } + + tx_client_done + .send(()) + .expect("Failed to send close signal"); + + // Assert that the connection was successful + assert!( + result.is_ok(), + "Expected a successful request, but got {:?}", + result + ); + } } From 175009bc6c11ec99d767a66e9b83cd4d619aaa11 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 21 Aug 2024 11:33:17 -0400 Subject: [PATCH 0849/1400] fix: Make config opts in `stop_bitcoind()` match those in `start_bitcoind()` --- .../stacks-node/src/tests/bitcoin_regtest.rs | 38 +++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 3fbfa51986..d829b76a83 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -58,12 +58,9 @@ impl BitcoinCoreController { .arg("-server=1") .arg("-listenonion=0") .arg("-rpcbind=127.0.0.1") - .arg(&format!("-port={}", self.config.burnchain.peer_port)) - .arg(&format!( - "-datadir={}", - self.config.get_burnchain_path_str() - )) - .arg(&format!("-rpcport={}", self.config.burnchain.rpc_port)); + .arg(format!("-port={}", self.config.burnchain.peer_port)) + .arg(format!("-datadir={}", self.config.get_burnchain_path_str())) + .arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); match ( &self.config.burnchain.username, @@ -71,8 +68,8 @@ impl BitcoinCoreController { ) { (Some(username), Some(password)) => { command - .arg(&format!("-rpcuser={}", username)) - .arg(&format!("-rpcpassword={}", password)); + .arg(format!("-rpcuser={username}")) + .arg(format!("-rpcpassword={password}")); } _ => {} } @@ -81,7 +78,7 @@ impl BitcoinCoreController { let mut process = match command.spawn() { Ok(child) => child, - Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{:?}", e))), + Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{e:?}"))), }; let mut out_reader = BufReader::new(process.stdout.take().unwrap()); @@ -111,14 +108,25 @@ impl BitcoinCoreController { command .stdout(Stdio::piped()) .arg("-rpcconnect=127.0.0.1") - .arg("-rpcport=8332") - .arg("-rpcuser=neon-tester") - .arg("-rpcpassword=neon-tester-pass") - .arg("stop"); + .arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); + + match ( + &self.config.burnchain.username, + &self.config.burnchain.password, + ) { + (Some(username), Some(password)) => { + command + .arg(format!("-rpcuser={username}")) + .arg(format!("-rpcpassword={password}")); + } + _ => {} + } + + command.arg("stop"); let mut process = match command.spawn() { Ok(child) => child, - Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{:?}", e))), + Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{e:?}"))), }; let mut out_reader = BufReader::new(process.stdout.take().unwrap()); @@ -127,7 +135,7 @@ impl BitcoinCoreController { if bytes_read == 0 { break; } - eprintln!("{}", &line); + eprintln!("{line}"); } } Ok(()) From e42d6abddd4cef3c395f32fe5eb030e50c653735 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 21 Aug 2024 12:06:23 -0400 Subject: [PATCH 0850/1400] test: add `send_payload` integration tests --- Cargo.lock | 1 + testnet/stacks-node/Cargo.toml | 1 + testnet/stacks-node/src/event_dispatcher.rs | 92 +++++++++++++++++++++ 3 files changed, 94 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 357a9def70..1e7e6b6b42 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3421,6 +3421,7 @@ dependencies = [ "stackslib", "stx-genesis", "tikv-jemallocator", + "tiny_http", "tokio", "toml 0.5.11", "tracing", diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index aa72f814db..8708aea2ce 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -49,6 +49,7 @@ tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = {workspace = true} mutants = "0.0.3" +tiny_http = "0.12.0" [[bin]] name = "stacks-node" diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index fcfc084969..00920db88d 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -1575,6 +1575,7 @@ mod test { use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksBlockId}; + use tiny_http::{Method, Response, Server, StatusCode}; use super::*; @@ -1874,4 +1875,95 @@ mod test { result ); } + + fn get_random_port() -> u16 { + // Bind to a random port by specifying port 0, then retrieve the port assigned by the OS + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind to a random port"); + listener.local_addr().unwrap().port() + } + + #[test] + fn test_send_payload_success() { + let port = get_random_port(); + + // Set up a channel to notify when the server has processed the request + let (tx, rx) = channel(); + + // Start a mock server in a separate thread + let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + thread::spawn(move || { + let request = server.recv().unwrap(); + assert_eq!(request.url(), "/test"); + assert_eq!(request.method(), &Method::Post); + + // Simulate a successful response + let response = Response::from_string("HTTP/1.1 200 OK"); + request.respond(response).unwrap(); + + // Notify the test that the request was processed + tx.send(()).unwrap(); + }); + + let observer = EventObserver { + endpoint: format!("127.0.0.1:{}", port), + }; + + let payload = json!({"key": "value"}); + + observer.send_payload(&payload, "/test"); + + // Wait for the server to process the request + rx.recv_timeout(Duration::from_secs(5)) + .expect("Server did not receive request in time"); + } + + #[test] + fn test_send_payload_retry() { + let port = get_random_port(); + + // Set up a channel to notify when the server has processed the request + let (tx, rx) = channel(); + + // Start a mock server in a separate thread + let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + thread::spawn(move || { + let mut attempt = 0; + while let Ok(request) = server.recv() { + attempt += 1; + if attempt == 1 { + debug!("Mock server received request attempt 1"); + // Simulate a failure on the first attempt + let response = Response::new( + StatusCode(500), + vec![], + "Internal Server Error".as_bytes(), + Some(21), + None, + ); + request.respond(response).unwrap(); + } else { + debug!("Mock server received request attempt 2"); + // Simulate a successful response on the second attempt + let response = Response::from_string("HTTP/1.1 200 OK"); + request.respond(response).unwrap(); + + // Notify the test that the request was processed successfully + tx.send(()).unwrap(); + break; + } + } + }); + + let observer = EventObserver { + endpoint: format!("127.0.0.1:{}", port), + }; + + let payload = json!({"key": "value"}); + + observer.send_payload(&payload, "/test"); + + // Wait for the server to process the request + rx.recv_timeout(Duration::from_secs(5)) + .expect("Server did not receive request in time"); + } } From b480038810a356e60f66102f9acb7e0c6a8a98a3 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 21 Aug 2024 12:06:47 -0400 Subject: [PATCH 0851/1400] chore: fix warning --- testnet/stacks-node/src/nakamoto_node/peer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index facb1dd835..004023ea26 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -16,8 +16,8 @@ use std::collections::VecDeque; use std::net::SocketAddr; use std::sync::mpsc::TrySendError; +use std::thread; use std::time::Duration; -use std::{cmp, thread}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::PoxConstants; From 7bc0b8be8241fbd9d286a5ed99244a3a922935b7 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 21 Aug 2024 11:52:30 -0400 Subject: [PATCH 0852/1400] refactor: Move some shared code out into separate function --- .../stacks-node/src/tests/bitcoin_regtest.rs | 52 ++++++++----------- 1 file changed, 22 insertions(+), 30 deletions(-) diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index d829b76a83..6619152f9f 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -44,6 +44,22 @@ impl BitcoinCoreController { } } + fn add_rpc_cli_args(&self, command: &mut Command) { + command.arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); + + match ( + &self.config.burnchain.username, + &self.config.burnchain.password, + ) { + (Some(username), Some(password)) => { + command + .arg(format!("-rpcuser={username}")) + .arg(format!("-rpcpassword={password}")); + } + _ => {} + } + } + pub fn start_bitcoind(&mut self) -> BitcoinResult<()> { std::fs::create_dir_all(&self.config.get_burnchain_path_str()).unwrap(); @@ -59,22 +75,11 @@ impl BitcoinCoreController { .arg("-listenonion=0") .arg("-rpcbind=127.0.0.1") .arg(format!("-port={}", self.config.burnchain.peer_port)) - .arg(format!("-datadir={}", self.config.get_burnchain_path_str())) - .arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); + .arg(format!("-datadir={}", self.config.get_burnchain_path_str())); - match ( - &self.config.burnchain.username, - &self.config.burnchain.password, - ) { - (Some(username), Some(password)) => { - command - .arg(format!("-rpcuser={username}")) - .arg(format!("-rpcpassword={password}")); - } - _ => {} - } + self.add_rpc_cli_args(&mut command); - eprintln!("bitcoind spawn: {:?}", command); + eprintln!("bitcoind spawn: {command:?}"); let mut process = match command.spawn() { Ok(child) => child, @@ -105,22 +110,9 @@ impl BitcoinCoreController { pub fn stop_bitcoind(&mut self) -> Result<(), BitcoinCoreError> { if let Some(_) = self.bitcoind_process.take() { let mut command = Command::new("bitcoin-cli"); - command - .stdout(Stdio::piped()) - .arg("-rpcconnect=127.0.0.1") - .arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); - - match ( - &self.config.burnchain.username, - &self.config.burnchain.password, - ) { - (Some(username), Some(password)) => { - command - .arg(format!("-rpcuser={username}")) - .arg(format!("-rpcpassword={password}")); - } - _ => {} - } + command.stdout(Stdio::piped()).arg("-rpcconnect=127.0.0.1"); + + self.add_rpc_cli_args(&mut command); command.arg("stop"); From e4f59e59a2253a31be7ea909eca32595b93dfcf9 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 21 Aug 2024 12:55:49 -0400 Subject: [PATCH 0853/1400] CRC: rather than wait, just do a retry for mining Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/miner.rs | 65 +++++++++++-------- testnet/stacks-node/src/tests/signer/v0.rs | 34 ++++------ 2 files changed, 50 insertions(+), 49 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index dc57ca16de..cd811a9346 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -45,8 +45,8 @@ use stacks::chainstate::stacks::{ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; +use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; -use stacks::util::{get_epoch_time_secs, sleep_ms}; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; @@ -318,10 +318,17 @@ impl BlockMinerThread { } } } - self.wait_min_time_between_blocks()?; match self.mine_block(&stackerdbs) { - Ok(x) => break Some(x), + Ok(x) => { + if !self.validate_timestamp(&x)? { + info!("Block mined too quickly. Will try again."; + "block_timestamp" => x.header.timestamp, + ); + continue; + } + break Some(x); + } Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { info!("Miner interrupted while mining, will try again"); // sleep, and try again. if the miner was interrupted because the burnchain @@ -1040,34 +1047,40 @@ impl BlockMinerThread { Some(vrf_proof) } - /// Wait the minimum time between blocks before mining a new block (if necessary) + /// Check that the provided block is not mined too quickly after the parent block. /// This is to ensure that the signers do not reject the block due to the block being mined within the same second as the parent block. - fn wait_min_time_between_blocks(&self) -> Result<(), NakamotoNodeError> { - let burn_db_path = self.config.get_burn_db_file_path(); - let mut burn_db = - SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) - .expect("FATAL: could not open sortition DB"); - - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + fn validate_timestamp(&self, x: &NakamotoBlock) -> Result { + let chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); - let parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; - let time_since_parent_ms = get_epoch_time_secs() - .saturating_sub(parent_block_info.stacks_parent_header.burn_header_timestamp) - / 1000; + let stacks_parent_header = + NakamotoChainState::get_block_header(chain_state.db(), &x.header.parent_block_id) + .map_err(|e| { + error!( + "Could not query header info for parent block ID {}: {:?}", + &x.header.parent_block_id, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No header info for parent block ID {}", + &x.header.parent_block_id + ); + NakamotoNodeError::ParentNotFound + })?; + let current_timestamp = get_epoch_time_secs(); + let time_since_parent_ms = + current_timestamp.saturating_sub(stacks_parent_header.burn_header_timestamp) * 1000; if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms { - let wait_ms = self - .config - .miner - .min_time_between_blocks_ms - .saturating_sub(time_since_parent_ms); - info!("Parent block mined {} ms ago, waiting {} ms before mining a new block", time_since_parent_ms, wait_ms; - "parent_block_id" => %parent_block_info.stacks_parent_header.index_block_hash(), - "parent_block_height" => parent_block_info.stacks_parent_header.stacks_block_height, - "parent_block_timestamp" => parent_block_info.stacks_parent_header.burn_header_timestamp, + debug!("Parent block mined {time_since_parent_ms} ms ago. Required minimum gap between blocks is {} ms", self.config.miner.min_time_between_blocks_ms; + "current_timestamp" => current_timestamp, + "parent_block_id" => %stacks_parent_header.index_block_hash(), + "parent_block_height" => stacks_parent_header.stacks_block_height, + "parent_block_timestamp" => stacks_parent_header.burn_header_timestamp, ); - sleep_ms(wait_ms); + return Ok(false); } - Ok(()) + Ok(true) } // TODO: add tests from mutation testing results #4869 diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 049dcad379..1f637b6b8d 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2890,11 +2890,6 @@ fn min_gap_between_blocks() { .nakamoto_blocks_proposed .load(Ordering::SeqCst); - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - let info_before = get_chain_info(&signer_test.running_nodes.conf); // submit a tx so that the miner will mine a block @@ -2904,19 +2899,6 @@ fn min_gap_between_blocks() { submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block proposal. Ensure it does not arrive before the gap is exceeded"); - let start_time = Instant::now(); - while start_time.elapsed().as_millis() < (time_between_blocks_ms - 1000).into() { - let blocks_proposed = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - assert_eq!( - blocks_proposed, proposals_before, - "Block proposed before gap was exceeded" - ); - std::thread::sleep(Duration::from_millis(100)); - } - let start_time = Instant::now(); loop { let blocks_proposed = signer_test @@ -2924,12 +2906,12 @@ fn min_gap_between_blocks() { .nakamoto_blocks_proposed .load(Ordering::SeqCst); if blocks_proposed > proposals_before { + assert!( + start_time.elapsed().as_millis() >= time_between_blocks_ms.into(), + "Block proposed before gap was exceeded" + ); break; } - assert!( - start_time.elapsed().as_secs() < 30, - "Block not proposed after gap was exceeded within timeout" - ); std::thread::sleep(Duration::from_millis(100)); } @@ -2937,6 +2919,10 @@ fn min_gap_between_blocks() { let start = Instant::now(); let duration = 30; + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); loop { let blocks_mined = signer_test .running_nodes @@ -2944,7 +2930,9 @@ fn min_gap_between_blocks() { .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); - if blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height { + if blocks_mined > blocks_before + && info.stacks_tip_height == info_before.stacks_tip_height + 1 + { break; } From 1dc17102ea6f5877485c8382c08a341d2f392711 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 21 Aug 2024 13:38:31 -0400 Subject: [PATCH 0854/1400] chore: syntax improvements Co-authored-by: Jeff Bencin --- testnet/stacks-node/src/event_dispatcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 00920db88d..f7371248c6 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -318,7 +318,7 @@ fn send_request( url: &Url, timeout: Duration, ) -> Result { - let addr = format!("{}:{}", host, port) + let addr = format!("{host}:{port}") .to_socket_addrs()? .next() .ok_or_else(|| { From 383f15b1960713ccf066a553ed07d867f7161a4a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 21 Aug 2024 23:53:53 -0400 Subject: [PATCH 0855/1400] fix: fix failing unit test --- stackslib/src/net/download/nakamoto/tenure.rs | 5 ++++- stackslib/src/net/tests/download/nakamoto.rs | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 4fb050e591..80065dc0c6 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -331,7 +331,10 @@ impl TenureStartEnd { first_burn_height, wt_start.burn_height, ) - .expect("FATAL: tenure from before system start"), + .expect(&format!( + "FATAL: tenure from before system start ({} <= {})", + wt_start.burn_height, first_burn_height + )), wt.processed, ); tenure_start_end.fetch_end_block = true; diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 6b10a95c92..afba1e90e7 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -1086,12 +1086,12 @@ fn test_tenure_start_end_from_inventory() { wanted_tenures.push(WantedTenure::new( ConsensusHash([i as u8; 20]), StacksBlockId([i as u8; 32]), - u64::from(i) + first_burn_height, + u64::from(i) + first_burn_height + 1, )); next_wanted_tenures.push(WantedTenure::new( ConsensusHash([(i + 128) as u8; 20]), StacksBlockId([(i + 128) as u8; 32]), - u64::from(i) + first_burn_height, + u64::from(i) + first_burn_height + 1, )); } let mut all_tenures = wanted_tenures.clone(); From ee28db9f47db4ebc5beee2aa4d40e3ad3cda4010 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 22 Aug 2024 09:19:17 -0400 Subject: [PATCH 0856/1400] Reuse BlockResponse slot for MockSignature message type Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 3 +-- testnet/stacks-node/src/tests/signer/v0.rs | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 7d411f89b5..b0f470ee2a 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -185,8 +185,7 @@ impl SignerMessage { pub fn msg_id(&self) -> Option { match self { Self::BlockProposal(_) | Self::BlockPushed(_) => None, - Self::BlockResponse(_) => Some(MessageSlotID::BlockResponse), - Self::MockSignature(_) => Some(MessageSlotID::MockSignature), + Self::BlockResponse(_) | Self::MockSignature(_) => Some(MessageSlotID::BlockResponse), // Mock signature reuses the same slot as block response since its exclusively used in Epoch 2.5 } } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2ee2e417ee..210dcf22c9 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2212,8 +2212,8 @@ fn mock_sign_epoch_25() { std::thread::sleep(Duration::from_millis(100)); let messages: Vec = StackerDB::get_messages( stackerdb - .get_session_mut(&MessageSlotID::MockSignature) - .expect("Failed to get BlockResponse stackerdb session"), + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), // Epoch 2.5 MockSignatures use the BlockResponse slot &signer_slot_ids, ) .expect("Failed to get message from stackerdb"); From 3303f479449f9f0310dc3b905b6931c363dc24eb Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 22 Aug 2024 06:45:53 -0700 Subject: [PATCH 0857/1400] fix: use permanent backoff when stacker set not found --- stacks-signer/src/client/stacks_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cd65f7914b..0473b68ee1 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -542,7 +542,7 @@ impl StacksClient { backoff::Error::permanent(e.into()) })?; if &error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { - return Err(backoff::Error::transient(ClientError::NoSortitionOnChain)); + return Err(backoff::Error::permanent(ClientError::NoSortitionOnChain)); } else { warn!("Got error response ({status}): {}", error_data.err_msg); return Err(backoff::Error::permanent(ClientError::RequestFailure( From 611eec22df944dcdd68619b8554f2dd49607b54b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 13:38:42 -0400 Subject: [PATCH 0858/1400] fix: address PR feedback --- .../download/nakamoto/download_state_machine.rs | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 2e7be7f977..8cef43a9aa 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -252,7 +252,7 @@ impl NakamotoDownloadStateMachine { ) .expect("FATAL: tip.block_height before system start"); - // careful -- need .saturating_sub(1) since this calculation puts the reward cycle start at + // careful -- need .saturating_add(1) since this calculation puts the reward cycle start at // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len let first_block_height = if let Some(highest_wanted_tenure) = loaded_so_far.last() { highest_wanted_tenure.burn_height.saturating_add(1) @@ -1505,15 +1505,13 @@ impl NakamotoDownloadStateMachine { /// Run and process all unconfirmed tenure downloads, and highest complete tenure downloads. /// Do the needful bookkeeping to remove dead peers. - /// Returns map of tenure IDs to blocks we fetched, plus whether or not we returned because we - /// were throttled fn download_unconfirmed_tenures( &mut self, network: &mut PeerNetwork, sortdb: &SortitionDB, chainstate: &StacksChainState, highest_processed_block_id: Option, - ) -> (HashMap>, bool) { + ) -> HashMap> { // queue up more downloaders self.update_unconfirmed_tenure_downloaders( network.get_connection_opts(), @@ -1589,7 +1587,7 @@ impl NakamotoDownloadStateMachine { }) .collect(); - (tenure_blocks, false) + tenure_blocks } /// Top-level download state machine execution. @@ -1669,7 +1667,7 @@ impl NakamotoDownloadStateMachine { &network.stacks_tip.block_hash, ); - let (new_blocks, throttled) = self.download_unconfirmed_tenures( + let new_blocks = self.download_unconfirmed_tenures( network, sortdb, chainstate, @@ -1680,11 +1678,6 @@ impl NakamotoDownloadStateMachine { }, ); - if throttled { - // stay in this state - return new_blocks; - } - if !self.tenure_downloads.is_empty() { // need to go get this scheduled tenure debug!( From 76c91359cfb7dca65e3b19539b2038440d7c7ebc Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 22 Aug 2024 15:53:09 -0400 Subject: [PATCH 0859/1400] refactor: add release version of `fault_injection` --- stackslib/src/net/relay.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 412d5ed0a1..33e19025b9 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -104,6 +104,17 @@ pub mod fault_injection { } } +#[cfg(not(any(test, feature = "testing")))] +pub mod fault_injection { + pub fn ignore_block(_height: u64, _working_dir: &str) -> bool { + false + } + + pub fn set_ignore_block(_height: u64, _working_dir: &str) {} + + pub fn clear_ignore_block() {} +} + pub struct Relayer { /// Connection to the p2p thread p2p: NetworkHandle, @@ -879,7 +890,6 @@ impl Relayer { &obtained_method, ); - #[cfg(any(test, feature = "testing"))] if fault_injection::ignore_block(block.header.chain_length, &burnchain.working_dir) { return Ok(false); } From e347fb8ad2190ab86bd04df66738d41bf69ebb7a Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 22 Aug 2024 23:42:55 +0300 Subject: [PATCH 0860/1400] mutants add manual workflow_dispatch no timeout --- .github/workflows/pr-differences-mutants.yml | 22 ++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index fc4a725687..d53e2ca661 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -9,6 +9,7 @@ on: - ready_for_review paths: - '**.rs' + workflow_dispatch: concurrency: group: pr-differences-${{ github.head_ref || github.ref || github.run_id }} @@ -16,9 +17,26 @@ concurrency: cancel-in-progress: true jobs: + check-access-permissions: + name: Check Access Permissions + runs-on: ubuntu-latest + + steps: + - name: Check Access Permissions To Trigger This + id: check_access_permissions + uses: stacks-network/actions/team-membership@main + with: + username: ${{ github.actor }} + team: 'blockchain-team' + GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} + + outputs: + ignore_timeout: ${{ steps.check_access_permissions.outputs.is_team_member == 'true' && github.event_name == 'workflow_dispatch' }} + # Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards check-big-packages-and-shards: name: Check Packages and Shards + needs: check-access-permissions runs-on: ubuntu-latest @@ -30,10 +48,13 @@ jobs: run_small_packages: ${{ steps.check_packages_and_shards.outputs.run_small_packages }} small_packages_with_shards: ${{ steps.check_packages_and_shards.outputs.small_packages_with_shards }} run_stacks_signer: ${{ steps.check_packages_and_shards.outputs.run_stacks_signer }} + too_many_mutants: ${{ steps.check_packages_and_shards.outputs.too_many_mutants }} steps: - id: check_packages_and_shards uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main + with: + ignore_timeout: ${{ needs.check-access-permissions.outputs.ignore_timeout }} # Mutation testing - Execute on PR on small packages that have functions modified (normal run, no shards) pr-differences-mutants-small-normal: @@ -220,3 +241,4 @@ jobs: small_packages: ${{ needs.check-big-packages-and-shards.outputs.run_small_packages }} shards_for_small_packages: ${{ needs.check-big-packages-and-shards.outputs.small_packages_with_shards }} stacks_signer: ${{ needs.check-big-packages-and-shards.outputs.run_stacks_signer }} + too_many_mutants: ${{ needs.check-big-packages-and-shards.outputs.too_many_mutants }} From afab2837d0ec6b9795db004654f8e09fa9163461 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:38:26 -0400 Subject: [PATCH 0861/1400] chore: use `url` (which we use in dependencies already) instead of `http-types --- Cargo.lock | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.lock b/Cargo.lock index 1e7e6b6b42..10b65eb745 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3426,6 +3426,7 @@ dependencies = [ "toml 0.5.11", "tracing", "tracing-subscriber", + "url", "warp", "wsts", ] From 15d81622dbcb71947e465ac9530a255525e9d9ac Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:38:48 -0400 Subject: [PATCH 0862/1400] fix; expand receive buffer size to 64k --- stackslib/src/net/connection.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 36b1fc18ff..78c15e0833 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -971,7 +971,7 @@ impl ConnectionInbox

{ // NOTE: it's important that buf not be too big, since up to buf.len()-1 bytes may need // to be copied if a message boundary isn't aligned with buf (which is usually the // case). - let mut buf = [0u8; 4096]; + let mut buf = [0u8; 65536]; let num_read = match fd.read(&mut buf) { Ok(0) => { // remote fd is closed, but do try to consume all remaining bytes in the buffer From f45f1d312cf63feec7fc843b1110b81cb8ffeec1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:39:07 -0400 Subject: [PATCH 0863/1400] fix: handle `text/plain; ...` --- stackslib/src/net/http/mod.rs | 2 +- stackslib/src/net/http/response.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/http/mod.rs b/stackslib/src/net/http/mod.rs index cc6355ca31..33935fdb04 100644 --- a/stackslib/src/net/http/mod.rs +++ b/stackslib/src/net/http/mod.rs @@ -178,7 +178,7 @@ impl FromStr for HttpContentType { let s = header.to_string().to_lowercase(); if s == "application/octet-stream" { Ok(HttpContentType::Bytes) - } else if s == "text/plain" { + } else if s == "text/plain" || s.starts_with("text/plain;") { Ok(HttpContentType::Text) } else if s == "application/json" { Ok(HttpContentType::JSON) diff --git a/stackslib/src/net/http/response.rs b/stackslib/src/net/http/response.rs index f6f1776211..77bcaa730f 100644 --- a/stackslib/src/net/http/response.rs +++ b/stackslib/src/net/http/response.rs @@ -582,7 +582,7 @@ impl StacksMessageCodec for HttpResponsePreamble { )); } - if content_type.is_none() || (content_length.is_none() && !chunked_encoding) { + if content_length.is_none() && !chunked_encoding { return Err(CodecError::DeserializeError( "Invalid HTTP response: missing Content-Type, Content-Length".to_string(), )); @@ -593,7 +593,7 @@ impl StacksMessageCodec for HttpResponsePreamble { status_code: status_code, reason: reason, keep_alive: keep_alive, - content_type: content_type.unwrap(), + content_type: content_type.unwrap_or(HttpContentType::Bytes), // per the RFC content_length: content_length, headers: headers, }) From 238fbe6229a05ee87a5a22f4fecc2ffe071bf603 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:39:25 -0400 Subject: [PATCH 0864/1400] feat: implement `StacksHttp::new_client()` so we can use it to request and receive arbitrary well-formed HTTP messages without having to bind a dedicated request handler to them first. --- stackslib/src/net/httpcore.rs | 194 ++++++++++++++++++++++++++-------- 1 file changed, 152 insertions(+), 42 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 88ee0365b2..39ce1e64ac 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -44,11 +44,12 @@ use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::core::{MemPoolDB, StacksEpoch}; use crate::net::connection::ConnectionOptions; -use crate::net::http::common::HTTP_PREAMBLE_MAX_ENCODED_SIZE; +use crate::net::http::common::{parse_raw_bytes, HTTP_PREAMBLE_MAX_ENCODED_SIZE}; use crate::net::http::{ - http_reason, Error as HttpError, HttpBadRequest, HttpContentType, HttpErrorResponse, - HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, - HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, + http_reason, parse_bytes, parse_json, Error as HttpError, HttpBadRequest, HttpContentType, + HttpErrorResponse, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, + HttpVersion, }; use crate::net::p2p::PeerNetwork; use crate::net::server::HttpPeer; @@ -855,6 +856,44 @@ struct StacksHttpReplyData { stream: StacksHttpRecvStream, } +/// Default response handler, for when using StacksHttp to issue arbitrary requests +#[derive(Clone)] +struct RPCArbitraryResponseHandler {} +impl HttpResponse for RPCArbitraryResponseHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + match preamble.content_type { + HttpContentType::Bytes => { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } + HttpContentType::JSON => { + if body.len() > MAX_MESSAGE_LEN as usize { + return Err(HttpError::DecodeError( + "Message is too long to decode".into(), + )); + } + + let json = parse_json(preamble, body)?; + Ok(HttpResponsePayload::JSON(json)) + } + HttpContentType::Text => { + let text_bytes = parse_raw_bytes( + preamble, + body, + MAX_MESSAGE_LEN.into(), + HttpContentType::Text, + )?; + let text = String::from_utf8_lossy(&text_bytes).to_string(); + Ok(HttpResponsePayload::Text(text)) + } + } + } +} + /// Stacks HTTP state machine implementation, for bufferring up data. /// One of these exists per Connection. /// There can be at most one HTTP request in-flight (i.e. we don't do pipelining). @@ -890,9 +929,13 @@ pub struct StacksHttp { pub read_only_call_limit: ExecutionCost, /// The authorization token to enable access to privileged features, such as the block proposal RPC endpoint pub auth_token: Option, + /// Allow arbitrary responses to be handled in addition to request handlers + allow_arbitrary_response: bool, } impl StacksHttp { + /// Create an HTTP protocol state machine that handles the built-in RPC API. + /// Used for building the RPC server pub fn new(peer_addr: SocketAddr, conn_opts: &ConnectionOptions) -> StacksHttp { let mut http = StacksHttp { peer_addr, @@ -906,11 +949,31 @@ impl StacksHttp { maximum_call_argument_size: conn_opts.maximum_call_argument_size, read_only_call_limit: conn_opts.read_only_call_limit.clone(), auth_token: conn_opts.auth_token.clone(), + allow_arbitrary_response: false, }; http.register_rpc_methods(); http } + /// Create an HTTP protocol state machine that can handle arbitrary responses. + /// Used for building clients. + pub fn new_client(peer_addr: SocketAddr, conn_opts: &ConnectionOptions) -> StacksHttp { + StacksHttp { + peer_addr, + body_start: None, + num_preamble_bytes: 0, + last_four_preamble_bytes: [0u8; 4], + reply: None, + chunk_size: 8192, + request_handler_index: None, + request_handlers: vec![], + maximum_call_argument_size: conn_opts.maximum_call_argument_size, + read_only_call_limit: conn_opts.read_only_call_limit.clone(), + auth_token: conn_opts.auth_token.clone(), + allow_arbitrary_response: true, + } + } + /// Register an API RPC endpoint pub fn register_rpc_endpoint( &mut self, @@ -1164,7 +1227,7 @@ impl StacksHttp { match preamble { StacksHttpPreamble::Response(ref http_response_preamble) => { // we can only receive a response if we're expecting it - if self.request_handler_index.is_none() { + if self.request_handler_index.is_none() && !self.allow_arbitrary_response { return Err(NetError::DeserializeError( "Unexpected HTTP response: no active request handler".to_string(), )); @@ -1293,13 +1356,15 @@ impl StacksHttp { &ConnectionOptions::default(), ); - let response_handler_index = - http.find_response_handler(verb, request_path) - .ok_or(NetError::SendError(format!( - "No such handler for '{} {}'", - verb, request_path - )))?; - http.request_handler_index = Some(response_handler_index); + if !self.allow_arbitrary_response { + let response_handler_index = + http.find_response_handler(verb, request_path) + .ok_or(NetError::SendError(format!( + "No such handler for '{} {}'", + verb, request_path + )))?; + http.request_handler_index = Some(response_handler_index); + } let (preamble, message_offset) = http.read_preamble(response_buf)?; let is_chunked = match preamble { @@ -1417,9 +1482,9 @@ impl ProtocolFamily for StacksHttp { } // sanity check -- if we're receiving a response, then we must have earlier issued - // a request. Thus, we must already know which response handler to use. - // Otherwise, someone sent us malforemd data. - if self.request_handler_index.is_none() { + // a request, or we must be in client mode. Thus, we must already know which + // response handler to use. Otherwise, someone sent us malforemd data. + if self.request_handler_index.is_none() && !self.allow_arbitrary_response { self.reset(); return Err(NetError::DeserializeError( "Unsolicited HTTP response".to_string(), @@ -1442,18 +1507,28 @@ impl ProtocolFamily for StacksHttp { num_read, ); - // we now know the content-length, so pass it into the parser. - let handler_index = - self.request_handler_index - .ok_or(NetError::DeserializeError( - "Unknown HTTP response handler".to_string(), - ))?; - - let parse_res = self.try_parse_response( - handler_index, - http_response_preamble, - &message_bytes[..], - ); + let parse_res = if self.allow_arbitrary_response { + let arbitrary_parser = RPCArbitraryResponseHandler {}; + let response_payload = arbitrary_parser + .try_parse_response(http_response_preamble, &message_bytes[..])?; + Ok(StacksHttpResponse::new( + http_response_preamble.clone(), + response_payload, + )) + } else { + // we now know the content-length, so pass it into the parser. + let handler_index = + self.request_handler_index + .ok_or(NetError::DeserializeError( + "Unknown HTTP response handler".to_string(), + ))?; + + self.try_parse_response( + handler_index, + http_response_preamble, + &message_bytes[..], + ) + }; // done parsing self.reset(); @@ -1538,6 +1613,32 @@ impl ProtocolFamily for StacksHttp { // message of known length test_debug!("read http response payload of {} bytes", buf.len(),); + if self.allow_arbitrary_response { + let arbitrary_parser = RPCArbitraryResponseHandler {}; + let response_payload = + arbitrary_parser.try_parse_response(http_response_preamble, buf)?; + if http_response_preamble.status_code >= 400 { + return Ok(( + StacksHttpMessage::Error( + "(client-given)".into(), + StacksHttpResponse::new( + http_response_preamble.clone(), + response_payload, + ), + ), + buf.len(), + )); + } else { + return Ok(( + StacksHttpMessage::Response(StacksHttpResponse::new( + http_response_preamble.clone(), + response_payload, + )), + buf.len(), + )); + } + } + // sanity check -- if we're receiving a response, then we must have earlier issued // a request. Thus, we must already know which response handler to use. // Otherwise, someone sent us malformed data. @@ -1576,27 +1677,36 @@ impl ProtocolFamily for StacksHttp { ) -> Result<(), NetError> { match *message { StacksHttpMessage::Request(ref req) => { - // client cannot send more than one request in parallel - if self.request_handler_index.is_some() { - test_debug!("Have pending request already"); - return Err(NetError::InProgress); - } + // the node cannot send more than one request in parallel, unless the client is + // directing it + let handler_index = if !self.allow_arbitrary_response { + if self.request_handler_index.is_some() { + test_debug!("Have pending request already"); + return Err(NetError::InProgress); + } - // find the response handler we'll use - let (decoded_path, _) = decode_request_path(&req.preamble().path_and_query_str)?; - let handler_index = self - .find_response_handler(&req.preamble().verb, &decoded_path) - .ok_or(NetError::SendError(format!( - "No response handler found for `{} {}`", - &req.preamble().verb, - &decoded_path - )))?; + // find the response handler we'll use + let (decoded_path, _) = + decode_request_path(&req.preamble().path_and_query_str)?; + let handler_index = self + .find_response_handler(&req.preamble().verb, &decoded_path) + .ok_or(NetError::SendError(format!( + "No response handler found for `{} {}`", + &req.preamble().verb, + &decoded_path + )))?; + handler_index + } else { + 0 + }; req.send(fd)?; // remember this so we'll know how to decode the response. // The next preamble and message we'll read _must be_ a response! - self.request_handler_index = Some(handler_index); + if !self.allow_arbitrary_response { + self.request_handler_index = Some(handler_index); + } Ok(()) } StacksHttpMessage::Response(ref resp) => resp.send(fd), From 626136e1de0b41f4496bd362a494caefb69ccb2d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:40:33 -0400 Subject: [PATCH 0865/1400] fix: async-h1, async-std, and http-types are only required for the prometheus feature --- testnet/stacks-node/Cargo.toml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 8708aea2ce..e11096fbf2 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -15,9 +15,6 @@ serde_json = { version = "1.0", features = ["arbitrary_precision", "raw_value"] stacks = { package = "stackslib", path = "../../stackslib" } stx-genesis = { path = "../../stx-genesis"} toml = "0.5.6" -async-h1 = "2.3.2" -async-std = { version = "1.6", features = ["attributes"] } -http-types = "2.12" base64 = "0.12.0" backtrace = "0.3.50" libc = "0.2.151" @@ -28,10 +25,13 @@ chrono = "0.4.19" regex = "1" libsigner = { path = "../../libsigner" } wsts = { workspace = true } +url = "2.1.0" rand = { workspace = true } rand_core = { workspace = true } hashbrown = { workspace = true } rusqlite = { workspace = true } +async-h1 = { version = "2.3.2", optional = true } +async-std = { version = "1.6", optional = true, features = ["attributes"] } [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} @@ -40,7 +40,7 @@ tikv-jemallocator = {workspace = true} ring = "0.16.19" warp = "0.3.5" tokio = "1.15" -reqwest = { version = "0.11", default_features = false, features = ["blocking", "json", "rustls", "rustls-tls"] } +reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls", "rustls-tls"] } clarity = { path = "../../clarity", features = ["default", "testing"]} stacks-common = { path = "../../stacks-common", features = ["default", "testing"] } stacks = { package = "stackslib", path = "../../stackslib", features = ["default", "testing"] } @@ -50,6 +50,7 @@ tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = {workspace = true} mutants = "0.0.3" tiny_http = "0.12.0" +http-types = "2.12" [[bin]] name = "stacks-node" @@ -60,7 +61,7 @@ name = "stacks-events" path = "src/stacks_events.rs" [features] -monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom"] +monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom", "dep:async-h1", "dep:async-std"] slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"] prod-genesis-chainstate = [] default = [] From 32cdefef032e5292f5a61f6d7624dcb07141ec56 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:41:03 -0400 Subject: [PATCH 0866/1400] feat: `send_request()` function for issuing synchronous HTTP requests using StacksHttp --- testnet/stacks-node/src/event_dispatcher.rs | 376 ++++++++++++++------ 1 file changed, 261 insertions(+), 115 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index f7371248c6..56bba0eadf 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -16,6 +16,7 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; +use std::io; use std::io::{Read, Write}; use std::net::{TcpStream, ToSocketAddrs}; use std::sync::mpsc::{channel, Receiver, Sender}; @@ -27,7 +28,6 @@ use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; -use http_types::Url; use serde_json::json; use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::burn::operations::BlockstackOperationType; @@ -55,13 +55,20 @@ use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; use stacks::net::atlas::{Attachment, AttachmentInstance}; +use stacks::net::connection::{ConnectionOptions, NetworkConnection}; +use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; +use stacks::net::httpcore::{StacksHttp, StacksHttpMessage, StacksHttpRequest, StacksHttpResponse}; use stacks::net::stackerdb::StackerDBEventDispatcher; +use stacks::net::Error as NetError; use stacks::util::hash::to_hex; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; +use url::Url; use super::config::{EventKeyType, EventObserverConfig}; @@ -311,103 +318,212 @@ impl RewardSetEventPayload { } } -fn send_request( +/// Convert a NetError into an io::Error if appropriate. +fn handle_net_error(e: NetError, msg: &str) -> io::Error { + if let NetError::ReadError(ioe) = e { + ioe + } else if let NetError::WriteError(ioe) = e { + ioe + } else if let NetError::RecvTimeout = e { + io::Error::new(io::ErrorKind::WouldBlock, "recv timeout") + } else { + io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()) + } +} + +/// Send an HTTP request to the given host:port. Returns the decoded response. +/// Interanlly, this creates a socket, connects it, sends the HTTP request, and decodes the HTTP +/// response. It is a blocking operation. +/// +/// If the request encounters a network error, then return an error. Don't retry. +/// If the request times out after `timeout`, then return an error. +pub fn send_request( host: &str, port: u16, - body: &[u8], - url: &Url, + request: StacksHttpRequest, timeout: Duration, -) -> Result { - let addr = format!("{host}:{port}") - .to_socket_addrs()? - .next() - .ok_or_else(|| { - std::io::Error::new(std::io::ErrorKind::NotFound, "No valid address found") - })?; - let mut stream = TcpStream::connect_timeout(&addr, timeout)?; +) -> Result { + // Find the host:port that works. + // This is sometimes necessary because `localhost` can resolve to both its ipv4 and ipv6 + // addresses, but usually, Stacks services like event observers are only bound to ipv4 + // addresses. So, be sure to use an address that will lead to a socket connection! + let mut stream_and_addr = None; + let mut last_err = None; + for addr in format!("{host}:{port}").to_socket_addrs()? { + debug!("send_request: connect to {}", &addr); + match TcpStream::connect_timeout(&addr, timeout) { + Ok(sock) => { + stream_and_addr = Some((sock, addr)); + break; + } + Err(e) => { + last_err = Some(e); + } + } + } + + let Some((mut stream, addr)) = stream_and_addr else { + return Err(last_err.unwrap_or(io::Error::new( + io::ErrorKind::Other, + "Unable to connect to {host}:{port}", + ))); + }; + stream.set_read_timeout(Some(timeout))?; stream.set_write_timeout(Some(timeout))?; + stream.set_nodelay(true)?; + + let start = Instant::now(); + + debug!("send_request: Sending request"; "request" => %request.request_path()); + + // Some explanation of what's going on here is in order. + // + // The networking stack in Stacks is designed to operate on non-blocking sockets, and + // furthermore, it operates in a way that the call site in which a network request is issued can + // be in a wholly separate stack (or thread) from the connection. While this is absolutely necessary + // within the Stacks node, using it to issue a single blocking request imposes a lot of + // overhead. + // + // First, we will create the network connection and give it a ProtocolFamily implementation + // (StacksHttp), which gets used by the connection to encode and deocde messages. + // + // Second, we'll create a _handle_ to the network connection into which we will write requests + // and read responses. The connection itself is an opaque black box that, internally, + // implements a state machine around the ProtocolFamily implementation to incrementally read + // ProtocolFamily messages from a Read, and write them to a Write. The Read + Write is + // (usually) a non-blocking socket; the network connection deals with EWOULDBLOCK internally, + // as well as underfull socket buffers. + // + // Third, we need to _drive_ data to the socket. We have to repeatedly (1) flush the network + // handle (which contains the buffered bytes from the message to be fed into the socket), and + // (2) drive bytes from the handle into the socket iself via the network connection. This is a + // two-step process mainly because the handle is expected to live in a separate stack (or even + // a separate thread). + // + // Fourth, we need to _drive_ data from the socket. We have to repeatedly (1) pull data from + // the socket into the network connection, and (2) drive parsed messages from the connection to + // the handle. Then, the call site that owns the handle simply polls the handle for new + // messages. Once we have received a message, we can proceed to handle it. + // + // Finally, we deal with the kind of HTTP message we got. If it's an error response, we convert + // it into an error. If it's a request (i.e. not a response), we also return an error. We + // only return the message if it was a well-formed non-error HTTP response. + + // Step 1-2: set up the connection and request handle + // NOTE: we don't need anything special for connection options, so just use the default + let conn_opts = ConnectionOptions::default(); + let http = StacksHttp::new_client(addr, &conn_opts); + let mut connection = NetworkConnection::new(http, &conn_opts, None); + let mut request_handle = connection + .make_request_handle(0, get_epoch_time_secs() + timeout.as_secs(), 0) + .map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("Failed to create request handle: {:?}", &e).as_str(), + ) + })?; - let request = format!( - "POST {} HTTP/1.1\r\n\ - Host: {}\r\n\ - Content-Type: application/json\r\n\ - Content-Length: {}\r\n\ - Connection: close\r\n\ - \r\n", - url.path(), - host, - body.len(), - ); - debug!("Event dispatcher: Sending request"; "request" => &request); - - stream.write_all(request.as_bytes())?; - stream.write_all(body)?; - stream.flush()?; - debug!("Event dispatcher: Request sent"); - - let mut response = Vec::new(); - let mut buffer = [0; 512]; - let mut headers_parsed = false; - let mut content_length = None; - let mut total_read = 0; - - let start_time = Instant::now(); - - while total_read < content_length.unwrap_or(usize::MAX) { - if start_time.elapsed() >= timeout { - return Err(std::io::Error::new( - std::io::ErrorKind::TimedOut, - "Response reading timed out", - )); - } + // Step 3: load up the request with the message we're gonna send, and iteratively dump its + // bytes from the handle into the socket (the connection does internall buffering and + // bookkeeping to deal with the cases where we fail to fill the socket buffer, or we can't send + // anymore because the socket buffer is currently full). + request + .send(&mut request_handle) + .map_err(|e| handle_net_error(e, "Failed to serialize request body"))?; + + debug!("send_request(sending data)"); + loop { + let flushed = request_handle + .try_flush() + .map_err(|e| handle_net_error(e, "Failed to flush request body"))?; + + // send it out + let num_sent = connection + .send_data(&mut stream) + .map_err(|e| handle_net_error(e, "Failed to send socket data"))?; - let bytes_read = stream.read(&mut buffer)?; - if bytes_read == 0 { - // Connection closed + debug!( + "send_request(sending data): flushed = {}, num_sent = {}", + flushed, num_sent + ); + if flushed && num_sent == 0 { break; } + + if Instant::now().saturating_duration_since(start) > timeout { + return Err(io::Error::new(io::ErrorKind::WouldBlock, "Timed out while receiving request")); + } + } - response.extend_from_slice(&buffer[..bytes_read]); - - // Parse headers if not already done - if !headers_parsed { - if let Some(headers_end) = response.windows(4).position(|window| window == b"\r\n\r\n") - { - headers_parsed = true; - // Parse Content-Length header - let headers = &response[..headers_end]; - let headers_str = String::from_utf8_lossy(headers); - if let Some(content_length_line) = headers_str - .lines() - .find(|line| line.to_lowercase().starts_with("content-length:")) - { - let length_str = content_length_line - .split(":") - .nth(1) - // This is safe because we already know the line starts with "Content-Length:" - .expect("unreachable"); - match length_str.trim().parse::() { - Ok(len) => content_length = Some(len), - Err(_) => { - return Err(std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Invalid Content-Length header", - )) - } - } - } - total_read = response[headers_end + 4..].len(); + // Step 4: pull bytes from the socket back into the handle, and see if the connection decoded + // and dispatched any new messages to the request handle. If so, then extract the message and + // check that it's a well-formed HTTP response. + debug!("send_request(receiving data)"); + let response; + loop { + // get back the reply + debug!("send_request(receiving data): try to receive data"); + match connection.recv_data(&mut stream) { + Ok(nr) => { + debug!("send_request(receiving data): received {} bytes", nr); + } + Err(e) => { + return Err(handle_net_error(e, "Failed to receive socket data")); } - } else { - total_read += bytes_read; + } + + // fullfill the request -- send it to its corresponding handle + debug!("send_request(receiving data): drain inbox"); + connection.drain_inbox(); + + // see of we got a message that was fulfilled in our handle + debug!("send_request(receiving data): try receive response"); + let rh = match request_handle.try_recv() { + Ok(resp) => { + response = resp; + break; + } + Err(e) => match e { + Ok(handle) => handle, + Err(e) => { + return Err(handle_net_error( + e, + "Failed to receive message after socket has been drained", + )); + } + }, + }; + request_handle = rh; + + if Instant::now().saturating_duration_since(start) > timeout { + return Err(io::Error::new(io::ErrorKind::WouldBlock, "Timed out while receiving request")); } } - let response_str = String::from_utf8_lossy(&response).to_string(); - debug!("Event dispatcher: Response received"; "response" => &response_str); + // Step 5: decode the HTTP message and return it if it's not an error. + let response_data = match response { + StacksHttpMessage::Response(response_data) => response_data, + StacksHttpMessage::Error(path, response) => { + return Err(io::Error::new( + io::ErrorKind::Other, + format!( + "Request did not succeed ({} != 200). Path: '{}'", + response.preamble().status_code, + &path + ) + .as_str(), + )); + } + _ => { + return Err(io::Error::new( + io::ErrorKind::Other, + "Did not receive an HTTP response", + )); + } + }; - Ok(response_str) + Ok(response_data) } impl EventObserver { @@ -416,14 +532,6 @@ impl EventObserver { "Event dispatcher: Sending payload"; "url" => %path, "payload" => ?payload ); - let body = match serde_json::to_vec(payload) { - Ok(body) => body, - Err(err) => { - error!("Event dispatcher: serialization failed - {:?}", err); - return; - } - }; - let url = { let joined_components = if path.starts_with('/') { format!("{}{}", &self.endpoint, path) @@ -437,25 +545,40 @@ impl EventObserver { let host = url.host_str().expect("Invalid URL: missing host"); let port = url.port_or_known_default().unwrap_or(80); + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or(PeerHost::DNS(host.to_string(), port)); let backoff = Duration::from_millis(1000); // 1 second loop { - match send_request(host, port, &body, &url, backoff) { + let mut request = StacksHttpRequest::new_for_peer( + peerhost.clone(), + "POST".into(), + url.path().into(), + HttpRequestContents::new().payload_json(payload.clone()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + + match send_request(host, port, request, backoff) { Ok(response) => { - if response.starts_with("HTTP/1.1 200") { + if response.preamble().status_code == 200 { debug!( "Event dispatcher: Successful POST"; "url" => %url ); break; } else { error!( - "Event dispatcher: Failed POST"; "url" => %url, "response" => ?response + "Event dispatcher: Failed POST"; "url" => %url, "response" => ?response.preamble() ); } } Err(err) => { - warn!("Event dispatcher: connection or request failed - {:?}", err); + warn!( + "Event dispatcher: connection or request failed to {}:{} - {:?}", + &host, &port, err + ); } } sleep(backoff); @@ -1571,6 +1694,7 @@ mod test { use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksHeaderInfo}; use stacks::chainstate::stacks::events::StacksBlockEventData; use stacks::chainstate::stacks::StacksBlock; + use stacks::net::httpcore::StacksHttpResponse; use stacks::types::chainstate::BlockHeaderHash; use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; @@ -1579,6 +1703,22 @@ mod test { use super::*; + fn json_body(host: &str, port: u16, path: &str, json_bytes: &[u8]) -> StacksHttpRequest { + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or(PeerHost::DNS(host.to_string(), port)); + let mut request = StacksHttpRequest::new_for_peer( + peerhost, + "POST".into(), + path.into(), + HttpRequestContents::new().payload_json(serde_json::from_slice(json_bytes).unwrap()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + + request + } + #[test] fn build_block_processed_event() { let observer = EventObserver { @@ -1710,8 +1850,7 @@ mod test { let result = send_request( "10.255.255.1", // Non-routable IP for timeout 80, // HTTP port - b"{}", // Example empty JSON body - &Url::parse("http://10.255.255.1/").expect("Failed to parse URL"), + json_body("10.255.255.1", 80, "/", b"{}"), timeout_duration, ); @@ -1762,8 +1901,7 @@ mod test { let result = send_request( "127.0.0.1", addr.port(), - b"{}", - &Url::parse("http://127.0.0.1/").unwrap(), + json_body("127.0.0.1", 80, "/", b"{}"), connection_timeout, ); @@ -1814,11 +1952,14 @@ mod test { client_done_signal .recv() .expect("Failed to receive client done signal"); + + // Explicitly drop the stream after signaling to ensure the client finishes + // NOTE: this will cause the test to slow down, since `send_request` expects + // `Connection: close` + drop(stream); debug!("Mock server closing connection"); - // Explicitly drop the stream after signaling to ensure the client finishes - drop(stream); break; // Close after the first request } }); @@ -1827,13 +1968,16 @@ mod test { format!("{}:{}", addr.ip(), addr.port()) } - fn parse_http_response(response: &str) -> &str { - let parts: Vec<&str> = response.split("\r\n\r\n").collect(); - if parts.len() == 2 { - parts[1] // The body is after the second \r\n\r\n - } else { - "" - } + fn parse_http_response(response: StacksHttpResponse) -> String { + let response_txt = match response.destruct().1 { + HttpResponsePayload::Text(s) => s, + HttpResponsePayload::Empty => "".to_string(), + HttpResponsePayload::JSON(js) => serde_json::to_string(&js).unwrap(), + HttpResponsePayload::Bytes(bytes) => { + String::from_utf8_lossy(bytes.as_slice()).to_string() + } + }; + response_txt } #[test] @@ -1846,21 +1990,23 @@ mod test { let server_addr = start_mock_server(mock_response, rx_client_done); let timeout_duration = Duration::from_secs(5); + let host = server_addr.split(':').collect::>()[0]; // Host part + let port = server_addr.split(':').collect::>()[1] + .parse() + .unwrap(); // Port part + // Attempt to send a request to the mock server let result = send_request( - &server_addr.split(':').collect::>()[0], // Host part - server_addr.split(':').collect::>()[1] - .parse() - .unwrap(), // Port part - b"{}", // Example JSON body - &Url::parse(&format!("http://{}/", server_addr)).expect("Failed to parse URL"), + host, + port, + json_body(host, port, "/", b"{}"), timeout_duration, ); debug!("Got result: {:?}", result); // Ensure the server only closes after the client has finished processing if let Ok(response) = &result { - let body = parse_http_response(response); + let body = parse_http_response(response.clone()); assert_eq!(body, "Hello, world!", "Unexpected response body: {}", body); } From 184403396ba104d1589d2f4bee1f1224409dfe57 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:41:46 -0400 Subject: [PATCH 0867/1400] feat: drop async_h1 in favor of send_request() --- .../burnchains/bitcoin_regtest_controller.rs | 129 ++++++++---------- 1 file changed, 55 insertions(+), 74 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 39ef40490b..19c948bde9 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1,14 +1,11 @@ -use std::cmp; +use std::convert::From; use std::io::Cursor; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; -use std::time::Instant; +use std::time::{Duration, Instant}; +use std::{cmp, io}; -use async_h1::client; -use async_std::io::ReadExt; -use async_std::net::TcpStream; use base64::encode; -use http_types::{Method, Request, Url}; use serde::Serialize; use serde_json::json; use serde_json::value::RawValue; @@ -38,6 +35,9 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::stacks::address::PoxAddress; use stacks::core::{StacksEpoch, StacksEpochId}; use stacks::monitoring::{increment_btc_blocks_received_counter, increment_btc_ops_sent_counter}; +use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; +use stacks::net::httpcore::StacksHttpRequest; +use stacks::net::Error as NetError; use stacks_common::codec::StacksMessageCodec; use stacks_common::deps_common::bitcoin::blockdata::opcodes; use stacks_common::deps_common::bitcoin::blockdata::script::{Builder, Script}; @@ -50,9 +50,11 @@ use stacks_common::deps_common::bitcoin::network::serialize::deserialize as btc_ use stacks_common::deps_common::bitcoin::network::serialize::RawEncoder; use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::net::PeerHost; use stacks_common::util::hash::{hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; +use url::Url; use super::super::operations::BurnchainOpSigner; use super::super::Config; @@ -62,6 +64,7 @@ use crate::config::{ OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, }; +use crate::event_dispatcher::send_request; /// The number of bitcoin blocks that can have /// passed since the UTXO cache was last refreshed before @@ -2396,8 +2399,20 @@ pub enum RPCError { type RPCResult = Result; +impl From for RPCError { + fn from(ioe: io::Error) -> Self { + Self::Network(format!("IO Error: {:?}", &ioe)) + } +} + +impl From for RPCError { + fn from(ne: NetError) -> Self { + Self::Network(format!("Net Error: {:?}", &ne)) + } +} + impl BitcoinRPCRequest { - fn build_rpc_request(config: &Config, payload: &BitcoinRPCRequest) -> Request { + fn build_rpc_request(config: &Config, payload: &BitcoinRPCRequest) -> StacksHttpRequest { let url = { // some methods require a wallet ID let wallet_id = match payload.method.as_str() { @@ -2412,16 +2427,35 @@ impl BitcoinRPCRequest { &payload.method, &config.burnchain.username, &config.burnchain.password, &url ); - let mut req = Request::new(Method::Post, url); + let host = url + .host_str() + .expect("Invalid bitcoin RPC URL: missing host"); + let port = url.port_or_known_default().unwrap_or(8333); + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or_else(|_| panic!("FATAL: could not parse URL into PeerHost")); + + let mut request = StacksHttpRequest::new_for_peer( + peerhost, + "POST".into(), + url.path().into(), + HttpRequestContents::new().payload_json( + serde_json::to_value(payload).unwrap_or_else(|_| { + panic!("FATAL: failed to encode Bitcoin RPC request as JSON") + }), + ), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); match (&config.burnchain.username, &config.burnchain.password) { (Some(username), Some(password)) => { let auth_token = format!("Basic {}", encode(format!("{}:{}", username, password))); - req.append_header("Authorization", auth_token); + request.add_header("Authorization".into(), auth_token); } (_, _) => {} }; - req + request } #[cfg(test)] @@ -2506,10 +2540,10 @@ impl BitcoinRPCRequest { .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; let bhh = BurnchainHeaderHash::from_hex(&bhh) .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - Ok(bhh) + bhh } _ => return Err(RPCError::Parsing("Failed to get UTXOs".to_string())), - }?; + }; let min_conf = 0i64; let max_conf = 9999999i64; @@ -2731,71 +2765,18 @@ impl BitcoinRPCRequest { } fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { - let mut request = BitcoinRPCRequest::build_rpc_request(&config, &payload); - - let body = match serde_json::to_vec(&json!(payload)) { - Ok(body) => body, - Err(err) => { - return Err(RPCError::Network(format!("RPC Error: {}", err))); - } - }; + let request = BitcoinRPCRequest::build_rpc_request(&config, &payload); + let timeout = Duration::from_secs(60); - request.append_header("Content-Type", "application/json"); - request.set_body(body); - - let mut response = async_std::task::block_on(async move { - let stream = match TcpStream::connect(config.burnchain.get_rpc_socket_addr()).await { - Ok(stream) => stream, - Err(err) => { - return Err(RPCError::Network(format!( - "Bitcoin RPC: connection failed - {:?}", - err - ))) - } - }; + let host = request.preamble().host.hostname(); + let port = request.preamble().host.port(); - match client::connect(stream, request).await { - Ok(response) => Ok(response), - Err(err) => { - return Err(RPCError::Network(format!( - "Bitcoin RPC: invoking procedure failed - {:?}", - err - ))) - } - } - })?; - - let status = response.status(); - - let (res, buffer) = async_std::task::block_on(async move { - let mut buffer = Vec::new(); - let mut body = response.take_body(); - let res = body.read_to_end(&mut buffer).await; - (res, buffer) - }); - - if !status.is_success() { - return Err(RPCError::Network(format!( - "Bitcoin RPC: status({}) != success, body is '{:?}'", - status, - match serde_json::from_slice::(&buffer[..]) { - Ok(v) => v, - Err(_e) => serde_json::from_str("\"(unparseable)\"") - .expect("Failed to parse JSON literal"), - } - ))); - } - - if res.is_err() { - return Err(RPCError::Network(format!( - "Bitcoin RPC: unable to read body - {:?}", - res - ))); + let response = send_request(&host, port, request, timeout)?; + if let HttpResponsePayload::JSON(js) = response.destruct().1 { + return Ok(js); + } else { + return Err(RPCError::Parsing("Did not get a JSON response".into())); } - - let payload = serde_json::from_slice::(&buffer[..]) - .map_err(|e| RPCError::Parsing(format!("Bitcoin RPC: {}", e)))?; - Ok(payload) } } From adb750334996946af5c6ffe13d0a128d98ac6cf5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 22 Aug 2024 22:43:27 -0400 Subject: [PATCH 0868/1400] chore: cargo fmt --- testnet/stacks-node/src/event_dispatcher.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 56bba0eadf..d3243e8cd4 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -394,7 +394,7 @@ pub fn send_request( // ProtocolFamily messages from a Read, and write them to a Write. The Read + Write is // (usually) a non-blocking socket; the network connection deals with EWOULDBLOCK internally, // as well as underfull socket buffers. - // + // // Third, we need to _drive_ data to the socket. We have to repeatedly (1) flush the network // handle (which contains the buffered bytes from the message to be fed into the socket), and // (2) drive bytes from the handle into the socket iself via the network connection. This is a @@ -450,9 +450,12 @@ pub fn send_request( if flushed && num_sent == 0 { break; } - + if Instant::now().saturating_duration_since(start) > timeout { - return Err(io::Error::new(io::ErrorKind::WouldBlock, "Timed out while receiving request")); + return Err(io::Error::new( + io::ErrorKind::WouldBlock, + "Timed out while receiving request", + )); } } @@ -497,7 +500,10 @@ pub fn send_request( request_handle = rh; if Instant::now().saturating_duration_since(start) > timeout { - return Err(io::Error::new(io::ErrorKind::WouldBlock, "Timed out while receiving request")); + return Err(io::Error::new( + io::ErrorKind::WouldBlock, + "Timed out while receiving request", + )); } } @@ -1952,7 +1958,7 @@ mod test { client_done_signal .recv() .expect("Failed to receive client done signal"); - + // Explicitly drop the stream after signaling to ensure the client finishes // NOTE: this will cause the test to slow down, since `send_request` expects // `Connection: close` From a29ff4266c573484b906c48c8a02097fddb0b0b5 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 22 Aug 2024 23:57:27 -0400 Subject: [PATCH 0869/1400] chore: add info to error for debugging --- stackslib/src/net/http/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/http/mod.rs b/stackslib/src/net/http/mod.rs index 33935fdb04..17bf1d4978 100644 --- a/stackslib/src/net/http/mod.rs +++ b/stackslib/src/net/http/mod.rs @@ -183,9 +183,9 @@ impl FromStr for HttpContentType { } else if s == "application/json" { Ok(HttpContentType::JSON) } else { - Err(CodecError::DeserializeError( - "Unsupported HTTP content type".to_string(), - )) + Err(CodecError::DeserializeError(format!( + "Unsupported HTTP content type: {header}" + ))) } } } From 98b1b3f9e9bef039b759ca866a90d75e614d2d9c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 23 Aug 2024 00:07:51 -0400 Subject: [PATCH 0870/1400] chore: minor changes from code review --- testnet/stacks-node/src/event_dispatcher.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index d3243e8cd4..c2d8d3c2c3 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -332,7 +332,7 @@ fn handle_net_error(e: NetError, msg: &str) -> io::Error { } /// Send an HTTP request to the given host:port. Returns the decoded response. -/// Interanlly, this creates a socket, connects it, sends the HTTP request, and decodes the HTTP +/// Internally, this creates a socket, connects it, sends the HTTP request, and decodes the HTTP /// response. It is a blocking operation. /// /// If the request encounters a network error, then return an error. Don't retry. @@ -425,7 +425,7 @@ pub fn send_request( })?; // Step 3: load up the request with the message we're gonna send, and iteratively dump its - // bytes from the handle into the socket (the connection does internall buffering and + // bytes from the handle into the socket (the connection does internal buffering and // bookkeeping to deal with the cases where we fail to fill the socket buffer, or we can't send // anymore because the socket buffer is currently full). request @@ -480,7 +480,7 @@ pub fn send_request( debug!("send_request(receiving data): drain inbox"); connection.drain_inbox(); - // see of we got a message that was fulfilled in our handle + // see if we got a message that was fulfilled in our handle debug!("send_request(receiving data): try receive response"); let rh = match request_handle.try_recv() { Ok(resp) => { @@ -1928,7 +1928,7 @@ mod test { } } - fn start_mock_server(response: &str, client_done_signal: Receiver<()>) -> String { + fn start_mock_server(response: String, client_done_signal: Receiver<()>) -> String { // Bind to an available port on localhost let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind server"); let addr = listener.local_addr().unwrap(); @@ -1936,7 +1936,6 @@ mod test { debug!("Mock server listening on {}", addr); // Start the server in a new thread - let response = response.to_string(); thread::spawn(move || { for stream in listener.incoming() { debug!("Mock server accepted connection"); @@ -1993,13 +1992,12 @@ mod test { // Create a channel to signal when the client is done reading let (tx_client_done, rx_client_done) = channel(); - let server_addr = start_mock_server(mock_response, rx_client_done); + let server_addr = start_mock_server(mock_response.to_string(), rx_client_done); let timeout_duration = Duration::from_secs(5); - let host = server_addr.split(':').collect::>()[0]; // Host part - let port = server_addr.split(':').collect::>()[1] - .parse() - .unwrap(); // Port part + let parts = server_addr.split(':').collect::>(); + let host = parts[0]; + let port = parts[1].parse().unwrap(); // Attempt to send a request to the mock server let result = send_request( From 67a9d791223bf1586ba6637ee297847130145a17 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 23 Aug 2024 00:12:41 -0400 Subject: [PATCH 0871/1400] fix: add support for extended content types For example, `application/json; charset=utf-8` was not handled. --- stackslib/src/net/http/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/http/mod.rs b/stackslib/src/net/http/mod.rs index 17bf1d4978..ca7a97c5be 100644 --- a/stackslib/src/net/http/mod.rs +++ b/stackslib/src/net/http/mod.rs @@ -180,7 +180,7 @@ impl FromStr for HttpContentType { Ok(HttpContentType::Bytes) } else if s == "text/plain" || s.starts_with("text/plain;") { Ok(HttpContentType::Text) - } else if s == "application/json" { + } else if s == "application/json" || s.starts_with("application/json;") { Ok(HttpContentType::JSON) } else { Err(CodecError::DeserializeError(format!( From 0d1ed2873a6dd9818cf3250c6bbbb54658b53e7c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 23 Aug 2024 00:34:11 -0400 Subject: [PATCH 0872/1400] fix: prometheus build --- testnet/stacks-node/Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index e11096fbf2..5128f17f03 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -32,6 +32,7 @@ hashbrown = { workspace = true } rusqlite = { workspace = true } async-h1 = { version = "2.3.2", optional = true } async-std = { version = "1.6", optional = true, features = ["attributes"] } +http-types = { version = "2.12", optional = true } [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} @@ -61,7 +62,7 @@ name = "stacks-events" path = "src/stacks_events.rs" [features] -monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom", "dep:async-h1", "dep:async-std"] +monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom", "async-h1", "async-std", "http-types"] slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"] prod-genesis-chainstate = [] default = [] From 31ee50db96628d466826c91a5fd33e315f490f56 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 10:33:14 -0400 Subject: [PATCH 0873/1400] fix: remove client-only code that didn't compile and wasn't necessary --- stackslib/src/net/httpcore.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 39ce1e64ac..f916a19cc1 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1356,16 +1356,6 @@ impl StacksHttp { &ConnectionOptions::default(), ); - if !self.allow_arbitrary_response { - let response_handler_index = - http.find_response_handler(verb, request_path) - .ok_or(NetError::SendError(format!( - "No such handler for '{} {}'", - verb, request_path - )))?; - http.request_handler_index = Some(response_handler_index); - } - let (preamble, message_offset) = http.read_preamble(response_buf)?; let is_chunked = match preamble { StacksHttpPreamble::Response(ref resp) => resp.is_chunked(), From 68f97c5df1aa0cb94244b21580df20c384d1e8bd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 11:23:33 -0400 Subject: [PATCH 0874/1400] refactor: put send_request into httpcore --- stackslib/src/net/httpcore.rs | 221 +++++++++- stackslib/src/net/tests/httpcore.rs | 170 +++++++- .../burnchains/bitcoin_regtest_controller.rs | 5 +- testnet/stacks-node/src/event_dispatcher.rs | 407 +----------------- 4 files changed, 403 insertions(+), 400 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index f916a19cc1..b173925dc8 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -17,7 +17,8 @@ /// This module binds the http library to Stacks as a `ProtocolFamily` implementation use std::collections::{BTreeMap, HashMap}; use std::io::{Read, Write}; -use std::net::SocketAddr; +use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; +use std::time::{Duration, Instant}; use std::{fmt, io, mem}; use clarity::vm::costs::ExecutionCost; @@ -32,8 +33,8 @@ use stacks_common::types::chainstate::{ use stacks_common::types::net::PeerHost; use stacks_common::types::Address; use stacks_common::util::chunked_encoding::*; -use stacks_common::util::get_epoch_time_ms; use stacks_common::util::retry::{BoundReader, RetryReader}; +use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use url::Url; use super::rpc::ConversationHttp; @@ -43,7 +44,7 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::core::{MemPoolDB, StacksEpoch}; -use crate::net::connection::ConnectionOptions; +use crate::net::connection::{ConnectionOptions, NetworkConnection}; use crate::net::http::common::{parse_raw_bytes, HTTP_PREAMBLE_MAX_ENCODED_SIZE}; use crate::net::http::{ http_reason, parse_bytes, parse_json, Error as HttpError, HttpBadRequest, HttpContentType, @@ -1764,3 +1765,217 @@ pub fn decode_request_path(path: &str) -> Result<(String, String), NetError> { query_str.unwrap_or("").to_string(), )) } + +/// Convert a NetError into an io::Error if appropriate. +fn handle_net_error(e: NetError, msg: &str) -> io::Error { + if let NetError::ReadError(ioe) = e { + ioe + } else if let NetError::WriteError(ioe) = e { + ioe + } else if let NetError::RecvTimeout = e { + io::Error::new(io::ErrorKind::WouldBlock, "recv timeout") + } else { + io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()) + } +} + +/// Send an HTTP request to the given host:port. Returns the decoded response. +/// Internally, this creates a socket, connects it, sends the HTTP request, and decodes the HTTP +/// response. It is a blocking operation. +/// +/// If the request encounters a network error, then return an error. Don't retry. +/// If the request times out after `timeout`, then return an error. +pub fn send_http_request( + host: &str, + port: u16, + request: StacksHttpRequest, + timeout: Duration, +) -> Result { + // Find the host:port that works. + // This is sometimes necessary because `localhost` can resolve to both its ipv4 and ipv6 + // addresses, but usually, Stacks services like event observers are only bound to ipv4 + // addresses. So, be sure to use an address that will lead to a socket connection! + let mut stream_and_addr = None; + let mut last_err = None; + for addr in format!("{host}:{port}").to_socket_addrs()? { + debug!("send_request: connect to {}", &addr); + match TcpStream::connect_timeout(&addr, timeout) { + Ok(sock) => { + stream_and_addr = Some((sock, addr)); + break; + } + Err(e) => { + last_err = Some(e); + } + } + } + + let Some((mut stream, addr)) = stream_and_addr else { + return Err(last_err.unwrap_or(io::Error::new( + io::ErrorKind::Other, + "Unable to connect to {host}:{port}", + ))); + }; + + stream.set_read_timeout(Some(timeout))?; + stream.set_write_timeout(Some(timeout))?; + stream.set_nodelay(true)?; + + let start = Instant::now(); + + debug!("send_request: Sending request"; "request" => %request.request_path()); + + // Some explanation of what's going on here is in order. + // + // The networking stack in Stacks is designed to operate on non-blocking sockets, and + // furthermore, it operates in a way that the call site in which a network request is issued can + // be in a wholly separate stack (or thread) from the connection. While this is absolutely necessary + // within the Stacks node, using it to issue a single blocking request imposes a lot of + // overhead. + // + // First, we will create the network connection and give it a ProtocolFamily implementation + // (StacksHttp), which gets used by the connection to encode and deocde messages. + // + // Second, we'll create a _handle_ to the network connection into which we will write requests + // and read responses. The connection itself is an opaque black box that, internally, + // implements a state machine around the ProtocolFamily implementation to incrementally read + // ProtocolFamily messages from a Read, and write them to a Write. The Read + Write is + // (usually) a non-blocking socket; the network connection deals with EWOULDBLOCK internally, + // as well as underfull socket buffers. + // + // Third, we need to _drive_ data to the socket. We have to repeatedly (1) flush the network + // handle (which contains the buffered bytes from the message to be fed into the socket), and + // (2) drive bytes from the handle into the socket iself via the network connection. This is a + // two-step process mainly because the handle is expected to live in a separate stack (or even + // a separate thread). + // + // Fourth, we need to _drive_ data from the socket. We have to repeatedly (1) pull data from + // the socket into the network connection, and (2) drive parsed messages from the connection to + // the handle. Then, the call site that owns the handle simply polls the handle for new + // messages. Once we have received a message, we can proceed to handle it. + // + // Finally, we deal with the kind of HTTP message we got. If it's an error response, we convert + // it into an error. If it's a request (i.e. not a response), we also return an error. We + // only return the message if it was a well-formed non-error HTTP response. + + // Step 1-2: set up the connection and request handle + // NOTE: we don't need anything special for connection options, so just use the default + let conn_opts = ConnectionOptions::default(); + let http = StacksHttp::new_client(addr, &conn_opts); + let mut connection = NetworkConnection::new(http, &conn_opts, None); + let mut request_handle = connection + .make_request_handle(0, get_epoch_time_secs() + timeout.as_secs(), 0) + .map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("Failed to create request handle: {:?}", &e).as_str(), + ) + })?; + + // Step 3: load up the request with the message we're gonna send, and iteratively dump its + // bytes from the handle into the socket (the connection does internal buffering and + // bookkeeping to deal with the cases where we fail to fill the socket buffer, or we can't send + // anymore because the socket buffer is currently full). + request + .send(&mut request_handle) + .map_err(|e| handle_net_error(e, "Failed to serialize request body"))?; + + debug!("send_request(sending data)"); + loop { + let flushed = request_handle + .try_flush() + .map_err(|e| handle_net_error(e, "Failed to flush request body"))?; + + // send it out + let num_sent = connection + .send_data(&mut stream) + .map_err(|e| handle_net_error(e, "Failed to send socket data"))?; + + debug!( + "send_request(sending data): flushed = {}, num_sent = {}", + flushed, num_sent + ); + if flushed && num_sent == 0 { + break; + } + + if Instant::now().saturating_duration_since(start) > timeout { + return Err(io::Error::new( + io::ErrorKind::WouldBlock, + "Timed out while receiving request", + )); + } + } + + // Step 4: pull bytes from the socket back into the handle, and see if the connection decoded + // and dispatched any new messages to the request handle. If so, then extract the message and + // check that it's a well-formed HTTP response. + debug!("send_request(receiving data)"); + let response; + loop { + // get back the reply + debug!("send_request(receiving data): try to receive data"); + match connection.recv_data(&mut stream) { + Ok(nr) => { + debug!("send_request(receiving data): received {} bytes", nr); + } + Err(e) => { + return Err(handle_net_error(e, "Failed to receive socket data")); + } + } + + // fullfill the request -- send it to its corresponding handle + debug!("send_request(receiving data): drain inbox"); + connection.drain_inbox(); + + // see if we got a message that was fulfilled in our handle + debug!("send_request(receiving data): try receive response"); + let rh = match request_handle.try_recv() { + Ok(resp) => { + response = resp; + break; + } + Err(e) => match e { + Ok(handle) => handle, + Err(e) => { + return Err(handle_net_error( + e, + "Failed to receive message after socket has been drained", + )); + } + }, + }; + request_handle = rh; + + if Instant::now().saturating_duration_since(start) > timeout { + return Err(io::Error::new( + io::ErrorKind::WouldBlock, + "Timed out while receiving request", + )); + } + } + + // Step 5: decode the HTTP message and return it if it's not an error. + let response_data = match response { + StacksHttpMessage::Response(response_data) => response_data, + StacksHttpMessage::Error(path, response) => { + return Err(io::Error::new( + io::ErrorKind::Other, + format!( + "Request did not succeed ({} != 200). Path: '{}'", + response.preamble().status_code, + &path + ) + .as_str(), + )); + } + _ => { + return Err(io::Error::new( + io::ErrorKind::Other, + "Did not receive an HTTP response", + )); + } + }; + + Ok(response_data) +} diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs index 1837d8e1c4..d9e13883a1 100644 --- a/stackslib/src/net/tests/httpcore.rs +++ b/stackslib/src/net/tests/httpcore.rs @@ -14,9 +14,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::io::Write; -use std::net::{SocketAddr, ToSocketAddrs}; -use std::str; +use std::io::{Read, Write}; +use std::net::{SocketAddr, TcpListener, TcpStream, ToSocketAddrs}; +use std::sync::mpsc::{channel, Receiver}; +use std::time::{Duration, Instant}; +use std::{str, thread}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey}; @@ -38,12 +40,12 @@ use crate::net::api::getneighbors::{RPCNeighbor, RPCNeighborsInfo}; use crate::net::connection::ConnectionOptions; use crate::net::http::{ http_error_from_code_and_text, http_reason, HttpContentType, HttpErrorResponse, - HttpRequestContents, HttpRequestPreamble, HttpReservedHeader, HttpResponsePreamble, - HttpVersion, HTTP_PREAMBLE_MAX_NUM_HEADERS, + HttpRequestContents, HttpRequestPreamble, HttpReservedHeader, HttpResponsePayload, + HttpResponsePreamble, HttpVersion, HTTP_PREAMBLE_MAX_NUM_HEADERS, }; use crate::net::httpcore::{ - HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpMessage, - StacksHttpPreamble, StacksHttpRequest, StacksHttpResponse, + send_http_request, HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, + StacksHttpMessage, StacksHttpPreamble, StacksHttpRequest, StacksHttpResponse, }; use crate::net::rpc::ConversationHttp; use crate::net::{ProtocolFamily, TipRequest}; @@ -1118,3 +1120,157 @@ fn test_metrics_identifiers() { assert_eq!(response_handler_index.is_some(), should_have_handler); } } + +fn json_body(host: &str, port: u16, path: &str, json_bytes: &[u8]) -> StacksHttpRequest { + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or(PeerHost::DNS(host.to_string(), port)); + let mut request = StacksHttpRequest::new_for_peer( + peerhost, + "POST".into(), + path.into(), + HttpRequestContents::new().payload_json(serde_json::from_slice(json_bytes).unwrap()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + + request +} + +#[test] +fn test_send_request_timeout() { + // Set up a TcpListener that accepts a connection but delays response + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind test listener"); + let addr = listener.local_addr().unwrap(); + + // Spawn a thread that will accept the connection and do nothing, simulating a long delay + thread::spawn(move || { + let (stream, _addr) = listener.accept().unwrap(); + // Hold the connection open to simulate a delay + thread::sleep(Duration::from_secs(10)); + drop(stream); // Close the stream + }); + + // Set a timeout shorter than the sleep duration to force a timeout + let connection_timeout = Duration::from_secs(2); + + // Attempt to connect, expecting a timeout error + let result = send_http_request( + "127.0.0.1", + addr.port(), + json_body("127.0.0.1", 80, "/", b"{}"), + connection_timeout, + ); + + // Assert that the result is an error, specifically a timeout + assert!( + result.is_err(), + "Expected a timeout error, got: {:?}", + result + ); + + if let Err(err) = result { + assert_eq!( + err.kind(), + std::io::ErrorKind::WouldBlock, + "Expected TimedOut error, got: {:?}", + err + ); + } +} + +fn start_mock_server(response: String, client_done_signal: Receiver<()>) -> String { + // Bind to an available port on localhost + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind server"); + let addr = listener.local_addr().unwrap(); + + debug!("Mock server listening on {}", addr); + + // Start the server in a new thread + thread::spawn(move || { + for stream in listener.incoming() { + debug!("Mock server accepted connection"); + let mut stream = stream.expect("Failed to accept connection"); + + // Read the client's request (even if we don't do anything with it) + let mut buffer = [0; 512]; + let _ = stream.read(&mut buffer); + debug!("Mock server received request"); + + // Simulate a basic HTTP response + stream + .write_all(response.as_bytes()) + .expect("Failed to write response"); + stream.flush().expect("Failed to flush stream"); + debug!("Mock server sent response"); + + // Wait for the client to signal that it's done reading + client_done_signal + .recv() + .expect("Failed to receive client done signal"); + + // Explicitly drop the stream after signaling to ensure the client finishes + // NOTE: this will cause the test to slow down, since `send_http_request` expects + // `Connection: close` + drop(stream); + + debug!("Mock server closing connection"); + + break; // Close after the first request + } + }); + + // Return the address of the mock server + format!("{}:{}", addr.ip(), addr.port()) +} + +fn parse_http_response(response: StacksHttpResponse) -> String { + let response_txt = match response.destruct().1 { + HttpResponsePayload::Text(s) => s, + HttpResponsePayload::Empty => "".to_string(), + HttpResponsePayload::JSON(js) => serde_json::to_string(&js).unwrap(), + HttpResponsePayload::Bytes(bytes) => String::from_utf8_lossy(bytes.as_slice()).to_string(), + }; + response_txt +} + +#[test] +fn test_send_request_success() { + // Prepare the mock server to return a successful HTTP response + let mock_response = "HTTP/1.1 200 OK\r\nContent-Length: 13\r\n\r\nHello, world!"; + + // Create a channel to signal when the client is done reading + let (tx_client_done, rx_client_done) = channel(); + let server_addr = start_mock_server(mock_response.to_string(), rx_client_done); + let timeout_duration = Duration::from_secs(5); + + let parts = server_addr.split(':').collect::>(); + let host = parts[0]; + let port = parts[1].parse().unwrap(); + + // Attempt to send a request to the mock server + let result = send_http_request( + host, + port, + json_body(host, port, "/", b"{}"), + timeout_duration, + ); + debug!("Got result: {:?}", result); + + // Ensure the server only closes after the client has finished processing + if let Ok(response) = &result { + let body = parse_http_response(response.clone()); + assert_eq!(body, "Hello, world!", "Unexpected response body: {}", body); + } + + tx_client_done + .send(()) + .expect("Failed to send close signal"); + + // Assert that the connection was successful + assert!( + result.is_ok(), + "Expected a successful request, but got {:?}", + result + ); +} diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 36336bdbf3..0a4b2556bb 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -36,7 +36,7 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::core::{StacksEpoch, StacksEpochId}; use stacks::monitoring::{increment_btc_blocks_received_counter, increment_btc_ops_sent_counter}; use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; -use stacks::net::httpcore::StacksHttpRequest; +use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::Error as NetError; use stacks_common::codec::StacksMessageCodec; use stacks_common::deps_common::bitcoin::blockdata::opcodes; @@ -64,7 +64,6 @@ use crate::config::{ OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, }; -use crate::event_dispatcher::send_request; /// The number of bitcoin blocks that can have /// passed since the UTXO cache was last refreshed before @@ -2794,7 +2793,7 @@ impl BitcoinRPCRequest { let host = request.preamble().host.hostname(); let port = request.preamble().host.port(); - let response = send_request(&host, port, request, timeout)?; + let response = send_http_request(&host, port, request, timeout)?; if let HttpResponsePayload::JSON(js) = response.destruct().1 { return Ok(js); } else { diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index c2d8d3c2c3..34e42501ac 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -16,13 +16,10 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; -use std::io; -use std::io::{Read, Write}; -use std::net::{TcpStream, ToSocketAddrs}; use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::Mutex; use std::thread::sleep; -use std::time::{Duration, Instant}; +use std::time::Duration; use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; @@ -55,17 +52,14 @@ use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; use stacks::net::atlas::{Attachment, AttachmentInstance}; -use stacks::net::connection::{ConnectionOptions, NetworkConnection}; -use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; -use stacks::net::httpcore::{StacksHttp, StacksHttpMessage, StacksHttpRequest, StacksHttpResponse}; +use stacks::net::http::HttpRequestContents; +use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; -use stacks::net::Error as NetError; use stacks::util::hash::to_hex; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; use stacks_common::types::net::PeerHost; -use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use url::Url; @@ -318,220 +312,6 @@ impl RewardSetEventPayload { } } -/// Convert a NetError into an io::Error if appropriate. -fn handle_net_error(e: NetError, msg: &str) -> io::Error { - if let NetError::ReadError(ioe) = e { - ioe - } else if let NetError::WriteError(ioe) = e { - ioe - } else if let NetError::RecvTimeout = e { - io::Error::new(io::ErrorKind::WouldBlock, "recv timeout") - } else { - io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()) - } -} - -/// Send an HTTP request to the given host:port. Returns the decoded response. -/// Internally, this creates a socket, connects it, sends the HTTP request, and decodes the HTTP -/// response. It is a blocking operation. -/// -/// If the request encounters a network error, then return an error. Don't retry. -/// If the request times out after `timeout`, then return an error. -pub fn send_request( - host: &str, - port: u16, - request: StacksHttpRequest, - timeout: Duration, -) -> Result { - // Find the host:port that works. - // This is sometimes necessary because `localhost` can resolve to both its ipv4 and ipv6 - // addresses, but usually, Stacks services like event observers are only bound to ipv4 - // addresses. So, be sure to use an address that will lead to a socket connection! - let mut stream_and_addr = None; - let mut last_err = None; - for addr in format!("{host}:{port}").to_socket_addrs()? { - debug!("send_request: connect to {}", &addr); - match TcpStream::connect_timeout(&addr, timeout) { - Ok(sock) => { - stream_and_addr = Some((sock, addr)); - break; - } - Err(e) => { - last_err = Some(e); - } - } - } - - let Some((mut stream, addr)) = stream_and_addr else { - return Err(last_err.unwrap_or(io::Error::new( - io::ErrorKind::Other, - "Unable to connect to {host}:{port}", - ))); - }; - - stream.set_read_timeout(Some(timeout))?; - stream.set_write_timeout(Some(timeout))?; - stream.set_nodelay(true)?; - - let start = Instant::now(); - - debug!("send_request: Sending request"; "request" => %request.request_path()); - - // Some explanation of what's going on here is in order. - // - // The networking stack in Stacks is designed to operate on non-blocking sockets, and - // furthermore, it operates in a way that the call site in which a network request is issued can - // be in a wholly separate stack (or thread) from the connection. While this is absolutely necessary - // within the Stacks node, using it to issue a single blocking request imposes a lot of - // overhead. - // - // First, we will create the network connection and give it a ProtocolFamily implementation - // (StacksHttp), which gets used by the connection to encode and deocde messages. - // - // Second, we'll create a _handle_ to the network connection into which we will write requests - // and read responses. The connection itself is an opaque black box that, internally, - // implements a state machine around the ProtocolFamily implementation to incrementally read - // ProtocolFamily messages from a Read, and write them to a Write. The Read + Write is - // (usually) a non-blocking socket; the network connection deals with EWOULDBLOCK internally, - // as well as underfull socket buffers. - // - // Third, we need to _drive_ data to the socket. We have to repeatedly (1) flush the network - // handle (which contains the buffered bytes from the message to be fed into the socket), and - // (2) drive bytes from the handle into the socket iself via the network connection. This is a - // two-step process mainly because the handle is expected to live in a separate stack (or even - // a separate thread). - // - // Fourth, we need to _drive_ data from the socket. We have to repeatedly (1) pull data from - // the socket into the network connection, and (2) drive parsed messages from the connection to - // the handle. Then, the call site that owns the handle simply polls the handle for new - // messages. Once we have received a message, we can proceed to handle it. - // - // Finally, we deal with the kind of HTTP message we got. If it's an error response, we convert - // it into an error. If it's a request (i.e. not a response), we also return an error. We - // only return the message if it was a well-formed non-error HTTP response. - - // Step 1-2: set up the connection and request handle - // NOTE: we don't need anything special for connection options, so just use the default - let conn_opts = ConnectionOptions::default(); - let http = StacksHttp::new_client(addr, &conn_opts); - let mut connection = NetworkConnection::new(http, &conn_opts, None); - let mut request_handle = connection - .make_request_handle(0, get_epoch_time_secs() + timeout.as_secs(), 0) - .map_err(|e| { - io::Error::new( - io::ErrorKind::Other, - format!("Failed to create request handle: {:?}", &e).as_str(), - ) - })?; - - // Step 3: load up the request with the message we're gonna send, and iteratively dump its - // bytes from the handle into the socket (the connection does internal buffering and - // bookkeeping to deal with the cases where we fail to fill the socket buffer, or we can't send - // anymore because the socket buffer is currently full). - request - .send(&mut request_handle) - .map_err(|e| handle_net_error(e, "Failed to serialize request body"))?; - - debug!("send_request(sending data)"); - loop { - let flushed = request_handle - .try_flush() - .map_err(|e| handle_net_error(e, "Failed to flush request body"))?; - - // send it out - let num_sent = connection - .send_data(&mut stream) - .map_err(|e| handle_net_error(e, "Failed to send socket data"))?; - - debug!( - "send_request(sending data): flushed = {}, num_sent = {}", - flushed, num_sent - ); - if flushed && num_sent == 0 { - break; - } - - if Instant::now().saturating_duration_since(start) > timeout { - return Err(io::Error::new( - io::ErrorKind::WouldBlock, - "Timed out while receiving request", - )); - } - } - - // Step 4: pull bytes from the socket back into the handle, and see if the connection decoded - // and dispatched any new messages to the request handle. If so, then extract the message and - // check that it's a well-formed HTTP response. - debug!("send_request(receiving data)"); - let response; - loop { - // get back the reply - debug!("send_request(receiving data): try to receive data"); - match connection.recv_data(&mut stream) { - Ok(nr) => { - debug!("send_request(receiving data): received {} bytes", nr); - } - Err(e) => { - return Err(handle_net_error(e, "Failed to receive socket data")); - } - } - - // fullfill the request -- send it to its corresponding handle - debug!("send_request(receiving data): drain inbox"); - connection.drain_inbox(); - - // see if we got a message that was fulfilled in our handle - debug!("send_request(receiving data): try receive response"); - let rh = match request_handle.try_recv() { - Ok(resp) => { - response = resp; - break; - } - Err(e) => match e { - Ok(handle) => handle, - Err(e) => { - return Err(handle_net_error( - e, - "Failed to receive message after socket has been drained", - )); - } - }, - }; - request_handle = rh; - - if Instant::now().saturating_duration_since(start) > timeout { - return Err(io::Error::new( - io::ErrorKind::WouldBlock, - "Timed out while receiving request", - )); - } - } - - // Step 5: decode the HTTP message and return it if it's not an error. - let response_data = match response { - StacksHttpMessage::Response(response_data) => response_data, - StacksHttpMessage::Error(path, response) => { - return Err(io::Error::new( - io::ErrorKind::Other, - format!( - "Request did not succeed ({} != 200). Path: '{}'", - response.preamble().status_code, - &path - ) - .as_str(), - )); - } - _ => { - return Err(io::Error::new( - io::ErrorKind::Other, - "Did not receive an HTTP response", - )); - } - }; - - Ok(response_data) -} - impl EventObserver { pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { debug!( @@ -567,7 +347,7 @@ impl EventObserver { .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); request.add_header("Connection".into(), "close".into()); - match send_request(host, port, request, backoff) { + match send_http_request(host, port, request, backoff) { Ok(response) => { if response.preamble().status_code == 200 { debug!( @@ -1700,7 +1480,6 @@ mod test { use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksHeaderInfo}; use stacks::chainstate::stacks::events::StacksBlockEventData; use stacks::chainstate::stacks::StacksBlock; - use stacks::net::httpcore::StacksHttpResponse; use stacks::types::chainstate::BlockHeaderHash; use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; @@ -1709,22 +1488,6 @@ mod test { use super::*; - fn json_body(host: &str, port: u16, path: &str, json_bytes: &[u8]) -> StacksHttpRequest { - let peerhost: PeerHost = format!("{host}:{port}") - .parse() - .unwrap_or(PeerHost::DNS(host.to_string(), port)); - let mut request = StacksHttpRequest::new_for_peer( - peerhost, - "POST".into(), - path.into(), - HttpRequestContents::new().payload_json(serde_json::from_slice(json_bytes).unwrap()), - ) - .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); - request.add_header("Connection".into(), "close".into()); - - request - } - #[test] fn build_block_processed_event() { let observer = EventObserver { @@ -1852,13 +1615,23 @@ mod test { // Start measuring time let start_time = Instant::now(); + let host = "10.255.255.1"; // non-routable IP for timeout + let port = 80; + + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or(PeerHost::DNS(host.to_string(), port)); + let mut request = StacksHttpRequest::new_for_peer( + peerhost, + "POST".into(), + "/".into(), + HttpRequestContents::new().payload_json(serde_json::from_slice(b"{}").unwrap()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + // Attempt to send a request with a timeout - let result = send_request( - "10.255.255.1", // Non-routable IP for timeout - 80, // HTTP port - json_body("10.255.255.1", 80, "/", b"{}"), - timeout_duration, - ); + let result = send_http_request(host, port, request, timeout_duration); // Measure the elapsed time let elapsed_time = start_time.elapsed(); @@ -1886,146 +1659,6 @@ mod test { ); } - #[test] - fn test_send_request_timeout() { - // Set up a TcpListener that accepts a connection but delays response - let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind test listener"); - let addr = listener.local_addr().unwrap(); - - // Spawn a thread that will accept the connection and do nothing, simulating a long delay - thread::spawn(move || { - let (stream, _addr) = listener.accept().unwrap(); - // Hold the connection open to simulate a delay - thread::sleep(Duration::from_secs(10)); - drop(stream); // Close the stream - }); - - // Set a timeout shorter than the sleep duration to force a timeout - let connection_timeout = Duration::from_secs(2); - - // Attempt to connect, expecting a timeout error - let result = send_request( - "127.0.0.1", - addr.port(), - json_body("127.0.0.1", 80, "/", b"{}"), - connection_timeout, - ); - - // Assert that the result is an error, specifically a timeout - assert!( - result.is_err(), - "Expected a timeout error, got: {:?}", - result - ); - - if let Err(err) = result { - assert_eq!( - err.kind(), - std::io::ErrorKind::WouldBlock, - "Expected TimedOut error, got: {:?}", - err - ); - } - } - - fn start_mock_server(response: String, client_done_signal: Receiver<()>) -> String { - // Bind to an available port on localhost - let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind server"); - let addr = listener.local_addr().unwrap(); - - debug!("Mock server listening on {}", addr); - - // Start the server in a new thread - thread::spawn(move || { - for stream in listener.incoming() { - debug!("Mock server accepted connection"); - let mut stream = stream.expect("Failed to accept connection"); - - // Read the client's request (even if we don't do anything with it) - let mut buffer = [0; 512]; - let _ = stream.read(&mut buffer); - debug!("Mock server received request"); - - // Simulate a basic HTTP response - stream - .write_all(response.as_bytes()) - .expect("Failed to write response"); - stream.flush().expect("Failed to flush stream"); - debug!("Mock server sent response"); - - // Wait for the client to signal that it's done reading - client_done_signal - .recv() - .expect("Failed to receive client done signal"); - - // Explicitly drop the stream after signaling to ensure the client finishes - // NOTE: this will cause the test to slow down, since `send_request` expects - // `Connection: close` - drop(stream); - - debug!("Mock server closing connection"); - - break; // Close after the first request - } - }); - - // Return the address of the mock server - format!("{}:{}", addr.ip(), addr.port()) - } - - fn parse_http_response(response: StacksHttpResponse) -> String { - let response_txt = match response.destruct().1 { - HttpResponsePayload::Text(s) => s, - HttpResponsePayload::Empty => "".to_string(), - HttpResponsePayload::JSON(js) => serde_json::to_string(&js).unwrap(), - HttpResponsePayload::Bytes(bytes) => { - String::from_utf8_lossy(bytes.as_slice()).to_string() - } - }; - response_txt - } - - #[test] - fn test_send_request_success() { - // Prepare the mock server to return a successful HTTP response - let mock_response = "HTTP/1.1 200 OK\r\nContent-Length: 13\r\n\r\nHello, world!"; - - // Create a channel to signal when the client is done reading - let (tx_client_done, rx_client_done) = channel(); - let server_addr = start_mock_server(mock_response.to_string(), rx_client_done); - let timeout_duration = Duration::from_secs(5); - - let parts = server_addr.split(':').collect::>(); - let host = parts[0]; - let port = parts[1].parse().unwrap(); - - // Attempt to send a request to the mock server - let result = send_request( - host, - port, - json_body(host, port, "/", b"{}"), - timeout_duration, - ); - debug!("Got result: {:?}", result); - - // Ensure the server only closes after the client has finished processing - if let Ok(response) = &result { - let body = parse_http_response(response.clone()); - assert_eq!(body, "Hello, world!", "Unexpected response body: {}", body); - } - - tx_client_done - .send(()) - .expect("Failed to send close signal"); - - // Assert that the connection was successful - assert!( - result.is_ok(), - "Expected a successful request, but got {:?}", - result - ); - } - fn get_random_port() -> u16 { // Bind to a random port by specifying port 0, then retrieve the port assigned by the OS let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind to a random port"); From f968d548f14991cb596ab86ec110d29d56aaeaa3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 13:16:13 -0400 Subject: [PATCH 0875/1400] chore: address PR feedback --- stackslib/src/net/api/getstackers.rs | 2 +- stackslib/src/net/atlas/db.rs | 8 +++- stackslib/src/net/httpcore.rs | 44 ++++++++----------- .../burnchains/bitcoin_regtest_controller.rs | 17 ++++++- 4 files changed, 41 insertions(+), 30 deletions(-) diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 581f989c7e..3b253aeb21 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -120,7 +120,7 @@ impl HttpRequest for GetStackersRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,20})$"#).unwrap() + Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,10})$"#).unwrap() } fn metrics_identifier(&self) -> &str { diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index f971344a28..d6bdbb301e 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -494,8 +494,12 @@ impl AtlasDB { page_index: u32, block_id: &StacksBlockId, ) -> Result, db_error> { - let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; - let max = min + AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; + let min = page_index + .checked_mul(AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE) + .ok_or(db_error::Overflow)?; + let max = min + .checked_add(AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE) + .ok_or(db_error::Overflow)?; let qry = "SELECT attachment_index, is_available FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2 AND index_block_hash = ?3 ORDER BY attachment_index ASC"; let args = params![min, max, block_id,]; let rows = query_rows::<(u32, u32), _>(&self.conn, &qry, args)?; diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index b173925dc8..5e90261e89 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1686,17 +1686,17 @@ impl ProtocolFamily for StacksHttp { &req.preamble().verb, &decoded_path )))?; - handler_index + Some(handler_index) } else { - 0 + None }; req.send(fd)?; // remember this so we'll know how to decode the response. // The next preamble and message we'll read _must be_ a response! - if !self.allow_arbitrary_response { - self.request_handler_index = Some(handler_index); + if handler_index.is_some() { + self.request_handler_index = handler_index; } Ok(()) } @@ -1768,14 +1768,10 @@ pub fn decode_request_path(path: &str) -> Result<(String, String), NetError> { /// Convert a NetError into an io::Error if appropriate. fn handle_net_error(e: NetError, msg: &str) -> io::Error { - if let NetError::ReadError(ioe) = e { - ioe - } else if let NetError::WriteError(ioe) = e { - ioe - } else if let NetError::RecvTimeout = e { - io::Error::new(io::ErrorKind::WouldBlock, "recv timeout") - } else { - io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()) + match e { + NetError::ReadError(ioe) | NetError::WriteError(ioe) => ioe, + NetError::RecvTimeout => io::Error::new(io::ErrorKind::WouldBlock, "recv timeout"), + _ => io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()), } } @@ -1911,8 +1907,7 @@ pub fn send_http_request( // and dispatched any new messages to the request handle. If so, then extract the message and // check that it's a well-formed HTTP response. debug!("send_request(receiving data)"); - let response; - loop { + let response = loop { // get back the reply debug!("send_request(receiving data): try to receive data"); match connection.recv_data(&mut stream) { @@ -1932,18 +1927,15 @@ pub fn send_http_request( debug!("send_request(receiving data): try receive response"); let rh = match request_handle.try_recv() { Ok(resp) => { - response = resp; - break; + break resp; + } + Err(Ok(handle)) => handle, + Err(Err(e)) => { + return Err(handle_net_error( + e, + "Failed to receive message after socket has been drained", + )); } - Err(e) => match e { - Ok(handle) => handle, - Err(e) => { - return Err(handle_net_error( - e, - "Failed to receive message after socket has been drained", - )); - } - }, }; request_handle = rh; @@ -1953,7 +1945,7 @@ pub fn send_http_request( "Timed out while receiving request", )); } - } + }; // Step 5: decode the HTTP message and return it if it's not an error. let response_data = match response { diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 0a4b2556bb..32d590dd39 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1,4 +1,19 @@ -use std::convert::From; +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use std::io::Cursor; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; From 705ebf3af4e8e5fe6e525aab28c91c42469ee588 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 13:16:13 -0400 Subject: [PATCH 0876/1400] chore: address PR feedback --- stackslib/src/net/api/getstackers.rs | 2 +- stackslib/src/net/atlas/db.rs | 8 +++- stackslib/src/net/httpcore.rs | 44 ++++++++----------- .../burnchains/bitcoin_regtest_controller.rs | 17 ++++++- 4 files changed, 41 insertions(+), 30 deletions(-) diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 69961dbe14..c2605adf61 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -121,7 +121,7 @@ impl HttpRequest for GetStackersRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,20})$"#).unwrap() + Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,10})$"#).unwrap() } fn metrics_identifier(&self) -> &str { diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index f971344a28..d6bdbb301e 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -494,8 +494,12 @@ impl AtlasDB { page_index: u32, block_id: &StacksBlockId, ) -> Result, db_error> { - let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; - let max = min + AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; + let min = page_index + .checked_mul(AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE) + .ok_or(db_error::Overflow)?; + let max = min + .checked_add(AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE) + .ok_or(db_error::Overflow)?; let qry = "SELECT attachment_index, is_available FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2 AND index_block_hash = ?3 ORDER BY attachment_index ASC"; let args = params![min, max, block_id,]; let rows = query_rows::<(u32, u32), _>(&self.conn, &qry, args)?; diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index b173925dc8..5e90261e89 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1686,17 +1686,17 @@ impl ProtocolFamily for StacksHttp { &req.preamble().verb, &decoded_path )))?; - handler_index + Some(handler_index) } else { - 0 + None }; req.send(fd)?; // remember this so we'll know how to decode the response. // The next preamble and message we'll read _must be_ a response! - if !self.allow_arbitrary_response { - self.request_handler_index = Some(handler_index); + if handler_index.is_some() { + self.request_handler_index = handler_index; } Ok(()) } @@ -1768,14 +1768,10 @@ pub fn decode_request_path(path: &str) -> Result<(String, String), NetError> { /// Convert a NetError into an io::Error if appropriate. fn handle_net_error(e: NetError, msg: &str) -> io::Error { - if let NetError::ReadError(ioe) = e { - ioe - } else if let NetError::WriteError(ioe) = e { - ioe - } else if let NetError::RecvTimeout = e { - io::Error::new(io::ErrorKind::WouldBlock, "recv timeout") - } else { - io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()) + match e { + NetError::ReadError(ioe) | NetError::WriteError(ioe) => ioe, + NetError::RecvTimeout => io::Error::new(io::ErrorKind::WouldBlock, "recv timeout"), + _ => io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()), } } @@ -1911,8 +1907,7 @@ pub fn send_http_request( // and dispatched any new messages to the request handle. If so, then extract the message and // check that it's a well-formed HTTP response. debug!("send_request(receiving data)"); - let response; - loop { + let response = loop { // get back the reply debug!("send_request(receiving data): try to receive data"); match connection.recv_data(&mut stream) { @@ -1932,18 +1927,15 @@ pub fn send_http_request( debug!("send_request(receiving data): try receive response"); let rh = match request_handle.try_recv() { Ok(resp) => { - response = resp; - break; + break resp; + } + Err(Ok(handle)) => handle, + Err(Err(e)) => { + return Err(handle_net_error( + e, + "Failed to receive message after socket has been drained", + )); } - Err(e) => match e { - Ok(handle) => handle, - Err(e) => { - return Err(handle_net_error( - e, - "Failed to receive message after socket has been drained", - )); - } - }, }; request_handle = rh; @@ -1953,7 +1945,7 @@ pub fn send_http_request( "Timed out while receiving request", )); } - } + }; // Step 5: decode the HTTP message and return it if it's not an error. let response_data = match response { diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 0a4b2556bb..32d590dd39 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1,4 +1,19 @@ -use std::convert::From; +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use std::io::Cursor; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; From f43ca1af89283fd19ba1152048962f6877f2f6c7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 15:03:29 -0400 Subject: [PATCH 0877/1400] fix: allow arbitrary response handler only if there is no handler found, and turn it on by default in the parse_response() helper. Also, it's no longer an error to omit content-type; per the RFC, this defaults to application/octet-stream --- stackslib/src/net/http/tests.rs | 2 -- stackslib/src/net/httpcore.rs | 7 +++++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/http/tests.rs b/stackslib/src/net/http/tests.rs index 508ca55c6e..a17635bc59 100644 --- a/stackslib/src/net/http/tests.rs +++ b/stackslib/src/net/http/tests.rs @@ -368,8 +368,6 @@ fn test_parse_http_response_preamble_err() { "Unsupported HTTP content type"), ("HTTP/1.1 200 OK\r\nContent-Length: foo\r\n\r\n", "Invalid Content-Length"), - ("HTTP/1.1 200 OK\r\nContent-Length: 123\r\n\r\n", - "missing Content-Type, Content-Length"), ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n", "missing Content-Type, Content-Length"), ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 5e90261e89..bd406345b9 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1356,6 +1356,7 @@ impl StacksHttp { "127.0.0.1:20443".parse().unwrap(), &ConnectionOptions::default(), ); + http.allow_arbitrary_response = true; let (preamble, message_offset) = http.read_preamble(response_buf)?; let is_chunked = match preamble { @@ -1498,7 +1499,9 @@ impl ProtocolFamily for StacksHttp { num_read, ); - let parse_res = if self.allow_arbitrary_response { + let parse_res = if self.request_handler_index.is_none() + && self.allow_arbitrary_response + { let arbitrary_parser = RPCArbitraryResponseHandler {}; let response_payload = arbitrary_parser .try_parse_response(http_response_preamble, &message_bytes[..])?; @@ -1604,7 +1607,7 @@ impl ProtocolFamily for StacksHttp { // message of known length test_debug!("read http response payload of {} bytes", buf.len(),); - if self.allow_arbitrary_response { + if self.request_handler_index.is_none() && self.allow_arbitrary_response { let arbitrary_parser = RPCArbitraryResponseHandler {}; let response_payload = arbitrary_parser.try_parse_response(http_response_preamble, buf)?; From 83253a08ac36a2174ade61e40355bab03335d188 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 15:31:44 -0400 Subject: [PATCH 0878/1400] fix: failing unit test (omitted content-type is not a problem anymore) --- stackslib/src/net/tests/httpcore.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs index d9e13883a1..d9c62eedf6 100644 --- a/stackslib/src/net/tests/httpcore.rs +++ b/stackslib/src/net/tests/httpcore.rs @@ -120,8 +120,6 @@ fn test_parse_stacks_http_preamble_response_err() { "Failed to decode HTTP request or HTTP response"), ("HTTP/1.1 200 OK\r\nContent-Length: foo\r\n\r\n", "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Length: 123\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n", "Failed to decode HTTP request or HTTP response"), ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", From e8672eb215b47170b90830847bb6330e789c219c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 16:13:41 -0400 Subject: [PATCH 0879/1400] fix: more compact stackerdb output --- libstackerdb/src/libstackerdb.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/libstackerdb/src/libstackerdb.rs b/libstackerdb/src/libstackerdb.rs index 507d2249f7..8b3fe9db0f 100644 --- a/libstackerdb/src/libstackerdb.rs +++ b/libstackerdb/src/libstackerdb.rs @@ -82,7 +82,7 @@ pub struct SlotMetadata { } /// Stacker DB chunk (i.e. as a reply to a chunk request) -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct StackerDBChunkData { /// slot ID pub slot_id: u32, @@ -98,6 +98,17 @@ pub struct StackerDBChunkData { pub data: Vec, } +impl fmt::Debug for StackerDBChunkData { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.data.len() < 128 { + write!(f, "StackerDBChunkData({},{},{},{})", self.slot_id, self.slot_version, &self.sig, &to_hex(&self.data)) + } + else { + write!(f, "StackerDBChunkData({},{},{},{}...({}))", self.slot_id, self.slot_version, &self.sig, &to_hex(&self.data[..128]), self.data.len()) + } + } +} + /// StackerDB post chunk acknowledgement #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct StackerDBChunkAckData { From 5879e5a3aa30dc130f5b2ef135825243c853c4f7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 16:13:51 -0400 Subject: [PATCH 0880/1400] chore: docstring --- stackslib/src/burnchains/db.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 384047ccd4..72ca2e8bf1 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -1411,6 +1411,8 @@ impl BurnchainDB { Ok(()) } + /// Stores a newly-parsed burnchain block's relevant data into the DB. + /// The given block's operations will be validated. pub fn store_new_burnchain_block( &mut self, burnchain: &Burnchain, From 498395dae5b43b04b7822a9d8dccd69064223575 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 16:14:06 -0400 Subject: [PATCH 0881/1400] chore: add test coverage for stackerdb push and modify downloader tests to rely only on the chains coordinator to advance state (thereby testing reward cycle boundary conditions) --- stackslib/src/net/connection.rs | 3 + stackslib/src/net/mod.rs | 14 ++ stackslib/src/net/stackerdb/sync.rs | 8 + stackslib/src/net/stackerdb/tests/sync.rs | 178 ++++++++++++++++++- stackslib/src/net/tests/download/nakamoto.rs | 166 ----------------- 5 files changed, 197 insertions(+), 172 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index afeebe54c3..3577900279 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -439,6 +439,8 @@ pub struct ConnectionOptions { pub disable_inbound_handshakes: bool, /// Disable getting chunks from StackerDB (e.g. to test push-only) pub disable_stackerdb_get_chunks: bool, + /// Disable running stackerdb sync altogether (e.g. to test push-only) + pub disable_stackerdb_sync: bool, /// Unconditionally disconnect a peer after this amount of time pub force_disconnect_interval: Option, /// If set to true, this forces the p2p state machine to believe that it is running in @@ -548,6 +550,7 @@ impl std::default::Default for ConnectionOptions { disable_natpunch: false, disable_inbound_handshakes: false, disable_stackerdb_get_chunks: false, + disable_stackerdb_sync: false, force_disconnect_interval: None, force_nakamoto_epoch_transition: false, diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 5cedc4e068..96c5be3d30 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2918,6 +2918,20 @@ pub mod test { ret } + pub fn get_burnchain_db(&self, readwrite: bool) -> BurnchainDB { + let burnchain_db = + BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), readwrite) + .unwrap(); + burnchain_db + } + + pub fn get_sortition_at_height(&self, height: u64) -> Option { + let sortdb = self.sortdb.as_ref().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&tip.sortition_id); + sort_handle.get_block_snapshot_by_height(height).unwrap() + } + pub fn get_burnchain_block_ops( &self, burn_block_hash: &BurnchainHeaderHash, diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 85e76ea524..53a5e13e48 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -1204,6 +1204,14 @@ impl StackerDBSync { network: &mut PeerNetwork, config: &StackerDBConfig, ) -> Result, net_error> { + if network.get_connection_opts().disable_stackerdb_sync { + test_debug!( + "{:?}: stacker DB sync is disabled", + network.get_local_peer() + ); + return Ok(None); + } + // throttle to write_freq if self.last_run_ts + config.write_freq.max(1) > get_epoch_time_secs() { debug!( diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index 69bdad93d9..9227eedecc 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -319,8 +319,8 @@ fn test_stackerdb_replica_2_neighbors_1_chunk() { fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { with_timeout(600, || { std::env::set_var("STACKS_TEST_DISABLE_EDGE_TRIGGER_TEST", "1"); - let mut peer_1_config = TestPeerConfig::from_port(BASE_PORT); - let mut peer_2_config = TestPeerConfig::from_port(BASE_PORT + 2); + let mut peer_1_config = TestPeerConfig::from_port(BASE_PORT + 4); + let mut peer_2_config = TestPeerConfig::from_port(BASE_PORT + 8); peer_1_config.allowed = -1; peer_2_config.allowed = -1; @@ -532,13 +532,13 @@ fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { #[test] #[ignore] fn test_stackerdb_replica_2_neighbors_10_chunks() { - inner_test_stackerdb_replica_2_neighbors_10_chunks(false, BASE_PORT + 4); + inner_test_stackerdb_replica_2_neighbors_10_chunks(false, BASE_PORT + 10); } #[test] #[ignore] fn test_stackerdb_replica_2_neighbors_10_push_chunks() { - inner_test_stackerdb_replica_2_neighbors_10_chunks(true, BASE_PORT + 8); + inner_test_stackerdb_replica_2_neighbors_10_chunks(true, BASE_PORT + 30); } fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port: u16) { @@ -663,16 +663,182 @@ fn inner_test_stackerdb_replica_2_neighbors_10_chunks(push_only: bool, base_port }) } +/// Verify that the relayer will push stackerdb chunks. +/// Replica A has the data. +/// Replica B receives the data via StackerDB sync +/// Replica C receives the data from B's relayer pushes +#[test] +fn test_stackerdb_push_relayer() { + with_timeout(600, move || { + std::env::set_var("STACKS_TEST_DISABLE_EDGE_TRIGGER_TEST", "1"); + let mut peer_1_config = TestPeerConfig::from_port(BASE_PORT + 100); + let mut peer_2_config = TestPeerConfig::from_port(BASE_PORT + 102); + let mut peer_3_config = TestPeerConfig::from_port(BASE_PORT + 104); + + peer_1_config.allowed = -1; + peer_2_config.allowed = -1; + peer_3_config.allowed = -1; + + // short-lived walks... + peer_1_config.connection_opts.walk_max_duration = 10; + peer_2_config.connection_opts.walk_max_duration = 10; + peer_3_config.connection_opts.walk_max_duration = 10; + + peer_3_config.connection_opts.disable_stackerdb_sync = true; + + // peer 1 crawls peer 2, and peer 2 crawls peer 1 and peer 3, and peer 3 crawls peer 2 + peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_3_config.to_neighbor()); + peer_3_config.add_neighbor(&peer_2_config.to_neighbor()); + + // set up stacker DBs for both peers + let idx_1 = add_stackerdb(&mut peer_1_config, Some(StackerDBConfig::template())); + let idx_2 = add_stackerdb(&mut peer_2_config, Some(StackerDBConfig::template())); + let idx_3 = add_stackerdb(&mut peer_3_config, Some(StackerDBConfig::template())); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + let mut peer_3 = TestPeer::new(peer_3_config); + + // peer 1 gets the DB + setup_stackerdb(&mut peer_1, idx_1, true, 10); + setup_stackerdb(&mut peer_2, idx_2, false, 10); + setup_stackerdb(&mut peer_3, idx_2, false, 10); + + // verify that peer 1 got the data + let peer_1_db_chunks = load_stackerdb(&peer_1, idx_1); + assert_eq!(peer_1_db_chunks.len(), 10); + for i in 0..10 { + assert_eq!(peer_1_db_chunks[i].0.slot_id, i as u32); + assert_eq!(peer_1_db_chunks[i].0.slot_version, 1); + assert!(peer_1_db_chunks[i].1.len() > 0); + } + + // verify that peer 2 and 3 did NOT get the data + let peer_2_db_chunks = load_stackerdb(&peer_2, idx_2); + assert_eq!(peer_2_db_chunks.len(), 10); + for i in 0..10 { + assert_eq!(peer_2_db_chunks[i].0.slot_id, i as u32); + assert_eq!(peer_2_db_chunks[i].0.slot_version, 0); + assert!(peer_2_db_chunks[i].1.len() == 0); + } + + let peer_3_db_chunks = load_stackerdb(&peer_3, idx_2); + assert_eq!(peer_3_db_chunks.len(), 10); + for i in 0..10 { + assert_eq!(peer_3_db_chunks[i].0.slot_id, i as u32); + assert_eq!(peer_3_db_chunks[i].0.slot_version, 0); + assert!(peer_3_db_chunks[i].1.len() == 0); + } + + let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); + let peer_2_db_configs = peer_2.config.get_stacker_db_configs(); + let peer_3_db_configs = peer_3.config.get_stacker_db_configs(); + + let mut i = 0; + loop { + // run peer network state-machines + peer_1.network.stacker_db_configs = peer_1_db_configs.clone(); + peer_2.network.stacker_db_configs = peer_2_db_configs.clone(); + peer_3.network.stacker_db_configs = peer_3_db_configs.clone(); + + let res_1 = peer_1.step_with_ibd(false); + let res_2 = peer_2.step_with_ibd(false); + let res_3 = peer_3.step_with_ibd(false); + + if let Ok(res) = res_1 { + check_sync_results(&res); + peer_1 + .relayer + .process_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_1 + .relayer + .process_pushed_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); + } + + if let Ok(res) = res_2 { + check_sync_results(&res); + peer_2 + .relayer + .process_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_2 + .relayer + .process_pushed_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); + } + + if let Ok(res) = res_3 { + check_sync_results(&res); + peer_3 + .relayer + .process_stacker_db_chunks( + &peer_3.network.get_chain_view().rc_consensus_hash, + &peer_3_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_3 + .relayer + .process_pushed_stacker_db_chunks( + &peer_3.network.get_chain_view().rc_consensus_hash, + &peer_3_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); + } + + let db1 = load_stackerdb(&peer_1, idx_1); + let db2 = load_stackerdb(&peer_2, idx_2); + let db3 = load_stackerdb(&peer_3, idx_3); + + if db1 == db2 && db2 == db3 { + break; + } + i += 1; + + debug!("StackerDB sync step {}", i); + } + + debug!("Completed stacker DB sync in {} step(s)", i); + }) +} + #[test] #[ignore] fn test_stackerdb_10_replicas_10_neighbors_line_10_chunks() { - inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(false, BASE_PORT + 28); + inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(false, BASE_PORT + 50); } #[test] #[ignore] fn test_stackerdb_10_replicas_10_neighbors_line_push_10_chunks() { - inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(true, BASE_PORT + 68); + inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(true, BASE_PORT + 70); } fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, base_port: u16) { diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index afba1e90e7..31f88b50f8 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -2111,38 +2111,6 @@ fn test_nakamoto_download_run_2_peers() { let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]); let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer)); - let all_burn_block_ops: Vec<(u64, Vec<_>)> = (26..=tip.block_height) - .map(|height| { - ( - height, - peer.get_burnchain_block_ops_at_height(height) - .unwrap_or(vec![]), - ) - }) - .collect(); - - let all_sortitions: Vec = all_burn_block_ops - .iter() - .map(|(height, ops)| { - let ih = peer.sortdb().index_handle(&tip.sortition_id); - let sn = ih.get_block_snapshot_by_height(*height).unwrap().unwrap(); - sn - }) - .collect(); - - let mut all_block_headers: HashMap = HashMap::new(); - for sn in all_sortitions.iter() { - if let Some(header) = NakamotoChainState::get_tenure_start_block_header( - &mut peer.chainstate().index_conn(), - &nakamoto_tip, - &sn.consensus_hash, - ) - .unwrap() - { - all_block_headers.insert(sn.consensus_hash.clone(), header); - } - } - let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); @@ -2178,19 +2146,9 @@ fn test_nakamoto_download_run_2_peers() { let (term_sx, term_rx) = sync_channel(1); thread::scope(|s| { s.spawn(move || { - let mut burnchain_ptr = 0; - - // kick things off - let (_burn_height, burn_ops) = all_burn_block_ops.get(burnchain_ptr).unwrap(); - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) .unwrap(); - let mut last_burnchain_sync = get_epoch_time_secs(); - let deadline = 5; - loop { boot_peer .run_with_ibd(true, Some(&mut boot_dns_client)) @@ -2200,47 +2158,6 @@ fn test_nakamoto_download_run_2_peers() { SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) .unwrap(); - if burnchain_ptr < all_burn_block_ops.len() { - let (burn_height, burn_ops) = all_burn_block_ops.get(burnchain_ptr).unwrap(); - let expected_sortition = all_sortitions.get(burnchain_ptr).unwrap(); - if !expected_sortition.sortition { - if last_burnchain_sync + deadline < get_epoch_time_secs() { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - continue; - } - if !all_block_headers.contains_key(&expected_sortition.consensus_hash) { - if last_burnchain_sync + deadline < get_epoch_time_secs() { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - continue; - } - - let header = all_block_headers - .get(&expected_sortition.consensus_hash) - .unwrap(); - debug!( - "Waiting for Stacks block {} (sortition {} height {} burn height {})", - &header.index_block_hash(), - &expected_sortition.consensus_hash, - &header.anchored_header.height(), - expected_sortition.block_height - ); - - if stacks_tip_ch != last_stacks_tip_ch - || stacks_tip_ch == header.consensus_hash - || last_burnchain_sync + deadline < get_epoch_time_secs() - { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - } - last_stacks_tip_ch = stacks_tip_ch; last_stacks_tip_bhh = stacks_tip_bhh; @@ -2305,38 +2222,6 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]); let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer)); - let all_burn_block_ops: Vec<(u64, Vec<_>)> = (26..=tip.block_height) - .map(|height| { - ( - height, - peer.get_burnchain_block_ops_at_height(height) - .unwrap_or(vec![]), - ) - }) - .collect(); - - let all_sortitions: Vec = all_burn_block_ops - .iter() - .map(|(height, ops)| { - let ih = peer.sortdb().index_handle(&tip.sortition_id); - let sn = ih.get_block_snapshot_by_height(*height).unwrap().unwrap(); - sn - }) - .collect(); - - let mut all_block_headers: HashMap = HashMap::new(); - for sn in all_sortitions.iter() { - if let Some(header) = NakamotoChainState::get_tenure_start_block_header( - &mut peer.chainstate().index_conn(), - &nakamoto_tip, - &sn.consensus_hash, - ) - .unwrap() - { - all_block_headers.insert(sn.consensus_hash.clone(), header); - } - } - let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); @@ -2372,19 +2257,9 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { let (term_sx, term_rx) = sync_channel(1); thread::scope(|s| { s.spawn(move || { - let mut burnchain_ptr = 0; - - // kick things off - let (_burn_height, burn_ops) = all_burn_block_ops.get(burnchain_ptr).unwrap(); - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) .unwrap(); - let mut last_burnchain_sync = get_epoch_time_secs(); - let deadline = 5; - loop { boot_peer .run_with_ibd(true, Some(&mut boot_dns_client)) @@ -2394,47 +2269,6 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) .unwrap(); - if burnchain_ptr < all_burn_block_ops.len() { - let (burn_height, burn_ops) = all_burn_block_ops.get(burnchain_ptr).unwrap(); - let expected_sortition = all_sortitions.get(burnchain_ptr).unwrap(); - if !expected_sortition.sortition { - if last_burnchain_sync + deadline < get_epoch_time_secs() { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - continue; - } - if !all_block_headers.contains_key(&expected_sortition.consensus_hash) { - if last_burnchain_sync + deadline < get_epoch_time_secs() { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - continue; - } - - let header = all_block_headers - .get(&expected_sortition.consensus_hash) - .unwrap(); - debug!( - "Waiting for Stacks block {} (sortition {} height {} burn height {})", - &header.index_block_hash(), - &expected_sortition.consensus_hash, - &header.anchored_header.height(), - expected_sortition.block_height - ); - - if stacks_tip_ch != last_stacks_tip_ch - || stacks_tip_ch == header.consensus_hash - || last_burnchain_sync + deadline < get_epoch_time_secs() - { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - } - last_stacks_tip_ch = stacks_tip_ch; last_stacks_tip_bhh = stacks_tip_bhh; From ec1b9138b69f33bb973c7c476264562710d2d4cb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 16:14:34 -0400 Subject: [PATCH 0882/1400] chore: log block rejection based on signature threshold more faithfully --- stackslib/src/chainstate/nakamoto/mod.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 49ced5f916..1ec2bb4656 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2280,7 +2280,14 @@ impl NakamotoChainState { "signing_weight" => signing_weight); true } else { - debug!("Will not store alternative copy of block {} ({}) with block hash {}, since it has less signing power", &block_id, &block.header.consensus_hash, &block_hash); + if existing_signing_weight > signing_weight { + debug!("Will not store alternative copy of block {} ({}) with block hash {}, since it has less signing power", &block_id, &block.header.consensus_hash, &block_hash); + } else { + debug!( + "Will not store duplicate copy of block {} ({}) with block hash {}", + &block_id, &block.header.consensus_hash, &block_hash + ); + } false }; From fb1e3d61d200893a008eb6f7c2b39c3eba86fa0c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 16:14:47 -0400 Subject: [PATCH 0883/1400] chore: debug instead of warn on block rejection since the cause is usually normal --- testnet/stacks-node/src/nakamoto_node/miner.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index cd811a9346..fb79c6abc7 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -738,7 +738,11 @@ impl BlockMinerThread { staging_tx.commit()?; if !accepted { - warn!("Did NOT accept block {} we mined", &block.block_id()); + // this can happen if the p2p network and relayer manage to receive this block prior to + // the thread reaching this point -- this can happen because the signers broadcast the + // signed block to the nodes independent of the miner, so the miner itself can receive + // and store its own block outside of this thread. + debug!("Did NOT accept block {} we mined", &block.block_id()); // not much we can do here, but try and mine again and hope we produce a valid one. return Ok(()); From 6475f6510f8c71d905a8c10bdbcde480d39d8993 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 23 Aug 2024 16:37:25 -0400 Subject: [PATCH 0884/1400] chore: rust-fmt --- libstackerdb/src/libstackerdb.rs | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/libstackerdb/src/libstackerdb.rs b/libstackerdb/src/libstackerdb.rs index 8b3fe9db0f..714ef838c4 100644 --- a/libstackerdb/src/libstackerdb.rs +++ b/libstackerdb/src/libstackerdb.rs @@ -101,10 +101,24 @@ pub struct StackerDBChunkData { impl fmt::Debug for StackerDBChunkData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if self.data.len() < 128 { - write!(f, "StackerDBChunkData({},{},{},{})", self.slot_id, self.slot_version, &self.sig, &to_hex(&self.data)) - } - else { - write!(f, "StackerDBChunkData({},{},{},{}...({}))", self.slot_id, self.slot_version, &self.sig, &to_hex(&self.data[..128]), self.data.len()) + write!( + f, + "StackerDBChunkData({},{},{},{})", + self.slot_id, + self.slot_version, + &self.sig, + &to_hex(&self.data) + ) + } else { + write!( + f, + "StackerDBChunkData({},{},{},{}...({}))", + self.slot_id, + self.slot_version, + &self.sig, + &to_hex(&self.data[..128]), + self.data.len() + ) } } } From 334d0373c32c19a31b180d62a48c79ecc84ef792 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 25 Aug 2024 20:01:19 -0400 Subject: [PATCH 0885/1400] fix: catch error on tx submission --- .../src/tests/neon_integrations.rs | 17 ++++-- testnet/stacks-node/src/tests/signer/v0.rs | 61 ++++++++++++++----- 2 files changed, 57 insertions(+), 21 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 0905fb1f60..b651f405c3 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -749,8 +749,8 @@ pub fn wait_for_microblocks(microblocks_processed: &Arc, timeout: u64 return true; } -/// returns Txid string -pub fn submit_tx(http_origin: &str, tx: &Vec) -> String { +/// returns Txid string upon success +pub fn submit_tx_fallible(http_origin: &str, tx: &Vec) -> Result { let client = reqwest::blocking::Client::new(); let path = format!("{}/v2/transactions", http_origin); let res = client @@ -768,13 +768,20 @@ pub fn submit_tx(http_origin: &str, tx: &Vec) -> String { .txid() .to_string() ); - return res; + Ok(res) } else { - eprintln!("Submit tx error: {}", res.text().unwrap()); - panic!(""); + Err(res.text().unwrap()) } } +/// returns Txid string +pub fn submit_tx(http_origin: &str, tx: &Vec) -> String { + submit_tx_fallible(http_origin, tx).unwrap_or_else(|e| { + eprintln!("Submit tx error: {}", e); + panic!(""); + }) +} + pub fn get_unconfirmed_tx(http_origin: &str, txid: &Txid) -> Option { let client = reqwest::blocking::Client::new(); let path = format!("{}/v2/transactions/unconfirmed/{}", http_origin, txid); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d41483a364..702a136d1e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -69,7 +69,7 @@ use crate::tests::nakamoto_integrations::{ }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, - test_observer, + submit_tx_fallible, test_observer, }; use crate::tests::{self, make_stacks_transfer}; use crate::{nakamoto_node, BurnchainController, Config, Keychain}; @@ -3165,6 +3165,7 @@ fn partial_tenure_fork() { &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_proposed = signer_test.running_nodes.nakamoto_blocks_proposed.clone(); let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); @@ -3231,6 +3232,8 @@ fn partial_tenure_fork() { let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + let proposed_before = blocks_proposed.load(Ordering::SeqCst); + info!("proposed_blocks: {proposed_before}, proposed_blocks2: {proposed_before_2}"); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -3238,6 +3241,10 @@ fn partial_tenure_fork() { let mined_1 = blocks_mined1.load(Ordering::SeqCst); let mined_2 = blocks_mined2.load(Ordering::SeqCst); let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + let proposed_1 = blocks_proposed.load(Ordering::SeqCst); + info!( + "Fork initiated: {fork_initiated}, Mined 1 blocks: {mined_1}, Mined 2 blocks {mined_2}, Proposed blocks: {proposed_1}, Proposed blocks 2: {proposed_2}", + ); Ok((fork_initiated && proposed_2 > proposed_before_2) || mined_1 > mined_before_1 @@ -3279,18 +3286,30 @@ fn partial_tenure_fork() { let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - submit_tx(&http_origin, &transfer_tx); - - wait_for(60, || { - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); - - Ok((fork_initiated && proposed_2 > proposed_before_2) - || mined_1 > mined_before_1 - || mined_2 > mined_before_2) - }) - .unwrap(); + // This may fail if the forking miner wins too many tenures and this account's + // nonces get too high (TooMuchChaining) + match submit_tx_fallible(&http_origin, &transfer_tx) { + Ok(_) => { + wait_for(60, || { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + + Ok((fork_initiated && proposed_2 > proposed_before_2) + || mined_1 > mined_before_1 + || mined_2 > mined_before_2) + }) + .unwrap(); + } + Err(e) => { + if e.to_string().contains("TooMuchChaining") { + info!("TooMuchChaining error, skipping block"); + continue; + } else { + panic!("Failed to submit tx: {}", e); + } + } + } info!( "Attempted to mine interim block {}:{}", btc_blocks_mined, interim_block_ix @@ -3317,9 +3336,6 @@ fn partial_tenure_fork() { } else if miner_2_tenures == min_miner_2_tenures { // If this is the forking tenure, miner 2 should have mined 0 blocks assert_eq!(mined_2, mined_before_2); - - // Clear the ignore block - clear_ignore_block(); } } } @@ -3364,5 +3380,18 @@ fn partial_tenure_fork() { .unwrap(); assert_eq!(tip.stacks_block_height, ignore_block - 1); + let (chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let blocks = chainstate + .get_stacks_chain_tips_at_height(ignore_block) + .unwrap(); + info!("blocks: {:?}", blocks); + signer_test.shutdown(); } From 06efc09c1dde5f8deae87c16c54cf93ab89a7c21 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 26 Aug 2024 10:38:55 -0400 Subject: [PATCH 0886/1400] WIP: Cleanup test for easier logic Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 51 ++++++++++------------ 1 file changed, 23 insertions(+), 28 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1f637b6b8d..0e38f13ea8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1915,7 +1915,10 @@ fn end_of_tenure() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let long_timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(20); - + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); signer_test.boot_to_epoch_3(); let curr_reward_cycle = signer_test.get_current_reward_cycle(); // Advance to one before the next reward cycle to ensure we are on the reward cycle boundary @@ -1928,7 +1931,18 @@ fn end_of_tenure() { - 2; // give the system a chance to mine a Nakamoto block - sleep_ms(30_000); + // But it doesn't have to mine one for this test to succeed? + let start = Instant::now(); + while start.elapsed() <= short_timeout { + let mined_blocks = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + if mined_blocks > blocks_before { + break; + } + sleep_ms(100); + } info!("------------------------- Test Mine to Next Reward Cycle Boundary -------------------------"); signer_test.run_until_burnchain_height_nakamoto( @@ -1936,7 +1950,7 @@ fn end_of_tenure() { final_reward_cycle_height_boundary, num_signers, ); - println!("Advanced to nexct reward cycle boundary: {final_reward_cycle_height_boundary}"); + println!("Advanced to next reward cycle boundary: {final_reward_cycle_height_boundary}"); assert_eq!( signer_test.get_current_reward_cycle(), final_reward_cycle - 1 @@ -1977,39 +1991,20 @@ fn end_of_tenure() { std::thread::sleep(Duration::from_millis(100)); } - info!("Triggering a new block to be mined"); - - // Mine a block into the next reward cycle - let commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 10, - || { - let commits_count = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - Ok(commits_count > commits_before) - }, - ) - .unwrap(); - - // Mine a few blocks so we are well into the next reward cycle - for _ in 0..2 { + while signer_test.get_current_reward_cycle() != final_reward_cycle { next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 10, || Ok(true), ) .unwrap(); + assert!( + start_time.elapsed() <= short_timeout, + "Timed out waiting to enter the next reward cycle" + ); + std::thread::sleep(Duration::from_millis(100)); } - sleep_ms(10_000); - assert_eq!(signer_test.get_current_reward_cycle(), final_reward_cycle); - while test_observer::get_burn_blocks() .last() .unwrap() From 24b2d840a1216b7d557e54d454c53d4b2c5c38e3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 26 Aug 2024 13:57:36 -0400 Subject: [PATCH 0887/1400] fix: remove deadlock condition between p2p and relayer threads that could arise when they both try to read the reward set. In short -- don't use a StacksDBTx<'a> to read chainstate when a StacksDBConn<'a> will do. Patch the relevant files to make it so the offending code can take a StacksDBIndexed trait impl instead of a transaction. --- .../chainstate/nakamoto/coordinator/mod.rs | 2 +- stackslib/src/chainstate/nakamoto/mod.rs | 37 +++++++++++++++---- stackslib/src/chainstate/nakamoto/tenure.rs | 4 +- 3 files changed, 33 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index de884a8d9c..058025ee1c 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -188,7 +188,7 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { debug_log: bool, ) -> Result { let Some(reward_set_block) = NakamotoChainState::get_header_by_coinbase_height( - &mut chainstate.index_tx_begin(), + &mut chainstate.index_conn(), block_id, coinbase_height_of_calculation, )? diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 52ab363297..28ba89d59d 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -300,6 +300,13 @@ pub trait StacksDBIndexed { fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result, DBError>; fn sqlite(&self) -> &Connection; + /// Get the ancestor block hash given a height + fn get_ancestor_block_id( + &mut self, + coinbase_height: u64, + tip_index_hash: &StacksBlockId, + ) -> Result, DBError>; + /// Get the block ID for a specific coinbase height in the fork identified by `tip` fn get_nakamoto_block_id_at_coinbase_height( &mut self, @@ -452,6 +459,14 @@ impl StacksDBIndexed for StacksDBConn<'_> { fn sqlite(&self) -> &Connection { self.conn() } + + fn get_ancestor_block_id( + &mut self, + coinbase_height: u64, + tip_index_hash: &StacksBlockId, + ) -> Result, DBError> { + self.get_ancestor_block_hash(coinbase_height, tip_index_hash) + } } impl StacksDBIndexed for StacksDBTx<'_> { @@ -462,6 +477,14 @@ impl StacksDBIndexed for StacksDBTx<'_> { fn sqlite(&self) -> &Connection { self.tx().deref() } + + fn get_ancestor_block_id( + &mut self, + coinbase_height: u64, + tip_index_hash: &StacksBlockId, + ) -> Result, DBError> { + self.get_ancestor_block_hash(coinbase_height, tip_index_hash) + } } impl<'a> ChainstateTx<'a> { @@ -2406,22 +2429,22 @@ impl NakamotoChainState { /// Return a Nakamoto StacksHeaderInfo at a given coinbase height in the fork identified by `tip_index_hash`. /// * For Stacks 2.x, this is the Stacks block's header /// * For Stacks 3.x (Nakamoto), this is the first block in the miner's tenure. - pub fn get_header_by_coinbase_height( - tx: &mut StacksDBTx, + pub fn get_header_by_coinbase_height( + conn: &mut SDBI, tip_index_hash: &StacksBlockId, coinbase_height: u64, ) -> Result, ChainstateError> { // nakamoto block? if let Some(block_id) = - tx.get_nakamoto_block_id_at_coinbase_height(tip_index_hash, coinbase_height)? + conn.get_nakamoto_block_id_at_coinbase_height(tip_index_hash, coinbase_height)? { - return Self::get_block_header_nakamoto(tx.sqlite(), &block_id); + return Self::get_block_header_nakamoto(conn.sqlite(), &block_id); } // epcoh2 block? - let Some(ancestor_at_height) = tx - .get_ancestor_block_hash(coinbase_height, tip_index_hash)? - .map(|ancestor| Self::get_block_header(tx.tx(), &ancestor)) + let Some(ancestor_at_height) = conn + .get_ancestor_block_id(coinbase_height, tip_index_hash)? + .map(|ancestor| Self::get_block_header(conn.sqlite(), &ancestor)) .transpose()? .flatten() else { diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index bff030be8f..81380cc93d 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -372,7 +372,7 @@ impl NakamotoChainState { let matured_coinbase_height = coinbase_height - MINER_REWARD_MATURITY; let matured_tenure_block_header = Self::get_header_by_coinbase_height( - chainstate_tx, + chainstate_tx.deref_mut(), &tip_index_hash, matured_coinbase_height, )? @@ -964,7 +964,7 @@ impl NakamotoChainState { let total_coinbase = coinbase_at_block.saturating_add(accumulated_rewards); let parent_tenure_start_header: StacksHeaderInfo = Self::get_header_by_coinbase_height( - chainstate_tx, + chainstate_tx.deref_mut(), &block.header.parent_block_id, parent_coinbase_height, )? From 56746d0bfbb660c133fa86749e6c6e7ca57803f5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 26 Aug 2024 15:22:39 -0400 Subject: [PATCH 0888/1400] Fix test compilation Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 1b971869bc..188e2e5a3e 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -1086,8 +1086,6 @@ fn test_nakamoto_chainstate_getters() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_ref().unwrap(); - let (mut stacks_db_tx, _) = chainstate.chainstate_tx_begin().unwrap(); - for coinbase_height in 0..=((tip .anchored_header .as_stacks_nakamoto() @@ -1097,7 +1095,7 @@ fn test_nakamoto_chainstate_getters() { + 1) { let header_opt = NakamotoChainState::get_header_by_coinbase_height( - &mut stacks_db_tx, + &mut chainstate.index_conn(), &tip.index_block_hash(), coinbase_height, ) From 8ba1a3555290f505f4555ebf1de10b1d6eeb9672 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Tue, 27 Aug 2024 11:43:19 +0200 Subject: [PATCH 0889/1400] feat: add `block_time` to `/new_block` event payload --- stackslib/src/chainstate/coordinator/mod.rs | 1 + stackslib/src/chainstate/coordinator/tests.rs | 1 + stackslib/src/chainstate/nakamoto/mod.rs | 3 +++ stackslib/src/chainstate/stacks/db/blocks.rs | 2 ++ stackslib/src/net/mod.rs | 1 + testnet/stacks-node/src/event_dispatcher.rs | 6 ++++++ testnet/stacks-node/src/run_loop/mod.rs | 1 + testnet/stacks-node/src/tests/nakamoto_integrations.rs | 7 +++++++ 8 files changed, 22 insertions(+) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 72e44f981c..d5ffe3e55d 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -179,6 +179,7 @@ pub trait BlockEventDispatcher { pox_constants: &PoxConstants, reward_set_data: &Option, signer_bitvec: &Option>, + block_timestamp: Option, ); /// called whenever a burn block is about to be diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 50127af176..be5f862839 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -430,6 +430,7 @@ impl BlockEventDispatcher for NullEventDispatcher { _pox_constants: &PoxConstants, _reward_set_data: &Option, _signer_bitvec: &Option>, + _block_timestamp: Option, ) { assert!( false, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 28ba89d59d..2ccf6c1157 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2049,6 +2049,8 @@ impl NakamotoChainState { let signer_bitvec = (&next_ready_block).header.pox_treatment.clone(); + let block_timestamp = next_ready_block.header.timestamp; + // set stacks block accepted let mut sort_tx = sort_db.tx_handle_begin(canonical_sortition_tip)?; sort_tx.set_stacks_block_accepted( @@ -2088,6 +2090,7 @@ impl NakamotoChainState { &pox_constants, &reward_set_data, &Some(signer_bitvec), + Some(block_timestamp), ); } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 47cace8c4b..a45a8d60cb 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -190,6 +190,7 @@ impl BlockEventDispatcher for DummyEventDispatcher { _pox_constants: &PoxConstants, _reward_set_data: &Option, _signer_bitvec: &Option>, + _block_timestamp: Option, ) { assert!( false, @@ -6409,6 +6410,7 @@ impl StacksChainState { &pox_constants, &reward_set_data, &None, + None, ); } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 3ba4292f1c..ba8575a032 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2017,6 +2017,7 @@ pub mod test { pox_constants: &PoxConstants, reward_set_data: &Option, _signer_bitvec: &Option>, + _block_timestamp: Option, ) { self.blocks.lock().unwrap().push(TestEventObserverBlock { block: block.clone(), diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 34e42501ac..99b500dc52 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -594,6 +594,7 @@ impl EventObserver { pox_constants: &PoxConstants, reward_set_data: &Option, signer_bitvec_opt: &Option>, + block_timestamp: Option, ) -> serde_json::Value { // Serialize events to JSON let serialized_events: Vec = filtered_events @@ -631,6 +632,7 @@ impl EventObserver { let mut payload = json!({ "block_hash": format!("0x{}", block.block_hash), "block_height": metadata.stacks_block_height, + "block_time": block_timestamp, "burn_block_hash": format!("0x{}", metadata.burn_header_hash), "burn_block_height": metadata.burn_header_height, "miner_txid": format!("0x{}", winner_txid), @@ -852,6 +854,7 @@ impl BlockEventDispatcher for EventDispatcher { pox_constants: &PoxConstants, reward_set_data: &Option, signer_bitvec: &Option>, + block_timestamp: Option, ) { self.process_chain_tip( block, @@ -869,6 +872,7 @@ impl BlockEventDispatcher for EventDispatcher { pox_constants, reward_set_data, signer_bitvec, + block_timestamp, ); } @@ -1051,6 +1055,7 @@ impl EventDispatcher { pox_constants: &PoxConstants, reward_set_data: &Option, signer_bitvec: &Option>, + block_timestamp: Option, ) { let all_receipts = receipts.to_owned(); let (dispatch_matrix, events) = self.create_dispatch_matrix_and_event_vector(&all_receipts); @@ -1102,6 +1107,7 @@ impl EventDispatcher { pox_constants, reward_set_data, signer_bitvec, + block_timestamp, ); // Send payload diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index b824793e17..2be02659cd 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -197,5 +197,6 @@ pub fn announce_boot_receipts( pox_constants, &None, &None, + None, ); } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a658bfbcf6..5bd9ba87e7 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2271,6 +2271,13 @@ fn correct_burn_outs() { "Blocks should be sorted by cycle number already" ); + let block_times: Vec = new_blocks_with_reward_set + .iter() + .filter_map(|block| block.get("block_time").and_then(|cn| cn.as_u64())) + .collect(); + // Assert that block_times are all greater than 0 + assert!(block_times.iter().all(|&t| t > 0)); + for block in new_blocks_with_reward_set.iter() { let cycle_number = block["cycle_number"].as_u64().unwrap(); let reward_set = block["reward_set"].as_object().unwrap(); From 880b345f2ddcf13a6094303b5b2d4f320156f600 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Tue, 27 Aug 2024 12:09:59 +0200 Subject: [PATCH 0890/1400] chore: fix tests --- testnet/stacks-node/src/event_dispatcher.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 99b500dc52..53a17aca97 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -1514,6 +1514,7 @@ mod test { let mblock_confirmed_consumed = ExecutionCost::zero(); let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); + let block_timestamp = Some(123456); let payload = observer.make_new_block_processed_payload( filtered_events, @@ -1531,6 +1532,7 @@ mod test { &pox_constants, &None, &Some(signer_bitvec.clone()), + block_timestamp, ); assert_eq!( payload @@ -1582,6 +1584,7 @@ mod test { let mblock_confirmed_consumed = ExecutionCost::zero(); let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); + let block_timestamp = Some(123456); let payload = observer.make_new_block_processed_payload( filtered_events, @@ -1599,6 +1602,7 @@ mod test { &pox_constants, &None, &Some(signer_bitvec.clone()), + block_timestamp, ); let event_signer_signature = payload From cc9ab54a6e4a99ee8527f93ace1243918180a3dd Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 27 Aug 2024 10:10:35 -0700 Subject: [PATCH 0891/1400] Update changelog for 2.5.0.0.5.2 --- stacks-signer/CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 1476d56ad0..dabe0b346a 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,14 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +## [2.5.0.0.5.2] + +### Added + +### Changed + +- Reuse BlockResponse slot for MockSignature message type (#5103) + ## [2.5.0.0.5.2-rc1] ### Added From 072be56e5dafd8ab35556d66a11b2cfac2623506 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 28 Aug 2024 09:10:55 -0400 Subject: [PATCH 0892/1400] Fix typo in httpcore.rs StacksHttpMessage Signed-off-by: Jacinta Ferrant --- stackslib/src/net/httpcore.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index bd406345b9..804add6f33 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -764,7 +764,7 @@ impl MessageSequence for StacksHttpMessage { } fn get_message_name(&self) -> &'static str { - "StachsHttpMessage" + "StacksHttpMessage" } } From 30575476e50f35918ce21f68385a2b30e7d5a7c8 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 22 Aug 2024 16:07:58 -0400 Subject: [PATCH 0893/1400] test(signer): Add test with duplicate signer config --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/mod.rs | 56 ++++++------ testnet/stacks-node/src/tests/signer/v0.rs | 97 +++++++++++++++++++-- 3 files changed, 121 insertions(+), 33 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 27e76a646d..894aed3cf3 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -99,6 +99,7 @@ jobs: - tests::signer::v0::reloads_signer_set_in - tests::signer::v0::signers_broadcast_signed_blocks - tests::signer::v0::min_gap_between_blocks + - tests::signer::v0::duplicate_signers - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 5761372803..42407a1a76 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -125,7 +125,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest, mut signer_config_modifier: F, mut node_config_modifier: G, - btc_miner_pubkeys: &[Secp256k1PublicKey], + btc_miner_pubkeys: Option>, + signer_stacks_private_keys: Option>, ) -> Self { // Generate Signer Data - let signer_stacks_private_keys = (0..num_signers) - .map(|_| StacksPrivateKey::new()) - .collect::>(); + let signer_stacks_private_keys = signer_stacks_private_keys + .inspect(|keys| { + assert_eq!( + keys.len(), + num_signers, + "Number of private keys does not match number of signers" + ) + }) + .unwrap_or_else(|| (0..num_signers).map(|_| StacksPrivateKey::new()).collect()); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); @@ -159,11 +167,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest>(); + + // First two signers have same private key + signer_stacks_private_keys[1] = signer_stacks_private_keys[0]; + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + None, + |_| {}, + |_| {}, + None, + Some(signer_stacks_private_keys), + ); + let timeout = Duration::from_secs(30); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_mined_before = mined_blocks.load(Ordering::SeqCst); + + signer_test.boot_to_epoch_3(); + + // give the system a chance to reach the Nakamoto start tip + // mine a Nakamoto block + wait_for(30, || { + let blocks_mined = mined_blocks.load(Ordering::SeqCst); + Ok(blocks_mined > blocks_mined_before) + }) + .unwrap(); + + info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + + // Test prometheus metrics response + #[cfg(feature = "monitoring_prom")] + { + let metrics_response = signer_test.get_signer_metrics(); + + // Because 5 signers are running in the same process, the prometheus metrics + // are incremented once for every signer. This is why we expect the metric to be + // `5`, even though there is only one block proposed. + let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); + assert!(metrics_response.contains(&expected_result)); + let expected_result = format!( + "stacks_signer_block_responses_sent{{response_type=\"accepted\"}} {}", + num_signers + ); + assert!(metrics_response.contains(&expected_result)); + } +} From c30162159d6573133cc8e7a540bb3afc3780abef Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 27 Aug 2024 08:53:05 -0400 Subject: [PATCH 0894/1400] test(signer): Add checks for duplicate signing keys --- .../src/tests/neon_integrations.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 47 ++++++------------- 2 files changed, 16 insertions(+), 33 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 0905fb1f60..3dc9669a9e 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -574,7 +574,7 @@ pub mod test_observer { pub fn contains_burn_block_range(range: impl RangeBounds) -> Result<(), String> { // Get set of all burn block heights let burn_block_heights = get_blocks() - .iter() + .into_iter() .map(|x| x.get("burn_block_height").unwrap().as_u64().unwrap()) .collect::>(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 60b6c87937..877c6b71ae 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -321,6 +321,7 @@ impl SignerTest { // Verify that the signers signed the proposed block let mut signer_index = 0; let mut signature_index = 0; + let mut signing_keys = HashSet::new(); let validated = loop { // Since we've already checked `signature.len()`, this means we've // validated all the signatures in this loop @@ -331,6 +332,9 @@ impl SignerTest { error!("Failed to validate the mined nakamoto block: ran out of signers to try to validate signatures"); break false; }; + if !signing_keys.insert(signer.signing_key) { + panic!("Duplicate signing key detected: {:?}", signer.signing_key); + } let stacks_public_key = Secp256k1PublicKey::from_slice(signer.signing_key.as_slice()) .expect("Failed to convert signing key to StacksPublicKey"); let valid = stacks_public_key @@ -488,11 +492,7 @@ fn block_proposal_rejection() { while !found_signer_signature_hash_1 && !found_signer_signature_hash_2 { std::thread::sleep(Duration::from_secs(1)); let chunks = test_observer::get_stackerdb_chunks(); - for chunk in chunks - .into_iter() - .map(|chunk| chunk.modified_slots) - .flatten() - { + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) else { continue; @@ -2982,6 +2982,13 @@ fn duplicate_signers() { // First two signers have same private key signer_stacks_private_keys[1] = signer_stacks_private_keys[0]; + let duplicate_pubkey = Secp256k1PublicKey::from_private(&signer_stacks_private_keys[0]); + let duplicate_pubkey_from_copy = + Secp256k1PublicKey::from_private(&signer_stacks_private_keys[1]); + assert_eq!( + duplicate_pubkey, duplicate_pubkey_from_copy, + "Recovered pubkeys don't match" + ); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, @@ -2992,37 +2999,13 @@ fn duplicate_signers() { None, Some(signer_stacks_private_keys), ); - let timeout = Duration::from_secs(30); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_mined_before = mined_blocks.load(Ordering::SeqCst); signer_test.boot_to_epoch_3(); + let timeout = Duration::from_secs(30); - // give the system a chance to reach the Nakamoto start tip - // mine a Nakamoto block - wait_for(30, || { - let blocks_mined = mined_blocks.load(Ordering::SeqCst); - Ok(blocks_mined > blocks_mined_before) - }) - .unwrap(); + info!("------------------------- Try mining one block -------------------------"); - info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); - // Test prometheus metrics response - #[cfg(feature = "monitoring_prom")] - { - let metrics_response = signer_test.get_signer_metrics(); - - // Because 5 signers are running in the same process, the prometheus metrics - // are incremented once for every signer. This is why we expect the metric to be - // `5`, even though there is only one block proposed. - let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); - assert!(metrics_response.contains(&expected_result)); - let expected_result = format!( - "stacks_signer_block_responses_sent{{response_type=\"accepted\"}} {}", - num_signers - ); - assert!(metrics_response.contains(&expected_result)); - } + signer_test.shutdown(); } From bf860f4d0fc41c34abc418632d778cefcc8fc2c3 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 28 Aug 2024 12:56:01 -0400 Subject: [PATCH 0895/1400] test(signer): Add check that duplicate signers produce identical signatures and recovered pubkeys --- stacks-common/src/util/secp256k1.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 49 ++++++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1.rs index 0274f41b02..034a5a4941 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1.rs @@ -38,7 +38,7 @@ use crate::util::hash::{hex_bytes, to_hex}; // per-thread Secp256k1 context thread_local!(static _secp256k1: Secp256k1 = Secp256k1::new()); -#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Hash)] pub struct Secp256k1PublicKey { // serde is broken for secp256k1, so do it ourselves #[serde( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 877c6b71ae..7c7412a6e4 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2982,6 +2982,7 @@ fn duplicate_signers() { // First two signers have same private key signer_stacks_private_keys[1] = signer_stacks_private_keys[0]; + let unique_signers = num_signers - 1; let duplicate_pubkey = Secp256k1PublicKey::from_private(&signer_stacks_private_keys[0]); let duplicate_pubkey_from_copy = Secp256k1PublicKey::from_private(&signer_stacks_private_keys[1]); @@ -3007,5 +3008,53 @@ fn duplicate_signers() { signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + info!("------------------------- Read all `BlockResponse::Accepted` messages -------------------------"); + + let mut signer_accepted_responses = vec![]; + let start_polling = Instant::now(); + while start_polling.elapsed() <= timeout { + std::thread::sleep(Duration::from_secs(1)); + let messages = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()).ok() + }) + .filter_map(|message| match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(m)) => { + info!("Message(accepted): {message:?}"); + Some(m) + } + _ => { + debug!("Message(ignored): {message:?}"); + None + } + }); + signer_accepted_responses.extend(messages); + } + + info!("------------------------- Assert there are {unique_signers} unique signatures and recovered pubkeys -------------------------"); + + // Pick a message hash + let (selected_sighash, _) = signer_accepted_responses + .iter() + .min_by_key(|(sighash, _)| *sighash) + .copied() + .expect("No `BlockResponse::Accepted` messages recieved"); + + // Filter only resonses for selected block and collect unique pubkeys and signatures + let (pubkeys, signatures): (HashSet<_>, HashSet<_>) = signer_accepted_responses + .into_iter() + .filter(|(hash, _)| *hash == selected_sighash) + .map(|(msg, sig)| { + let pubkey = Secp256k1PublicKey::recover_to_pubkey(msg.bits(), &sig) + .expect("Failed to recover pubkey"); + (pubkey, sig) + }) + .unzip(); + + assert_eq!(pubkeys.len(), unique_signers); + assert_eq!(signatures.len(), unique_signers); + signer_test.shutdown(); } From 3bd185f82481b070e3f8431aef38c92221a081f3 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 12 Aug 2024 17:32:11 -0700 Subject: [PATCH 0896/1400] Adding changes for 2.5.0.0.6 --- CHANGELOG.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb6061cb9d..2cca1d273a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,27 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-tenure-info?` added - `get-block-info?` removed +## [2.5.0.0.7] + +### Added + +feat: Neon mock miner replay (#5060) +chore: add warn logs for block validate rejections (#5079) + +### Changed + +- bugfix/boot_to_epoch_3 in SignerTest should wait for a new commit (#5087) +- Fix: Revert BurnchainHeaderHash serialization change (#5094) +- Feat/mock signing revamp (#5070) +- Fix block proposal rejection test (#5084) +- Fix/multi miner fixes jude (#5040) + +## [2.5.0.0.6] + +### Changed + +- If there is a getchunk/putchunk that fails due to a stale (or future) version NACK, the StackerDB sync state machine should immediately retry sync (#5066) + ## [2.5.0.0.5] ### Added From 88c95c6817f3412d5dc7b41c3d0dc8a9b3070ed3 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 28 Aug 2024 10:51:47 -0700 Subject: [PATCH 0897/1400] Update changelog for release --- CHANGELOG.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cca1d273a..0f9a419f1d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,16 +21,16 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added -feat: Neon mock miner replay (#5060) -chore: add warn logs for block validate rejections (#5079) +- Neon mock miner replay (#5060) +- Add warn logs for block validate rejections (#5079) ### Changed -- bugfix/boot_to_epoch_3 in SignerTest should wait for a new commit (#5087) -- Fix: Revert BurnchainHeaderHash serialization change (#5094) -- Feat/mock signing revamp (#5070) +- boot_to_epoch_3 in SignerTest should wait for a new commit (#5087) +- Revert BurnchainHeaderHash serialization change (#5094) +- Mock signing revamp (#5070) - Fix block proposal rejection test (#5084) -- Fix/multi miner fixes jude (#5040) +- Multi miner fixes jude (#5040) ## [2.5.0.0.6] From 1584e9c422782e6e5e3f342f2e334f0e3d9e41b1 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 28 Aug 2024 10:52:51 -0700 Subject: [PATCH 0898/1400] Update changelog for release --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f9a419f1d..2add5b99f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,15 +21,15 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added -- Neon mock miner replay (#5060) - Add warn logs for block validate rejections (#5079) +- Neon mock miner replay (#5060) ### Changed -- boot_to_epoch_3 in SignerTest should wait for a new commit (#5087) - Revert BurnchainHeaderHash serialization change (#5094) -- Mock signing revamp (#5070) +- boot_to_epoch_3 in SignerTest should wait for a new commit (#5087) - Fix block proposal rejection test (#5084) +- Mock signing revamp (#5070) - Multi miner fixes jude (#5040) ## [2.5.0.0.6] From 03b373d837b7b3dc2a38efd0b8304b2d1ea33090 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:49:59 -0400 Subject: [PATCH 0899/1400] chore: remove spurious deadlock condition arising from needlessly opening a transaction whenever we open the sortition DB --- stackslib/src/chainstate/burn/db/sortdb.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 3cf13a8a55..808cb73c1f 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3456,6 +3456,14 @@ impl SortitionDB { SortitionDB::apply_schema_9(&tx.deref(), epochs)?; tx.commit()?; } else if version == expected_version { + // this transaction is almost never needed + let validated_epochs = StacksEpoch::validate_epochs(epochs); + let existing_epochs = Self::get_stacks_epochs(self.conn())?; + if existing_epochs == validated_epochs { + return Ok(()); + } + + // epochs are out of date let tx = self.tx_begin()?; SortitionDB::validate_and_replace_epochs(&tx, epochs)?; tx.commit()?; From 3d56c79f28ecac584a4ac0ce843c0d2a26401859 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:50:23 -0400 Subject: [PATCH 0900/1400] fix: invoke both the epoch2 and nakamoto block announcement handlers when in the transition reward cycle --- stackslib/src/chainstate/coordinator/mod.rs | 31 ++++++++++----------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 72e44f981c..2849b74904 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -543,20 +543,24 @@ impl< in_nakamoto_epoch: false, }; - let mut nakamoto_available = false; loop { - if nakamoto_available - || inst - .can_process_nakamoto() - .expect("FATAL: could not determine if Nakamoto is available") - { - // short-circuit to avoid gratuitous I/O - nakamoto_available = true; - if !inst.handle_comms_nakamoto(&comms, miner_status.clone()) { + let bits = comms.wait_on(); + if inst.in_subsequent_nakamoto_reward_cycle() { + debug!("Coordinator: in subsequent Nakamoto reward cycle"); + if !inst.handle_comms_nakamoto(bits, miner_status.clone()) { + return; + } + } else if inst.in_first_nakamoto_reward_cycle() { + debug!("Coordinator: in first Nakamoto reward cycle"); + if !inst.handle_comms_nakamoto(bits, miner_status.clone()) { + return; + } + if !inst.handle_comms_epoch2(bits, miner_status.clone()) { return; } } else { - if !inst.handle_comms_epoch2(&comms, miner_status.clone()) { + debug!("Coordinator: in epoch2 reward cycle"); + if !inst.handle_comms_epoch2(bits, miner_status.clone()) { return; } } @@ -566,13 +570,8 @@ impl< /// This is the Stacks 2.x coordinator loop body, which handles communications /// from the given `comms`. It returns `true` if the coordinator is still running, and `false` /// if not. - pub fn handle_comms_epoch2( - &mut self, - comms: &CoordinatorReceivers, - miner_status: Arc>, - ) -> bool { + pub fn handle_comms_epoch2(&mut self, bits: u8, miner_status: Arc>) -> bool { // timeout so that we handle Ctrl-C a little gracefully - let bits = comms.wait_on(); if (bits & (CoordinatorEvents::NEW_STACKS_BLOCK as u8)) != 0 { signal_mining_blocked(miner_status.clone()); debug!("Received new stacks block notice"); From bcb0abc310ceb7f5813024eb0be8438dc7a2b4a7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:50:49 -0400 Subject: [PATCH 0901/1400] fix: remove buggy check to see if we're in nakamoto, and just rely on reward cycles --- .../chainstate/nakamoto/coordinator/mod.rs | 94 +++++++------------ 1 file changed, 35 insertions(+), 59 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index de884a8d9c..cfbf18f062 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -614,38 +614,11 @@ impl< B: BurnchainHeaderReader, > ChainsCoordinator<'a, T, N, U, CE, FE, B> { - /// Check to see if we're in the last of the 2.x epochs, and we have the first PoX anchor block - /// for epoch 3. - /// NOTE: the first block in epoch3 must be after the first block in the reward phase, so as - /// to ensure that the PoX stackers have been selected for this cycle. This means that we - /// don't proceed to process Nakamoto blocks until the reward cycle has begun. Also, the last - /// reward cycle of epoch2 _must_ be PoX so we have stackers who can sign. - pub fn can_process_nakamoto(&mut self) -> Result { - let canonical_sortition_tip = self - .canonical_sortition_tip - .clone() - .expect("FAIL: checking epoch status, but we don't have a canonical sortition tip"); - - let canonical_sn = - SortitionDB::get_block_snapshot(self.sortition_db.conn(), &canonical_sortition_tip)? - .expect("FATAL: canonical sortition tip has no sortition"); + /// Get the first nakamoto reward cycle + fn get_first_nakamoto_reward_cycle(&self) -> u64 { + let all_epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn()) + .unwrap_or_else(|e| panic!("FATAL: failed to query sortition DB for epochs: {:?}", &e)); - // what epoch are we in? - let cur_epoch = - SortitionDB::get_stacks_epoch(self.sortition_db.conn(), canonical_sn.block_height)? - .unwrap_or_else(|| { - panic!( - "BUG: no epoch defined at height {}", - canonical_sn.block_height - ) - }); - - if cur_epoch.epoch_id < StacksEpochId::Epoch30 { - return Ok(false); - } - - // in epoch3 - let all_epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn())?; let epoch_3_idx = StacksEpoch::find_epoch_by_id(&all_epochs, StacksEpochId::Epoch30) .expect("FATAL: epoch3 not defined"); @@ -655,32 +628,36 @@ impl< .block_height_to_reward_cycle(epoch3.start_height) .expect("FATAL: epoch3 block height has no reward cycle"); - // NOTE(safety): this is not guaranteed to be the canonical best Stacks tip. - // However, it's safe to use here because we're only interested in loading up the first - // Nakamoto reward set, which uses the epoch2 anchor block selection algorithm. There will - // only be one such reward set in epoch2 rules, since it's tied to a specific block-commit - // (note that this is not true for reward sets generated in Nakamoto prepare phases). - let (local_best_stacks_ch, local_best_stacks_bhh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortition_db.conn())?; - let local_best_stacks_tip = - StacksBlockId::new(&local_best_stacks_ch, &local_best_stacks_bhh); - - // only proceed if we have processed the _anchor block_ for this reward cycle. - let Some((rc_info, _)) = load_nakamoto_reward_set( - self.burnchain - .block_height_to_reward_cycle(canonical_sn.block_height) - .expect("FATAL: snapshot has no reward cycle"), - &canonical_sn.sortition_id, - &self.burnchain, - &mut self.chain_state_db, - &local_best_stacks_tip, - &self.sortition_db, - &OnChainRewardSetProvider::new(), - )? - else { - return Ok(false); - }; - Ok(rc_info.reward_cycle >= first_epoch3_reward_cycle) + first_epoch3_reward_cycle + } + + /// Get the current reward cycle + fn get_current_reward_cycle(&self) -> u64 { + let canonical_sortition_tip = self.canonical_sortition_tip.clone().unwrap_or_else(|| { + panic!("FAIL: checking epoch status, but we don't have a canonical sortition tip") + }); + + let canonical_sn = + SortitionDB::get_block_snapshot(self.sortition_db.conn(), &canonical_sortition_tip) + .unwrap_or_else(|e| panic!("FATAL: failed to query sortition DB: {:?}", &e)) + .unwrap_or_else(|| panic!("FATAL: canonical sortition tip has no sortition")); + + let cur_reward_cycle = self + .burnchain + .block_height_to_reward_cycle(canonical_sn.block_height) + .expect("FATAL: snapshot has no reward cycle"); + + cur_reward_cycle + } + + /// Are we in the first-ever Nakamoto reward cycle? + pub fn in_first_nakamoto_reward_cycle(&self) -> bool { + self.get_current_reward_cycle() == self.get_first_nakamoto_reward_cycle() + } + + /// Are we in the second or later Nakamoto reward cycle? + pub fn in_subsequent_nakamoto_reward_cycle(&self) -> bool { + self.get_current_reward_cycle() > self.get_first_nakamoto_reward_cycle() } /// This is the main loop body for the coordinator in epoch 3. @@ -688,11 +665,10 @@ impl< /// Returns false otherwise. pub fn handle_comms_nakamoto( &mut self, - comms: &CoordinatorReceivers, + bits: u8, miner_status: Arc>, ) -> bool { // timeout so that we handle Ctrl-C a little gracefully - let bits = comms.wait_on(); if (bits & (CoordinatorEvents::NEW_STACKS_BLOCK as u8)) != 0 { signal_mining_blocked(miner_status.clone()); debug!("Received new Nakamoto stacks block notice"); From 0946b3dfe8b9808e0f38ba08c1adfb4f5e57ca28 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:51:11 -0400 Subject: [PATCH 0902/1400] chore: doc fix --- stackslib/src/net/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 96c5be3d30..0fa8688abb 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1502,7 +1502,7 @@ pub struct NetworkResult { pub num_connected_peers: usize, /// The observed burnchain height pub burn_height: u64, - /// The consensus hash of the burnchain tip (prefixed `rc_` for historical reasons) + /// The consensus hash of the stacks tip (prefixed `rc_` for historical reasons) pub rc_consensus_hash: ConsensusHash, /// The current StackerDB configs pub stacker_db_configs: HashMap, From e7a93c953e74734de75866a53286cd7e4e376fda Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:51:26 -0400 Subject: [PATCH 0903/1400] chore: log stacks tip height --- .../stacks-node/src/nakamoto_node/relayer.rs | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 435305472a..4701656587 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -812,6 +812,8 @@ impl RelayerThread { reason, )?; + debug!("Relayer: starting new tenure thread"); + let new_miner_handle = std::thread::Builder::new() .name(format!("miner.{parent_tenure_start}",)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) @@ -1036,6 +1038,25 @@ impl RelayerThread { return Err(NakamotoNodeError::StacksTipChanged); } + let Some(tip_height) = NakamotoChainState::get_block_header( + self.chainstate.db(), + &StacksBlockId::new(&tip_block_ch, &tip_block_bh), + ) + .map_err(|e| { + warn!( + "Relayer: failed to load tip {}/{}: {:?}", + &tip_block_ch, &tip_block_bh, &e + ); + NakamotoNodeError::ParentNotFound + })? + .map(|header| header.stacks_block_height) else { + warn!( + "Relayer: failed to load height for tip {}/{} (got None)", + &tip_block_ch, &tip_block_bh + ); + return Err(NakamotoNodeError::ParentNotFound); + }; + // sign and broadcast let mut op_signer = self.keychain.generate_op_signer(); let txid = self @@ -1057,6 +1078,7 @@ impl RelayerThread { "Relayer: Submitted block-commit"; "tip_consensus_hash" => %tip_block_ch, "tip_block_hash" => %tip_block_bh, + "tip_height" => %tip_height, "tip_block_id" => %StacksBlockId::new(&tip_block_ch, &tip_block_bh), "txid" => %txid, ); From cd4b3cd74bff2cd0c7c44e7c47e2f5949b6f585a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:51:43 -0400 Subject: [PATCH 0904/1400] chore: add integration test to see that a follower can cold-boot into nakamoto --- .../src/tests/nakamoto_integrations.rs | 208 ++++++++++++++++++ 1 file changed, 208 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a658bfbcf6..ea431d9cd5 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3340,6 +3340,214 @@ fn follower_bootup() { follower_thread.join().unwrap(); } +/// This test boots a follower node using the block downloader, but the follower will be multiple +/// Nakamoto reward cycles behind. +#[test] +#[ignore] +fn follower_bootup_across_multiple_cycles() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.burnchain.max_rbf = 10_000_000; + + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // mine two reward cycles + for _ in 0..btc_regtest_controller + .get_burnchain() + .pox_constants + .reward_cycle_length + * 2 + { + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + } + + info!("Nakamoto miner has advanced two reward cycles"); + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + "block_height_pre_3_0" => block_height_pre_3_0 + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + + // spawn follower + let mut follower_conf = naka_conf.clone(); + follower_conf.events_observers.clear(); + follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir); + follower_conf.node.seed = vec![0x01; 32]; + follower_conf.node.local_peer_seed = vec![0x02; 32]; + + let mut rng = rand::thread_rng(); + let mut buf = [0u8; 8]; + rng.fill_bytes(&mut buf); + + let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + + let localhost = "127.0.0.1"; + follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); + follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); + follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.pox_sync_sample_secs = 30; + + let node_info = get_chain_info(&naka_conf); + follower_conf.node.add_bootstrap_node( + &format!( + "{}@{}", + &node_info.node_public_key.unwrap(), + naka_conf.node.p2p_bind + ), + CHAIN_ID_TESTNET, + PEER_VERSION_TESTNET, + ); + + let mut follower_run_loop = boot_nakamoto::BootRunLoop::new(follower_conf.clone()).unwrap(); + let follower_run_loop_stopper = follower_run_loop.get_termination_switch(); + let follower_coord_channel = follower_run_loop.coordinator_channels(); + + debug!( + "Booting follower-thread ({},{})", + &follower_conf.node.p2p_bind, &follower_conf.node.rpc_bind + ); + debug!( + "Booting follower-thread: neighbors = {:?}", + &follower_conf.node.bootstrap_node + ); + + // spawn a follower thread + let follower_thread = thread::Builder::new() + .name("follower-thread".into()) + .spawn(move || follower_run_loop.start(None, 0)) + .unwrap(); + + debug!("Booted follower-thread"); + + wait_for(300, || { + sleep_ms(1000); + let Ok(follower_node_info) = get_chain_info_result(&follower_conf) else { + return Ok(false); + }; + + info!( + "Follower tip is now {}/{}", + &follower_node_info.stacks_tip_consensus_hash, &follower_node_info.stacks_tip + ); + Ok( + follower_node_info.stacks_tip_consensus_hash == tip.consensus_hash + && follower_node_info.stacks_tip == tip.anchored_header.block_hash(), + ) + }) + .unwrap(); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + follower_coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + follower_run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + follower_thread.join().unwrap(); +} + #[test] #[ignore] fn stack_stx_burn_op_integration_test() { From 8825583e429447a44f3c9269544e0dc186040548 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:52:33 -0400 Subject: [PATCH 0905/1400] chore: add follower cold-boot integration test to CI --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 27e76a646d..7da1fe010d 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -108,6 +108,7 @@ jobs: - tests::nakamoto_integrations::continue_tenure_extend - tests::nakamoto_integrations::mock_mining - tests::nakamoto_integrations::multiple_miners + - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower From dd98a2cc5591d722ef662976981da1eb926338cd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 23:22:33 -0400 Subject: [PATCH 0906/1400] fix: some tests don't have epoch3 defined, so don't panic while testing if that's the case --- stackslib/src/chainstate/nakamoto/coordinator/mod.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index cfbf18f062..309d02dd54 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -619,8 +619,15 @@ impl< let all_epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn()) .unwrap_or_else(|e| panic!("FATAL: failed to query sortition DB for epochs: {:?}", &e)); - let epoch_3_idx = StacksEpoch::find_epoch_by_id(&all_epochs, StacksEpochId::Epoch30) - .expect("FATAL: epoch3 not defined"); + let Some(epoch_3_idx) = StacksEpoch::find_epoch_by_id(&all_epochs, StacksEpochId::Epoch30) else { + // this is only reachable in tests + if cfg!(any(test, feature = "testing")) { + return u64::MAX; + } + else { + panic!("FATAL: epoch3 not defined"); + } + }; let epoch3 = &all_epochs[epoch_3_idx]; let first_epoch3_reward_cycle = self From 93a5c71de28d0cd1549ff11bab479690838a24be Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 23:34:47 -0400 Subject: [PATCH 0907/1400] fix: cargo fmt --- stackslib/src/chainstate/nakamoto/coordinator/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 309d02dd54..82977cadc1 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -619,12 +619,12 @@ impl< let all_epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn()) .unwrap_or_else(|e| panic!("FATAL: failed to query sortition DB for epochs: {:?}", &e)); - let Some(epoch_3_idx) = StacksEpoch::find_epoch_by_id(&all_epochs, StacksEpochId::Epoch30) else { + let Some(epoch_3_idx) = StacksEpoch::find_epoch_by_id(&all_epochs, StacksEpochId::Epoch30) + else { // this is only reachable in tests if cfg!(any(test, feature = "testing")) { return u64::MAX; - } - else { + } else { panic!("FATAL: epoch3 not defined"); } }; From fd9f569dc52bf4ef9139079c44d92bae986e7441 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 15:54:37 -0400 Subject: [PATCH 0908/1400] fix: handle rc_consensus_hash mismatch in one place --- stackslib/src/net/chat.rs | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 95d6fbac82..4becf3891d 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -1917,25 +1917,14 @@ impl ConversationP2P { /// Generates a Nack if we don't have this DB, or if the request's consensus hash is invalid. fn make_stacker_db_getchunkinv_response( network: &PeerNetwork, + sortdb: &SortitionDB, getchunkinv: &StackerDBGetChunkInvData, ) -> Result { - let local_peer = network.get_local_peer(); - let burnchain_view = network.get_chain_view(); - - // remote peer's Stacks chain tip is different from ours, meaning it might have a different - // stackerdb configuration view (and we won't be able to authenticate their chunks, and - // vice versa) - if burnchain_view.rc_consensus_hash != getchunkinv.rc_consensus_hash { - debug!( - "{:?}: NACK StackerDBGetChunkInv; {} != {}", - local_peer, &burnchain_view.rc_consensus_hash, &getchunkinv.rc_consensus_hash - ); - return Ok(StacksMessageType::Nack(NackData::new( - NackErrorCodes::StaleView, - ))); - } - - Ok(network.make_StackerDBChunksInv_or_Nack(&getchunkinv.contract_id)) + Ok(network.make_StackerDBChunksInv_or_Nack( + sortdb, + &getchunkinv.contract_id, + &getchunkinv.rc_consensus_hash, + )) } /// Handle an inbound StackerDBGetChunkInv request. @@ -1943,10 +1932,12 @@ impl ConversationP2P { fn handle_stacker_db_getchunkinv( &mut self, network: &PeerNetwork, + sortdb: &SortitionDB, preamble: &Preamble, getchunkinv: &StackerDBGetChunkInvData, ) -> Result { - let response = ConversationP2P::make_stacker_db_getchunkinv_response(network, getchunkinv)?; + let response = + ConversationP2P::make_stacker_db_getchunkinv_response(network, sortdb, getchunkinv)?; self.sign_and_reply( network.get_local_peer(), network.get_chain_view(), @@ -2363,7 +2354,7 @@ impl ConversationP2P { } } StacksMessageType::StackerDBGetChunkInv(ref getchunkinv) => { - self.handle_stacker_db_getchunkinv(network, &msg.preamble, getchunkinv) + self.handle_stacker_db_getchunkinv(network, sortdb, &msg.preamble, getchunkinv) } StacksMessageType::StackerDBGetChunk(ref getchunk) => { self.handle_stacker_db_getchunk(network, &msg.preamble, getchunk) From dcdb3aa2e963ff942dd8fa72009ac9ca35e3730d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 15:54:51 -0400 Subject: [PATCH 0909/1400] chore: document more connection options, and add maximum stackerdb message buffer size --- stackslib/src/net/connection.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 3577900279..0d4d5aafd6 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -378,11 +378,18 @@ pub struct ConnectionOptions { pub max_microblock_push: u64, pub antientropy_retry: u64, pub antientropy_public: bool, + /// maximum number of Stacks 2.x BlocksAvailable messages that can be buffered before processing pub max_buffered_blocks_available: u64, + /// maximum number of Stacks 2.x MicroblocksAvailable that can be buffered before processing pub max_buffered_microblocks_available: u64, + /// maximum number of Stacks 2.x pushed Block messages we can buffer before processing pub max_buffered_blocks: u64, + /// maximum number of Stacks 2.x pushed Microblock messages we can buffer before processing pub max_buffered_microblocks: u64, + /// maximum number of pushed Nakamoto Block messages we can buffer before processing pub max_buffered_nakamoto_blocks: u64, + /// maximum number of pushed StackerDB chunk messages we can buffer before processing + pub max_buffered_stackerdb_chunks: u64, /// how often to query a remote peer for its mempool, in seconds pub mempool_sync_interval: u64, /// how many transactions to ask for in a mempool query @@ -522,6 +529,7 @@ impl std::default::Default for ConnectionOptions { max_buffered_blocks: 5, max_buffered_microblocks: 1024, max_buffered_nakamoto_blocks: 1024, + max_buffered_stackerdb_chunks: 4096, mempool_sync_interval: 30, // number of seconds in-between mempool sync mempool_max_tx_query: 128, // maximum number of transactions to visit per mempool query mempool_sync_timeout: 180, // how long a mempool sync can go for (3 minutes) From ecb28f6a9c2d1622cc2080f30a03fc5f6b502d3e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 15:55:40 -0400 Subject: [PATCH 0910/1400] chore: document NACK error codes, and add one for an unrecognized (future) StackerDB view --- stackslib/src/net/mod.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 96c5be3d30..507a514144 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -91,7 +91,7 @@ use crate::net::http::{ use crate::net::httpcore::{ HttpRequestContentsExtensions, StacksHttp, StacksHttpRequest, StacksHttpResponse, TipRequest, }; -use crate::net::p2p::PeerNetwork; +use crate::net::p2p::{PeerNetwork, PendingMessages}; use crate::util_lib::bloom::{BloomFilter, BloomNodeHasher}; use crate::util_lib::boot::boot_code_tx_auth; use crate::util_lib::db::{DBConn, Error as db_error}; @@ -1039,15 +1039,26 @@ pub struct NackData { pub error_code: u32, } pub mod NackErrorCodes { + /// A handshake is required before the protocol can proceed pub const HandshakeRequired: u32 = 1; + /// The protocol could not find a required burnchain block pub const NoSuchBurnchainBlock: u32 = 2; + /// The requester is sending too many requests pub const Throttled: u32 = 3; + /// The state the requester referenced referrs to a PoX fork we do not recognize pub const InvalidPoxFork: u32 = 4; + /// The message is inappropriate for this step of the protocol pub const InvalidMessage: u32 = 5; + /// The referenced StackerDB does not exist on this node pub const NoSuchDB: u32 = 6; + /// The referenced StackerDB chunk is out-of-date with respect to our replica pub const StaleVersion: u32 = 7; + /// The referenced StackerDB state view is out-of-date with respect to our replica pub const StaleView: u32 = 8; + /// The referenced StackerDB chunk is stale locally relative to the requested version pub const FutureVersion: u32 = 9; + /// The referenced StackerDB state view is stale locally relative to the requested version + pub const FutureView: u32 = 10; } #[derive(Debug, Clone, PartialEq)] @@ -1600,11 +1611,8 @@ impl NetworkResult { || self.has_stackerdb_chunks() } - pub fn consume_unsolicited( - &mut self, - unhandled_messages: HashMap>, - ) { - for (neighbor_key, messages) in unhandled_messages.into_iter() { + pub fn consume_unsolicited(&mut self, unhandled_messages: PendingMessages) { + for ((_event_id, neighbor_key), messages) in unhandled_messages.into_iter() { for message in messages.into_iter() { match message.payload { StacksMessageType::Blocks(block_data) => { From 9857504789be8a8c9482edb4fe781d7732fe9da7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 15:56:02 -0400 Subject: [PATCH 0911/1400] feat: buffer messages to be retried when either the sortition view changes, or the stacks tip's tenure changes --- stackslib/src/net/p2p.rs | 71 +++++++++++++++++++++++++++++++++------- 1 file changed, 60 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 72279e41e2..9ffbda34c2 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -196,7 +196,7 @@ pub enum PeerNetworkWorkState { } pub type PeerMap = HashMap; -pub type PendingMessages = HashMap>; +pub type PendingMessages = HashMap<(usize, NeighborKey), Vec>; pub struct ConnectingPeer { socket: mio_net::TcpStream, @@ -416,9 +416,13 @@ pub struct PeerNetwork { /// Pending messages (BlocksAvailable, MicroblocksAvailable, BlocksData, Microblocks, /// NakamotoBlocks) that we can't process yet, but might be able to process on a subsequent - /// chain view update. + /// burnchain view update. pub pending_messages: PendingMessages, + /// Pending messages (StackerDBPushChunk) that we can't process yet, but might be able + /// to process on a subsequent Stacks view update + pub pending_stacks_messages: PendingMessages, + // fault injection -- force disconnects fault_last_disconnect: u64, @@ -575,6 +579,7 @@ impl PeerNetwork { antientropy_start_reward_cycle: 0, pending_messages: PendingMessages::new(), + pending_stacks_messages: PendingMessages::new(), fault_last_disconnect: 0, @@ -1902,8 +1907,10 @@ impl PeerNetwork { "{:?}: Remove inventory state for Nakamoto {:?}", &self.local_peer, &nk ); - inv_state.del_peer(&NeighborAddress::from_neighbor_key(nk, pubkh)); + inv_state.del_peer(&NeighborAddress::from_neighbor_key(nk.clone(), pubkh)); } + self.pending_messages.remove(&(event_id, nk.clone())); + self.pending_stacks_messages.remove(&(event_id, nk.clone())); } match self.network { @@ -1922,7 +1929,6 @@ impl PeerNetwork { self.relay_handles.remove(&event_id); self.peers.remove(&event_id); - self.pending_messages.remove(&event_id); } /// Deregister by neighbor key @@ -4368,7 +4374,7 @@ impl PeerNetwork { sortdb: &SortitionDB, chainstate: &mut StacksChainState, ibd: bool, - ) -> Result>, net_error> { + ) -> Result { // update burnchain snapshot if we need to (careful -- it's expensive) let canonical_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; let (stacks_tip_ch, stacks_tip_bhh, stacks_tip_height) = @@ -4407,8 +4413,6 @@ impl PeerNetwork { )?; } - let mut ret: HashMap> = HashMap::new(); - let (parent_stacks_tip, tenure_start_block_id) = if stacks_tip_changed { let tenure_start_block_id = if let Some(header) = NakamotoChainState::get_nakamoto_tenure_start_block_header( @@ -4576,12 +4580,45 @@ impl PeerNetwork { } // can't fail after this point - + let mut ret = PendingMessages::new(); if burnchain_tip_changed { // try processing previously-buffered messages (best-effort) + debug!( + "{:?}: handle unsolicited stacks messages: burnchain changed {} != {}, {} buffered", + self.get_local_peer(), + &self.burnchain_tip.consensus_hash, + &canonical_sn.consensus_hash, + self.pending_messages + .iter() + .fold(0, |acc, (_, msgs)| acc + msgs.len()) + ); let buffered_messages = mem::replace(&mut self.pending_messages, HashMap::new()); - ret = - self.handle_unsolicited_messages(sortdb, chainstate, buffered_messages, ibd, false); + let unhandled = self.handle_unsolicited_sortition_messages( + sortdb, + chainstate, + buffered_messages, + ibd, + false, + ); + ret.extend(unhandled); + } + + if self.stacks_tip.consensus_hash != stacks_tip_ch { + // try processing previously-buffered messages (best-effort) + debug!( + "{:?}: handle unsolicited stacks messages: tenure changed {} != {}, {} buffered", + self.get_local_peer(), + &self.burnchain_tip.consensus_hash, + &canonical_sn.consensus_hash, + self.pending_stacks_messages + .iter() + .fold(0, |acc, (_, msgs)| acc + msgs.len()) + ); + let buffered_stacks_messages = + mem::replace(&mut self.pending_stacks_messages, HashMap::new()); + let unhandled = + self.handle_unsolicited_stacks_messages(sortdb, buffered_stacks_messages, false); + ret.extend(unhandled); } // update cached stacks chain view for /v2/info and /v3/tenures/info @@ -4657,8 +4694,20 @@ impl PeerNetwork { ); self.deregister_peer(error_event); } + + // filter out unsolicited messages and buffer up ones that might become processable + let unhandled_messages = self.authenticate_unsolicited_messages(unsolicited_messages); + let unhandled_messages = self.handle_unsolicited_sortition_messages( + sortdb, + chainstate, + unhandled_messages, + ibd, + true, + ); + let unhandled_messages = - self.handle_unsolicited_messages(sortdb, chainstate, unsolicited_messages, ibd, true); + self.handle_unsolicited_stacks_messages(sortdb, unhandled_messages, true); + network_result.consume_unsolicited(unhandled_messages); // schedule now-authenticated inbound convos for pingback From 9f308aea7b397369c653321c56b285dd7e281ac7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 15:56:35 -0400 Subject: [PATCH 0912/1400] feat: consider buffering an unsolicited stackerdb pushed chunk if its rc_consensus_hash is potentially in the future --- stackslib/src/net/stackerdb/mod.rs | 77 +++++++++++++++++++++++++----- 1 file changed, 65 insertions(+), 12 deletions(-) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index d310998a19..901a0c0047 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -497,11 +497,16 @@ impl PeerNetwork { Ok(results) } - /// Create a StackerDBChunksInv, or a Nack if the requested DB isn't replicated here + /// Create a StackerDBChunksInv, or a Nack if the requested DB isn't replicated here. + /// Runs in response to a received StackerDBGetChunksInv or a StackerDBPushChunk pub fn make_StackerDBChunksInv_or_Nack( &self, + sortdb: &SortitionDB, contract_id: &QualifiedContractIdentifier, + rc_consensus_hash: &ConsensusHash, ) -> StacksMessageType { + // N.B. check that the DB exists first, since we want to report StaleView only if the DB + // exists let slot_versions = match self.stackerdbs.get_slot_versions(contract_id) { Ok(versions) => versions, Err(e) => { @@ -517,6 +522,20 @@ impl PeerNetwork { } }; + // this DB exists, but is the view of this message recent? + if &self.get_chain_view().rc_consensus_hash != rc_consensus_hash { + // do we know about this consensus hash? + if let Ok(true) = + SortitionDB::has_block_snapshot_consensus(sortdb.conn(), rc_consensus_hash) + { + debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk since {} != {} (remote is stale)", self.get_local_peer(), &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); + return StacksMessageType::Nack(NackData::new(NackErrorCodes::StaleView)); + } else { + debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk since {} != {} (local is potentially stale)", self.get_local_peer(), &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); + return StacksMessageType::Nack(NackData::new(NackErrorCodes::FutureView)); + } + } + let num_outbound_replicas = self.count_outbound_stackerdb_replicas(contract_id) as u32; debug!( @@ -598,8 +617,11 @@ impl PeerNetwork { } /// Handle unsolicited StackerDBPushChunk messages. - /// Generate a reply handle for a StackerDBChunksInv to be sent to the remote peer, in which - /// the inventory vector is updated with this chunk's data. + /// Check to see that the message can be stored or buffered. + /// + /// Optionally, make a reply handle for a StackerDBChunksInv to be sent to the remote peer, in which + /// the inventory vector is updated with this chunk's data. Or, send a NACK if the chunk + /// cannot be buffered or stored. /// /// Note that this can happen *during* a StackerDB sync's execution, so be very careful about /// modifying a state machine's contents! The only modification possible here is to wakeup @@ -609,17 +631,30 @@ impl PeerNetwork { /// which this chunk arrived will have already bandwidth-throttled the remote peer, and because /// messages can be arbitrarily delayed (and bunched up) by the network anyway. /// - /// Return Ok(true) if we should store the chunk - /// Return Ok(false) if we should drop it. + /// Returns (true, x) if we should buffer the message and try processing it again later. + /// Returns (false, x) if we should *not* buffer this message, because it either *won't* be valid + /// later, or if it can be stored right now. + /// + /// Returns (x, true) if we should forward the message to the relayer, so it can be processed. + /// Returns (x, false) if we should *not* forward the message to the relayer, because it will + /// *not* be processed. pub fn handle_unsolicited_StackerDBPushChunk( &mut self, + sortdb: &SortitionDB, event_id: usize, preamble: &Preamble, chunk_data: &StackerDBPushChunkData, - ) -> Result { - let mut payload = self.make_StackerDBChunksInv_or_Nack(&chunk_data.contract_id); + send_reply: bool, + ) -> Result<(bool, bool), net_error> { + let mut payload = self.make_StackerDBChunksInv_or_Nack( + sortdb, + &chunk_data.contract_id, + &chunk_data.rc_consensus_hash, + ); match payload { StacksMessageType::StackerDBChunkInv(ref mut data) => { + // this message corresponds to an existing DB, and comes from the same view of the + // stacks chain tip let stackerdb_config = if let Some(config) = self.get_stacker_db_configs().get(&chunk_data.contract_id) { @@ -630,7 +665,7 @@ impl PeerNetwork { "StackerDBChunk for {} ID {} is not available locally", &chunk_data.contract_id, chunk_data.chunk_data.slot_id ); - return Ok(false); + return Ok((false, false)); }; // sanity check @@ -640,7 +675,7 @@ impl PeerNetwork { &chunk_data.chunk_data, &data.slot_versions, )? { - return Ok(false); + return Ok((false, false)); } // patch inventory -- we'll accept this chunk @@ -654,10 +689,28 @@ impl PeerNetwork { } } } - _ => {} + StacksMessageType::Nack(ref nack_data) => { + if nack_data.error_code == NackErrorCodes::FutureView { + // chunk corresponds to a known DB but the view of the sender is potentially in + // the future. + // We should buffer this in case it becomes storable, but don't + // store it yet. + return Ok((true, false)); + } else { + return Ok((false, false)); + } + } + _ => { + // don't recognize the message, so don't buffer + return Ok((false, false)); + } + } + + if !send_reply { + return Ok((false, true)); } - // this is a reply to the pushed chunk + // this is a reply to the pushed chunk, and we can store it right now (so don't buffer it) let resp = self.sign_for_p2p_reply(event_id, preamble.seq, payload)?; let handle = self.send_p2p_message( event_id, @@ -665,6 +718,6 @@ impl PeerNetwork { self.connection_opts.neighbor_request_timeout, )?; self.add_relay_handle(event_id, handle); - Ok(true) + Ok((false, true)) } } From 6c53f9a408298090ea90ffb3cbc937cdbf8df529 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 15:57:13 -0400 Subject: [PATCH 0913/1400] chore: add test to verify that a Stacks node can receive buffered StackerDB pushed chunks --- stackslib/src/net/stackerdb/tests/sync.rs | 211 ++++++++++++++++++++++ 1 file changed, 211 insertions(+) diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index 9227eedecc..e9474d9abf 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -32,6 +32,7 @@ use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::stacks::db::StacksChainState; use crate::net::p2p::PeerNetwork; use crate::net::relay::Relayer; use crate::net::stackerdb::db::SlotValidation; @@ -829,6 +830,216 @@ fn test_stackerdb_push_relayer() { }) } +/// Verify that the relayer will push stackerdb chunks, AND, those chunks will get buffered if the +/// recipient has not yet processed the sortition. +/// Replica A has the data. +/// Replica B receives the data via StackerDB sync +/// Replica C receives the data from B's relayer pushes, but is not yet at the Stacks tip that A +/// and B are on. +/// Replica C processes them all when the Stacks tip advances +#[test] +fn test_stackerdb_push_relayer_late_chunks() { + with_timeout(600, move || { + std::env::set_var("STACKS_TEST_DISABLE_EDGE_TRIGGER_TEST", "1"); + let mut peer_1_config = TestPeerConfig::from_port(BASE_PORT + 106); + let mut peer_2_config = TestPeerConfig::from_port(BASE_PORT + 108); + let mut peer_3_config = TestPeerConfig::from_port(BASE_PORT + 110); + + peer_1_config.allowed = -1; + peer_2_config.allowed = -1; + peer_3_config.allowed = -1; + + // short-lived walks... + peer_1_config.connection_opts.walk_max_duration = 10; + peer_2_config.connection_opts.walk_max_duration = 10; + peer_3_config.connection_opts.walk_max_duration = 10; + + peer_3_config.connection_opts.disable_stackerdb_sync = true; + + // peer 1 crawls peer 2, and peer 2 crawls peer 1 and peer 3, and peer 3 crawls peer 2 + peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_3_config.to_neighbor()); + peer_3_config.add_neighbor(&peer_2_config.to_neighbor()); + + // set up stacker DBs for both peers + let idx_1 = add_stackerdb(&mut peer_1_config, Some(StackerDBConfig::template())); + let idx_2 = add_stackerdb(&mut peer_2_config, Some(StackerDBConfig::template())); + let idx_3 = add_stackerdb(&mut peer_3_config, Some(StackerDBConfig::template())); + + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + let mut peer_3 = TestPeer::new(peer_3_config); + + // advance peers 1 and 2, but not 3 + let mut peer_1_nonce = 0; + let mut peer_2_nonce = 0; + let mut peer_3_nonce = 0; + peer_1.tenure_with_txs(&vec![], &mut peer_1_nonce); + peer_2.tenure_with_txs(&vec![], &mut peer_2_nonce); + + // sanity check -- peer 1 and 2 are at the same tip, but not 3 + let sn1 = SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb().conn()).unwrap(); + let sn2 = SortitionDB::get_canonical_burn_chain_tip(peer_2.sortdb().conn()).unwrap(); + let sn3 = SortitionDB::get_canonical_burn_chain_tip(peer_3.sortdb().conn()).unwrap(); + assert_eq!(sn1.consensus_hash, sn2.consensus_hash); + assert_eq!(sn1.block_height, sn2.block_height); + + assert_ne!(sn1.consensus_hash, sn3.consensus_hash); + assert_ne!(sn2.consensus_hash, sn3.consensus_hash); + assert!(sn3.block_height < sn1.block_height); + assert!(sn3.block_height < sn2.block_height); + + let st1 = SortitionDB::get_canonical_stacks_chain_tip_hash(peer_1.sortdb().conn()).unwrap(); + let st2 = SortitionDB::get_canonical_stacks_chain_tip_hash(peer_2.sortdb().conn()).unwrap(); + let st3 = SortitionDB::get_canonical_stacks_chain_tip_hash(peer_3.sortdb().conn()).unwrap(); + + assert_eq!(st1, st2); + assert_ne!(st1, st3); + assert_ne!(st2, st3); + + // peer 1 gets the DB + setup_stackerdb(&mut peer_1, idx_1, true, 10); + setup_stackerdb(&mut peer_2, idx_2, false, 10); + setup_stackerdb(&mut peer_3, idx_2, false, 10); + + // verify that peer 1 got the data + let peer_1_db_chunks = load_stackerdb(&peer_1, idx_1); + assert_eq!(peer_1_db_chunks.len(), 10); + for i in 0..10 { + assert_eq!(peer_1_db_chunks[i].0.slot_id, i as u32); + assert_eq!(peer_1_db_chunks[i].0.slot_version, 1); + assert!(peer_1_db_chunks[i].1.len() > 0); + } + + // verify that peer 2 and 3 did NOT get the data + let peer_2_db_chunks = load_stackerdb(&peer_2, idx_2); + assert_eq!(peer_2_db_chunks.len(), 10); + for i in 0..10 { + assert_eq!(peer_2_db_chunks[i].0.slot_id, i as u32); + assert_eq!(peer_2_db_chunks[i].0.slot_version, 0); + assert!(peer_2_db_chunks[i].1.len() == 0); + } + + let peer_3_db_chunks = load_stackerdb(&peer_3, idx_2); + assert_eq!(peer_3_db_chunks.len(), 10); + for i in 0..10 { + assert_eq!(peer_3_db_chunks[i].0.slot_id, i as u32); + assert_eq!(peer_3_db_chunks[i].0.slot_version, 0); + assert!(peer_3_db_chunks[i].1.len() == 0); + } + + let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); + let peer_2_db_configs = peer_2.config.get_stacker_db_configs(); + let peer_3_db_configs = peer_3.config.get_stacker_db_configs(); + + let mut i = 0; + let mut advanced_tenure = false; + loop { + // run peer network state-machines + peer_1.network.stacker_db_configs = peer_1_db_configs.clone(); + peer_2.network.stacker_db_configs = peer_2_db_configs.clone(); + peer_3.network.stacker_db_configs = peer_3_db_configs.clone(); + + let res_1 = peer_1.step_with_ibd(false); + let res_2 = peer_2.step_with_ibd(false); + let res_3 = peer_3.step_with_ibd(false); + + if let Ok(res) = res_1 { + check_sync_results(&res); + peer_1 + .relayer + .process_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_1 + .relayer + .process_pushed_stacker_db_chunks( + &peer_1.network.get_chain_view().rc_consensus_hash, + &peer_1_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); + } + + if let Ok(res) = res_2 { + check_sync_results(&res); + peer_2 + .relayer + .process_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_2 + .relayer + .process_pushed_stacker_db_chunks( + &peer_2.network.get_chain_view().rc_consensus_hash, + &peer_2_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); + } + + if let Ok(res) = res_3 { + check_sync_results(&res); + peer_3 + .relayer + .process_stacker_db_chunks( + &peer_3.network.get_chain_view().rc_consensus_hash, + &peer_3_db_configs, + res.stacker_db_sync_results, + None, + ) + .unwrap(); + peer_3 + .relayer + .process_pushed_stacker_db_chunks( + &peer_3.network.get_chain_view().rc_consensus_hash, + &peer_3_db_configs, + res.pushed_stackerdb_chunks, + None, + ) + .unwrap(); + } + + let db1 = load_stackerdb(&peer_1, idx_1); + let db2 = load_stackerdb(&peer_2, idx_2); + let db3 = load_stackerdb(&peer_3, idx_3); + + if db1 == db2 && db2 == db3 { + break; + } + i += 1; + + debug!("StackerDB sync step {}", i); + + let num_pending = peer_3 + .network + .pending_stacks_messages + .iter() + .fold(0, |acc, (_, msgs)| acc + msgs.len()); + debug!("peer_3.network.pending_stacks_messages: {}", num_pending); + + if num_pending >= 10 && !advanced_tenure { + debug!("======= Advancing peer 3 tenure ========"); + peer_3.tenure_with_txs(&vec![], &mut peer_3_nonce); + advanced_tenure = true; + } + } + + debug!("Completed stacker DB sync in {} step(s)", i); + }) +} + #[test] #[ignore] fn test_stackerdb_10_replicas_10_neighbors_line_10_chunks() { From fe41be243f653f8fab9d7261fc21eefe52930fbd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:23:17 -0400 Subject: [PATCH 0914/1400] chore: API sync --- stackslib/src/net/chat.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 4becf3891d..926340d7fe 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -1917,11 +1917,11 @@ impl ConversationP2P { /// Generates a Nack if we don't have this DB, or if the request's consensus hash is invalid. fn make_stacker_db_getchunkinv_response( network: &PeerNetwork, - sortdb: &SortitionDB, + chainstate: &mut StacksChainState, getchunkinv: &StackerDBGetChunkInvData, ) -> Result { Ok(network.make_StackerDBChunksInv_or_Nack( - sortdb, + chainstate, &getchunkinv.contract_id, &getchunkinv.rc_consensus_hash, )) @@ -1932,12 +1932,15 @@ impl ConversationP2P { fn handle_stacker_db_getchunkinv( &mut self, network: &PeerNetwork, - sortdb: &SortitionDB, + chainstate: &mut StacksChainState, preamble: &Preamble, getchunkinv: &StackerDBGetChunkInvData, ) -> Result { - let response = - ConversationP2P::make_stacker_db_getchunkinv_response(network, sortdb, getchunkinv)?; + let response = ConversationP2P::make_stacker_db_getchunkinv_response( + network, + chainstate, + getchunkinv, + )?; self.sign_and_reply( network.get_local_peer(), network.get_chain_view(), @@ -2354,7 +2357,7 @@ impl ConversationP2P { } } StacksMessageType::StackerDBGetChunkInv(ref getchunkinv) => { - self.handle_stacker_db_getchunkinv(network, sortdb, &msg.preamble, getchunkinv) + self.handle_stacker_db_getchunkinv(network, chainstate, &msg.preamble, getchunkinv) } StacksMessageType::StackerDBGetChunk(ref getchunk) => { self.handle_stacker_db_getchunk(network, &msg.preamble, getchunk) From f3298c06bf11045b5bd725b2e4d09688830e7cc1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:23:30 -0400 Subject: [PATCH 0915/1400] chore: API sync --- stackslib/src/net/p2p.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 9ffbda34c2..f0693c10a0 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -4616,8 +4616,11 @@ impl PeerNetwork { ); let buffered_stacks_messages = mem::replace(&mut self.pending_stacks_messages, HashMap::new()); - let unhandled = - self.handle_unsolicited_stacks_messages(sortdb, buffered_stacks_messages, false); + let unhandled = self.handle_unsolicited_stacks_messages( + chainstate, + buffered_stacks_messages, + false, + ); ret.extend(unhandled); } @@ -4706,7 +4709,7 @@ impl PeerNetwork { ); let unhandled_messages = - self.handle_unsolicited_stacks_messages(sortdb, unhandled_messages, true); + self.handle_unsolicited_stacks_messages(chainstate, unhandled_messages, true); network_result.consume_unsolicited(unhandled_messages); From d295f45f734493fd12764fe0efe2bbf5aa896019 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:23:38 -0400 Subject: [PATCH 0916/1400] fix: treat a stackerdb chunk as potentially from the future if its rc_consensus_hash does not correspond to a processed Stacks block (but it may correspond to a sortition) --- stackslib/src/net/stackerdb/mod.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 901a0c0047..a2de124793 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -501,7 +501,7 @@ impl PeerNetwork { /// Runs in response to a received StackerDBGetChunksInv or a StackerDBPushChunk pub fn make_StackerDBChunksInv_or_Nack( &self, - sortdb: &SortitionDB, + chainstate: &mut StacksChainState, contract_id: &QualifiedContractIdentifier, rc_consensus_hash: &ConsensusHash, ) -> StacksMessageType { @@ -524,10 +524,13 @@ impl PeerNetwork { // this DB exists, but is the view of this message recent? if &self.get_chain_view().rc_consensus_hash != rc_consensus_hash { - // do we know about this consensus hash? - if let Ok(true) = - SortitionDB::has_block_snapshot_consensus(sortdb.conn(), rc_consensus_hash) - { + // is there a Stacks block (or tenure) with this consensus hash? + let tip_block_id = self.stacks_tip.block_id(); + if let Ok(Some(_)) = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &tip_block_id, + &rc_consensus_hash, + ) { debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk since {} != {} (remote is stale)", self.get_local_peer(), &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); return StacksMessageType::Nack(NackData::new(NackErrorCodes::StaleView)); } else { @@ -640,14 +643,14 @@ impl PeerNetwork { /// *not* be processed. pub fn handle_unsolicited_StackerDBPushChunk( &mut self, - sortdb: &SortitionDB, + chainstate: &mut StacksChainState, event_id: usize, preamble: &Preamble, chunk_data: &StackerDBPushChunkData, send_reply: bool, ) -> Result<(bool, bool), net_error> { let mut payload = self.make_StackerDBChunksInv_or_Nack( - sortdb, + chainstate, &chunk_data.contract_id, &chunk_data.rc_consensus_hash, ); From 4b73229ff20c28b9c22b0699b9e066ae7e573dd6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:24:08 -0400 Subject: [PATCH 0917/1400] chore: API sync --- stackslib/src/net/tests/relay/epoch2x.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index e6a69f5dc0..b234460ddc 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -2592,7 +2592,9 @@ fn test_get_blocks_and_microblocks_2_peers_buffered_messages() { ret }; let mut update_sortition = false; - for (event_id, pending) in peers[1].network.pending_messages.iter() { + for ((event_id, _neighbor_key), pending) in + peers[1].network.pending_messages.iter() + { debug!("Pending at {} is ({}, {})", *i, event_id, pending.len()); if pending.len() >= 1 { update_sortition = true; @@ -3086,7 +3088,7 @@ fn process_new_blocks_rejects_problematic_asts() { }, ]; let mut unsolicited = HashMap::new(); - unsolicited.insert(nk.clone(), bad_msgs.clone()); + unsolicited.insert((1, nk.clone()), bad_msgs.clone()); let mut network_result = NetworkResult::new( peer.network.stacks_tip.block_id(), From 0012f74b043e6cf13460a3e66adcb5c1013febfd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:31:03 -0400 Subject: [PATCH 0918/1400] chore: API sync --- stackslib/src/net/tests/relay/nakamoto.rs | 87 ++++++++++++++++++----- 1 file changed, 68 insertions(+), 19 deletions(-) diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 9691a628e2..fb9db70d5b 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -388,6 +388,7 @@ fn test_buffer_data_message() { let (mut peer, _followers) = make_nakamoto_peers_from_invs(function_name!(), &observer, 10, 5, bitvecs.clone(), 1); + let peer_nk = peer.to_neighbor().addr; let nakamoto_block = NakamotoBlock { header: NakamotoBlockHeader { version: 1, @@ -472,43 +473,89 @@ fn test_buffer_data_message() { blocks: vec![nakamoto_block], }), ); + let stackerdb_chunk = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x01; 32]), + 7, + &BurnchainHeaderHash([0x07; 32]), + StacksMessageType::StackerDBPushChunk(StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::parse( + "ST000000000000000000002AMW42H.signers-1-4", + ) + .unwrap(), + rc_consensus_hash: ConsensusHash([0x01; 20]), + chunk_data: StackerDBChunkData { + slot_id: 0, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![1, 2, 3, 4, 5], + }, + }), + ); for _ in 0..peer.network.connection_opts.max_buffered_blocks_available { assert!(peer .network - .buffer_data_message(0, blocks_available.clone())); + .buffer_sortition_data_message(0, &peer_nk, blocks_available.clone())); } assert!(!peer .network - .buffer_data_message(0, blocks_available.clone())); + .buffer_sortition_data_message(0, &peer_nk, blocks_available.clone())); for _ in 0..peer .network .connection_opts .max_buffered_microblocks_available { + assert!(peer.network.buffer_sortition_data_message( + 0, + &peer_nk, + microblocks_available.clone() + )); + } + assert!(!peer.network.buffer_sortition_data_message( + 0, + &peer_nk, + microblocks_available.clone() + )); + + for _ in 0..peer.network.connection_opts.max_buffered_blocks { assert!(peer .network - .buffer_data_message(0, microblocks_available.clone())); + .buffer_sortition_data_message(0, &peer_nk, block.clone())); } assert!(!peer .network - .buffer_data_message(0, microblocks_available.clone())); - - for _ in 0..peer.network.connection_opts.max_buffered_blocks { - assert!(peer.network.buffer_data_message(0, block.clone())); - } - assert!(!peer.network.buffer_data_message(0, block.clone())); + .buffer_sortition_data_message(0, &peer_nk, block.clone())); for _ in 0..peer.network.connection_opts.max_buffered_microblocks { - assert!(peer.network.buffer_data_message(0, microblocks.clone())); + assert!(peer + .network + .buffer_sortition_data_message(0, &peer_nk, microblocks.clone())); } - assert!(!peer.network.buffer_data_message(0, microblocks.clone())); + assert!(!peer + .network + .buffer_sortition_data_message(0, &peer_nk, microblocks.clone())); for _ in 0..peer.network.connection_opts.max_buffered_nakamoto_blocks { - assert!(peer.network.buffer_data_message(0, nakamoto_block.clone())); + assert!(peer + .network + .buffer_sortition_data_message(0, &peer_nk, nakamoto_block.clone())); } - assert!(!peer.network.buffer_data_message(0, nakamoto_block.clone())); + assert!(!peer + .network + .buffer_sortition_data_message(0, &peer_nk, nakamoto_block.clone())); + + for _ in 0..peer.network.connection_opts.max_buffered_stackerdb_chunks { + assert!(peer + .network + .buffer_stacks_data_message(0, &peer_nk, stackerdb_chunk.clone())); + } + assert!(!peer + .network + .buffer_stacks_data_message(0, &peer_nk, stackerdb_chunk.clone())); } /// Verify that Nakmaoto blocks whose sortitions are known will *not* be buffered, but instead @@ -686,7 +733,7 @@ fn test_no_buffer_ready_nakamoto_blocks() { blocks: blocks.clone(), }), ); - unsolicited.insert(peer_nk.clone(), vec![msg]); + unsolicited.insert((1, peer_nk.clone()), vec![msg]); if let Some(mut network_result) = network_result.take() { network_result.consume_unsolicited(unsolicited); @@ -882,7 +929,8 @@ fn test_buffer_nonready_nakamoto_blocks() { // pass this and other blocks to the p2p network's unsolicited message handler, // so they can be buffered up and processed. - let mut unsolicited_msgs: HashMap> = HashMap::new(); + let mut unsolicited_msgs: HashMap<(usize, NeighborKey), Vec> = + HashMap::new(); for (event_id, convo) in follower.network.peers.iter() { for blks in all_blocks.iter() { let msg = StacksMessage::from_chain_view( @@ -893,16 +941,17 @@ fn test_buffer_nonready_nakamoto_blocks() { blocks: blks.clone(), }), ); - - if let Some(msgs) = unsolicited_msgs.get_mut(event_id) { + let nk = convo.to_neighbor_key(); + if let Some(msgs) = unsolicited_msgs.get_mut(&(*event_id, nk)) { msgs.push(msg); } else { - unsolicited_msgs.insert(*event_id, vec![msg]); + unsolicited_msgs + .insert((*event_id, convo.to_neighbor_key()), vec![msg]); } } } - follower.network.handle_unsolicited_messages( + follower.network.handle_unsolicited_sortition_messages( &sortdb, &node.chainstate, unsolicited_msgs, From efc2563704e7a5a427512f42079229427fd4ed16 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:31:15 -0400 Subject: [PATCH 0919/1400] feat: add code path for buffering unsolicited messages that might become processable after the Stacks tenure changes --- stackslib/src/net/unsolicited.rs | 424 +++++++++++++++++++++++-------- 1 file changed, 317 insertions(+), 107 deletions(-) diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index f9ab5de87e..d10a6ee368 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -22,7 +22,7 @@ use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{Error as ChainstateError, StacksBlockHeader}; -use crate::net::p2p::{PeerNetwork, PeerNetworkWorkState}; +use crate::net::p2p::{PeerNetwork, PeerNetworkWorkState, PendingMessages}; use crate::net::{ BlocksAvailableData, BlocksData, BlocksDatum, Error as NetError, MicroblocksData, NakamotoBlocksData, NeighborKey, Preamble, StacksMessage, StacksMessageType, @@ -62,7 +62,7 @@ impl PeerNetwork { else { test_debug!( "{:?}: No such neighbor event={}", - &self.local_peer, + &self.get_local_peer(), event_id ); return None; @@ -72,7 +72,7 @@ impl PeerNetwork { // drop -- a correct peer will have authenticated before sending this message test_debug!( "{:?}: Unauthenticated neighbor {:?}", - &self.local_peer, + &self.get_local_peer(), &remote_neighbor_key ); return None; @@ -116,7 +116,9 @@ impl PeerNetwork { Ok(None) => { debug!( "{:?}: We already know the inventory state in {} for {}", - &self.local_peer, outbound_neighbor_key, consensus_hash + &self.get_local_peer(), + outbound_neighbor_key, + consensus_hash ); return Ok(None); } @@ -124,12 +126,12 @@ impl PeerNetwork { // is this remote node simply ahead of us? if let Some(convo) = self.peers.get(&event_id) { if self.chain_view.burn_block_height < convo.burnchain_tip_height { - debug!("{:?}: Unrecognized consensus hash {}; it is possible that {} is ahead of us", &self.local_peer, consensus_hash, outbound_neighbor_key); + debug!("{:?}: Unrecognized consensus hash {}; it is possible that {} is ahead of us", &self.get_local_peer(), consensus_hash, outbound_neighbor_key); return Err(NetError::NotFoundError); } } // not ahead of us -- it's a bad consensus hash - debug!("{:?}: Unrecognized consensus hash {}; assuming that {} has a different chain view", &self.local_peer, consensus_hash, outbound_neighbor_key); + debug!("{:?}: Unrecognized consensus hash {}; assuming that {} has a different chain view", &self.get_local_peer(), consensus_hash, outbound_neighbor_key); return Ok(None); } Err(NetError::InvalidMessage) => { @@ -178,6 +180,7 @@ impl PeerNetwork { let mut blocks_data = 0; let mut microblocks_data = 0; let mut nakamoto_blocks_data = 0; + let mut stackerdb_chunks_data = 0; for stored_msg in msgs.iter() { match &stored_msg.payload { StacksMessageType::BlocksAvailable(_) => { @@ -187,7 +190,7 @@ impl PeerNetwork { { debug!( "{:?}: Cannot buffer BlocksAvailable from event {} -- already have {} buffered", - &self.local_peer, event_id, blocks_available + &self.get_local_peer(), event_id, blocks_available ); return false; } @@ -200,7 +203,7 @@ impl PeerNetwork { { debug!( "{:?}: Cannot buffer MicroblocksAvailable from event {} -- already have {} buffered", - &self.local_peer, event_id, microblocks_available + &self.get_local_peer(), event_id, microblocks_available ); return false; } @@ -212,7 +215,7 @@ impl PeerNetwork { { debug!( "{:?}: Cannot buffer BlocksData from event {} -- already have {} buffered", - &self.local_peer, event_id, blocks_data + &self.get_local_peer(), event_id, blocks_data ); return false; } @@ -224,7 +227,7 @@ impl PeerNetwork { { debug!( "{:?}: Cannot buffer MicroblocksData from event {} -- already have {} buffered", - &self.local_peer, event_id, microblocks_data + &self.get_local_peer(), event_id, microblocks_data ); return false; } @@ -236,7 +239,20 @@ impl PeerNetwork { { debug!( "{:?}: Cannot buffer NakamotoBlocksData from event {} -- already have {} buffered", - &self.local_peer, event_id, nakamoto_blocks_data + &self.get_local_peer(), event_id, nakamoto_blocks_data + ); + return false; + } + } + StacksMessageType::StackerDBPushChunk(_) => { + stackerdb_chunks_data += 1; + if matches!(&msg.payload, StacksMessageType::StackerDBPushChunk(..)) + && stackerdb_chunks_data + >= self.connection_opts.max_buffered_stackerdb_chunks + { + debug!( + "{:?}: Cannot buffer StackerDBPushChunks from event {} -- already have {} buffered", + self.get_local_peer(), event_id, stackerdb_chunks_data ); return false; } @@ -253,12 +269,19 @@ impl PeerNetwork { /// If there is no space for the message, then silently drop it. /// Returns true if buffered. /// Returns false if not. - pub(crate) fn buffer_data_message(&mut self, event_id: usize, msg: StacksMessage) -> bool { - let Some(msgs) = self.pending_messages.get(&event_id) else { - self.pending_messages.insert(event_id, vec![msg]); + pub(crate) fn buffer_sortition_data_message( + &mut self, + event_id: usize, + neighbor_key: &NeighborKey, + msg: StacksMessage, + ) -> bool { + let key = (event_id, neighbor_key.clone()); + let Some(msgs) = self.pending_messages.get(&key) else { + self.pending_messages.insert(key.clone(), vec![msg]); debug!( "{:?}: Event {} has 1 messages buffered", - &self.local_peer, event_id + &self.get_local_peer(), + event_id ); return true; }; @@ -269,15 +292,71 @@ impl PeerNetwork { return false; } - if let Some(msgs) = self.pending_messages.get_mut(&event_id) { + let debug_msg = format!( + "{:?}: buffer message from event {} (buffered: {}): {:?}", + self.get_local_peer(), + event_id, + msgs.len() + 1, + &msg + ); + if let Some(msgs) = self.pending_messages.get_mut(&key) { // should always be reachable + debug!("{}", &debug_msg); msgs.push(msg); + } + true + } + + #[cfg_attr(test, mutants::skip)] + /// Buffer a message for re-processing once the stacks view updates. + /// If there is no space for the message, then silently drop it. + /// Returns true if buffered. + /// Returns false if not. + pub(crate) fn buffer_stacks_data_message( + &mut self, + event_id: usize, + neighbor_key: &NeighborKey, + msg: StacksMessage, + ) -> bool { + let key = (event_id, neighbor_key.clone()); + let Some(msgs) = self.pending_stacks_messages.get(&key) else { + // check limits against connection opts, and if the limit is not met, then buffer up the + // message. + if !self.can_buffer_data_message(event_id, &[], &msg) { + return false; + } debug!( - "{:?}: Event {} has {} messages buffered", - &self.local_peer, + "{:?}: buffer message from event {}: {:?}", + self.get_local_peer(), event_id, - msgs.len() + &msg + ); + self.pending_stacks_messages.insert(key.clone(), vec![msg]); + debug!( + "{:?}: Event {} has 1 messages buffered", + &self.get_local_peer(), + event_id ); + return true; + }; + + // check limits against connection opts, and if the limit is not met, then buffer up the + // message. + if !self.can_buffer_data_message(event_id, msgs, &msg) { + return false; + } + + let debug_msg = format!( + "{:?}: buffer message from event {} (buffered: {}): {:?}", + self.get_local_peer(), + event_id, + msgs.len() + 1, + &msg + ); + if let Some(msgs) = self.pending_stacks_messages.get_mut(&key) { + // should always be reachable + debug!("{}", &debug_msg); + msgs.push(msg); } true } @@ -341,7 +420,7 @@ impl PeerNetwork { debug!( "{:?}: Process BlocksAvailable from {:?} with {} entries", - &self.local_peer, + &self.get_local_peer(), &outbound_neighbor_key, new_blocks.available.len() ); @@ -361,7 +440,7 @@ impl PeerNetwork { } Err(NetError::NotFoundError) => { if buffer { - debug!("{:?}: Will buffer BlocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); + debug!("{:?}: Will buffer BlocksAvailable for {} until the next burnchain view update", &self.get_local_peer(), &consensus_hash); to_buffer = true; } continue; @@ -369,7 +448,11 @@ impl PeerNetwork { Err(e) => { info!( "{:?}: Failed to handle BlocksAvailable({}/{}) from {}: {:?}", - &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e + &self.get_local_peer(), + &consensus_hash, + &block_hash, + &outbound_neighbor_key, + &e ); continue; } @@ -408,7 +491,7 @@ impl PeerNetwork { // advance straight to download state if we're in inv state if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.local_peer, block_sortition_height); + debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.get_local_peer(), block_sortition_height); } self.have_data_to_download = true; } @@ -453,7 +536,7 @@ impl PeerNetwork { debug!( "{:?}: Process MicroblocksAvailable from {:?} with {} entries", - &self.local_peer, + &self.get_local_peer(), outbound_neighbor_key, new_mblocks.available.len() ); @@ -473,7 +556,7 @@ impl PeerNetwork { } Err(NetError::NotFoundError) => { if buffer { - debug!("{:?}: Will buffer MicroblocksAvailable for {} until the next burnchain view update", &self.local_peer, &consensus_hash); + debug!("{:?}: Will buffer MicroblocksAvailable for {} until the next burnchain view update", &self.get_local_peer(), &consensus_hash); to_buffer = true; } continue; @@ -481,7 +564,11 @@ impl PeerNetwork { Err(e) => { info!( "{:?}: Failed to handle MicroblocksAvailable({}/{}) from {:?}: {:?}", - &self.local_peer, &consensus_hash, &block_hash, &outbound_neighbor_key, &e + &self.get_local_peer(), + &consensus_hash, + &block_hash, + &outbound_neighbor_key, + &e ); continue; } @@ -516,7 +603,7 @@ impl PeerNetwork { // advance straight to download state if we're in inv state if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of microblock stream {}", &self.local_peer, mblock_sortition_height); + debug!("{:?}: advance directly to block download with knowledge of microblock stream {}", &self.get_local_peer(), mblock_sortition_height); } self.have_data_to_download = true; } @@ -551,7 +638,7 @@ impl PeerNetwork { debug!( "{:?}: Process BlocksData from {:?} with {} entries", - &self.local_peer, + &self.get_local_peer(), outbound_neighbor_key_opt .clone() .or_else(|| { self.check_peer_authenticated(event_id) }), @@ -570,7 +657,7 @@ impl PeerNetwork { if buffer { debug!( "{:?}: Will buffer unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", - &self.local_peer, + &self.get_local_peer(), &consensus_hash, &block.block_hash(), StacksBlockHeader::make_index_block_hash( @@ -582,7 +669,7 @@ impl PeerNetwork { } else { debug!( "{:?}: Will drop unsolicited BlocksData({}/{}) ({}) -- consensus hash not (yet) recognized", - &self.local_peer, + &self.get_local_peer(), &consensus_hash, &block.block_hash(), StacksBlockHeader::make_index_block_hash( @@ -596,7 +683,9 @@ impl PeerNetwork { Err(e) => { info!( "{:?}: Failed to query block snapshot for {}: {:?}", - &self.local_peer, consensus_hash, &e + &self.get_local_peer(), + consensus_hash, + &e ); continue; } @@ -605,7 +694,8 @@ impl PeerNetwork { if !sn.pox_valid { info!( "{:?}: Failed to query snapshot for {}: not on the valid PoX fork", - &self.local_peer, consensus_hash + &self.get_local_peer(), + consensus_hash ); continue; } @@ -613,7 +703,7 @@ impl PeerNetwork { if sn.winning_stacks_block_hash != block.block_hash() { info!( "{:?}: Ignoring block {} -- winning block was {} (sortition: {})", - &self.local_peer, + &self.get_local_peer(), block.block_hash(), sn.winning_stacks_block_hash, sn.sortition @@ -667,7 +757,7 @@ impl PeerNetwork { debug!( "{:?}: Process MicroblocksData from {:?} for {} with {} entries", - &self.local_peer, + &self.get_local_peer(), outbound_neighbor_key_opt.or_else(|| { self.check_peer_authenticated(event_id) }), &new_microblocks.index_anchor_block, new_microblocks.microblocks.len() @@ -677,20 +767,22 @@ impl PeerNetwork { match chainstate.get_block_header_hashes(&new_microblocks.index_anchor_block) { Ok(Some(_)) => { // yup; can process now - debug!("{:?}: have microblock parent anchored block {}, so can process its microblocks", &self.local_peer, &new_microblocks.index_anchor_block); + debug!("{:?}: have microblock parent anchored block {}, so can process its microblocks", &self.get_local_peer(), &new_microblocks.index_anchor_block); !buffer } Ok(None) => { if buffer { debug!( "{:?}: Will buffer unsolicited MicroblocksData({})", - &self.local_peer, &new_microblocks.index_anchor_block + &self.get_local_peer(), + &new_microblocks.index_anchor_block ); true } else { debug!( "{:?}: Will not buffer unsolicited MicroblocksData({})", - &self.local_peer, &new_microblocks.index_anchor_block + &self.get_local_peer(), + &new_microblocks.index_anchor_block ); false } @@ -698,7 +790,9 @@ impl PeerNetwork { Err(e) => { warn!( "{:?}: Failed to get header hashes for {:?}: {:?}", - &self.local_peer, &new_microblocks.index_anchor_block, &e + &self.get_local_peer(), + &new_microblocks.index_anchor_block, + &e ); false } @@ -811,7 +905,7 @@ impl PeerNetwork { { debug!( "{:?}: Aleady have Nakamoto block {}", - &self.local_peer, + &self.get_local_peer(), &nakamoto_block.block_id() ); return false; @@ -850,7 +944,7 @@ impl PeerNetwork { ) -> bool { debug!( "{:?}: Process NakamotoBlocksData from {:?} with {} entries", - &self.local_peer, + &self.get_local_peer(), &remote_neighbor_key_opt, nakamoto_blocks.blocks.len() ); @@ -860,7 +954,7 @@ impl PeerNetwork { if self.is_nakamoto_block_bufferable(sortdb, chainstate, nakamoto_block) { debug!( "{:?}: Will buffer unsolicited NakamotoBlocksData({}) ({})", - &self.local_peer, + &self.get_local_peer(), &nakamoto_block.block_id(), &nakamoto_block.header.consensus_hash, ); @@ -905,7 +999,12 @@ impl PeerNetwork { /// Handle an unsolicited message, with either the intention of just processing it (in which /// case, `buffer` will be `false`), or with the intention of not only processing it, but also /// determining if it can be bufferred and retried later (in which case, `buffer` will be - /// `true`). + /// `true`). This applies to messages that can be reprocessed after the next sortition (not + /// the next Stacks tenure) + /// + /// This code gets called with `buffer` set to true when the message is first received. If + /// this method returns (true, x), then this code gets called with the same message a + /// subsequent time when the sortition changes (and in that case, `buffer` will be false). /// /// Returns (true, x) if we should buffer the message and try processing it again later. /// Returns (false, x) if we should *not* buffer this message, because it *won't* be valid @@ -914,12 +1013,11 @@ impl PeerNetwork { /// Returns (x, true) if we should forward the message to the relayer, so it can be processed. /// Returns (x, false) if we should *not* forward the message to the relayer, because it will /// *not* be processed. - fn handle_unsolicited_message( + fn handle_unsolicited_sortition_message( &mut self, sortdb: &SortitionDB, chainstate: &StacksChainState, event_id: usize, - preamble: &Preamble, payload: &StacksMessageType, ibd: bool, buffer: bool, @@ -984,54 +1082,78 @@ impl PeerNetwork { (to_buffer, true) } + _ => (false, true), + } + } + + #[cfg_attr(test, mutants::skip)] + /// Handle an unsolicited message, with either the intention of just processing it (in which + /// case, `buffer` will be `false`), or with the intention of not only processing it, but also + /// determining if it can be bufferred and retried later (in which case, `buffer` will be + /// `true`). This applies to messages that can be reprocessed after the next Stacks tenure. + /// + /// This code gets called with `buffer` set to true when the message is first received. If + /// this method returns (true, x), then this code gets called with the same message a + /// subsequent time when the sortition changes (and in that case, `buffer` will be false). + /// + /// Returns (true, x) if we should buffer the message and try processing it again later. + /// Returns (false, x) if we should *not* buffer this message, because it *won't* be valid + /// later. + /// + /// Returns (x, true) if we should forward the message to the relayer, so it can be processed. + /// Returns (x, false) if we should *not* forward the message to the relayer, because it will + /// *not* be processed. + fn handle_unsolicited_stacks_message( + &mut self, + chainstate: &mut StacksChainState, + event_id: usize, + preamble: &Preamble, + payload: &StacksMessageType, + buffer: bool, + ) -> (bool, bool) { + match payload { StacksMessageType::StackerDBPushChunk(ref data) => { - match self.handle_unsolicited_StackerDBPushChunk(event_id, preamble, data) { - Ok(x) => { - // don't buffer, but do reject if invalid - (false, x) - } - Err(e) => { + // N.B. send back a reply if we're calling to buffer, since this would be the first + // time we're seeing this message (instead of a subsequent time on follow-up + // processing). + let (can_buffer, can_store) = self + .handle_unsolicited_StackerDBPushChunk( + chainstate, event_id, preamble, data, buffer, + ) + .unwrap_or_else(|e| { info!( - "{:?}: failed to handle unsolicited {:?}: {:?}", - &self.local_peer, payload, &e + "{:?}: failed to handle unsolicited {:?} when buffer = {}: {:?}", + self.get_local_peer(), + payload, + buffer, + &e ); (false, false) - } + }); + if buffer && can_buffer && !can_store { + debug!( + "{:?}: Buffering {:?} to retry on next sortition", + self.get_local_peer(), + &payload + ); } + (can_buffer, can_store) } _ => (false, true), } } - #[cfg_attr(test, mutants::skip)] - /// Handle unsolicited messages propagated up to us from our ongoing ConversationP2Ps. - /// Return messages that we couldn't handle here, but key them by neighbor, not event, so the - /// relayer can do something useful with them. - /// - /// Invalid messages are dropped silently, with an log message. - /// - /// If `buffer` is true, then this message will be buffered up and tried again in a subsequent - /// call if the handler for it deems the message valid. - /// - /// If `buffer` is false, then if the message handler deems the message valid, it will be - /// forwraded to the relayer. - /// - /// Returns the messages to be forward to the relayer, keyed by sender. - pub fn handle_unsolicited_messages( - &mut self, - sortdb: &SortitionDB, - chainstate: &StacksChainState, + /// Authenticate unsolicited messages -- find the address of the neighbor that sent them. + pub fn authenticate_unsolicited_messages( + &self, unsolicited: HashMap>, - ibd: bool, - buffer: bool, - ) -> HashMap> { - let mut unhandled: HashMap> = HashMap::new(); - for (event_id, messages) in unsolicited.into_iter() { + ) -> PendingMessages { + unsolicited.into_iter().filter_map(|(event_id, messages)| { if messages.len() == 0 { // no messages for this event - continue; + return None; } - if buffer && self.check_peer_authenticated(event_id).is_none() { + if self.check_peer_authenticated(event_id).is_none() { if cfg!(test) && self .connection_opts @@ -1039,14 +1161,11 @@ impl PeerNetwork { { test_debug!( "{:?}: skip unsolicited message authentication", - &self.local_peer + &self.get_local_peer() ); } else { - // do not buffer messages from unknown peers - // (but it's fine to process messages that were previosuly buffered, since the peer - // may have since disconnected) debug!("Will not handle unsolicited messages from unauthenticated or dead event {}", event_id); - continue; + return None; } }; let neighbor_key = if let Some(convo) = self.peers.get(&event_id) { @@ -1054,62 +1173,153 @@ impl PeerNetwork { } else { debug!( "{:?}: No longer such neighbor event={}, dropping {} unsolicited messages", - &self.local_peer, + &self.get_local_peer(), event_id, messages.len() ); - continue; + return None; }; + Some(((event_id, neighbor_key), messages)) + }) + .collect() + } - debug!("{:?}: Process {} unsolicited messages from {:?}", &self.local_peer, messages.len(), &neighbor_key; "buffer" => %buffer); - - for message in messages.into_iter() { + #[cfg_attr(test, mutants::skip)] + /// Handle unsolicited messages propagated up to us from our ongoing ConversationP2Ps. + /// Return messages that we couldn't handle here, but key them by neighbor, not event, so the + /// relayer can do something useful with them. + /// + /// This applies only to messages that might be processable after the next sortition. It does + /// *NOT* apply to messages that might be processable after the next tenure. + /// + /// Invalid messages are dropped silently, with an log message. + /// + /// If `buffer` is true, then this message will be buffered up and tried again in a subsequent + /// call if the handler for it deems the message valid. + /// + /// If `buffer` is false, then if the message handler deems the message valid, it will be + /// forwraded to the relayer. + /// + /// Returns messages we could not buffer, keyed by sender and event ID. This can be fed + /// directly into `handle_unsolicited_stacks_messages()` + pub fn handle_unsolicited_sortition_messages( + &mut self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + mut unsolicited: PendingMessages, + ibd: bool, + buffer: bool, + ) -> HashMap<(usize, NeighborKey), Vec> { + unsolicited.retain(|(event_id, neighbor_key), messages| { + debug!("{:?}: Process {} unsolicited sortition-bound messages from {:?}", &self.get_local_peer(), messages.len(), neighbor_key; "buffer" => %buffer); + messages.retain(|message| { if buffer && !self.can_buffer_data_message( - event_id, - self.pending_messages.get(&event_id).unwrap_or(&vec![]), + *event_id, + self.pending_messages.get(&(*event_id, neighbor_key.clone())).unwrap_or(&vec![]), &message, ) { - // asked to buffer, but we don't have space - continue; + // unable to store this due to quota being exceeded + return false; } if !buffer { debug!( - "{:?}: Re-try handling buffered message {} from {:?}", - &self.local_peer, + "{:?}: Re-try handling buffered sortition-bound message {} from {:?}", + &self.get_local_peer(), &message.payload.get_message_description(), &neighbor_key ); } - let (to_buffer, relay) = self.handle_unsolicited_message( + let (to_buffer, relay) = self.handle_unsolicited_sortition_message( sortdb, chainstate, - event_id, - &message.preamble, + *event_id, &message.payload, ibd, buffer, ); if buffer && to_buffer { - self.buffer_data_message(event_id, message); - } else if relay { + self.buffer_sortition_data_message(*event_id, neighbor_key, message.clone()); + return false; + } + if relay { // forward to relayer for processing debug!( "{:?}: Will forward message {} from {:?} to relayer", - &self.local_peer, + &self.get_local_peer(), &message.payload.get_message_description(), &neighbor_key ); - if let Some(msgs) = unhandled.get_mut(&neighbor_key) { - msgs.push(message); - } else { - unhandled.insert(neighbor_key.clone(), vec![message]); - } } + true + }); + messages.len() > 0 + }); + unsolicited + } + + #[cfg_attr(test, mutants::skip)] + /// Handle unsolicited and unhandled messages returned by + /// `handle_unsolicited_sortition_messages()`, to see if any of them could be processed at the + /// start of the next Stacks tenure. That is, the `unsolicited` map contains messages that + /// came from authenticated peers and do not exceed buffer quotas. + /// + /// Invalid messages are dropped silently, with a log message. + /// + /// If `buffer` is true, then this message will be buffered up and tried again in a subsequent + /// call if the handler for it deems the message valid. + /// + /// If `buffer` is false, then if the message handler deems the message valid, it will be + /// forwraded to the relayer. + /// + /// Returns messages we could not buffer, keyed by sender. + pub fn handle_unsolicited_stacks_messages( + &mut self, + chainstate: &mut StacksChainState, + mut unsolicited: PendingMessages, + buffer: bool, + ) -> HashMap<(usize, NeighborKey), Vec> { + unsolicited.retain(|(event_id, neighbor_key), messages| { + if messages.len() == 0 { + // no messages for this node + return false; } - } - unhandled + debug!("{:?}: Process {} unsolicited tenure-bound messages from {:?}", &self.get_local_peer(), messages.len(), &neighbor_key; "buffer" => %buffer); + messages.retain(|message| { + if !buffer { + debug!( + "{:?}: Re-try handling buffered tenure-bound message {} from {:?}", + &self.get_local_peer(), + &message.payload.get_message_description(), + neighbor_key + ); + } + let (to_buffer, relay) = self.handle_unsolicited_stacks_message( + chainstate, + *event_id, + &message.preamble, + &message.payload, + buffer, + ); + if buffer && to_buffer { + self.buffer_stacks_data_message(*event_id, neighbor_key, message.clone()); + return false; + } + if relay { + // forward to relayer for processing + debug!( + "{:?}: Will forward message {} from {:?} to relayer", + &self.get_local_peer(), + &message.payload.get_message_description(), + &neighbor_key + ); + } + true + }); + messages.len() > 0 + }); + unsolicited } } From 1e1ae4d314628062afa38f7031d998c9e9cdddcd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 27 Aug 2024 16:31:38 -0400 Subject: [PATCH 0920/1400] chore: add fault injection to slow down burnchain block processing in order to test this branch end-to-end --- .../src/burnchains/bitcoin_regtest_controller.rs | 14 ++++++++++++++ testnet/stacks-node/src/config.rs | 8 ++++++++ 2 files changed, 22 insertions(+) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 32d590dd39..4a4f0cad8c 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -606,9 +606,23 @@ impl BitcoinRegtestController { received_at: Instant::now(), }; + let received = self + .chain_tip + .as_ref() + .map(|tip| tip.block_snapshot.block_height) + .unwrap_or(0) + == burnchain_tip.block_snapshot.block_height; self.chain_tip = Some(burnchain_tip.clone()); debug!("Done receiving blocks"); + if self.config.burnchain.fault_injection_burnchain_block_delay > 0 && received { + info!( + "Fault injection: delaying burnchain blocks by {} milliseconds", + self.config.burnchain.fault_injection_burnchain_block_delay + ); + sleep_ms(self.config.burnchain.fault_injection_burnchain_block_delay); + } + Ok((burnchain_tip, burnchain_height)) } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index d1b115d9cf..e4751a1010 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1440,6 +1440,9 @@ pub struct BurnchainConfig { pub wallet_name: String, pub ast_precheck_size_height: Option, pub affirmation_overrides: HashMap, + /// fault injection to simulate a slow burnchain peer. + /// Delay burnchain block downloads by the given number of millseconds + pub fault_injection_burnchain_block_delay: u64, } impl BurnchainConfig { @@ -1479,6 +1482,7 @@ impl BurnchainConfig { wallet_name: "".to_string(), ast_precheck_size_height: None, affirmation_overrides: HashMap::new(), + fault_injection_burnchain_block_delay: 0, } } pub fn get_rpc_url(&self, wallet: Option) -> String { @@ -1573,6 +1577,7 @@ pub struct BurnchainConfigFile { pub wallet_name: Option, pub ast_precheck_size_height: Option, pub affirmation_overrides: Option>, + pub fault_injection_burnchain_block_delay: Option, } impl BurnchainConfigFile { @@ -1785,6 +1790,9 @@ impl BurnchainConfigFile { .pox_prepare_length .or(default_burnchain_config.pox_prepare_length), affirmation_overrides, + fault_injection_burnchain_block_delay: self + .fault_injection_burnchain_block_delay + .unwrap_or(default_burnchain_config.fault_injection_burnchain_block_delay), }; if let BitcoinNetworkType::Mainnet = config.get_bitcoin_network().1 { From 3cedc0d5134ea2e61d0a6223b667f98015e80b03 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 15:59:40 -0400 Subject: [PATCH 0921/1400] fix: fix failing integration test --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a658bfbcf6..c0d0593857 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3942,6 +3942,9 @@ fn forked_tenure_is_ignored() { info!("Starting Tenure C."); + // force the timestamp to be different + sleep_ms(2000); + // Submit a block commit op for tenure C. // It should also build on block A, since the node has paused processing of block B. let commits_before = commits_submitted.load(Ordering::SeqCst); @@ -3973,6 +3976,7 @@ fn forked_tenure_is_ignored() { let block_c = blocks.last().unwrap(); info!("Tenure C tip block: {}", &block_tenure_c.index_block_hash()); info!("Tenure C last block: {}", &block_c.block_id); + assert_ne!(block_tenure_b.block_id(), block_tenure_c.index_block_hash()); // Block C was built AFTER Block B was built, but BEFORE it was broadcasted (processed), so it should be built off of Block A assert_eq!( From 45c9c2cc5ca86671405a3edf67aa5c8859eca63b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 16:00:33 -0400 Subject: [PATCH 0922/1400] chore: fix comment --- stackslib/src/net/stackerdb/tests/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index e9474d9abf..f45e3acb93 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -862,7 +862,7 @@ fn test_stackerdb_push_relayer_late_chunks() { peer_2_config.add_neighbor(&peer_3_config.to_neighbor()); peer_3_config.add_neighbor(&peer_2_config.to_neighbor()); - // set up stacker DBs for both peers + // set up stacker DBs for all peers let idx_1 = add_stackerdb(&mut peer_1_config, Some(StackerDBConfig::template())); let idx_2 = add_stackerdb(&mut peer_2_config, Some(StackerDBConfig::template())); let idx_3 = add_stackerdb(&mut peer_3_config, Some(StackerDBConfig::template())); From 1f67900e512e7723a7e53059c2197cf4703e71d5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 22:12:14 -0400 Subject: [PATCH 0923/1400] fix: fix timed-out unit test --- stackslib/src/net/stackerdb/sync.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 53a5e13e48..32d7a7e37e 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -729,7 +729,9 @@ impl StackerDBSync { data.error_code ); self.connected_replicas.remove(&naddr); - if data.error_code == NackErrorCodes::StaleView { + if data.error_code == NackErrorCodes::StaleView + || data.error_code == NackErrorCodes::FutureView + { self.stale_neighbors.insert(naddr); } continue; @@ -846,7 +848,9 @@ impl StackerDBSync { data.error_code ); self.connected_replicas.remove(&naddr); - if data.error_code == NackErrorCodes::StaleView { + if data.error_code == NackErrorCodes::StaleView + || data.error_code == NackErrorCodes::FutureView + { self.stale_neighbors.insert(naddr); } continue; @@ -983,7 +987,9 @@ impl StackerDBSync { &self.smart_contract_id, data.error_code ); - if data.error_code == NackErrorCodes::StaleView { + if data.error_code == NackErrorCodes::StaleView + || data.error_code == NackErrorCodes::FutureView + { self.stale_neighbors.insert(naddr); } else if data.error_code == NackErrorCodes::StaleVersion { // try again immediately, without throttling @@ -1129,7 +1135,9 @@ impl StackerDBSync { &naddr, data.error_code ); - if data.error_code == NackErrorCodes::StaleView { + if data.error_code == NackErrorCodes::StaleView + || data.error_code == NackErrorCodes::FutureView + { self.stale_neighbors.insert(naddr); } continue; From 0cca6e163bdef46846242295890a523352ecc76d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 29 Aug 2024 09:41:52 -0400 Subject: [PATCH 0924/1400] Add a sleep to ensure that the proposed stacks block has a different timestamp than its parent Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a658bfbcf6..680fa42843 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -711,6 +711,7 @@ pub fn next_block_and_wait_for_commits( (0..commits_before.len()).map(|_| None).collect(); let mut commit_sent_time: Vec> = (0..commits_before.len()).map(|_| None).collect(); + sleep_ms(2000); // Make sure that the proposed stacks block has a different timestamp than its parent next_block_and(btc_controller, timeout_secs, || { for i in 0..commits_submitted.len() { let commits_sent = commits_submitted[i].load(Ordering::SeqCst); @@ -739,6 +740,7 @@ pub fn next_block_and_wait_for_commits( .as_ref() .ok_or("TEST-ERROR: Processed time wasn't set")?; if commits_sent <= commits_before[i] { + info!("NO COMMITS"); return Ok(false); } let commit_sent_time = commit_sent_time[i] @@ -746,22 +748,28 @@ pub fn next_block_and_wait_for_commits( .ok_or("TEST-ERROR: Processed time wasn't set")?; // try to ensure the commit was sent after the block was processed if commit_sent_time > block_processed_time { + info!("COMMIT NOT SENT AFTER BLOCK PROCESSED TIME"); continue; } // if two commits have been sent, one of them must have been after if commits_sent >= commits_before[i] + 2 { + info!("MORE THAN ENOUGH COMMITS"); continue; } // otherwise, just timeout if the commit was sent and its been long enough // for a new commit pass to have occurred if block_processed_time.elapsed() > Duration::from_secs(10) { + info!("TIMEOUT COMMIT"); continue; } + info!("CONDITIONS OF COMMIT CHECK NOT MET"); return Ok(false); } else { + info!("NO BLOCK PROCESSED IN COMMIT CHECK"); return Ok(false); } } + info!("ALL CONDITIONS MET IN COMMIT CHECK"); Ok(true) }) } From bd1fa7a0cc1b3bc6d8b85d546cc93ab3db45a535 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 29 Aug 2024 14:18:41 -0400 Subject: [PATCH 0925/1400] chore: `test_debug!` -> `debug!` --- .../burn/operations/leader_key_register.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index b892f7efd8..0a3c0057a6 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -136,32 +136,30 @@ impl LeaderKeyRegisterOp { let num_outputs = tx.num_recipients(); if num_inputs == 0 { - test_debug!( + debug!( "Invalid tx: inputs: {}, outputs: {}", - num_inputs, - num_outputs, + num_inputs, num_outputs, ); return Err(op_error::InvalidInput); } if num_outputs < 1 { - test_debug!( + debug!( "Invalid tx: inputs: {}, outputs: {}", - num_inputs, - num_outputs + num_inputs, num_outputs ); return Err(op_error::InvalidInput); } if tx.opcode() != Opcodes::LeaderKeyRegister as u8 { - test_debug!("Invalid tx: invalid opcode {}", tx.opcode()); + debug!("Invalid tx: invalid opcode {}", tx.opcode()); return Err(op_error::InvalidInput); } let data = match LeaderKeyRegisterOp::parse_data(&tx.data()) { Some(data) => data, None => { - test_debug!("Invalid tx data"); + debug!("Invalid tx data"); return Err(op_error::ParseError); } }; From f680c3168f0b9ce25a48d8de6d58b7f7933be9cb Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 29 Aug 2024 14:24:29 -0400 Subject: [PATCH 0926/1400] refactor: combine `if` statements --- .../chainstate/burn/operations/leader_key_register.rs | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 0a3c0057a6..44402adc0c 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -135,7 +135,7 @@ impl LeaderKeyRegisterOp { let num_inputs = tx.num_signers(); let num_outputs = tx.num_recipients(); - if num_inputs == 0 { + if num_inputs == 0 || num_outputs < 1 { debug!( "Invalid tx: inputs: {}, outputs: {}", num_inputs, num_outputs, @@ -143,14 +143,6 @@ impl LeaderKeyRegisterOp { return Err(op_error::InvalidInput); } - if num_outputs < 1 { - debug!( - "Invalid tx: inputs: {}, outputs: {}", - num_inputs, num_outputs - ); - return Err(op_error::InvalidInput); - } - if tx.opcode() != Opcodes::LeaderKeyRegister as u8 { debug!("Invalid tx: invalid opcode {}", tx.opcode()); return Err(op_error::InvalidInput); From 5780503c2fe7b723a78aab4d39fc1b9d34b1705a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 29 Aug 2024 15:49:15 -0400 Subject: [PATCH 0927/1400] fix: ensure that key register ops have change output --- .../stacks-node/src/burnchains/bitcoin_regtest_controller.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 32d590dd39..145e73a389 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -889,7 +889,7 @@ impl BitcoinRegtestController { fee_rate, &mut utxos, signer, - false, + true, // key register op requires change output to exist )?; increment_btc_ops_sent_counter(); @@ -1466,7 +1466,7 @@ impl BitcoinRegtestController { fee_rate, &mut utxos, signer, - true, // only block commit op requires change output to exist + true, // block commit op requires change output to exist )?; let serialized_tx = SerializedTx::new(tx.clone()); From 0508758cc26cba42c149b2f1e906e3ea8eb58964 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 29 Aug 2024 19:13:14 -0400 Subject: [PATCH 0928/1400] Add a monitor-signers cli command for polling signers stackerdb messages every x number of seconds Signed-off-by: Jacinta Ferrant --- stacks-signer/src/cli.rs | 17 +++ stacks-signer/src/client/stacks_client.rs | 38 +++++-- stacks-signer/src/main.rs | 130 +++++++++++++++++++++- stacks-signer/src/runloop.rs | 40 +------ stacks-signer/src/signerdb.rs | 13 +-- stacks-signer/src/v0/signer.rs | 22 ++-- 6 files changed, 194 insertions(+), 66 deletions(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 74e2cd2344..c83239828b 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::io::{self, Read}; +use std::net::SocketAddr; use std::path::PathBuf; use blockstack_lib::chainstate::stacks::address::PoxAddress; @@ -97,6 +98,8 @@ pub enum Command { GenerateVote(GenerateVoteArgs), /// Verify the vote for a specified SIP against a public key and vote info VerifyVote(VerifyVoteArgs), + /// Verify signer signatures by checking stackerdb slots contain the correct data + MonitorSigners(MonitorSignersArgs), } /// Basic arguments for all cyrptographic and stacker-db functionality @@ -258,6 +261,20 @@ impl TryFrom for Vote { } } +#[derive(Parser, Debug, Clone)] +/// Arguments for the MonitorSigners command +pub struct MonitorSignersArgs { + /// The Stacks node to connect to + #[arg(long)] + pub host: SocketAddr, + /// Whether the node is mainnet. Default is true + #[arg(long, default_value = "true")] + pub mainnet: bool, + /// Set the polling interval in seconds. Default is 60 seconds. + #[arg(long, short, default_value = "60")] + pub interval: u64, +} + #[derive(Clone, Debug, PartialEq)] /// Wrapper around `Pox4SignatureTopic` to implement `ValueEnum` pub struct StackingSignatureMethod(Pox4SignatureTopic); diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cd65f7914b..b6e9c8a381 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -1,4 +1,4 @@ -use std::collections::VecDeque; +use std::collections::{HashMap, VecDeque}; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -19,7 +19,7 @@ use std::net::SocketAddr; use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ - NakamotoSignerEntry, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, + NakamotoSignerEntry, SIGNERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; use blockstack_lib::chainstate::stacks::{ StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, @@ -56,6 +56,7 @@ use stacks_common::types::StacksEpochId; use stacks_common::{debug, warn}; use wsts::curve::point::{Compressed, Point}; +use super::SignerSlotID; use crate::client::{retry_with_exponential_backoff, ClientError}; use crate::config::GlobalConfig; use crate::runloop::RewardCycleInfo; @@ -158,7 +159,7 @@ impl StacksClient { } /// Helper function that attempts to deserialize a clarity hext string as a list of signer slots and their associated number of signer slots - pub fn parse_signer_slots( + fn parse_signer_slots( &self, value: ClarityValue, ) -> Result, ClientError> { @@ -180,6 +181,29 @@ impl StacksClient { Ok(signer_slots) } + /// Get the stackerdb signer slots for a specific reward cycle + pub fn get_parsed_signer_slots( + &self, + reward_cycle: u64, + ) -> Result, ClientError> { + let signer_set = + u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); + let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); + // Get the signer writers from the stacker-db to find the signer slot id + let stackerdb_signer_slots = + self.get_stackerdb_signer_slots(&signer_stackerdb_contract_id, signer_set)?; + let mut signer_slot_ids = HashMap::with_capacity(stackerdb_signer_slots.len()); + for (index, (address, _)) in stackerdb_signer_slots.into_iter().enumerate() { + signer_slot_ids.insert( + address, + SignerSlotID( + u32::try_from(index).expect("FATAL: number of signers exceeds u32::MAX"), + ), + ); + } + Ok(signer_slot_ids) + } + /// Get the vote for a given round, reward cycle, and signer address pub fn get_vote_for_aggregate_public_key( &self, @@ -541,13 +565,13 @@ impl StacksClient { warn!("Failed to parse the GetStackers error response: {e}"); backoff::Error::permanent(e.into()) })?; - if &error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { - return Err(backoff::Error::transient(ClientError::NoSortitionOnChain)); + if error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { + Err(backoff::Error::transient(ClientError::NoSortitionOnChain)) } else { warn!("Got error response ({status}): {}", error_data.err_msg); - return Err(backoff::Error::permanent(ClientError::RequestFailure( + Err(backoff::Error::permanent(ClientError::RequestFailure( status, - ))); + ))) } }; let stackers_response = diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 184876373b..f0f907bfd9 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -26,22 +26,30 @@ extern crate serde; extern crate serde_json; extern crate toml; +use std::collections::HashMap; use std::io::{self, Write}; +use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; +use blockstack_lib::util_lib::boot::boot_code_id; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; -use clarity::types::chainstate::StacksPublicKey; +use clarity::codec::read_next; +use clarity::types::chainstate::{StacksPrivateKey, StacksPublicKey}; +use clarity::util::sleep_ms; use clarity::vm::types::QualifiedContractIdentifier; +use libsigner::v0::messages::{MessageSlotID, SignerMessage}; use libsigner::{SignerSession, StackerDBSession}; use libstackerdb::StackerDBChunkData; -use slog::slog_debug; -use stacks_common::debug; +use slog::{slog_debug, slog_info, slog_warn}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::MessageSignature; +use stacks_common::{debug, info, warn}; use stacks_signer::cli::{ Cli, Command, GenerateStackingSignatureArgs, GenerateVoteArgs, GetChunkArgs, - GetLatestChunkArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, VerifyVoteArgs, + GetLatestChunkArgs, MonitorSignersArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, + VerifyVoteArgs, }; +use stacks_signer::client::StacksClient; use stacks_signer::config::GlobalConfig; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -188,6 +196,119 @@ fn handle_verify_vote(args: VerifyVoteArgs, do_print: bool) -> bool { valid_vote } +fn handle_monitor_signers(args: MonitorSignersArgs) { + info!("Monitoring signers stackerdb..."); + let interval_ms = args.interval * 1000; + let stacks_client = StacksClient::new( + StacksPrivateKey::new(), // We don't need a private key to retrieve the reward cycle + args.host, + "FOO".to_string(), // We don't care about authorized paths. Just accessing public info + args.mainnet, + ); + let mut reward_cycle = stacks_client + .get_current_reward_cycle_info() + .unwrap() + .reward_cycle; + let mut contract_name = + NakamotoSigners::make_signers_db_name(reward_cycle, MessageSlotID::BlockResponse.to_u32()); + let mut contract_id = boot_code_id(contract_name.as_str(), args.mainnet); + let mut session = stackerdb_session(&args.host.to_string(), contract_id); + let mut signers_slots = stacks_client + .get_parsed_signer_slots(reward_cycle) + .expect("Failed to get signer slots"); + let mut signers_addresses = HashMap::with_capacity(signers_slots.len()); + for (signer_address, slot_id) in signers_slots.iter() { + signers_addresses.insert(*slot_id, *signer_address); + } + let mut slot_ids: Vec<_> = signers_slots.values().map(|value| value.0).collect(); + + // Poll stackerdb slots every 200 ms to check for new mock signatures. + let mut last_messages = HashMap::with_capacity(slot_ids.len()); + let mut signer_last_write_time = HashMap::with_capacity(slot_ids.len()); + loop { + let mut missing_signers = Vec::with_capacity(slot_ids.len()); + let mut stale_signers = Vec::with_capacity(slot_ids.len()); + + let next_reward_cycle = stacks_client + .get_current_reward_cycle_info() + .unwrap() + .reward_cycle; + if next_reward_cycle != reward_cycle { + reward_cycle = next_reward_cycle; + contract_name = NakamotoSigners::make_signers_db_name( + reward_cycle, + MessageSlotID::BlockResponse.to_u32(), + ); + contract_id = boot_code_id(contract_name.as_str(), args.mainnet); + session = stackerdb_session(&args.host.to_string(), contract_id); + signers_slots = stacks_client + .get_parsed_signer_slots(reward_cycle) + .expect("Failed to get signer slots"); + slot_ids = signers_slots.values().map(|value| value.0).collect(); + last_messages = HashMap::with_capacity(slot_ids.len()); + } + let new_messages: Vec> = session + .get_latest_chunks(&slot_ids) + .expect("Failed to get latest signer messages") + .into_iter() + .map(|chunk_opt| { + chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) + }) + .collect(); + for ((signer_address, slot_id), signer_message_opt) in + signers_slots.clone().into_iter().zip(new_messages) + { + if let Some(signer_message) = signer_message_opt { + if let Some(last_message) = last_messages.get(&slot_id) { + if last_message == &signer_message { + continue; + } + match last_message { + SignerMessage::MockSignature(_) => { + if args.mainnet { + warn!("Mock signature found for signer {signer_address} in slot {slot_id} but we are on mainnet"); + continue; + } + } + SignerMessage::BlockResponse(_) => { + if args.mainnet { + warn!("Block response found for signer {signer_address} in slot {slot_id} but we are on mainnet"); + continue; + } + } + _ => { + warn!("Unexpected message found for signer {signer_address} in slot {slot_id}"); + continue; + } + } + } + last_messages.insert(slot_id, signer_message); + signer_last_write_time.insert(slot_id, std::time::Instant::now()); + } else { + missing_signers.push(signer_address); + } + } + if !missing_signers.is_empty() { + warn!( + "Missing messages for {} signers: {missing_signers:?}", + missing_signers.len() + ); + } + for (slot_id, last_write_time) in signer_last_write_time.iter() { + if last_write_time.elapsed().as_secs() > 600 { + let address = signers_addresses + .get(slot_id) + .expect("BUG: missing signer address for given slot id"); + stale_signers.push(*address); + } + } + if !stale_signers.is_empty() { + warn!("The following {} signers have not written to stackerdb in over 10 minutes: {stale_signers:?}", stale_signers.len()); + } + sleep_ms(interval_ms); + } +} + fn main() { let cli = Cli::parse(); @@ -224,6 +345,7 @@ fn main() { Command::VerifyVote(args) => { handle_verify_vote(args, true); } + Command::MonitorSigners(args) => handle_monitor_signers(args), } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 9e1083047b..a9901c354f 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -18,19 +18,16 @@ use std::fmt::Debug; use std::sync::mpsc::Sender; use std::time::Duration; -use blockstack_lib::chainstate::stacks::boot::SIGNERS_NAME; -use blockstack_lib::util_lib::boot::boot_code_id; use clarity::codec::StacksMessageCodec; use hashbrown::HashMap; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerRunLoop}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; -use stacks_common::types::chainstate::StacksAddress; use stacks_common::{debug, error, info, warn}; use wsts::common::MerkleRoot; use wsts::state_machine::OperationResult; use crate::chainstate::SortitionsView; -use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID, StacksClient}; +use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient}; use crate::config::{GlobalConfig, SignerConfig}; use crate::Signer as SignerTrait; @@ -246,30 +243,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo Ok(Some(entries)) } - /// Get the stackerdb signer slots for a specific reward cycle - pub fn get_parsed_signer_slots( - &self, - stacks_client: &StacksClient, - reward_cycle: u64, - ) -> Result, ClientError> { - let signer_set = - u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); - let signer_stackerdb_contract_id = - boot_code_id(SIGNERS_NAME, self.config.network.is_mainnet()); - // Get the signer writers from the stacker-db to find the signer slot id - let stackerdb_signer_slots = - stacks_client.get_stackerdb_signer_slots(&signer_stackerdb_contract_id, signer_set)?; - let mut signer_slot_ids = HashMap::with_capacity(stackerdb_signer_slots.len()); - for (index, (address, _)) in stackerdb_signer_slots.into_iter().enumerate() { - signer_slot_ids.insert( - address, - SignerSlotID( - u32::try_from(index).expect("FATAL: number of signers exceeds u32::MAX"), - ), - ); - } - Ok(signer_slot_ids) - } /// Get a signer configuration for a specific reward cycle from the stacks node fn get_signer_config( &mut self, @@ -284,8 +257,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo return Err(e); } }; - let signer_slot_ids = match self.get_parsed_signer_slots(&self.stacks_client, reward_cycle) - { + let signer_slot_ids = match self.stacks_client.get_parsed_signer_slots(reward_cycle) { Ok(x) => x, Err(e) => { warn!("Error while fetching stackerdb slots {reward_cycle}: {e:?}"); @@ -431,10 +403,10 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo if !Self::is_configured_for_cycle(&self.stacks_signers, current_reward_cycle) { self.refresh_signer_config(current_reward_cycle); } - if is_in_next_prepare_phase { - if !Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle) { - self.refresh_signer_config(next_reward_cycle); - } + if is_in_next_prepare_phase + && !Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle) + { + self.refresh_signer_config(next_reward_cycle); } self.cleanup_stale_signers(current_reward_cycle); diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 2d2e9cc22a..b920b8b176 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -490,7 +490,7 @@ impl SignerDb { .vote .as_ref() .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" }); - let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, &hash)?; + let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, hash)?; debug!("Inserting block_info."; "reward_cycle" => %block_info.reward_cycle, @@ -534,7 +534,7 @@ impl SignerDb { let qry = "INSERT OR REPLACE INTO block_signatures (signer_signature_hash, signature) VALUES (?1, ?2);"; let args = params![ block_sighash, - serde_json::to_string(signature).map_err(|e| DBError::SerializationError(e))? + serde_json::to_string(signature).map_err(DBError::SerializationError)? ]; debug!("Inserting block signature."; @@ -590,7 +590,7 @@ impl SignerDb { if broadcasted == 0 { return Ok(None); } - Ok(u64::try_from(broadcasted).ok()) + Ok(Some(broadcasted)) } } @@ -880,15 +880,12 @@ mod tests { assert_eq!(db.get_block_signatures(&block_id).unwrap(), vec![]); db.add_block_signature(&block_id, &sig1).unwrap(); - assert_eq!( - db.get_block_signatures(&block_id).unwrap(), - vec![sig1.clone()] - ); + assert_eq!(db.get_block_signatures(&block_id).unwrap(), vec![sig1]); db.add_block_signature(&block_id, &sig2).unwrap(); assert_eq!( db.get_block_signatures(&block_id).unwrap(), - vec![sig1.clone(), sig2.clone()] + vec![sig1, sig2] ); } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 53a288b7f5..cb1d4f8a6d 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -244,7 +244,7 @@ impl From for Signer { .signer_entries .signer_ids .iter() - .map(|(addr, id)| (*id, addr.clone())) + .map(|(addr, id)| (*id, *addr)) .collect(); let signer_addresses: Vec<_> = signer_ids_and_addrs.into_values().collect(); @@ -262,7 +262,7 @@ impl From for Signer { signer_id, addr ); }; - (addr.clone(), key_ids.len()) + (*addr, key_ids.len()) }) .collect(); @@ -484,7 +484,7 @@ impl Signer { ( BlockResponse::accepted(signer_signature_hash, signature), block_info, - Some(signature.clone()), + Some(signature), ) } BlockValidateResponse::Reject(block_validate_reject) => { @@ -550,7 +550,7 @@ impl Signer { addrs: impl Iterator, ) -> u32 { let signing_weight = addrs.fold(0usize, |signing_weight, stacker_address| { - let stacker_weight = self.signer_weights.get(&stacker_address).unwrap_or(&0); + let stacker_weight = self.signer_weights.get(stacker_address).unwrap_or(&0); signing_weight.saturating_add(*stacker_weight) }); u32::try_from(signing_weight) @@ -607,16 +607,12 @@ impl Signer { }; // authenticate the signature -- it must be signed by one of the stacking set - let is_valid_sig = self - .signer_addresses - .iter() - .find(|addr| { - let stacker_address = StacksAddress::p2pkh(true, &public_key); + let is_valid_sig = self.signer_addresses.iter().any(|addr| { + let stacker_address = StacksAddress::p2pkh(true, &public_key); - // it only matters that the address hash bytes match - stacker_address.bytes == addr.bytes - }) - .is_some(); + // it only matters that the address hash bytes match + stacker_address.bytes == addr.bytes + }); if !is_valid_sig { debug!("{self}: Receive invalid signature {signature}. Will not store."); From 5b6a96989fc1f6790793b52a19944053fb7fb318 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 29 Aug 2024 20:04:05 -0400 Subject: [PATCH 0929/1400] Add epoch gating and make message timeout configurable Signed-off-by: Jacinta Ferrant --- stacks-signer/src/cli.rs | 3 +++ stacks-signer/src/main.rs | 51 +++++++++++++++++++++++---------------- 2 files changed, 33 insertions(+), 21 deletions(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index c83239828b..9707a847d6 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -273,6 +273,9 @@ pub struct MonitorSignersArgs { /// Set the polling interval in seconds. Default is 60 seconds. #[arg(long, short, default_value = "60")] pub interval: u64, + /// Max age in seconds before a signer message is considered stale. Default is 600 seconds. + #[arg(long, short, default_value = "600")] + pub max_age: u64, } #[derive(Clone, Debug, PartialEq)] diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index f0f907bfd9..78ce3d2d6b 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -35,6 +35,7 @@ use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_ke use clap::Parser; use clarity::codec::read_next; use clarity::types::chainstate::{StacksPrivateKey, StacksPublicKey}; +use clarity::types::StacksEpochId; use clarity::util::sleep_ms; use clarity::vm::types::QualifiedContractIdentifier; use libsigner::v0::messages::{MessageSlotID, SignerMessage}; @@ -197,7 +198,6 @@ fn handle_verify_vote(args: VerifyVoteArgs, do_print: bool) -> bool { } fn handle_monitor_signers(args: MonitorSignersArgs) { - info!("Monitoring signers stackerdb..."); let interval_ms = args.interval * 1000; let stacks_client = StacksClient::new( StacksPrivateKey::new(), // We don't need a private key to retrieve the reward cycle @@ -205,6 +205,12 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { "FOO".to_string(), // We don't care about authorized paths. Just accessing public info args.mainnet, ); + + let epoch = stacks_client.get_node_epoch().unwrap(); + assert!( + epoch >= StacksEpochId::Epoch25, + "Cannot Monitor Signers before Epoch 2.5. Current epoch: {epoch:?}", + ); let mut reward_cycle = stacks_client .get_current_reward_cycle_info() .unwrap() @@ -225,7 +231,13 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { // Poll stackerdb slots every 200 ms to check for new mock signatures. let mut last_messages = HashMap::with_capacity(slot_ids.len()); let mut signer_last_write_time = HashMap::with_capacity(slot_ids.len()); + + info!( + "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs", + args.interval, args.max_age + ); loop { + info!("Polling signers stackerdb for new messages..."); let mut missing_signers = Vec::with_capacity(slot_ids.len()); let mut stale_signers = Vec::with_capacity(slot_ids.len()); @@ -234,6 +246,10 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { .unwrap() .reward_cycle; if next_reward_cycle != reward_cycle { + info!( + "Reward cycle has changed from {} to {}.", + reward_cycle, next_reward_cycle + ); reward_cycle = next_reward_cycle; contract_name = NakamotoSigners::make_signers_db_name( reward_cycle, @@ -263,23 +279,16 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { if last_message == &signer_message { continue; } - match last_message { - SignerMessage::MockSignature(_) => { - if args.mainnet { - warn!("Mock signature found for signer {signer_address} in slot {slot_id} but we are on mainnet"); - continue; - } - } - SignerMessage::BlockResponse(_) => { - if args.mainnet { - warn!("Block response found for signer {signer_address} in slot {slot_id} but we are on mainnet"); - continue; - } - } - _ => { - warn!("Unexpected message found for signer {signer_address} in slot {slot_id}"); - continue; - } + if epoch == StacksEpochId::Epoch25 + && !matches!(last_message, SignerMessage::MockSignature(_)) + { + warn!("Epoch 2.5 Signers Should be Sending MockSignature messages. Unexpected message found for signer {signer_address} in slot {slot_id}"); + continue; + } else if epoch > StacksEpochId::Epoch25 + && !matches!(last_message, SignerMessage::BlockResponse(_)) + { + warn!("Nakamoto Signers Should be Sending BlockResponse messages. Unexpected message found for signer {signer_address} in slot {slot_id}"); + continue; } } last_messages.insert(slot_id, signer_message); @@ -290,12 +299,12 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } if !missing_signers.is_empty() { warn!( - "Missing messages for {} signers: {missing_signers:?}", + "Missing expected messages for {} signers: {missing_signers:?}", missing_signers.len() ); } for (slot_id, last_write_time) in signer_last_write_time.iter() { - if last_write_time.elapsed().as_secs() > 600 { + if last_write_time.elapsed().as_secs() > args.max_age { let address = signers_addresses .get(slot_id) .expect("BUG: missing signer address for given slot id"); @@ -303,7 +312,7 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } } if !stale_signers.is_empty() { - warn!("The following {} signers have not written to stackerdb in over 10 minutes: {stale_signers:?}", stale_signers.len()); + warn!("The following {} signers have not written to stackerdb in over {} seconds: {stale_signers:?}", stale_signers.len(), args.max_age); } sleep_ms(interval_ms); } From 80fb2935ee3cbf76697fed61c1778cd8dddce0bf Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 29 Aug 2024 20:07:35 -0400 Subject: [PATCH 0930/1400] Update signer addresses per reward cycle Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 78ce3d2d6b..4998c704f3 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -262,6 +262,9 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { .expect("Failed to get signer slots"); slot_ids = signers_slots.values().map(|value| value.0).collect(); last_messages = HashMap::with_capacity(slot_ids.len()); + for (signer_address, slot_id) in signers_slots.iter() { + signers_addresses.insert(*slot_id, *signer_address); + } } let new_messages: Vec> = session .get_latest_chunks(&slot_ids) From 79b72e4af8afb1ea5445e93daf20ac104a52d108 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 29 Aug 2024 20:25:17 -0400 Subject: [PATCH 0931/1400] Clear last_messages and last_updates every reward cycle Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 52 ++++++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 20 deletions(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 4998c704f3..51359da7ce 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -30,7 +30,6 @@ use std::collections::HashMap; use std::io::{self, Write}; use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; -use blockstack_lib::util_lib::boot::boot_code_id; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; use clarity::codec::read_next; @@ -215,10 +214,6 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { .get_current_reward_cycle_info() .unwrap() .reward_cycle; - let mut contract_name = - NakamotoSigners::make_signers_db_name(reward_cycle, MessageSlotID::BlockResponse.to_u32()); - let mut contract_id = boot_code_id(contract_name.as_str(), args.mainnet); - let mut session = stackerdb_session(&args.host.to_string(), contract_id); let mut signers_slots = stacks_client .get_parsed_signer_slots(reward_cycle) .expect("Failed to get signer slots"); @@ -228,10 +223,18 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } let mut slot_ids: Vec<_> = signers_slots.values().map(|value| value.0).collect(); - // Poll stackerdb slots every 200 ms to check for new mock signatures. + // Poll stackerdb slots to check for new expected messages let mut last_messages = HashMap::with_capacity(slot_ids.len()); - let mut signer_last_write_time = HashMap::with_capacity(slot_ids.len()); + let mut last_updates = HashMap::with_capacity(slot_ids.len()); + let mut session = stackerdb_session( + &args.host.to_string(), + NakamotoSigners::make_signers_db_contract_id( + reward_cycle, + MessageSlotID::BlockResponse.to_u32(), + args.mainnet, + ), + ); info!( "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs", args.interval, args.max_age @@ -247,26 +250,31 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { .reward_cycle; if next_reward_cycle != reward_cycle { info!( - "Reward cycle has changed from {} to {}.", + "Reward cycle has changed from {} to {}. Updating stacker db session.", reward_cycle, next_reward_cycle ); reward_cycle = next_reward_cycle; - contract_name = NakamotoSigners::make_signers_db_name( - reward_cycle, - MessageSlotID::BlockResponse.to_u32(), - ); - contract_id = boot_code_id(contract_name.as_str(), args.mainnet); - session = stackerdb_session(&args.host.to_string(), contract_id); signers_slots = stacks_client .get_parsed_signer_slots(reward_cycle) .expect("Failed to get signer slots"); slot_ids = signers_slots.values().map(|value| value.0).collect(); - last_messages = HashMap::with_capacity(slot_ids.len()); for (signer_address, slot_id) in signers_slots.iter() { signers_addresses.insert(*slot_id, *signer_address); } + session = stackerdb_session( + &args.host.to_string(), + NakamotoSigners::make_signers_db_contract_id( + reward_cycle, + MessageSlotID::BlockResponse.to_u32(), + args.mainnet, + ), + ); + + // Clear the last messages and signer last update times. + last_messages.clear(); + last_updates.clear(); } - let new_messages: Vec> = session + let new_messages: Vec<_> = session .get_latest_chunks(&slot_ids) .expect("Failed to get latest signer messages") .into_iter() @@ -295,7 +303,7 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } } last_messages.insert(slot_id, signer_message); - signer_last_write_time.insert(slot_id, std::time::Instant::now()); + last_updates.insert(slot_id, std::time::Instant::now()); } else { missing_signers.push(signer_address); } @@ -306,8 +314,8 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { missing_signers.len() ); } - for (slot_id, last_write_time) in signer_last_write_time.iter() { - if last_write_time.elapsed().as_secs() > args.max_age { + for (slot_id, last_update_time) in last_updates.iter() { + if last_update_time.elapsed().as_secs() > args.max_age { let address = signers_addresses .get(slot_id) .expect("BUG: missing signer address for given slot id"); @@ -315,7 +323,11 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } } if !stale_signers.is_empty() { - warn!("The following {} signers have not written to stackerdb in over {} seconds: {stale_signers:?}", stale_signers.len(), args.max_age); + warn!( + "No new updates from {} signers in over {} seconds: {stale_signers:?}", + stale_signers.len(), + args.max_age + ); } sleep_ms(interval_ms); } From 090a4a6b6ce433dccaa557baf86c73165717d1c6 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 29 Aug 2024 22:17:07 -0400 Subject: [PATCH 0932/1400] Retry on errors Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/mod.rs | 4 +++ stacks-signer/src/main.rs | 55 ++++++++++++++++++--------------- 2 files changed, 34 insertions(+), 25 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 32951d7990..cfc3bfcb27 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -23,6 +23,7 @@ use std::time::Duration; use clarity::vm::errors::Error as ClarityError; use clarity::vm::types::serialization::SerializationError; +use libsigner::RPCError; use libstackerdb::Error as StackerDBError; use slog::slog_debug; pub use stackerdb::*; @@ -94,6 +95,9 @@ pub enum ClientError { /// A successful sortition's info response should be parseable into a SortitionState #[error("A successful sortition's info response should be parseable into a SortitionState")] UnexpectedSortitionInfo, + /// An RPC libsigner error occurred + #[error("A libsigner RPC error occurred: {0}")] + RPCError(#[from] RPCError), } /// Retry a function F with an exponential backoff and notification on transient failure diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 51359da7ce..a6357ba760 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -40,16 +40,16 @@ use clarity::vm::types::QualifiedContractIdentifier; use libsigner::v0::messages::{MessageSlotID, SignerMessage}; use libsigner::{SignerSession, StackerDBSession}; use libstackerdb::StackerDBChunkData; -use slog::{slog_debug, slog_info, slog_warn}; +use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::MessageSignature; -use stacks_common::{debug, info, warn}; +use stacks_common::{debug, error, info, warn}; use stacks_signer::cli::{ Cli, Command, GenerateStackingSignatureArgs, GenerateVoteArgs, GetChunkArgs, GetLatestChunkArgs, MonitorSignersArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, VerifyVoteArgs, }; -use stacks_signer::client::StacksClient; +use stacks_signer::client::{ClientError, StacksClient}; use stacks_signer::config::GlobalConfig; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -197,7 +197,6 @@ fn handle_verify_vote(args: VerifyVoteArgs, do_print: bool) -> bool { } fn handle_monitor_signers(args: MonitorSignersArgs) { - let interval_ms = args.interval * 1000; let stacks_client = StacksClient::new( StacksPrivateKey::new(), // We don't need a private key to retrieve the reward cycle args.host, @@ -205,18 +204,30 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { args.mainnet, ); - let epoch = stacks_client.get_node_epoch().unwrap(); - assert!( - epoch >= StacksEpochId::Epoch25, - "Cannot Monitor Signers before Epoch 2.5. Current epoch: {epoch:?}", - ); - let mut reward_cycle = stacks_client - .get_current_reward_cycle_info() - .unwrap() - .reward_cycle; - let mut signers_slots = stacks_client - .get_parsed_signer_slots(reward_cycle) - .expect("Failed to get signer slots"); + loop { + if let Err(e) = start_monitoring_signers(&stacks_client, &args) { + error!( + "Error occurred monitoring signers: {:?}. Waiting and trying again.", + e + ); + sleep_ms(1000); + } + } +} + +fn start_monitoring_signers( + stacks_client: &StacksClient, + args: &MonitorSignersArgs, +) -> Result<(), ClientError> { + let interval_ms = args.interval * 1000; + let epoch = stacks_client.get_node_epoch()?; + if epoch < StacksEpochId::Epoch25 { + return Err(ClientError::UnsupportedStacksFeature( + "Signer monitoring is only supported for Epoch 2.5 and later".into(), + )); + } + let mut reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; + let mut signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; let mut signers_addresses = HashMap::with_capacity(signers_slots.len()); for (signer_address, slot_id) in signers_slots.iter() { signers_addresses.insert(*slot_id, *signer_address); @@ -244,19 +255,14 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { let mut missing_signers = Vec::with_capacity(slot_ids.len()); let mut stale_signers = Vec::with_capacity(slot_ids.len()); - let next_reward_cycle = stacks_client - .get_current_reward_cycle_info() - .unwrap() - .reward_cycle; + let next_reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; if next_reward_cycle != reward_cycle { info!( "Reward cycle has changed from {} to {}. Updating stacker db session.", reward_cycle, next_reward_cycle ); reward_cycle = next_reward_cycle; - signers_slots = stacks_client - .get_parsed_signer_slots(reward_cycle) - .expect("Failed to get signer slots"); + signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; slot_ids = signers_slots.values().map(|value| value.0).collect(); for (signer_address, slot_id) in signers_slots.iter() { signers_addresses.insert(*slot_id, *signer_address); @@ -275,8 +281,7 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { last_updates.clear(); } let new_messages: Vec<_> = session - .get_latest_chunks(&slot_ids) - .expect("Failed to get latest signer messages") + .get_latest_chunks(&slot_ids)? .into_iter() .map(|chunk_opt| { chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) From e829158752cd6a40143054c943e4c19481437276 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 30 Aug 2024 00:21:31 -0400 Subject: [PATCH 0933/1400] Cleanup logs and set mainnet default to false Signed-off-by: Jacinta Ferrant --- stacks-signer/src/cli.rs | 7 +- stacks-signer/src/client/stacks_client.rs | 5 +- stacks-signer/src/main.rs | 105 +++++++++++------- stacks-signer/src/tests/chainstate.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 10 +- testnet/stacks-node/src/tests/signer/v1.rs | 11 +- 6 files changed, 74 insertions(+), 66 deletions(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 9707a847d6..4006e0a7d1 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::io::{self, Read}; -use std::net::SocketAddr; use std::path::PathBuf; use blockstack_lib::chainstate::stacks::address::PoxAddress; @@ -266,9 +265,9 @@ impl TryFrom for Vote { pub struct MonitorSignersArgs { /// The Stacks node to connect to #[arg(long)] - pub host: SocketAddr, - /// Whether the node is mainnet. Default is true - #[arg(long, default_value = "true")] + pub host: String, + /// Whether the node is mainnet. Default is false. + #[arg(long, default_value = "false")] pub mainnet: bool, /// Set the polling interval in seconds. Default is 60 seconds. #[arg(long, short, default_value = "60")] diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index b6e9c8a381..a303c59bd1 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -1,4 +1,5 @@ use std::collections::{HashMap, VecDeque}; + // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -14,8 +15,6 @@ use std::collections::{HashMap, VecDeque}; // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::net::SocketAddr; - use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ @@ -107,7 +106,7 @@ impl StacksClient { /// Create a new signer StacksClient with the provided private key, stacks node host endpoint, version, and auth password pub fn new( stacks_private_key: StacksPrivateKey, - node_host: SocketAddr, + node_host: String, auth_password: String, mainnet: bool, ) -> Self { diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index a6357ba760..75893cb2a0 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -29,7 +29,6 @@ extern crate toml; use std::collections::HashMap; use std::io::{self, Write}; -use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; use clarity::codec::read_next; @@ -197,9 +196,11 @@ fn handle_verify_vote(args: VerifyVoteArgs, do_print: bool) -> bool { } fn handle_monitor_signers(args: MonitorSignersArgs) { + // Verify that the host is a valid URL + url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); let stacks_client = StacksClient::new( StacksPrivateKey::new(), // We don't need a private key to retrieve the reward cycle - args.host, + args.host.clone(), "FOO".to_string(), // We don't care about authorized paths. Just accessing public info args.mainnet, ); @@ -238,27 +239,22 @@ fn start_monitoring_signers( let mut last_messages = HashMap::with_capacity(slot_ids.len()); let mut last_updates = HashMap::with_capacity(slot_ids.len()); - let mut session = stackerdb_session( - &args.host.to_string(), - NakamotoSigners::make_signers_db_contract_id( - reward_cycle, - MessageSlotID::BlockResponse.to_u32(), - args.mainnet, - ), - ); + let contract = MessageSlotID::BlockResponse.stacker_db_contract(args.mainnet, reward_cycle); + let mut session = stackerdb_session(&args.host.to_string(), contract.clone()); info!( - "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs", + "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", args.interval, args.max_age ); loop { info!("Polling signers stackerdb for new messages..."); let mut missing_signers = Vec::with_capacity(slot_ids.len()); let mut stale_signers = Vec::with_capacity(slot_ids.len()); + let mut unexpected_messages = HashMap::new(); let next_reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; if next_reward_cycle != reward_cycle { info!( - "Reward cycle has changed from {} to {}. Updating stacker db session.", + "Reward cycle has changed from {} to {}. Updating stacker db session to StackerDB contract {contract}.", reward_cycle, next_reward_cycle ); reward_cycle = next_reward_cycle; @@ -269,11 +265,7 @@ fn start_monitoring_signers( } session = stackerdb_session( &args.host.to_string(), - NakamotoSigners::make_signers_db_contract_id( - reward_cycle, - MessageSlotID::BlockResponse.to_u32(), - args.mainnet, - ), + MessageSlotID::BlockResponse.stacker_db_contract(args.mainnet, reward_cycle), ); // Clear the last messages and signer last update times. @@ -295,17 +287,14 @@ fn start_monitoring_signers( if last_message == &signer_message { continue; } - if epoch == StacksEpochId::Epoch25 - && !matches!(last_message, SignerMessage::MockSignature(_)) - { - warn!("Epoch 2.5 Signers Should be Sending MockSignature messages. Unexpected message found for signer {signer_address} in slot {slot_id}"); - continue; - } else if epoch > StacksEpochId::Epoch25 - && !matches!(last_message, SignerMessage::BlockResponse(_)) - { - warn!("Nakamoto Signers Should be Sending BlockResponse messages. Unexpected message found for signer {signer_address} in slot {slot_id}"); - continue; - } + } + if (epoch == StacksEpochId::Epoch25 + && !matches!(signer_message, SignerMessage::MockSignature(_))) + || (epoch > StacksEpochId::Epoch25 + && !matches!(signer_message, SignerMessage::BlockResponse(_))) + { + unexpected_messages.insert(signer_address, (signer_message, slot_id)); + continue; } last_messages.insert(slot_id, signer_message); last_updates.insert(slot_id, std::time::Instant::now()); @@ -313,12 +302,6 @@ fn start_monitoring_signers( missing_signers.push(signer_address); } } - if !missing_signers.is_empty() { - warn!( - "Missing expected messages for {} signers: {missing_signers:?}", - missing_signers.len() - ); - } for (slot_id, last_update_time) in last_updates.iter() { if last_update_time.elapsed().as_secs() > args.max_age { let address = signers_addresses @@ -327,12 +310,56 @@ fn start_monitoring_signers( stale_signers.push(*address); } } - if !stale_signers.is_empty() { - warn!( - "No new updates from {} signers in over {} seconds: {stale_signers:?}", - stale_signers.len(), - args.max_age + if missing_signers.is_empty() + && stale_signers.is_empty() + && unexpected_messages.is_empty() + && !signers_addresses.is_empty() + { + info!( + "All {} signers are sending messages as expected.", + signers_addresses.len() ); + } else { + if !missing_signers.is_empty() { + let formatted_signers = missing_signers + .iter() + .map(|addr| format!("{addr}")) + .collect::>() + .join(", "); + warn!( + "Missing messages for {} of {} signer(s). ", missing_signers.len(), signers_addresses.len(); + "signers" => formatted_signers + ); + } + if !stale_signers.is_empty() { + let formatted_signers = stale_signers + .iter() + .map(|addr| format!("{addr}")) + .collect::>() + .join(", "); + warn!( + "No new updates from {} of {} signer(s) in over {} seconds", + stale_signers.len(), + signers_addresses.len(), + args.max_age; + "signers" => formatted_signers + ); + } + if !unexpected_messages.is_empty() { + let formatted_signers = unexpected_messages + .iter() + .map(|(addr, (msg, slot))| { + format!("(address: {addr}, slot_id: {slot}, message: {msg:?})") + }) + .collect::>() + .join(", "); + warn!( + "Unexpected messages from {} of {} Epoch {epoch} signer(s).", + unexpected_messages.len(), + signers_addresses.len(); + "signers" => formatted_signers + ); + } } sleep_ms(interval_ms); } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index d0c7f1d9f3..b552e8a0a0 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -92,7 +92,7 @@ fn setup_test_environment( let stacks_client = StacksClient::new( StacksPrivateKey::new(), - SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 10000).into(), + SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 10000).to_string(), "FOO".into(), false, ); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a658bfbcf6..5140f54597 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::collections::{BTreeMap, HashMap, HashSet}; -use std::net::ToSocketAddrs; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::{Arc, Mutex}; @@ -5176,16 +5175,9 @@ fn signer_chainstate() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted.clone()); - let socket = naka_conf - .node - .rpc_bind - .to_socket_addrs() - .unwrap() - .next() - .unwrap(); let signer_client = stacks_signer::client::StacksClient::new( StacksPrivateKey::from_seed(&[0, 1, 2, 3]), - socket, + naka_conf.node.rpc_bind.clone(), naka_conf .connection_options .auth_token diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 6e9ed71f36..4134eb7c02 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -13,7 +13,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::collections::HashSet; -use std::net::ToSocketAddrs; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use std::{env, thread}; @@ -248,15 +247,7 @@ impl SignerTest { } fn generate_invalid_transactions(&self) -> Vec { - let host = self - .running_nodes - .conf - .node - .rpc_bind - .to_socket_addrs() - .unwrap() - .next() - .unwrap(); + let host = self.running_nodes.conf.node.rpc_bind.clone(); // Get the signer indices let reward_cycle = self.get_current_reward_cycle(); From cb9946e02a5ee977c2198a3d3e5955052a432c82 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 30 Aug 2024 09:37:00 -0400 Subject: [PATCH 0934/1400] Add additional log messages to know the total reward set Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 75893cb2a0..0fd5543bb4 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -245,6 +245,9 @@ fn start_monitoring_signers( "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", args.interval, args.max_age ); + info!("Confirming messages for {} registered signers", signers_addresses.len(); + "signer_addresses" => signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") + ); loop { info!("Polling signers stackerdb for new messages..."); let mut missing_signers = Vec::with_capacity(slot_ids.len()); @@ -260,9 +263,13 @@ fn start_monitoring_signers( reward_cycle = next_reward_cycle; signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; slot_ids = signers_slots.values().map(|value| value.0).collect(); + signers_addresses.clear(); for (signer_address, slot_id) in signers_slots.iter() { signers_addresses.insert(*slot_id, *signer_address); } + info!("Confirming messages for {} registered signers", signers_addresses.len(); + "signer_addresses" => signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") + ); session = stackerdb_session( &args.host.to_string(), MessageSlotID::BlockResponse.stacker_db_contract(args.mainnet, reward_cycle), @@ -328,7 +335,7 @@ fn start_monitoring_signers( .join(", "); warn!( "Missing messages for {} of {} signer(s). ", missing_signers.len(), signers_addresses.len(); - "signers" => formatted_signers + "signer_addresses" => formatted_signers ); } if !stale_signers.is_empty() { @@ -342,7 +349,7 @@ fn start_monitoring_signers( stale_signers.len(), signers_addresses.len(), args.max_age; - "signers" => formatted_signers + "signer_addresses" => formatted_signers ); } if !unexpected_messages.is_empty() { @@ -357,7 +364,7 @@ fn start_monitoring_signers( "Unexpected messages from {} of {} Epoch {epoch} signer(s).", unexpected_messages.len(), signers_addresses.len(); - "signers" => formatted_signers + "signer_addresses" => formatted_signers ); } } @@ -401,7 +408,9 @@ fn main() { Command::VerifyVote(args) => { handle_verify_vote(args, true); } - Command::MonitorSigners(args) => handle_monitor_signers(args), + Command::MonitorSigners(args) => { + handle_monitor_signers(args); + } } } From edaf99dba8af259150b531f454421fc9d7e25575 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 30 Aug 2024 10:17:04 -0400 Subject: [PATCH 0935/1400] Change max_age default to 20 minutes Signed-off-by: Jacinta Ferrant --- stacks-signer/src/cli.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 4006e0a7d1..37e9218a9d 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -272,8 +272,8 @@ pub struct MonitorSignersArgs { /// Set the polling interval in seconds. Default is 60 seconds. #[arg(long, short, default_value = "60")] pub interval: u64, - /// Max age in seconds before a signer message is considered stale. Default is 600 seconds. - #[arg(long, short, default_value = "600")] + /// Max age in seconds before a signer message is considered stale. Default is 1200 seconds. + #[arg(long, short, default_value = "1200")] pub max_age: u64, } From 7be5a9147b2061c417d0729b62276323524ef243 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 30 Aug 2024 11:49:46 -0400 Subject: [PATCH 0936/1400] Print the public keys of the signer addresses as well Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 68 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 63 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 0fd5543bb4..18b9e4277c 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -32,7 +32,7 @@ use std::io::{self, Write}; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; use clarity::codec::read_next; -use clarity::types::chainstate::{StacksPrivateKey, StacksPublicKey}; +use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; use clarity::types::StacksEpochId; use clarity::util::sleep_ms; use clarity::vm::types::QualifiedContractIdentifier; @@ -229,6 +229,16 @@ fn start_monitoring_signers( } let mut reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; let mut signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; + let entries = stacks_client + .get_reward_set_signers(reward_cycle)? + .unwrap_or_else(|| panic!("No signers found for the current reward cycle {reward_cycle}")); + let mut signers_keys = HashMap::with_capacity(entries.len()); + for entry in entries { + let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + .expect("Failed to convert signing key to StacksPublicKey"); + let stacks_address = StacksAddress::p2pkh(args.mainnet, &public_key); + signers_keys.insert(stacks_address, public_key); + } let mut signers_addresses = HashMap::with_capacity(signers_slots.len()); for (signer_address, slot_id) in signers_slots.iter() { signers_addresses.insert(*slot_id, *signer_address); @@ -256,6 +266,8 @@ fn start_monitoring_signers( let next_reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; if next_reward_cycle != reward_cycle { + signers_addresses.clear(); + signers_keys.clear(); info!( "Reward cycle has changed from {} to {}. Updating stacker db session to StackerDB contract {contract}.", reward_cycle, next_reward_cycle @@ -263,10 +275,20 @@ fn start_monitoring_signers( reward_cycle = next_reward_cycle; signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; slot_ids = signers_slots.values().map(|value| value.0).collect(); - signers_addresses.clear(); for (signer_address, slot_id) in signers_slots.iter() { signers_addresses.insert(*slot_id, *signer_address); } + let entries = stacks_client + .get_reward_set_signers(reward_cycle)? + .unwrap_or_else(|| { + panic!("No signers found for the current reward cycle {reward_cycle}") + }); + for entry in entries { + let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + .expect("Failed to convert signing key to StacksPublicKey"); + let stacks_address = StacksAddress::p2pkh(args.mainnet, &public_key); + signers_keys.insert(stacks_address, public_key); + } info!("Confirming messages for {} registered signers", signers_addresses.len(); "signer_addresses" => signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") ); @@ -333,9 +355,21 @@ fn start_monitoring_signers( .map(|addr| format!("{addr}")) .collect::>() .join(", "); + let formatted_keys = signers_keys + .iter() + .filter_map(|(addr, key)| { + if missing_signers.contains(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); warn!( "Missing messages for {} of {} signer(s). ", missing_signers.len(), signers_addresses.len(); - "signer_addresses" => formatted_signers + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys ); } if !stale_signers.is_empty() { @@ -344,12 +378,24 @@ fn start_monitoring_signers( .map(|addr| format!("{addr}")) .collect::>() .join(", "); + let formatted_keys = signers_keys + .iter() + .filter_map(|(addr, key)| { + if stale_signers.contains(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); warn!( "No new updates from {} of {} signer(s) in over {} seconds", stale_signers.len(), signers_addresses.len(), args.max_age; - "signer_addresses" => formatted_signers + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys ); } if !unexpected_messages.is_empty() { @@ -360,11 +406,23 @@ fn start_monitoring_signers( }) .collect::>() .join(", "); + let formatted_keys = signers_keys + .iter() + .filter_map(|(addr, key)| { + if unexpected_messages.contains_key(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); warn!( "Unexpected messages from {} of {} Epoch {epoch} signer(s).", unexpected_messages.len(), signers_addresses.len(); - "signer_addresses" => formatted_signers + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys ); } } From d37e1873e00c061cf8ab86ba00be76e88555f35f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 30 Aug 2024 16:37:56 -0400 Subject: [PATCH 0937/1400] Cleanup signer monitor function Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 461 ++++++++++++++++++++++---------------- 1 file changed, 266 insertions(+), 195 deletions(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 18b9e4277c..64e5c48a96 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -48,7 +48,7 @@ use stacks_signer::cli::{ GetLatestChunkArgs, MonitorSignersArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, VerifyVoteArgs, }; -use stacks_signer::client::{ClientError, StacksClient}; +use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; use stacks_signer::config::GlobalConfig; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; @@ -197,16 +197,9 @@ fn handle_verify_vote(args: VerifyVoteArgs, do_print: bool) -> bool { fn handle_monitor_signers(args: MonitorSignersArgs) { // Verify that the host is a valid URL - url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); - let stacks_client = StacksClient::new( - StacksPrivateKey::new(), // We don't need a private key to retrieve the reward cycle - args.host.clone(), - "FOO".to_string(), // We don't care about authorized paths. Just accessing public info - args.mainnet, - ); - + let mut signer_monitor = SignerMonitor::new(args); loop { - if let Err(e) = start_monitoring_signers(&stacks_client, &args) { + if let Err(e) = signer_monitor.start() { error!( "Error occurred monitoring signers: {:?}. Waiting and trying again.", e @@ -216,107 +209,259 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } } -fn start_monitoring_signers( - stacks_client: &StacksClient, - args: &MonitorSignersArgs, -) -> Result<(), ClientError> { - let interval_ms = args.interval * 1000; - let epoch = stacks_client.get_node_epoch()?; - if epoch < StacksEpochId::Epoch25 { - return Err(ClientError::UnsupportedStacksFeature( - "Signer monitoring is only supported for Epoch 2.5 and later".into(), - )); +struct SignerMonitor { + stacks_client: StacksClient, + cycle_state: RewardCycleState, + args: MonitorSignersArgs, +} + +#[derive(Debug, Default, Clone)] +struct RewardCycleState { + signers_slots: HashMap, + signers_keys: HashMap, + signers_addresses: HashMap, + slot_ids: Vec, + /// Reward cycle is not known until the first successful call to the node + reward_cycle: Option, +} + +impl SignerMonitor { + fn new(args: MonitorSignersArgs) -> Self { + url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); + let stacks_client = StacksClient::new( + StacksPrivateKey::new(), // We don't need a private key to read + args.host.clone(), + "FOO".to_string(), // We don't care about authorized paths. Just accessing public info + args.mainnet, + ); + Self { + stacks_client, + cycle_state: RewardCycleState::default(), + args, + } } - let mut reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; - let mut signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; - let entries = stacks_client - .get_reward_set_signers(reward_cycle)? - .unwrap_or_else(|| panic!("No signers found for the current reward cycle {reward_cycle}")); - let mut signers_keys = HashMap::with_capacity(entries.len()); - for entry in entries { - let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) - .expect("Failed to convert signing key to StacksPublicKey"); - let stacks_address = StacksAddress::p2pkh(args.mainnet, &public_key); - signers_keys.insert(stacks_address, public_key); + + fn refresh_state(&mut self) -> Result { + let reward_cycle = self + .stacks_client + .get_current_reward_cycle_info()? + .reward_cycle; + if Some(reward_cycle) == self.cycle_state.reward_cycle { + // The reward cycle has not changed. Nothing to refresh. + return Ok(false); + } + self.cycle_state.reward_cycle = Some(reward_cycle); + + self.cycle_state.signers_keys.clear(); + self.cycle_state.signers_addresses.clear(); + + self.cycle_state.signers_slots = + self.stacks_client.get_parsed_signer_slots(reward_cycle)?; + self.cycle_state.slot_ids = self + .cycle_state + .signers_slots + .values() + .map(|value| value.0) + .collect(); + + let entries = self + .stacks_client + .get_reward_set_signers(reward_cycle)? + .unwrap_or_else(|| { + panic!("No signers found for the current reward cycle {reward_cycle}") + }); + for entry in entries { + let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + .expect("Failed to convert signing key to StacksPublicKey"); + let stacks_address = StacksAddress::p2pkh(self.args.mainnet, &public_key); + self.cycle_state + .signers_keys + .insert(stacks_address, public_key); + } + for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { + self.cycle_state + .signers_addresses + .insert(*slot_id, *signer_address); + } + + self.cycle_state.signers_slots = + self.stacks_client.get_parsed_signer_slots(reward_cycle)?; + + for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { + self.cycle_state + .signers_addresses + .insert(*slot_id, *signer_address); + self.cycle_state.slot_ids.push(slot_id.0); + } + Ok(true) } - let mut signers_addresses = HashMap::with_capacity(signers_slots.len()); - for (signer_address, slot_id) in signers_slots.iter() { - signers_addresses.insert(*slot_id, *signer_address); + + fn print_missing_signers(&self, missing_signers: &[StacksAddress]) { + if missing_signers.is_empty() { + return; + } + let formatted_signers = missing_signers + .iter() + .map(|addr| format!("{addr}")) + .collect::>() + .join(", "); + let formatted_keys = self + .cycle_state + .signers_keys + .iter() + .filter_map(|(addr, key)| { + if missing_signers.contains(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); + warn!( + "Missing messages for {} of {} signer(s). ", missing_signers.len(), self.cycle_state.signers_addresses.len(); + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys + ); + } + + fn print_stale_signers(&self, stale_signers: &[StacksAddress]) { + if stale_signers.is_empty() { + return; + } + let formatted_signers = stale_signers + .iter() + .map(|addr| format!("{addr}")) + .collect::>() + .join(", "); + let formatted_keys = self + .cycle_state + .signers_keys + .iter() + .filter_map(|(addr, key)| { + if stale_signers.contains(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); + warn!( + "No new updates from {} of {} signer(s) in over {} seconds", + stale_signers.len(), + self.cycle_state.signers_addresses.len(), + self.args.max_age; + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys + ); } - let mut slot_ids: Vec<_> = signers_slots.values().map(|value| value.0).collect(); - - // Poll stackerdb slots to check for new expected messages - let mut last_messages = HashMap::with_capacity(slot_ids.len()); - let mut last_updates = HashMap::with_capacity(slot_ids.len()); - - let contract = MessageSlotID::BlockResponse.stacker_db_contract(args.mainnet, reward_cycle); - let mut session = stackerdb_session(&args.host.to_string(), contract.clone()); - info!( - "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", - args.interval, args.max_age - ); - info!("Confirming messages for {} registered signers", signers_addresses.len(); - "signer_addresses" => signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") - ); - loop { - info!("Polling signers stackerdb for new messages..."); - let mut missing_signers = Vec::with_capacity(slot_ids.len()); - let mut stale_signers = Vec::with_capacity(slot_ids.len()); - let mut unexpected_messages = HashMap::new(); - - let next_reward_cycle = stacks_client.get_current_reward_cycle_info()?.reward_cycle; - if next_reward_cycle != reward_cycle { - signers_addresses.clear(); - signers_keys.clear(); - info!( - "Reward cycle has changed from {} to {}. Updating stacker db session to StackerDB contract {contract}.", - reward_cycle, next_reward_cycle - ); - reward_cycle = next_reward_cycle; - signers_slots = stacks_client.get_parsed_signer_slots(reward_cycle)?; - slot_ids = signers_slots.values().map(|value| value.0).collect(); - for (signer_address, slot_id) in signers_slots.iter() { - signers_addresses.insert(*slot_id, *signer_address); - } - let entries = stacks_client - .get_reward_set_signers(reward_cycle)? - .unwrap_or_else(|| { - panic!("No signers found for the current reward cycle {reward_cycle}") - }); - for entry in entries { - let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) - .expect("Failed to convert signing key to StacksPublicKey"); - let stacks_address = StacksAddress::p2pkh(args.mainnet, &public_key); - signers_keys.insert(stacks_address, public_key); - } - info!("Confirming messages for {} registered signers", signers_addresses.len(); - "signer_addresses" => signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") - ); - session = stackerdb_session( - &args.host.to_string(), - MessageSlotID::BlockResponse.stacker_db_contract(args.mainnet, reward_cycle), - ); - // Clear the last messages and signer last update times. - last_messages.clear(); - last_updates.clear(); + fn print_unexpected_messages( + &self, + unexpected_messages: &HashMap, + ) { + if unexpected_messages.is_empty() { + return; } - let new_messages: Vec<_> = session - .get_latest_chunks(&slot_ids)? - .into_iter() - .map(|chunk_opt| { - chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) + let formatted_signers = unexpected_messages + .iter() + .map(|(addr, (msg, slot))| { + format!("(address: {addr}, slot_id: {slot}, message: {msg:?})") }) - .collect(); - for ((signer_address, slot_id), signer_message_opt) in - signers_slots.clone().into_iter().zip(new_messages) - { - if let Some(signer_message) = signer_message_opt { + .collect::>() + .join(", "); + let formatted_keys = self + .cycle_state + .signers_keys + .iter() + .filter_map(|(addr, key)| { + if unexpected_messages.contains_key(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); + warn!( + "Unexpected messages from {} of {} signer(s).", + unexpected_messages.len(), + self.cycle_state.signers_addresses.len(); + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys + ); + } + + /// Start monitoring the signers stackerdb slots for expected new messages + pub fn start(&mut self) -> Result<(), ClientError> { + self.refresh_state()?; + let nmb_signers = self.cycle_state.signers_keys.len(); + let interval_ms = self.args.interval * 1000; + let reward_cycle = self + .cycle_state + .reward_cycle + .expect("BUG: reward cycle not set"); + let contract = + MessageSlotID::BlockResponse.stacker_db_contract(self.args.mainnet, reward_cycle); + info!( + "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", + self.args.interval, self.args.max_age + ); + let mut session = stackerdb_session(&self.args.host, contract); + info!("Confirming messages for {nmb_signers} registered signers"; + "signer_addresses" => self.cycle_state.signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") + ); + let mut last_messages = HashMap::with_capacity(nmb_signers); + let mut last_updates = HashMap::with_capacity(nmb_signers); + loop { + info!("Polling signers stackerdb for new messages..."); + let mut missing_signers = Vec::with_capacity(nmb_signers); + let mut stale_signers = Vec::with_capacity(nmb_signers); + let mut unexpected_messages = HashMap::new(); + + if self.refresh_state()? { + let reward_cycle = self + .cycle_state + .reward_cycle + .expect("BUG: reward cycle not set"); + let contract = MessageSlotID::BlockResponse + .stacker_db_contract(self.args.mainnet, reward_cycle); + info!( + "Reward cycle has changed to {reward_cycle}. Updating stacker db session to StackerDB contract {contract}.", + ); + session = stackerdb_session(&self.args.host, contract); + // Clear the last messages and signer last update times. + last_messages.clear(); + last_updates.clear(); + } + let new_messages: Vec<_> = session + .get_latest_chunks(&self.cycle_state.slot_ids)? + .into_iter() + .map(|chunk_opt| { + chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) + }) + .collect(); + for ((signer_address, slot_id), signer_message_opt) in self + .cycle_state + .signers_slots + .clone() + .into_iter() + .zip(new_messages) + { + let Some(signer_message) = signer_message_opt else { + missing_signers.push(signer_address); + continue; + }; if let Some(last_message) = last_messages.get(&slot_id) { if last_message == &signer_message { continue; } } + let epoch = self.stacks_client.get_node_epoch()?; + if epoch < StacksEpochId::Epoch25 { + return Err(ClientError::UnsupportedStacksFeature(format!("Monitoring signers is only supported for Epoch 2.5 and later. Current epoch: {epoch:?}"))); + } if (epoch == StacksEpochId::Epoch25 && !matches!(signer_message, SignerMessage::MockSignature(_))) || (epoch > StacksEpochId::Epoch25 @@ -327,106 +472,32 @@ fn start_monitoring_signers( } last_messages.insert(slot_id, signer_message); last_updates.insert(slot_id, std::time::Instant::now()); - } else { - missing_signers.push(signer_address); - } - } - for (slot_id, last_update_time) in last_updates.iter() { - if last_update_time.elapsed().as_secs() > args.max_age { - let address = signers_addresses - .get(slot_id) - .expect("BUG: missing signer address for given slot id"); - stale_signers.push(*address); - } - } - if missing_signers.is_empty() - && stale_signers.is_empty() - && unexpected_messages.is_empty() - && !signers_addresses.is_empty() - { - info!( - "All {} signers are sending messages as expected.", - signers_addresses.len() - ); - } else { - if !missing_signers.is_empty() { - let formatted_signers = missing_signers - .iter() - .map(|addr| format!("{addr}")) - .collect::>() - .join(", "); - let formatted_keys = signers_keys - .iter() - .filter_map(|(addr, key)| { - if missing_signers.contains(addr) { - Some(format!("0x{}", key.to_hex())) - } else { - None - } - }) - .collect::>() - .join(", "); - warn!( - "Missing messages for {} of {} signer(s). ", missing_signers.len(), signers_addresses.len(); - "signer_addresses" => formatted_signers, - "signer_keys" => formatted_keys - ); } - if !stale_signers.is_empty() { - let formatted_signers = stale_signers - .iter() - .map(|addr| format!("{addr}")) - .collect::>() - .join(", "); - let formatted_keys = signers_keys - .iter() - .filter_map(|(addr, key)| { - if stale_signers.contains(addr) { - Some(format!("0x{}", key.to_hex())) - } else { - None - } - }) - .collect::>() - .join(", "); - warn!( - "No new updates from {} of {} signer(s) in over {} seconds", - stale_signers.len(), - signers_addresses.len(), - args.max_age; - "signer_addresses" => formatted_signers, - "signer_keys" => formatted_keys - ); + for (slot_id, last_update_time) in last_updates.iter() { + if last_update_time.elapsed().as_secs() > self.args.max_age { + let address = self + .cycle_state + .signers_addresses + .get(slot_id) + .expect("BUG: missing signer address for given slot id"); + stale_signers.push(*address); + } } - if !unexpected_messages.is_empty() { - let formatted_signers = unexpected_messages - .iter() - .map(|(addr, (msg, slot))| { - format!("(address: {addr}, slot_id: {slot}, message: {msg:?})") - }) - .collect::>() - .join(", "); - let formatted_keys = signers_keys - .iter() - .filter_map(|(addr, key)| { - if unexpected_messages.contains_key(addr) { - Some(format!("0x{}", key.to_hex())) - } else { - None - } - }) - .collect::>() - .join(", "); - warn!( - "Unexpected messages from {} of {} Epoch {epoch} signer(s).", - unexpected_messages.len(), - signers_addresses.len(); - "signer_addresses" => formatted_signers, - "signer_keys" => formatted_keys + if missing_signers.is_empty() + && stale_signers.is_empty() + && unexpected_messages.is_empty() + { + info!( + "All {} signers are sending messages as expected.", + nmb_signers ); + } else { + self.print_missing_signers(&missing_signers); + self.print_stale_signers(&stale_signers); + self.print_unexpected_messages(&unexpected_messages); } + sleep_ms(interval_ms); } - sleep_ms(interval_ms); } } From 7ca1ce1e11568c5a9776d7cc66ed19fe51e92472 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 30 Aug 2024 22:48:31 -0400 Subject: [PATCH 0938/1400] feat: retry check for UTXOs on startup Fixes #5124 --- .github/workflows/bitcoin-tests.yml | 2 + testnet/stacks-node/src/run_loop/nakamoto.rs | 35 +++-- testnet/stacks-node/src/run_loop/neon.rs | 35 +++-- .../src/tests/nakamoto_integrations.rs | 144 ++++++++++++++++++ 4 files changed, 190 insertions(+), 26 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 27e76a646d..68cb6153ff 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -108,6 +108,8 @@ jobs: - tests::nakamoto_integrations::continue_tenure_extend - tests::nakamoto_integrations::mock_mining - tests::nakamoto_integrations::multiple_miners + - tests::nakamoto_integrations::utxo_check_on_startup_panic + - tests::nakamoto_integrations::utxo_check_on_startup_recover # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 44a6c0fba9..8b206a66ab 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -155,6 +155,11 @@ impl RunLoop { self.miner_status.clone() } + /// Seconds to wait before retrying UTXO check during startup + const UTXO_RETRY_INTERVAL: u64 = 10; + /// Number of times to retry UTXO check during startup + const UTXO_RETRY_COUNT: u64 = 6; + /// Determine if we're the miner. /// If there's a network error, then assume that we're not a miner. fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool { @@ -187,22 +192,26 @@ impl RunLoop { )); } - for (epoch_id, btc_addr) in btc_addrs.into_iter() { - info!("Miner node: checking UTXOs at address: {}", &btc_addr); - let utxos = burnchain.get_utxos(epoch_id, &op_signer.get_public_key(), 1, None, 0); - if utxos.is_none() { - warn!("UTXOs not found for {}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {} (importaddress)", btc_addr, btc_addr); - } else { - info!("UTXOs found - will run as a Miner node"); + // retry UTXO check a few times, in case bitcoind is still starting up + for _ in 0..Self::UTXO_RETRY_COUNT { + for (epoch_id, btc_addr) in &btc_addrs { + info!("Miner node: checking UTXOs at address: {btc_addr}"); + let utxos = + burnchain.get_utxos(*epoch_id, &op_signer.get_public_key(), 1, None, 0); + if utxos.is_none() { + warn!("UTXOs not found for {btc_addr}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {btc_addr} (importaddress)"); + } else { + info!("UTXOs found - will run as a Miner node"); + return true; + } + } + if self.config.get_node_config(false).mock_mining { + info!("No UTXOs found, but configured to mock mine"); return true; } + thread::sleep(std::time::Duration::from_secs(Self::UTXO_RETRY_INTERVAL)); } - if self.config.get_node_config(false).mock_mining { - info!("No UTXOs found, but configured to mock mine"); - return true; - } else { - return false; - } + panic!("No UTXOs found, exiting"); } else { info!("Will run as a Follower node"); false diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 5d5ff3653d..36777c4912 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -358,6 +358,11 @@ impl RunLoop { } } + /// Seconds to wait before retrying UTXO check during startup + const UTXO_RETRY_INTERVAL: u64 = 10; + /// Number of times to retry UTXO check during startup + const UTXO_RETRY_COUNT: u64 = 6; + /// Determine if we're the miner. /// If there's a network error, then assume that we're not a miner. fn check_is_miner(&mut self, burnchain: &mut BitcoinRegtestController) -> bool { @@ -390,22 +395,26 @@ impl RunLoop { )); } - for (epoch_id, btc_addr) in btc_addrs.into_iter() { - info!("Miner node: checking UTXOs at address: {}", &btc_addr); - let utxos = burnchain.get_utxos(epoch_id, &op_signer.get_public_key(), 1, None, 0); - if utxos.is_none() { - warn!("UTXOs not found for {}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {} (importaddress)", btc_addr, btc_addr); - } else { - info!("UTXOs found - will run as a Miner node"); + // retry UTXO check a few times, in case bitcoind is still starting up + for _ in 0..Self::UTXO_RETRY_COUNT { + for (epoch_id, btc_addr) in &btc_addrs { + info!("Miner node: checking UTXOs at address: {btc_addr}"); + let utxos = + burnchain.get_utxos(*epoch_id, &op_signer.get_public_key(), 1, None, 0); + if utxos.is_none() { + warn!("UTXOs not found for {btc_addr}. If this is unexpected, please ensure that your bitcoind instance is indexing transactions for the address {btc_addr} (importaddress)"); + } else { + info!("UTXOs found - will run as a Miner node"); + return true; + } + } + if self.config.get_node_config(false).mock_mining { + info!("No UTXOs found, but configured to mock mine"); return true; } + thread::sleep(std::time::Duration::from_secs(Self::UTXO_RETRY_INTERVAL)); } - if self.config.get_node_config(false).mock_mining { - info!("No UTXOs found, but configured to mock mine"); - return true; - } else { - return false; - } + panic!("No UTXOs found, exiting"); } else { info!("Will run as a Follower node"); false diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a658bfbcf6..0eaca05245 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7552,3 +7552,147 @@ fn mock_mining() { run_loop_thread.join().unwrap(); follower_thread.join().unwrap(); } + +#[test] +#[ignore] +/// This test checks for the proper handling of the case where UTXOs are not +/// available on startup. After 1 minute, the miner thread should panic. +fn utxo_check_on_startup_panic() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + println!("Nakamoto node started with config: {:?}", naka_conf); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut epochs = NAKAMOTO_INTEGRATION_EPOCHS.to_vec(); + let (last, rest) = epochs.split_last_mut().unwrap(); + for (index, epoch) in rest.iter_mut().enumerate() { + epoch.start_height = index as u64; + epoch.end_height = (index + 1) as u64; + } + last.start_height = 131; + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + // Do not fully bootstrap the chain, so that the UTXOs are not yet available + btc_regtest_controller.bootstrap_chain(99); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + + let timeout = Duration::from_secs(70); + let start_time = Instant::now(); + + loop { + // Check if the thread has panicked + if run_loop_thread.is_finished() { + match run_loop_thread.join() { + Ok(_) => { + // Thread completed without panicking + panic!("Miner should have panicked but it exited cleanly."); + } + Err(_) => { + // Thread panicked + info!("Thread has panicked!"); + break; + } + } + } + + // Check if 70 seconds have passed + assert!( + start_time.elapsed() < timeout, + "Miner should have panicked." + ); + + thread::sleep(Duration::from_millis(1000)); + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); +} + +#[test] +#[ignore] +/// This test checks for the proper handling of the case where UTXOs are not +/// available on startup, but become available later, before the 1 minute +/// timeout. The miner thread should recover and continue mining. +fn utxo_check_on_startup_recover() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + println!("Nakamoto node started with config: {:?}", naka_conf); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut epochs = NAKAMOTO_INTEGRATION_EPOCHS.to_vec(); + let (last, rest) = epochs.split_last_mut().unwrap(); + for (index, epoch) in rest.iter_mut().enumerate() { + epoch.start_height = index as u64; + epoch.end_height = (index + 1) as u64; + } + last.start_height = 131; + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + // Do not fully bootstrap the chain, so that the UTXOs are not yet available + btc_regtest_controller.bootstrap_chain(99); + // btc_regtest_controller.bootstrap_chain(108); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + + // Sleep for 30s to allow the miner to start and reach the UTXO check loop + thread::sleep(Duration::from_secs(30)); + + btc_regtest_controller.bootstrap_chain(3); + + wait_for_runloop(&blocks_processed); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + run_loop_thread.join().unwrap(); +} From ab46d76d4b29cd2066516e75b2b672d6d748799f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sat, 31 Aug 2024 23:02:12 -0400 Subject: [PATCH 0939/1400] fix: add pause to ensure block is accepted --- .../stacks-node/src/tests/nakamoto_integrations.rs | 7 ------- testnet/stacks-node/src/tests/signer/v0.rs | 13 ++++--------- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 680fa42843..fb3e907677 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -740,7 +740,6 @@ pub fn next_block_and_wait_for_commits( .as_ref() .ok_or("TEST-ERROR: Processed time wasn't set")?; if commits_sent <= commits_before[i] { - info!("NO COMMITS"); return Ok(false); } let commit_sent_time = commit_sent_time[i] @@ -748,28 +747,22 @@ pub fn next_block_and_wait_for_commits( .ok_or("TEST-ERROR: Processed time wasn't set")?; // try to ensure the commit was sent after the block was processed if commit_sent_time > block_processed_time { - info!("COMMIT NOT SENT AFTER BLOCK PROCESSED TIME"); continue; } // if two commits have been sent, one of them must have been after if commits_sent >= commits_before[i] + 2 { - info!("MORE THAN ENOUGH COMMITS"); continue; } // otherwise, just timeout if the commit was sent and its been long enough // for a new commit pass to have occurred if block_processed_time.elapsed() > Duration::from_secs(10) { - info!("TIMEOUT COMMIT"); continue; } - info!("CONDITIONS OF COMMIT CHECK NOT MET"); return Ok(false); } else { - info!("NO BLOCK PROCESSED IN COMMIT CHECK"); return Ok(false); } } - info!("ALL CONDITIONS MET IN COMMIT CHECK"); Ok(true) }) } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1ab18b93e1..e17f8e9835 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3330,8 +3330,8 @@ fn partial_tenure_fork() { let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); - let proposed_before = blocks_proposed.load(Ordering::SeqCst); - info!("proposed_blocks: {proposed_before}, proposed_blocks2: {proposed_before_2}"); + + sleep_ms(1000); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -3339,10 +3339,6 @@ fn partial_tenure_fork() { let mined_1 = blocks_mined1.load(Ordering::SeqCst); let mined_2 = blocks_mined2.load(Ordering::SeqCst); let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); - let proposed_1 = blocks_proposed.load(Ordering::SeqCst); - info!( - "Fork initiated: {fork_initiated}, Mined 1 blocks: {mined_1}, Mined 2 blocks {mined_2}, Proposed blocks: {proposed_1}, Proposed blocks 2: {proposed_2}", - ); Ok((fork_initiated && proposed_2 > proposed_before_2) || mined_1 > mined_before_1 @@ -3431,8 +3427,8 @@ fn partial_tenure_fork() { } else { if miner_2_tenures < min_miner_2_tenures { assert_eq!(mined_2, mined_before_2 + inter_blocks_per_tenure + 1); - } else if miner_2_tenures == min_miner_2_tenures { - // If this is the forking tenure, miner 2 should have mined 0 blocks + } else { + // Miner 2 should have mined 0 blocks after the fork assert_eq!(mined_2, mined_before_2); } } @@ -3447,7 +3443,6 @@ fn partial_tenure_fork() { let peer_1_height = get_chain_info(&conf).stacks_tip_height; let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; - info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); assert_eq!(peer_2_height, ignore_block - 1); assert_eq!( peer_1_height, From 4e583e9dd99fe2684eb49f0748d85d9b211ec1cc Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 1 Sep 2024 12:20:11 -0400 Subject: [PATCH 0940/1400] test: fix assertion and remove useless code --- testnet/stacks-node/src/tests/signer/v0.rs | 23 ++++++---------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e17f8e9835..2aa09238c5 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3444,10 +3444,12 @@ fn partial_tenure_fork() { let peer_1_height = get_chain_info(&conf).stacks_tip_height; let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; assert_eq!(peer_2_height, ignore_block - 1); - assert_eq!( - peer_1_height, - pre_nakamoto_peer_1_height - + (miner_1_tenures + min_miner_2_tenures - 1) * (inter_blocks_per_tenure + 1) + // The height may be higher than expected due to extra transactions waiting + // to be mined during the forking miner's tenure. + assert!( + peer_1_height + >= pre_nakamoto_peer_1_height + + (miner_1_tenures + min_miner_2_tenures - 1) * (inter_blocks_per_tenure + 1) ); assert_eq!( btc_blocks_mined, @@ -3473,18 +3475,5 @@ fn partial_tenure_fork() { .unwrap(); assert_eq!(tip.stacks_block_height, ignore_block - 1); - let (chainstate, _) = StacksChainState::open( - false, - conf.burnchain.chain_id, - &conf.get_chainstate_path_str(), - None, - ) - .unwrap(); - - let blocks = chainstate - .get_stacks_chain_tips_at_height(ignore_block) - .unwrap(); - info!("blocks: {:?}", blocks); - signer_test.shutdown(); } From fd189d57e723044e7c550c28969354884a56bfff Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 1 Sep 2024 12:47:49 -0400 Subject: [PATCH 0941/1400] chore: cleanup unused code --- testnet/stacks-node/src/tests/signer/v0.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2aa09238c5..7b35279863 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -37,7 +37,7 @@ use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; -use stacks::net::relay::fault_injection::{clear_ignore_block, set_ignore_block}; +use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; use stacks::util::hash::MerkleHashFunc; @@ -3263,7 +3263,6 @@ fn partial_tenure_fork() { &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_proposed = signer_test.running_nodes.nakamoto_blocks_proposed.clone(); let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); From 32cc4a48e0f66a94d633d98ea65c7748246255c2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 1 Sep 2024 16:23:47 -0400 Subject: [PATCH 0942/1400] test: adjust bitcoin block count to account for setup --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7b35279863..235186e2db 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3085,7 +3085,7 @@ fn multiple_miners_with_nakamoto_blocks() { let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); - let mut btc_blocks_mined = 0; + let mut btc_blocks_mined = 1; let mut miner_1_tenures = 0; let mut miner_2_tenures = 0; while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { From 568722e8138fac74bf31e99d005e96e8339d47a2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 3 Sep 2024 10:22:56 -0400 Subject: [PATCH 0943/1400] fix: resolve errors after merge --- testnet/stacks-node/src/tests/signer/v0.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4657d00e52..a1398f7b2c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3139,7 +3139,8 @@ fn multiple_miners_with_nakamoto_blocks() { false }) }, - &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + None, ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -3373,7 +3374,8 @@ fn partial_tenure_fork() { config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); }, - &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + None, ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); From 48e23b94216c05259e1e2dd9000bed4617852e12 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 3 Sep 2024 21:47:37 -0400 Subject: [PATCH 0944/1400] test: fix remaining errors in tests --- testnet/stacks-node/src/tests/signer/v0.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a1398f7b2c..2ca6a12a8e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -278,13 +278,14 @@ impl SignerTest { self.run_until_epoch_3_boundary(); - let commits_submitted = self.running_nodes.commits_submitted.clone(); - let commits_before = commits_submitted.load(Ordering::SeqCst); - info!("Waiting 1 burnchain block for miner VRF key confirmation"); - // Wait one block to confirm the VRF register, wait until a block commit is submitted + // Wait until we see the first block of epoch 3.0. + // Note, we don't use `nakamoto_blocks_mined` counter, because there + // could be other miners mining blocks. + let height_before = get_chain_info(&self.running_nodes.conf).stacks_tip_height; + info!("Waiting for first Nakamoto block: {}", height_before + 1); next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count > commits_before) + let height = get_chain_info(&self.running_nodes.conf).stacks_tip_height; + Ok(height > height_before) }) .unwrap(); info!("Ready to mine Nakamoto blocks!"); @@ -3202,6 +3203,7 @@ fn multiple_miners_with_nakamoto_blocks() { let mut btc_blocks_mined = 1; let mut miner_1_tenures = 0; let mut miner_2_tenures = 0; + let mut sender_nonce = 0; while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { if btc_blocks_mined > max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); @@ -3234,9 +3236,9 @@ fn multiple_miners_with_nakamoto_blocks() { let blocks_processed_before = blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); // submit a tx so that the miner will mine an extra block - let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); wait_for(60, || { @@ -3300,7 +3302,7 @@ fn multiple_miners_with_nakamoto_blocks() { assert_eq!(peer_1_height, peer_2_height); assert_eq!( peer_1_height, - pre_nakamoto_peer_1_height + btc_blocks_mined * (inter_blocks_per_tenure + 1) + pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) ); assert_eq!( btc_blocks_mined, From 05174209f9d99d4844237cacfaf094f58d795ab1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 11:39:04 -0400 Subject: [PATCH 0945/1400] feat: add has_consensus_hash() function to sortition handle --- stackslib/src/chainstate/burn/db/sortdb.rs | 62 ++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 808cb73c1f..942e6774bd 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2620,6 +2620,27 @@ impl<'a> SortitionHandleConn<'a> { serde_json::from_str(&pox_addrs_json).expect("FATAL: failed to decode pox payout JSON"); Ok(pox_addrs) } + + /// Is a consensus hash's sortition valid on the fork represented by this handle? + /// Return Ok(true) if so + /// Return Ok(false) if not (including if there is no sortition with this consensus hash) + /// Return Err(..) on DB error + pub fn has_consensus_hash(&self, consensus_hash: &ConsensusHash) -> Result { + let Some(sn) = SortitionDB::get_block_snapshot_consensus(self, consensus_hash)? else { + // no sortition with this consensus hash + return Ok(false); + }; + + let Some(expected_sortition_id) = + get_ancestor_sort_id(self, sn.block_height, &self.context.chain_tip)? + else { + // no ancestor at this sortition height relative to this chain tip + // (e.g. perhaps this consensus hash is in the "future" relative to this chain tip) + return Ok(false); + }; + + Ok(sn.sortition_id == expected_sortition_id) + } } // Connection methods @@ -10948,4 +10969,45 @@ pub mod tests { SORTITION_DB_VERSION )); } + + #[test] + fn test_has_consensus_hash() { + let first_burn_hash = BurnchainHeaderHash::from_hex( + "10000000000000000000000000000000000000000000000000000000000000ff", + ) + .unwrap(); + let mut db = SortitionDB::connect_test(0, &first_burn_hash).unwrap(); + + let last_snapshot = SortitionDB::get_first_block_snapshot(db.conn()).unwrap(); + + // fork 1: 0 <-- 1 <-- 2 <-- 3 <-- 4 + // \ + // fork 2: *---- 5 <-- 6 + + let all_snapshots = make_fork_run(&mut db, &last_snapshot, 5, 0); + let fork_snapshots = make_fork_run(&mut db, &all_snapshots[3], 2, 0x80); + + let tip = &all_snapshots[4]; + let tip_2 = &fork_snapshots[1]; + assert_ne!(tip, tip_2); + + let ih = db.index_handle(&tip.sortition_id); + for sn in all_snapshots.iter() { + assert!(ih.has_consensus_hash(&sn.consensus_hash).unwrap()); + } + for sn in fork_snapshots.iter() { + assert!(!ih.has_consensus_hash(&sn.consensus_hash).unwrap()); + } + + let ih = db.index_handle(&tip_2.sortition_id); + for sn in fork_snapshots.iter() { + assert!(ih.has_consensus_hash(&sn.consensus_hash).unwrap()); + } + for sn in all_snapshots[0..4].iter() { + assert!(ih.has_consensus_hash(&sn.consensus_hash).unwrap()); + } + assert!(!ih + .has_consensus_hash(&all_snapshots[4].consensus_hash) + .unwrap()); + } } From 44c820bd8c175753147cd84873d923fcd73510ee Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 11:39:20 -0400 Subject: [PATCH 0946/1400] feat: add a way to query the highest known block in a given tenure on any Stacks fork --- stackslib/src/chainstate/nakamoto/mod.rs | 25 +++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index b007a3e7d6..78baa2578b 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2615,7 +2615,7 @@ impl NakamotoChainState { Self::get_block_header_nakamoto(chainstate_conn.sqlite(), &block_id) } - /// Get the highest block in the given tenure. + /// Get the highest block in the given tenure on a given fork. /// Only works on Nakamoto blocks. /// TODO: unit test pub fn get_highest_block_header_in_tenure( @@ -2631,6 +2631,29 @@ impl NakamotoChainState { Self::get_block_header_nakamoto(chainstate_conn.sqlite(), &block_id) } + /// Get the highest block in a given tenure (identified by its consensus hash). + /// Ties will be broken by timestamp. + /// + /// Used to verify that a signer-submitted block proposal builds atop the highest known block + /// in the given tenure, regardless of which fork it's on. + /// + /// DO NOT USE IN CONSENSUS CODE. Different nodes can have different blocks for the same + /// tenure. + pub fn get_highest_known_block_header_in_tenure( + db: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + // see if we have a nakamoto block in this tenure + let qry = "SELECT * FROM nakamoto_block_headers WHERE consensus_hash = ?1 ORDER BY block_height DESC, timestamp DESC LIMIT 1"; + let args = params![consensus_hash]; + if let Some(header) = query_row(db, qry, args)? { + return Ok(Some(header)); + } + + // see if this is an epoch2 header. If it exists, then there will only be one. + Ok(StacksChainState::get_stacks_block_header_info_by_consensus_hash(db, consensus_hash)?) + } + /// Get the VRF proof for a Stacks block. /// For Nakamoto blocks, this is the VRF proof contained in the coinbase of the tenure-start /// block of the given tenure identified by the consensus hash. From 22c42ab075f6cad2645f5c1a7f401f1848d68d4b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 11:39:44 -0400 Subject: [PATCH 0947/1400] feat: add additional checks to the postblock endpoint to verify that the proposed block builds on a canonical tenure, and it builds on the highest known block in that tenure --- stackslib/src/net/api/postblock_proposal.rs | 146 +++++++++++++++++++- 1 file changed, 145 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 6112ea0fae..9c5ab712c3 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -74,7 +74,9 @@ define_u8_enum![ValidateRejectCode { BadTransaction = 1, InvalidBlock = 2, ChainstateError = 3, - UnknownParent = 4 + UnknownParent = 4, + NonCanonicalTenure = 5, + NoSuchTenure = 6 }]; impl TryFrom for ValidateRejectCode { @@ -194,6 +196,136 @@ impl NakamotoBlockProposal { }) } + /// Check to see if a block builds atop the highest block in a given tenure. + /// That is: + /// - its parent must exist, and + /// - its parent must be as high as the highest block in the given tenure. + pub(crate) fn check_block_builds_on_highest_block_in_tenure( + chainstate: &StacksChainState, + tenure_id: &ConsensusHash, + parent_block_id: &StacksBlockId, + ) -> Result<(), BlockValidateRejectReason> { + let Some(highest_header) = NakamotoChainState::get_highest_known_block_header_in_tenure( + chainstate.db(), + tenure_id, + ) + .map_err(|e| BlockValidateRejectReason { + reason_code: ValidateRejectCode::ChainstateError, + reason: format!("Failed to query highest block in tenure ID: {:?}", &e), + })? + else { + warn!( + "Rejected block proposal"; + "reason" => "Block is not a tenure-start block, and has an unrecognized tenure consensus hash", + "consensus_hash" => %tenure_id, + ); + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::NoSuchTenure, + reason: "Block is not a tenure-start block, and has an unrecognized tenure consensus hash".into(), + }); + }; + let Some(parent_header) = + NakamotoChainState::get_block_header(chainstate.db(), parent_block_id).map_err( + |e| BlockValidateRejectReason { + reason_code: ValidateRejectCode::ChainstateError, + reason: format!("Failed to query block header by block ID: {:?}", &e), + }, + )? + else { + warn!( + "Rejected block proposal"; + "reason" => "Block has no parent", + "parent_block_id" => %parent_block_id + ); + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::UnknownParent, + reason: "Block has no parent".into(), + }); + }; + if parent_header.anchored_header.height() != highest_header.anchored_header.height() { + warn!( + "Rejected block proposal"; + "reason" => "Block's parent is not the highest block in this tenure", + "consensus_hash" => %tenure_id, + "parent_header.height" => parent_header.anchored_header.height(), + "highest_header.height" => highest_header.anchored_header.height(), + ); + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidBlock, + reason: "Block is not higher than the highest block in its tenure".into(), + }); + } + Ok(()) + } + + /// Verify that the block we received builds upon a valid tenure. + /// Implemented as a static function to facilitate testing. + pub(crate) fn check_block_has_valid_tenure( + db_handle: &SortitionHandleConn, + tenure_id: &ConsensusHash, + ) -> Result<(), BlockValidateRejectReason> { + // Verify that the block's tenure is on the canonical sortition history + if !db_handle.has_consensus_hash(tenure_id)? { + warn!( + "Rejected block proposal"; + "reason" => "Block's tenure consensus hash is not on the canonical Bitcoin fork", + "consensus_hash" => %tenure_id, + ); + return Err(BlockValidateRejectReason { + reason_code: ValidateRejectCode::NonCanonicalTenure, + reason: "Tenure consensus hash is not on the canonical Bitcoin fork".into(), + }); + } + Ok(()) + } + + /// Verify that the block we received builds on the highest block in its tenure. + /// * For tenure-start blocks, the parent must be as high as the highest block in the parent + /// block's tenure. + /// * For all other blocks, the parent must be as high as the highest block in the tenure. + /// + /// Implemented as a static function to facilitate testing + pub(crate) fn check_block_has_valid_parent( + chainstate: &StacksChainState, + block: &NakamotoBlock, + ) -> Result<(), BlockValidateRejectReason> { + let is_tenure_start = + block + .is_wellformed_tenure_start_block() + .map_err(|_| BlockValidateRejectReason { + reason_code: ValidateRejectCode::InvalidBlock, + reason: "Block is not well-formed".into(), + })?; + + if !is_tenure_start { + // this is a well-formed block that is not the start of a tenure, so it must build + // atop an existing block in its tenure. + Self::check_block_builds_on_highest_block_in_tenure( + chainstate, + &block.header.consensus_hash, + &block.header.parent_block_id, + )?; + } else { + // this is a tenure-start block, so it must build atop a parent which has the + // highest height in the *previous* tenure. + let parent_header = NakamotoChainState::get_block_header( + chainstate.db(), + &block.header.parent_block_id, + )? + .ok_or_else(|| BlockValidateRejectReason { + reason_code: ValidateRejectCode::UnknownParent, + reason: "No parent block".into(), + })?; + + Self::check_block_builds_on_highest_block_in_tenure( + chainstate, + &parent_header.consensus_hash, + &block.header.parent_block_id, + )?; + } + Ok(()) + } + /// Test this block proposal against the current chain state and /// either accept or reject the proposal /// @@ -232,6 +364,18 @@ impl NakamotoBlockProposal { let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; let burn_dbconn: SortitionHandleConn = sortdb.index_handle(&sort_tip); let mut db_handle = sortdb.index_handle(&sort_tip); + + // (For the signer) + // Verify that the block's tenure is on the canonical sortition history + Self::check_block_has_valid_tenure(&db_handle, &self.block.header.consensus_hash)?; + + // (For the signer) + // Verify that this block's parent is the highest such block we can build off of + Self::check_block_has_valid_parent(chainstate, &self.block)?; + + // get the burnchain tokens spent for this block. There must be a record of this (i.e. + // there must be a block-commit for this), or otherwise this block doesn't correspond to + // any burnchain chainstate. let expected_burn_opt = NakamotoChainState::get_expected_burns(&mut db_handle, chainstate.db(), &self.block)?; if expected_burn_opt.is_none() { From cd8d8e841686f3e4ea49321b20f454988145c955 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 11:40:42 -0400 Subject: [PATCH 0948/1400] fix: get postblock proposal test to pass --- .../src/net/api/tests/postblock_proposal.rs | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 391afc949f..4f553efd21 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -229,16 +229,21 @@ fn test_try_make_response() { let tip = SortitionDB::get_canonical_burn_chain_tip(&rpc_test.peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); + + let (stacks_tip_ch, stacks_tip_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash( + rpc_test.peer_1.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bhh); + let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); let mut block = { let chainstate = rpc_test.peer_1.chainstate(); - let parent_stacks_header = NakamotoChainState::get_block_header( - chainstate.db(), - &tip.get_canonical_stacks_block_id(), - ) - .unwrap() - .unwrap(); + let parent_stacks_header = + NakamotoChainState::get_block_header(chainstate.db(), &stacks_tip) + .unwrap() + .unwrap(); let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); @@ -262,7 +267,7 @@ fn test_try_make_response() { let addr = auth.origin().address_testnet(); let mut tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); tx.chain_id = 0x80000000; - tx.auth.set_origin_nonce(34); + tx.auth.set_origin_nonce(36); tx.set_post_condition_mode(TransactionPostConditionMode::Allow); tx.set_tx_fee(300); let mut tx_signer = StacksTransactionSigner::new(&tx); @@ -271,8 +276,8 @@ fn test_try_make_response() { let mut builder = NakamotoBlockBuilder::new( &parent_stacks_header, - &tip.consensus_hash, - 25000, + &parent_stacks_header.consensus_hash, + 26000, None, None, 8, From 23a4d2ec482e5168663695080689dbcba913ef39 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 11:41:07 -0400 Subject: [PATCH 0949/1400] chore: remove wait_on_signers timeout option, and remove the integration test for verifying that it works --- testnet/stacks-node/src/config.rs | 9 -- testnet/stacks-node/src/tests/signer/mod.rs | 10 +- testnet/stacks-node/src/tests/signer/v0.rs | 108 +------------------- testnet/stacks-node/src/tests/signer/v1.rs | 14 +-- 4 files changed, 11 insertions(+), 130 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index e4751a1010..f1c3775056 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2363,8 +2363,6 @@ pub struct MinerConfig { /// When selecting the "nicest" tip, do not consider tips that are more than this many blocks /// behind the highest tip. pub max_reorg_depth: u64, - /// Amount of time while mining in nakamoto to wait for signers to respond to a proposed block - pub wait_on_signers: Duration, /// Whether to mock sign in Epoch 2.5 through the .miners and .signers contracts. This is used for testing purposes in Epoch 2.5 only. pub pre_nakamoto_mock_signing: bool, /// The minimum time to wait between mining blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined @@ -2398,8 +2396,6 @@ impl Default for MinerConfig { txs_to_consider: MemPoolWalkTxTypes::all(), filter_origins: HashSet::new(), max_reorg_depth: 3, - // TODO: update to a sane value based on stackerdb benchmarking - wait_on_signers: Duration::from_secs(200), pre_nakamoto_mock_signing: false, // Should only default true if mining key is set min_time_between_blocks_ms: DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS, } @@ -2750,7 +2746,6 @@ pub struct MinerConfigFile { pub txs_to_consider: Option, pub filter_origins: Option, pub max_reorg_depth: Option, - pub wait_on_signers_ms: Option, pub pre_nakamoto_mock_signing: Option, pub min_time_between_blocks_ms: Option, } @@ -2857,10 +2852,6 @@ impl MinerConfigFile { max_reorg_depth: self .max_reorg_depth .unwrap_or(miner_default_config.max_reorg_depth), - wait_on_signers: self - .wait_on_signers_ms - .map(Duration::from_millis) - .unwrap_or(miner_default_config.wait_on_signers), pre_nakamoto_mock_signing: self .pre_nakamoto_mock_signing .unwrap_or(pre_nakamoto_mock_signing), // Should only default true if mining key is set diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 42407a1a76..0b38a79234 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -114,15 +114,10 @@ pub struct SignerTest { } impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest> { - fn new( - num_signers: usize, - initial_balances: Vec<(StacksAddress, u64)>, - wait_on_signers: Option, - ) -> Self { + fn new(num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>) -> Self { Self::new_with_config_modifications( num_signers, initial_balances, - wait_on_signers, |_| {}, |_| {}, None, @@ -136,7 +131,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest( num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>, - wait_on_signers: Option, mut signer_config_modifier: F, mut node_config_modifier: G, btc_miner_pubkeys: Option>, @@ -167,8 +161,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = SignerTest::new(num_signers, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); signer_test.boot_to_epoch_3(); let short_timeout = Duration::from_secs(30); @@ -546,7 +546,7 @@ fn miner_gather_signatures() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(30); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let blocks_mined_before = mined_blocks.load(Ordering::SeqCst); @@ -609,7 +609,7 @@ fn mine_2_nakamoto_reward_cycles() { info!("------------------------- Test Setup -------------------------"); let nmb_reward_cycles = 2; let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(200); signer_test.boot_to_epoch_3(); let curr_reward_cycle = signer_test.get_current_reward_cycle(); @@ -793,7 +793,6 @@ fn reloads_signer_set_in() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(15)), |_config| {}, |_| {}, None, @@ -917,7 +916,6 @@ fn forked_tenure_testing( let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(15)), |config| { // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; @@ -1229,7 +1227,6 @@ fn bitcoind_forking_test() { let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(15)), ); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -1395,7 +1392,6 @@ fn multiple_miners() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(15)), |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -1657,7 +1653,6 @@ fn miner_forking() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(15)), |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -1914,7 +1909,6 @@ fn end_of_tenure() { let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(500)), ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let long_timeout = Duration::from_secs(200); @@ -2060,96 +2054,6 @@ fn end_of_tenure() { signer_test.shutdown(); } -#[test] -#[ignore] -/// This test checks that the miner will retry when signature collection times out. -fn retry_on_timeout() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 100; - let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(5)), - ); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - - signer_test.boot_to_epoch_3(); - - signer_test.mine_nakamoto_block(Duration::from_secs(30)); - - // Stall block validation so the signers will not be able to sign. - TEST_VALIDATE_STALL.lock().unwrap().replace(true); - - let proposals_before = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - - // submit a tx so that the miner will mine a block - let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - submit_tx(&http_origin, &transfer_tx); - - info!("Submitted transfer tx and waiting for block proposal"); - loop { - let blocks_proposed = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - if blocks_proposed > proposals_before { - break; - } - std::thread::sleep(Duration::from_millis(100)); - } - - info!("Block proposed, verifying that it is not processed"); - - // Wait 10 seconds to be sure that the timeout has occurred - std::thread::sleep(Duration::from_secs(10)); - assert_eq!( - signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst), - blocks_before - ); - - // Disable the stall and wait for the block to be processed on retry - info!("Disable the stall and wait for the block to be processed"); - TEST_VALIDATE_STALL.lock().unwrap().replace(false); - loop { - let blocks_mined = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - if blocks_mined > blocks_before { - break; - } - std::thread::sleep(Duration::from_millis(100)); - } - - signer_test.shutdown(); -} - #[test] #[ignore] /// This test checks that the signers will broadcast a block once they receive enough signatures. @@ -2173,7 +2077,6 @@ fn signers_broadcast_signed_blocks() { let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(5)), ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); @@ -2272,7 +2175,6 @@ fn empty_sortition() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(5)), |config| { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; @@ -2446,7 +2348,6 @@ fn mock_sign_epoch_25() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(5)), |_| {}, |node_config| { node_config.miner.pre_nakamoto_mock_signing = true; @@ -2644,7 +2545,6 @@ fn signer_set_rollover() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, initial_balances, - None, |_| {}, |naka_conf| { for toml in new_signer_configs.clone() { @@ -2875,7 +2775,6 @@ fn min_gap_between_blocks() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], - Some(Duration::from_secs(15)), |_config| {}, |config| { config.miner.min_time_between_blocks_ms = time_between_blocks_ms; @@ -2994,7 +2893,6 @@ fn duplicate_signers() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![], - None, |_| {}, |_| {}, None, diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs index 6e9ed71f36..3d6c934208 100644 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ b/testnet/stacks-node/src/tests/signer/v1.rs @@ -486,7 +486,7 @@ fn dkg() { info!("------------------------- Test Setup -------------------------"); let timeout = Duration::from_secs(200); - let mut signer_test = SignerTest::new(10, vec![], None); + let mut signer_test = SignerTest::new(10, vec![]); info!("Boot to epoch 3.0 reward calculation..."); boot_to_epoch_3_reward_set( &signer_test.running_nodes.conf, @@ -596,7 +596,7 @@ fn sign_request_rejected() { block2.header.tx_merkle_root = tx_merkle_root2; let timeout = Duration::from_secs(200); - let mut signer_test: SignerTest = SignerTest::new(10, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(10, vec![]); let _key = signer_test.boot_to_epoch_3(timeout); info!("------------------------- Test Sign -------------------------"); @@ -691,7 +691,7 @@ fn delayed_dkg() { info!("------------------------- Test Setup -------------------------"); let timeout = Duration::from_secs(200); let num_signers = 3; - let mut signer_test = SignerTest::new(num_signers, vec![], None); + let mut signer_test = SignerTest::new(num_signers, vec![]); boot_to_epoch_3_reward_set_calculation_boundary( &signer_test.running_nodes.conf, &signer_test.running_nodes.blocks_processed, @@ -884,7 +884,7 @@ fn block_proposal() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(30); let short_timeout = Duration::from_secs(30); @@ -945,7 +945,7 @@ fn mine_2_nakamoto_reward_cycles() { info!("------------------------- Test Setup -------------------------"); let nmb_reward_cycles = 2; - let mut signer_test: SignerTest = SignerTest::new(5, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(5, vec![]); let timeout = Duration::from_secs(200); let first_dkg = signer_test.boot_to_epoch_3(timeout); let curr_reward_cycle = signer_test.get_current_reward_cycle(); @@ -1020,7 +1020,7 @@ fn filter_bad_transactions() { info!("------------------------- Test Setup -------------------------"); // Advance to the prepare phase of a post epoch 3.0 reward cycle to force signers to look at the next signer transactions to compare against a proposed block - let mut signer_test: SignerTest = SignerTest::new(5, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(5, vec![]); let timeout = Duration::from_secs(200); let current_signers_dkg = signer_test.boot_to_epoch_3(timeout); let next_signers_dkg = signer_test @@ -1108,7 +1108,7 @@ fn sign_after_signer_reboot() { info!("------------------------- Test Setup -------------------------"); let num_signers = 3; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![], None); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(30); From 25e14a83d5b4a2245e429cff35f7894c843da229 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 11:41:33 -0400 Subject: [PATCH 0950/1400] fix: the miner never times out waiting for signers --- .../src/nakamoto_node/sign_coordinator.rs | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 6810afbb6b..d2c4f2b390 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -15,7 +15,7 @@ use std::collections::BTreeMap; use std::sync::mpsc::Receiver; -use std::time::{Duration, Instant}; +use std::time::Duration; use hashbrown::{HashMap, HashSet}; use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0}; @@ -73,7 +73,6 @@ pub struct SignCoordinator { wsts_public_keys: PublicKeys, is_mainnet: bool, miners_session: StackerDBSession, - signing_round_timeout: Duration, signer_entries: HashMap, weight_threshold: u32, total_weight: u32, @@ -302,7 +301,6 @@ impl SignCoordinator { wsts_public_keys, is_mainnet, miners_session, - signing_round_timeout: config.miner.wait_on_signers.clone(), next_signer_bitvec, signer_entries: signer_public_keys, weight_threshold: threshold, @@ -324,7 +322,6 @@ impl SignCoordinator { wsts_public_keys, is_mainnet, miners_session, - signing_round_timeout: config.miner.wait_on_signers.clone(), next_signer_bitvec, signer_entries: signer_public_keys, weight_threshold: threshold, @@ -485,8 +482,7 @@ impl SignCoordinator { )); }; - let start_ts = Instant::now(); - while start_ts.elapsed() <= self.signing_round_timeout { + loop { let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { Ok(event) => event, Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { @@ -630,10 +626,6 @@ impl SignCoordinator { }; } } - - Err(NakamotoNodeError::SignerSignatureError( - "Timed out waiting for group signature".into(), - )) } /// Do we ignore signer signatures? @@ -736,8 +728,7 @@ impl SignCoordinator { "threshold" => self.weight_threshold, ); - let start_ts = Instant::now(); - while start_ts.elapsed() <= self.signing_round_timeout { + loop { // look in the nakamoto staging db -- a block can only get stored there if it has // enough signing weight to clear the threshold if let Ok(Some((stored_block, _sz))) = chain_state @@ -947,9 +938,5 @@ impl SignCoordinator { return Ok(gathered_signatures.values().cloned().collect()); } } - - Err(NakamotoNodeError::SignerSignatureError( - "Timed out waiting for group signature".into(), - )) } } From 49585ebea8cb2fc031a3cd029a45d09f7e4fdad0 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 4 Sep 2024 12:14:17 -0400 Subject: [PATCH 0951/1400] test: update `multiple_miners` assertiions --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2ca6a12a8e..5d067a5964 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1563,7 +1563,7 @@ fn multiple_miners() { assert_eq!(peer_1_height, peer_2_height); assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + btc_blocks_mined); assert_eq!( - btc_blocks_mined, + btc_blocks_mined + 1, u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() ); From 603bb630f85d9d23c917ac2188ce2e529aaf013d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 4 Sep 2024 12:17:41 -0400 Subject: [PATCH 0952/1400] test: better update to `multiple_miners` --- testnet/stacks-node/src/tests/signer/v0.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5d067a5964..a0a5082b28 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1490,7 +1490,7 @@ fn multiple_miners() { let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); - let mut btc_blocks_mined = 0; + let mut btc_blocks_mined = 1; let mut miner_1_tenures = 0; let mut miner_2_tenures = 0; while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { @@ -1563,7 +1563,7 @@ fn multiple_miners() { assert_eq!(peer_1_height, peer_2_height); assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + btc_blocks_mined); assert_eq!( - btc_blocks_mined + 1, + btc_blocks_mined, u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() ); From af802a93a2dfb7ea7e7de7939ebec3b7f1c5c0df Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 4 Sep 2024 12:48:55 -0400 Subject: [PATCH 0953/1400] WIP: Add block state and aggregate signature rejections Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 132 ++++-- stacks-signer/src/chainstate.rs | 117 ++++-- stacks-signer/src/client/stackerdb.rs | 4 + stacks-signer/src/lib.rs | 2 +- stacks-signer/src/runloop.rs | 4 +- stacks-signer/src/signerdb.rs | 299 ++++++++++++-- stacks-signer/src/tests/chainstate.rs | 2 +- stacks-signer/src/v0/signer.rs | 387 +++++++++++++----- stacks-signer/src/v1/signer.rs | 4 +- .../src/tests/nakamoto_integrations.rs | 4 +- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 11 files changed, 739 insertions(+), 217 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 5f7b82a937..b82ee3bab2 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -42,6 +42,7 @@ use blockstack_lib::util_lib::boot::boot_code_id; use blockstack_lib::util_lib::signed_structured_data::{ make_structured_data_domain, structured_data_message_hash, }; +use clarity::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use clarity::types::chainstate::{ BlockHeaderHash, ConsensusHash, StacksPrivateKey, StacksPublicKey, }; @@ -615,8 +616,8 @@ impl std::fmt::Display for BlockResponse { BlockResponse::Rejected(r) => { write!( f, - "BlockRejected: signer_sighash = {}, code = {}, reason = {}", - r.reason_code, r.reason, r.signer_signature_hash + "BlockRejected: signer_sighash = {}, code = {}, reason = {}, signature = {}", + r.reason_code, r.reason, r.signer_signature_hash, r.signature ) } } @@ -629,9 +630,14 @@ impl BlockResponse { Self::Accepted((hash, sig)) } - /// Create a new rejected BlockResponse for the provided block signer signature hash and rejection code - pub fn rejected(hash: Sha512Trunc256Sum, reject_code: RejectCode) -> Self { - Self::Rejected(BlockRejection::new(hash, reject_code)) + /// Create a new rejected BlockResponse for the provided block signer signature hash and rejection code and sign it with the provided private key + pub fn rejected( + hash: Sha512Trunc256Sum, + reject_code: RejectCode, + private_key: &StacksPrivateKey, + mainnet: bool, + ) -> Self { + Self::Rejected(BlockRejection::new(hash, reject_code, private_key, mainnet)) } } @@ -677,16 +683,94 @@ pub struct BlockRejection { pub reason_code: RejectCode, /// The signer signature hash of the block that was rejected pub signer_signature_hash: Sha512Trunc256Sum, + /// The signer's signature across the rejection + pub signature: MessageSignature, + /// The chain id + pub chain_id: u32, } impl BlockRejection { /// Create a new BlockRejection for the provided block and reason code - pub fn new(signer_signature_hash: Sha512Trunc256Sum, reason_code: RejectCode) -> Self { - Self { + pub fn new( + signer_signature_hash: Sha512Trunc256Sum, + reason_code: RejectCode, + private_key: &StacksPrivateKey, + mainnet: bool, + ) -> Self { + let chain_id = if mainnet { + CHAIN_ID_MAINNET + } else { + CHAIN_ID_TESTNET + }; + let mut rejection = Self { reason: reason_code.to_string(), reason_code, signer_signature_hash, + signature: MessageSignature::empty(), + chain_id, + }; + rejection + .sign(private_key) + .expect("Failed to sign BlockRejection"); + rejection + } + + /// Create a new BlockRejection from a BlockValidateRejection + pub fn from_validate_rejection( + reject: BlockValidateReject, + private_key: &StacksPrivateKey, + mainnet: bool, + ) -> Self { + let chain_id = if mainnet { + CHAIN_ID_MAINNET + } else { + CHAIN_ID_TESTNET + }; + let mut rejection = Self { + reason: reject.reason, + reason_code: RejectCode::ValidationFailed(reject.reason_code), + signer_signature_hash: reject.signer_signature_hash, + chain_id, + signature: MessageSignature::empty(), + }; + rejection + .sign(private_key) + .expect("Failed to sign BlockRejection"); + rejection + } + + /// The signature hash for the block rejection + pub fn hash(&self) -> Sha256Sum { + let domain_tuple = make_structured_data_domain("block-rejection", "1.0.0", self.chain_id); + let data = Value::buff_from(self.signer_signature_hash.as_bytes().into()).unwrap(); + structured_data_message_hash(data, domain_tuple) + } + + /// Sign the block rejection and set the internal signature field + fn sign(&mut self, private_key: &StacksPrivateKey) -> Result<(), String> { + let signature_hash = self.hash(); + self.signature = private_key.sign(signature_hash.as_bytes())?; + Ok(()) + } + + /// Verify the rejection's signature against the provided signer public key + pub fn verify(&self, public_key: &StacksPublicKey) -> Result { + if self.signature == MessageSignature::empty() { + return Ok(false); } + let signature_hash = self.hash(); + public_key + .verify(&signature_hash.0, &self.signature) + .map_err(|e| e.to_string()) + } + + /// Recover the public key from the rejection signature + pub fn recover_public_key(&self) -> Result { + if self.signature == MessageSignature::empty() { + return Err("No signature to recover public key from"); + } + let signature_hash = self.hash(); + StacksPublicKey::recover_to_pubkey(signature_hash.as_bytes(), &self.signature) } } @@ -695,6 +779,8 @@ impl StacksMessageCodec for BlockRejection { write_next(fd, &self.reason.as_bytes().to_vec())?; write_next(fd, &self.reason_code)?; write_next(fd, &self.signer_signature_hash)?; + write_next(fd, &self.chain_id)?; + write_next(fd, &self.signature)?; Ok(()) } @@ -705,24 +791,18 @@ impl StacksMessageCodec for BlockRejection { })?; let reason_code = read_next::(fd)?; let signer_signature_hash = read_next::(fd)?; + let chain_id = read_next::(fd)?; + let signature = read_next::(fd)?; Ok(Self { reason, reason_code, signer_signature_hash, + chain_id, + signature, }) } } -impl From for BlockRejection { - fn from(reject: BlockValidateReject) -> Self { - Self { - reason: reject.reason, - reason_code: RejectCode::ValidationFailed(reject.reason_code), - signer_signature_hash: reject.signer_signature_hash, - } - } -} - impl StacksMessageCodec for RejectCode { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &(RejectCodeTypePrefix::from(self) as u8))?; @@ -792,12 +872,6 @@ impl From for SignerMessage { } } -impl From for BlockResponse { - fn from(rejection: BlockValidateReject) -> Self { - Self::Rejected(rejection.into()) - } -} - #[cfg(test)] mod test { use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; @@ -851,14 +925,20 @@ mod test { let rejection = BlockRejection::new( Sha512Trunc256Sum([0u8; 32]), RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), + &StacksPrivateKey::new(), + thread_rng().next_u32() % 2 == 0, ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) .expect("Failed to deserialize BlockRejection"); assert_eq!(rejection, deserialized_rejection); - let rejection = - BlockRejection::new(Sha512Trunc256Sum([1u8; 32]), RejectCode::ConnectivityIssues); + let rejection = BlockRejection::new( + Sha512Trunc256Sum([1u8; 32]), + RejectCode::ConnectivityIssues, + &StacksPrivateKey::new(), + thread_rng().next_u32() % 2 == 0, + ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) .expect("Failed to deserialize BlockRejection"); @@ -877,6 +957,8 @@ mod test { let response = BlockResponse::Rejected(BlockRejection::new( Sha512Trunc256Sum([1u8; 32]), RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), + &StacksPrivateKey::new(), + thread_rng().next_u32() % 2 == 0, )); let serialized_response = response.serialize_to_vec(); let deserialized_response = read_next::(&mut &serialized_response[..]) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index c35ceb67e0..adfea4900d 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -280,42 +280,19 @@ impl SortitionsView { }; if let Some(tenure_change) = block.get_tenure_change_tx_payload() { - // in tenure changes, we need to check: - // (1) if the tenure change confirms the expected parent block (i.e., - // the last block we signed in the parent tenure) - // (2) if the parent tenure was a valid choice - let confirms_expected_parent = - Self::check_tenure_change_block_confirmation(tenure_change, block, signer_db)?; - if !confirms_expected_parent { - return Ok(false); - } - // now, we have to check if the parent tenure was a valid choice. - let is_valid_parent_tenure = Self::check_parent_tenure_choice( - proposed_by.state(), + if !self.validate_tenure_change_payload( + &proposed_by, + tenure_change, block, signer_db, client, - &self.config.first_proposal_burn_block_timing, - )?; - if !is_valid_parent_tenure { - return Ok(false); - } - let last_in_tenure = signer_db - .get_last_signed_block_in_tenure(&block.header.consensus_hash) - .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; - if let Some(last_in_tenure) = last_in_tenure { - warn!( - "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. Considering proposal invalid."; - "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "last_in_tenure_signer_sighash" => %last_in_tenure.block.header.signer_signature_hash(), - ); + )? { return Ok(false); } } else { // check if the new block confirms the last block in the current tenure let confirms_latest_in_tenure = - Self::confirms_known_blocks_in(block, &block.header.consensus_hash, signer_db)?; + Self::confirms_latest_block_in_same_tenure(block, signer_db)?; if !confirms_latest_in_tenure { return Ok(false); } @@ -453,32 +430,94 @@ impl SortitionsView { Ok(true) } - fn check_tenure_change_block_confirmation( + /// Check if the tenure change block confirms the expected parent block (i.e., the last globally accepted block in the parent tenure) + fn check_tenure_change_confirms_parent( tenure_change: &TenureChangePayload, block: &NakamotoBlock, signer_db: &SignerDb, ) -> Result { - // in tenure changes, we need to check: - // (1) if the tenure change confirms the expected parent block (i.e., - // the last block we signed in the parent tenure) - // (2) if the parent tenure was a valid choice - Self::confirms_known_blocks_in(block, &tenure_change.prev_tenure_consensus_hash, signer_db) + let Some(last_globally_accepted_block) = signer_db + .get_last_globally_accepted_block(&tenure_change.prev_tenure_consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))? + else { + info!( + "Have no globally accepted blocks in the parent tenure, assuming block confirmation is correct"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "tenure" => %block.header.consensus_hash, + ); + return Ok(true); + }; + if block.header.chain_length > last_globally_accepted_block.block.header.chain_length { + Ok(true) + } else { + warn!( + "Miner's block proposal does not confirm as many blocks as we expect"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_chain_length" => block.header.chain_length, + "expected_at_least" => last_globally_accepted_block.block.header.chain_length + 1, + ); + Ok(false) + } + } + + /// in tenure changes, we need to check: + /// (1) if the tenure change confirms the expected parent block (i.e., + /// the last globally accepted block in the parent tenure) + /// (2) if the parent tenure was a valid choice + fn validate_tenure_change_payload( + &self, + proposed_by: &ProposedBy, + tenure_change: &TenureChangePayload, + block: &NakamotoBlock, + signer_db: &SignerDb, + client: &StacksClient, + ) -> Result { + // Ensure that the tenure change block confirms the expected parent block + let confirms_expected_parent = + Self::check_tenure_change_confirms_parent(tenure_change, block, signer_db)?; + if !confirms_expected_parent { + return Ok(false); + } + // now, we have to check if the parent tenure was a valid choice. + let is_valid_parent_tenure = Self::check_parent_tenure_choice( + proposed_by.state(), + block, + signer_db, + client, + &self.config.first_proposal_burn_block_timing, + )?; + if !is_valid_parent_tenure { + return Ok(false); + } + let last_in_tenure = signer_db + .get_last_globally_accepted_block(&block.header.consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; + if let Some(last_in_tenure) = last_in_tenure { + warn!( + "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "last_in_tenure_signer_sighash" => %last_in_tenure.block.header.signer_signature_hash(), + ); + return Ok(false); + } + Ok(true) } - fn confirms_known_blocks_in( + fn confirms_latest_block_in_same_tenure( block: &NakamotoBlock, - tenure: &ConsensusHash, signer_db: &SignerDb, ) -> Result { let Some(last_known_block) = signer_db - .get_last_signed_block_in_tenure(tenure) + .get_last_accepted_block(&block.header.consensus_hash) .map_err(|e| ClientError::InvalidResponse(e.to_string()))? else { info!( - "Have not signed off on any blocks in the parent tenure, assuming block confirmation is correct"; + "Have no accepted blocks in the tenure, assuming block confirmation is correct"; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "tenure" => %tenure, ); return Ok(true); }; diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index de77ccbd72..f2b574ef4f 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -234,7 +234,9 @@ mod tests { use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; + use clarity::util::secp256k1::MessageSignature; use libsigner::v0::messages::{BlockRejection, BlockResponse, RejectCode, SignerMessage}; + use rand::{thread_rng, RngCore}; use super::*; use crate::client::tests::{generate_signer_config, mock_server_from_config, write_response}; @@ -278,6 +280,8 @@ mod tests { reason: "Did not like it".into(), reason_code: RejectCode::RejectedInPriorRound, signer_signature_hash: block.header.signer_signature_hash(), + chain_id: thread_rng().next_u32(), + signature: MessageSignature::empty(), }; let signer_message = SignerMessage::BlockResponse(BlockResponse::Rejected(block_reject)); let ack = StackerDBChunkAckData { diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index e16a70b607..c61ae39731 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -80,7 +80,7 @@ pub trait Signer: Debug + Display { command: Option, ); /// Check if the signer is in the middle of processing blocks - fn has_pending_blocks(&self) -> bool; + fn has_unprocessed_blocks(&self) -> bool; } /// A wrapper around the running signer type for the signer diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 9e1083047b..86d8458e30 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -466,7 +466,9 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo std::cmp::Ordering::Equal => { // We are the next reward cycle, so check if we were registered and have any pending blocks to process match signer { - ConfiguredSigner::RegisteredSigner(signer) => !signer.has_pending_blocks(), + ConfiguredSigner::RegisteredSigner(signer) => { + !signer.has_unprocessed_blocks() + } _ => true, } } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 2d2e9cc22a..98037d991a 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::fmt::Display; use std::path::Path; use std::time::SystemTime; @@ -22,7 +23,7 @@ use blockstack_lib::util_lib::db::{ query_row, query_rows, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql, Error as DBError, }; -use clarity::types::chainstate::BurnchainHeaderHash; +use clarity::types::chainstate::{BurnchainHeaderHash, StacksAddress}; use clarity::util::get_epoch_time_secs; use libsigner::BlockProposal; use rusqlite::{ @@ -34,7 +35,7 @@ use stacks_common::codec::{read_next, write_next, Error as CodecError, StacksMes use stacks_common::types::chainstate::ConsensusHash; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; -use stacks_common::{debug, error}; +use stacks_common::{debug, define_u8_enum, error}; use wsts::net::NonceRequest; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -113,6 +114,49 @@ impl ExtraBlockInfo { } } +define_u8_enum!( +/// Block state relative to the signer's view of the stacks blockchain +BlockState { + /// The block has not yet been processed by the signer + Unprocessed = 0, + /// The block is accepted by the signer but a threshold of signers has not yet signed it + LocallyAccepted = 1, + /// The block is rejected by the signer but a threshold of signers has not accepted/rejected it yet + LocallyRejected = 2, + /// A threshold number of signers have signed the block + GloballyAccepted = 3, + /// A threshold number of signers have rejected the block + GloballyRejected = 4 +}); + +impl TryFrom for BlockState { + type Error = String; + fn try_from(value: u8) -> Result { + let state = match value { + 0 => BlockState::Unprocessed, + 1 => BlockState::LocallyAccepted, + 2 => BlockState::LocallyRejected, + 3 => BlockState::GloballyAccepted, + 4 => BlockState::GloballyRejected, + _ => return Err("Invalid block state".into()), + }; + Ok(state) + } +} + +impl Display for BlockState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let state = match self { + BlockState::Unprocessed => "Unprocessed", + BlockState::LocallyAccepted => "LocallyAccepted", + BlockState::LocallyRejected => "LocallyRejected", + BlockState::GloballyAccepted => "GloballyAccepted", + BlockState::GloballyRejected => "GloballyRejected", + }; + write!(f, "{}", state) + } +} + /// Additional Info about a proposed block #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct BlockInfo { @@ -134,6 +178,8 @@ pub struct BlockInfo { pub signed_self: Option, /// Time at which the proposal was signed by a threshold in the signer set (epoch time in seconds) pub signed_group: Option, + /// The block state relative to the signer's view of the stacks blockchain + pub state: BlockState, /// Extra data specific to v0, v1, etc. pub ext: ExtraBlockInfo, } @@ -151,6 +197,7 @@ impl From for BlockInfo { signed_self: None, signed_group: None, ext: ExtraBlockInfo::default(), + state: BlockState::Unprocessed, } } } @@ -163,18 +210,70 @@ impl BlockInfo { block_info } - /// Mark this block as valid, signed over, and record a timestamp in the block info if it wasn't + /// Mark this block as locally accepted, valid, signed over, and records a timestamp in the block info if it wasn't /// already set. - pub fn mark_signed_and_valid(&mut self) { + pub fn mark_locally_accepted(&mut self) -> Result<(), String> { self.valid = Some(true); self.signed_over = true; self.signed_self.get_or_insert(get_epoch_time_secs()); + self.move_to(BlockState::LocallyAccepted) + } + + /// Mark this block as globally accepted, valid, signed over, and records a timestamp in the block info if it wasn't + /// already set. + pub fn mark_globally_accepted(&mut self) -> Result<(), String> { + self.valid = Some(true); + self.signed_over = true; + self.signed_group.get_or_insert(get_epoch_time_secs()); + self.move_to(BlockState::GloballyAccepted) + } + + /// Mark the block as locally rejected and invalid + pub fn mark_locally_rejected(&mut self) -> Result<(), String> { + self.valid = Some(false); + self.move_to(BlockState::LocallyRejected) + } + + /// Mark the block as globally rejected and invalid + pub fn mark_globally_rejected(&mut self) -> Result<(), String> { + self.valid = Some(false); + self.move_to(BlockState::GloballyRejected) } /// Return the block's signer signature hash pub fn signer_signature_hash(&self) -> Sha512Trunc256Sum { self.block.header.signer_signature_hash() } + + /// Check if the block state transition is valid + fn check_state(&self, state: BlockState) -> bool { + let prev_state = &self.state; + match state { + BlockState::Unprocessed => { + matches!(prev_state, BlockState::Unprocessed) + } + BlockState::LocallyAccepted => { + matches!(prev_state, BlockState::Unprocessed) + } + BlockState::LocallyRejected => { + matches!(prev_state, BlockState::Unprocessed) + } + BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), + BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), + } + } + + /// Attempt to transition the block state + pub fn move_to(&mut self, state: BlockState) -> Result<(), String> { + if !self.check_state(state) { + return Err(format!( + "Invalid state transition from {} to {state}", + self.state + )); + } + self.state = state; + Ok(()) + } } /// This struct manages a SQLite database connection @@ -197,6 +296,19 @@ CREATE TABLE IF NOT EXISTS blocks ( PRIMARY KEY (reward_cycle, signer_signature_hash) ) STRICT"; +static CREATE_BLOCKS_TABLE_2: &str = " +CREATE TABLE IF NOT EXISTS blocks ( + reward_cycle INTEGER NOT NULL, + signer_signature_hash TEXT NOT NULL, + block_info TEXT NOT NULL, + consensus_hash TEXT NOT NULL, + signed_over INTEGER NOT NULL, + broadcasted INTEGER, + stacks_height INTEGER NOT NULL, + burn_block_height INTEGER NOT NULL, + PRIMARY KEY (reward_cycle, signer_signature_hash) +) STRICT"; + static CREATE_INDEXES_1: &str = " CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (signed_over); CREATE INDEX IF NOT EXISTS blocks_consensus_hash ON blocks (consensus_hash); @@ -204,6 +316,14 @@ CREATE INDEX IF NOT EXISTS blocks_valid ON blocks ((json_extract(block_info, '$. CREATE INDEX IF NOT EXISTS burn_blocks_height ON burn_blocks (block_height); "; +static CREATE_INDEXES_2: &str = r#" +CREATE INDEX IF NOT EXISTS block_signatures_on_signer_signature_hash ON block_signatures(signer_signature_hash); +"#; + +static CREATE_INDEXES_3: &str = r#" +CREATE INDEX IF NOT EXISTS block_rejection_signer_addrs_on_block_signature_hash ON block_rejection_signer_addrs(signer_signature_hash); +"#; + static CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, @@ -235,18 +355,11 @@ static DROP_SCHEMA_1: &str = " DROP TABLE IF EXISTS blocks; DROP TABLE IF EXISTS db_config;"; -static CREATE_BLOCKS_TABLE_2: &str = " -CREATE TABLE IF NOT EXISTS blocks ( - reward_cycle INTEGER NOT NULL, - signer_signature_hash TEXT NOT NULL, - block_info TEXT NOT NULL, - consensus_hash TEXT NOT NULL, - signed_over INTEGER NOT NULL, - broadcasted INTEGER, - stacks_height INTEGER NOT NULL, - burn_block_height INTEGER NOT NULL, - PRIMARY KEY (reward_cycle, signer_signature_hash) -) STRICT"; +static DROP_SCHEMA_2: &str = " + DROP TABLE IF EXISTS burn_blocks; + DROP TABLE IF EXISTS signer_states; + DROP TABLE IF EXISTS blocks; + DROP TABLE IF EXISTS db_config;"; static CREATE_BLOCK_SIGNATURES_TABLE: &str = r#" CREATE TABLE IF NOT EXISTS block_signatures ( @@ -260,9 +373,17 @@ CREATE TABLE IF NOT EXISTS block_signatures ( PRIMARY KEY (signature) ) STRICT;"#; -static CREATE_INDEXES_2: &str = r#" -CREATE INDEX IF NOT EXISTS block_signatures_on_signer_signature_hash ON block_signatures(signer_signature_hash); -"#; +static CREATE_BLOCK_REJECTION_SIGNER_ADDRS_TABLE: &str = r#" +CREATE TABLE IF NOT EXISTS block_rejection_signer_addrs ( + -- The block sighash commits to all of the stacks and burnchain state as of its parent, + -- as well as the tenure itself so there's no need to include the reward cycle. Just + -- the sighash is sufficient to uniquely identify the block across all burnchain, PoX, + -- and stacks forks. + signer_signature_hash TEXT NOT NULL, + -- the signer address that rejected the block + signer_addr TEXT NOT NULL, + PRIMARY KEY (signer_addr) +) STRICT;"#; static SCHEMA_1: &[&str] = &[ DROP_SCHEMA_0, @@ -286,9 +407,23 @@ static SCHEMA_2: &[&str] = &[ "INSERT INTO db_config (version) VALUES (2);", ]; +static SCHEMA_3: &[&str] = &[ + DROP_SCHEMA_2, + CREATE_DB_CONFIG, + CREATE_BURN_STATE_TABLE, + CREATE_BLOCKS_TABLE_2, + CREATE_SIGNER_STATE_TABLE, + CREATE_BLOCK_SIGNATURES_TABLE, + CREATE_BLOCK_REJECTION_SIGNER_ADDRS_TABLE, + CREATE_INDEXES_1, + CREATE_INDEXES_2, + CREATE_INDEXES_3, + "INSERT INTO db_config (version) VALUES (3);", +]; + impl SignerDb { /// The current schema version used in this build of the signer binary. - pub const SCHEMA_VERSION: u32 = 2; + pub const SCHEMA_VERSION: u32 = 3; /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path @@ -346,6 +481,20 @@ impl SignerDb { Ok(()) } + /// Migrate from schema 2 to schema 3 + fn schema_3_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 3 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_3.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + /// Either instantiate a new database, or migrate an existing one /// If the detected version of the existing database is 0 (i.e., a pre-migration /// logic DB, the DB will be dropped). @@ -356,7 +505,8 @@ impl SignerDb { match version { 0 => Self::schema_1_migration(&sql_tx)?, 1 => Self::schema_2_migration(&sql_tx)?, - 2 => break, + 2 => Self::schema_3_migration(&sql_tx)?, + 3 => break, x => return Err(DBError::Other(format!( "Database schema is newer than supported by this binary. Expected version = {}, Database version = {x}", Self::SCHEMA_VERSION, @@ -438,6 +588,34 @@ impl SignerDb { try_deserialize(result) } + /// Return the last accepted block in a tenure (identified by its consensus hash). + pub fn get_last_accepted_block( + &self, + tenure: &ConsensusHash, + ) -> Result, DBError> { + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') IN (?2, ?3) ORDER BY stacks_height DESC LIMIT 1"; + let args = params![ + tenure, + &BlockState::GloballyAccepted.to_string(), + &BlockState::LocallyAccepted.to_string() + ]; + let result: Option = query_row(&self.db, query, args)?; + + try_deserialize(result) + } + + /// Return the last globally accepted block in a tenure (identified by its consensus hash). + pub fn get_last_globally_accepted_block( + &self, + tenure: &ConsensusHash, + ) -> Result, DBError> { + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') = ?2 ORDER BY stacks_height DESC LIMIT 1"; + let args = params![tenure, &BlockState::GloballyAccepted.to_string()]; + let result: Option = query_row(&self.db, query, args)?; + + try_deserialize(result) + } + /// Insert or replace a burn block into the database pub fn insert_burn_block( &mut self, @@ -491,7 +669,6 @@ impl SignerDb { .as_ref() .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" }); let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, &hash)?; - debug!("Inserting block_info."; "reward_cycle" => %block_info.reward_cycle, "burn_block_height" => %block_info.burn_block_height, @@ -516,11 +693,17 @@ impl SignerDb { Ok(()) } - /// Determine if there are any pending blocks that have not yet been processed by checking the block_info.valid field - pub fn has_pending_blocks(&self, reward_cycle: u64) -> Result { - let query = "SELECT block_info FROM blocks WHERE reward_cycle = ? AND json_extract(block_info, '$.valid') IS NULL LIMIT 1"; - let result: Option = - query_row(&self.db, query, params!(&u64_to_sql(reward_cycle)?))?; + /// Determine if there are any unprocessed blocks + pub fn has_unprocessed_blocks(&self, reward_cycle: u64) -> Result { + let query = "SELECT block_info FROM blocks WHERE reward_cycle = ?1 AND json_extract(block_info, '$.state') = ?2 LIMIT 1"; + let result: Option = query_row( + &self.db, + query, + params!( + &u64_to_sql(reward_cycle)?, + &BlockState::Unprocessed.to_string() + ), + )?; Ok(result.is_some()) } @@ -559,6 +742,34 @@ impl SignerDb { .collect() } + /// Record an observed block rejection_signature + pub fn add_block_rejection_signer_addr( + &self, + block_sighash: &Sha512Trunc256Sum, + addr: &StacksAddress, + ) -> Result<(), DBError> { + let qry = "INSERT OR REPLACE INTO block_rejection_signer_addrs (signer_signature_hash, signer_addr) VALUES (?1, ?2);"; + let args = params![block_sighash, addr.to_string(),]; + + debug!("Inserting block rejection."; + "block_sighash" => %block_sighash, + "signer_address" => %addr); + + self.db.execute(qry, args)?; + Ok(()) + } + + /// Get all signer addresses that rejected the block + pub fn get_block_rejection_signer_addrs( + &self, + block_sighash: &Sha512Trunc256Sum, + ) -> Result, DBError> { + let qry = + "SELECT signer_addr FROM block_rejection_signer_addrs WHERE signer_signature_hash = ?1"; + let args = params![block_sighash]; + query_rows(&self.db, qry, args) + } + /// Mark a block as having been broadcasted pub fn set_block_broadcasted( &self, @@ -592,6 +803,18 @@ impl SignerDb { } Ok(u64::try_from(broadcasted).ok()) } + + /// Get the current state of a given block in the database + pub fn get_block_state( + &self, + reward_cycle: u64, + block_sighash: &Sha512Trunc256Sum, + ) -> Result, DBError> { + let qry = "SELECT json_extract(block_info, '$.state') FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2 LIMIT 1"; + let args = params![&u64_to_sql(reward_cycle)?, block_sighash]; + let state: Option = query_row(&self.db, qry, args)?; + try_deserialize(state) + } } fn try_deserialize(s: Option) -> Result, DBError> @@ -759,7 +982,9 @@ mod tests { .unwrap() .is_none()); - block_info.mark_signed_and_valid(); + block_info + .mark_locally_accepted() + .expect("Failed to mark block as locally accepted"); db.insert_block(&block_info).unwrap(); let fetched_info = db @@ -824,7 +1049,7 @@ mod tests { } #[test] - fn test_has_pending_blocks() { + fn test_has_unprocessed_blocks() { let db_path = tmp_db_path(); let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); let (mut block_info_1, _block_proposal) = create_block_override(|b| { @@ -841,21 +1066,27 @@ mod tests { db.insert_block(&block_info_2) .expect("Unable to insert block into db"); - assert!(db.has_pending_blocks(block_info_1.reward_cycle).unwrap()); + assert!(db + .has_unprocessed_blocks(block_info_1.reward_cycle) + .unwrap()); - block_info_1.valid = Some(true); + block_info_1.state = BlockState::LocallyRejected; db.insert_block(&block_info_1) .expect("Unable to update block in db"); - assert!(db.has_pending_blocks(block_info_1.reward_cycle).unwrap()); + assert!(db + .has_unprocessed_blocks(block_info_1.reward_cycle) + .unwrap()); - block_info_2.valid = Some(true); + block_info_2.state = BlockState::LocallyAccepted; db.insert_block(&block_info_2) .expect("Unable to update block in db"); - assert!(!db.has_pending_blocks(block_info_1.reward_cycle).unwrap()); + assert!(!db + .has_unprocessed_blocks(block_info_1.reward_cycle) + .unwrap()); } #[test] diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index d0c7f1d9f3..1e12eeee5a 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -229,7 +229,7 @@ fn reorg_timing_testing( reward_cycle: 1, }; let mut block_info_1 = BlockInfo::from(block_proposal_1); - block_info_1.mark_signed_and_valid(); + block_info_1.mark_locally_accepted().unwrap(); signer_db.insert_block(&block_info_1).unwrap(); let sortition_time = SystemTime::UNIX_EPOCH diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 53a288b7f5..3f99860ae2 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -16,14 +16,17 @@ use std::collections::{BTreeMap, HashMap}; use std::fmt::Debug; use std::sync::mpsc::Sender; -use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; -use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use blockstack_lib::net::api::postblock_proposal::{ + BlockValidateOk, BlockValidateReject, BlockValidateResponse, +}; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; use clarity::util::secp256k1::Secp256k1PublicKey; use libsigner::v0::messages::{ - BlockResponse, MessageSlotID, MockProposal, MockSignature, RejectCode, SignerMessage, + BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, RejectCode, + SignerMessage, }; use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; @@ -37,7 +40,7 @@ use crate::chainstate::{ProposalEvalConfig, SortitionsView}; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; use crate::runloop::{RunLoopCommand, SignerResult}; -use crate::signerdb::{BlockInfo, SignerDb}; +use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; /// The stacks signer registered for the reward cycle @@ -128,10 +131,7 @@ impl SignerTrait for Signer { let SignerMessage::BlockResponse(block_response) = message else { continue; }; - let BlockResponse::Accepted((block_hash, signature)) = block_response else { - continue; - }; - self.handle_block_signature(stacks_client, block_hash, signature); + self.handle_block_response(stacks_client, block_response); } } SignerEvent::MinerMessages(messages, miner_pubkey) => { @@ -217,9 +217,9 @@ impl SignerTrait for Signer { } } - fn has_pending_blocks(&self) -> bool { + fn has_unprocessed_blocks(&self) -> bool { self.signer_db - .has_pending_blocks(self.reward_cycle) + .has_unprocessed_blocks(self.reward_cycle) .unwrap_or_else(|e| { error!("{self}: Failed to check for pending blocks: {e:?}",); // Assume we have pending blocks to prevent premature cleanup @@ -300,6 +300,8 @@ impl Signer { BlockResponse::rejected( block_info.signer_signature_hash(), RejectCode::RejectedInPriorRound, + &self.private_key, + self.mainnet, ) }; Some(response) @@ -389,6 +391,8 @@ impl Signer { Some(BlockResponse::rejected( block_proposal.block.header.signer_signature_hash(), RejectCode::ConnectivityIssues, + &self.private_key, + self.mainnet, )) } // Block proposal is bad @@ -401,6 +405,8 @@ impl Signer { Some(BlockResponse::rejected( block_proposal.block.header.signer_signature_hash(), RejectCode::SortitionViewMismatch, + &self.private_key, + self.mainnet, )) } // Block proposal passed check, still don't know if valid @@ -415,6 +421,8 @@ impl Signer { Some(BlockResponse::rejected( block_proposal.block.header.signer_signature_hash(), RejectCode::NoSortitionView, + &self.private_key, + self.mainnet, )) }; @@ -448,6 +456,104 @@ impl Signer { .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); } + /// Handle block response messages from a signer + fn handle_block_response( + &mut self, + stacks_client: &StacksClient, + block_response: &BlockResponse, + ) { + match block_response { + BlockResponse::Accepted((block_hash, signature)) => { + self.handle_block_signature(stacks_client, block_hash, signature); + } + BlockResponse::Rejected(block_rejection) => { + self.handle_block_rejection(block_rejection); + } + } + } + /// Handle the block validate ok response. Returns our block response if we have one + fn handle_block_validate_ok( + &mut self, + stacks_client: &StacksClient, + block_validate_ok: &BlockValidateOk, + ) -> Option { + crate::monitoring::increment_block_validation_responses(true); + let signer_signature_hash = block_validate_ok.signer_signature_hash; + // For mutability reasons, we need to take the block_info out of the map and add it back after processing + let mut block_info = match self + .signer_db + .block_lookup(self.reward_cycle, &signer_signature_hash) + { + Ok(Some(block_info)) => block_info, + Ok(None) => { + // We have not seen this block before. Why are we getting a response for it? + debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); + return None; + } + Err(e) => { + error!("{self}: Failed to lookup block in signer db: {e:?}",); + return None; + } + }; + if let Err(e) = block_info.mark_locally_accepted() { + warn!("{self}: Failed to mark block as locally accepted: {e:?}",); + return None; + } + let signature = self + .private_key + .sign(&signer_signature_hash.0) + .expect("Failed to sign block"); + + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + // have to save the signature _after_ the block info + self.handle_block_signature( + stacks_client, + &block_info.signer_signature_hash(), + &signature, + ); + Some(BlockResponse::accepted(signer_signature_hash, signature)) + } + + /// Handle the block validate reject response. Returns our block response if we have one + fn handle_block_validate_reject( + &mut self, + block_validate_reject: &BlockValidateReject, + ) -> Option { + crate::monitoring::increment_block_validation_responses(false); + let signer_signature_hash = block_validate_reject.signer_signature_hash; + let mut block_info = match self + .signer_db + .block_lookup(self.reward_cycle, &signer_signature_hash) + { + Ok(Some(block_info)) => block_info, + Ok(None) => { + // We have not seen this block before. Why are we getting a response for it? + debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); + return None; + } + Err(e) => { + error!("{self}: Failed to lookup block in signer db: {e:?}"); + return None; + } + }; + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + return None; + } + let block_rejection = BlockRejection::from_validate_rejection( + block_validate_reject.clone(), + &self.private_key, + self.mainnet, + ); + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + self.handle_block_rejection(&block_rejection); + Some(BlockResponse::Rejected(block_rejection)) + } + /// Handle the block validate response returned from our prior calls to submit a block for validation fn handle_block_validate_response( &mut self, @@ -455,68 +561,20 @@ impl Signer { block_validate_response: &BlockValidateResponse, ) { info!("{self}: Received a block validate response: {block_validate_response:?}"); - let (response, block_info, signature_opt) = match block_validate_response { + let block_response = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { - crate::monitoring::increment_block_validation_responses(true); - let signer_signature_hash = block_validate_ok.signer_signature_hash; - // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { - Ok(Some(block_info)) => block_info, - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); - return; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}",); - return; - } - }; - block_info.mark_signed_and_valid(); - let signature = self - .private_key - .sign(&signer_signature_hash.0) - .expect("Failed to sign block"); - - ( - BlockResponse::accepted(signer_signature_hash, signature), - block_info, - Some(signature.clone()), - ) + self.handle_block_validate_ok(stacks_client, block_validate_ok) } BlockValidateResponse::Reject(block_validate_reject) => { - crate::monitoring::increment_block_validation_responses(false); - let signer_signature_hash = block_validate_reject.signer_signature_hash; - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { - Ok(Some(block_info)) => block_info, - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); - return; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}"); - return; - } - }; - block_info.valid = Some(false); - ( - BlockResponse::from(block_validate_reject.clone()), - block_info, - None, - ) + self.handle_block_validate_reject(block_validate_reject) } }; + let Some(response) = block_response else { + return; + }; // Submit a proposal response to the .signers contract for miners info!( "{self}: Broadcasting a block response to stacks node: {response:?}"; - "signer_sighash" => %block_info.signer_signature_hash(), ); match self .stackerdb @@ -530,18 +588,6 @@ impl Signer { warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); } } - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - - if let Some(signature) = signature_opt { - // have to save the signature _after_ the block info - self.handle_block_signature( - stacks_client, - &block_info.signer_signature_hash(), - &signature, - ); - } } /// Compute the signing weight, given a list of signatures @@ -567,6 +613,99 @@ impl Signer { .unwrap_or_else(|_| panic!("FATAL: total weight exceeds u32::MAX")) } + /// Handle an observed rejection from another signer + fn handle_block_rejection(&mut self, rejection: &BlockRejection) { + debug!("{self}: Received a block-reject signature: {rejection:?}"); + + let block_hash = &rejection.signer_signature_hash; + let signature = &rejection.signature; + + let mut block_info = match self.signer_db.block_lookup(self.reward_cycle, block_hash) { + Ok(Some(block_info)) => { + if block_info.state == BlockState::GloballyRejected + || block_info.state == BlockState::GloballyAccepted + { + debug!("{self}: Received block rejection for a block that is already marked as {}. Ignoring...", block_info.state); + return; + } + block_info + } + Ok(None) => { + debug!("{self}: Received block rejection for a block we have not seen before. Ignoring..."); + return; + } + Err(e) => { + warn!("{self}: Failed to load block state: {e:?}",); + return; + } + }; + + // recover public key + let Ok(public_key) = rejection.recover_public_key() else { + debug!("{self}: Received block rejection with an unrecovarable signature. Will not store."; + "block_hash" => %block_hash, + "signature" => %signature + ); + return; + }; + + let signer_address = StacksAddress::p2pkh(self.mainnet, &public_key); + + // authenticate the signature -- it must be signed by one of the stacking set + let is_valid_sig = self + .signer_addresses + .iter() + .find(|addr| { + // it only matters that the address hash bytes match + signer_address.bytes == addr.bytes + }) + .is_some(); + + if !is_valid_sig { + debug!("{self}: Receive block rejection with an invalid signature. Will not store."; + "block_hash" => %block_hash, + "signature" => %signature + ); + return; + } + + // signature is valid! store it + if let Err(e) = self + .signer_db + .add_block_rejection_signer_addr(block_hash, &signer_address) + { + warn!("{self}: Failed to save block rejection signature: {e:?}",); + } + + // do we have enough signatures to mark a block a globally rejected? + // i.e. is (set-size) - (threshold) + 1 reached. + let rejection_addrs = match self.signer_db.get_block_rejection_signer_addrs(block_hash) { + Ok(addrs) => addrs, + Err(e) => { + warn!("{self}: Failed to load block rejection addresses: {e:?}.",); + return; + } + }; + let total_reject_weight = self.compute_signature_signing_weight(rejection_addrs.iter()); + let total_weight = self.compute_signature_total_weight(); + + let min_weight = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight) + .unwrap_or_else(|_| { + panic!("{self}: Failed to compute threshold weight for {total_weight}") + }); + if total_reject_weight.saturating_add(min_weight) <= total_weight { + // Not enough rejection signatures to make a decision + return; + } + debug!("{self}: {total_reject_weight}/{total_weight} signers voteed to reject the block {block_hash}"); + if let Err(e) = block_info.mark_globally_rejected() { + warn!("{self}: Failed to mark block as globally rejected: {e:?}",); + } + if let Err(e) = self.signer_db.insert_block(&block_info) { + warn!("{self}: Failed to update block state: {e:?}",); + } + } + /// Handle an observed signature from another signer fn handle_block_signature( &mut self, @@ -574,26 +713,27 @@ impl Signer { block_hash: &Sha512Trunc256Sum, signature: &MessageSignature, ) { - if !self.broadcast_signed_blocks { - debug!("{self}: Will ignore block-accept signature, since configured not to broadcast signed blocks"); - return; - } - debug!("{self}: Received a block-accept signature: ({block_hash}, {signature})"); - // have we broadcasted before? - if let Some(ts) = self + // Have we already processed this block? + match self .signer_db - .get_block_broadcasted(self.reward_cycle, block_hash) - .unwrap_or_else(|_| { - panic!("{self}: failed to determine if block {block_hash} was broadcasted") - }) + .get_block_state(self.reward_cycle, block_hash) { - debug!( - "{self}: have already broadcasted block {} at {}, so will not re-attempt", - block_hash, ts - ); - return; + Ok(Some(state)) => { + if state == BlockState::GloballyAccepted || state == BlockState::GloballyRejected { + debug!("{self}: Received block signature for a block that is already marked as {}. Ignoring...", state); + return; + } + } + Ok(None) => { + debug!("{self}: Received block signature for a block we have not seen before. Ignoring..."); + return; + } + Err(e) => { + warn!("{self}: Failed to load block state: {e:?}",); + return; + } } // recover public key @@ -611,7 +751,7 @@ impl Signer { .signer_addresses .iter() .find(|addr| { - let stacker_address = StacksAddress::p2pkh(true, &public_key); + let stacker_address = StacksAddress::p2pkh(self.mainnet, &public_key); // it only matters that the address hash bytes match stacker_address.bytes == addr.bytes @@ -676,9 +816,11 @@ impl Signer { warn!("{self}: No such block {block_hash}"); return; }; - - // record time at which we reached the threshold - block_info.signed_group = Some(get_epoch_time_secs()); + // move block to globally accepted state. If this is not possible, we have a bug in our block handling logic. + if let Err(e) = block_info.mark_globally_accepted() { + // Do not abort as we should still try to store the block signature threshold + warn!("{self}: Failed to mark block as globally accepted: {e:?}"); + } let _ = self.signer_db.insert_block(&block_info).map_err(|e| { warn!( "Failed to set group threshold signature timestamp for {}: {:?}", @@ -687,6 +829,25 @@ impl Signer { e }); + if self.broadcast_signed_blocks { + self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); + } else { + debug!( + "{self}: Not broadcasting signed block {block_hash} since broadcast_signed_blocks is false"; + "stacks_block_id" => %block_info.block.block_id(), + "parent_block_id" => %block_info.block.header.parent_block_id, + "burnchain_consensus_hash" => %block_info.block.header.consensus_hash + ); + } + } + + fn broadcast_signed_block( + &self, + stacks_client: &StacksClient, + mut block: NakamotoBlock, + addrs_to_sigs: &HashMap, + ) { + let block_hash = block.header.signer_signature_hash(); // collect signatures for the block let signatures: Vec<_> = self .signer_addresses @@ -694,30 +855,30 @@ impl Signer { .filter_map(|addr| addrs_to_sigs.get(addr).cloned()) .collect(); - let mut block = block_info.block; + block.header.signer_signature_hash(); block.header.signer_signature = signatures; debug!( "{self}: Broadcasting Stacks block {} to node", &block.block_id() ); - let broadcasted = stacks_client - .post_block(&block) - .map_err(|e| { - warn!( - "{self}: Failed to post block {block_hash} (id {}): {e:?}", - &block.block_id() - ); - e - }) - .is_ok(); - if broadcasted { - self.signer_db - .set_block_broadcasted(self.reward_cycle, block_hash, get_epoch_time_secs()) - .unwrap_or_else(|_| { - panic!("{self}: failed to determine if block {block_hash} was broadcasted") - }); + if let Err(e) = stacks_client.post_block(&block) { + warn!( + "{self}: Failed to post block {block_hash}: {e:?}"; + "stacks_block_id" => %block.block_id(), + "parent_block_id" => %block.header.parent_block_id, + "burnchain_consensus_hash" => %block.header.consensus_hash + ); + return; + } + + if let Err(e) = self.signer_db.set_block_broadcasted( + self.reward_cycle, + &block_hash, + get_epoch_time_secs(), + ) { + warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); } } diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index fca9282ec5..08ccde5a92 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -273,9 +273,9 @@ impl SignerTrait for Signer { self.process_next_command(stacks_client, current_reward_cycle); } - fn has_pending_blocks(&self) -> bool { + fn has_unprocessed_blocks(&self) -> bool { self.signer_db - .has_pending_blocks(self.reward_cycle) + .has_unprocessed_blocks(self.reward_cycle) .unwrap_or_else(|e| { error!("{self}: Failed to check if there are pending blocks: {e:?}"); // Assume there are pending blocks to prevent premature cleanup diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 9f30ea2908..97b612747c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -89,7 +89,7 @@ use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; -use stacks_signer::signerdb::{BlockInfo, ExtraBlockInfo, SignerDb}; +use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; @@ -5498,6 +5498,7 @@ fn signer_chainstate() { signed_self: None, signed_group: None, ext: ExtraBlockInfo::None, + state: BlockState::Unprocessed, }) .unwrap(); @@ -5575,6 +5576,7 @@ fn signer_chainstate() { signed_self: None, signed_group: None, ext: ExtraBlockInfo::None, + state: BlockState::Unprocessed, }) .unwrap(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5a0c294329..1a5baefef9 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -501,6 +501,7 @@ fn block_proposal_rejection() { reason: _reason, reason_code, signer_signature_hash, + .. })) = message { if signer_signature_hash == block_signer_signature_hash_1 { From 9eef4066cce42414a0d0690de4da23027b8292ba Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 28 Aug 2024 14:49:59 -0400 Subject: [PATCH 0954/1400] chore: remove spurious deadlock condition arising from needlessly opening a transaction whenever we open the sortition DB --- stackslib/src/chainstate/burn/db/sortdb.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 15a3bf5641..b538ae17fa 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3456,6 +3456,14 @@ impl SortitionDB { SortitionDB::apply_schema_9(&tx.deref(), epochs)?; tx.commit()?; } else if version == expected_version { + // this transaction is almost never needed + let validated_epochs = StacksEpoch::validate_epochs(epochs); + let existing_epochs = Self::get_stacks_epochs(self.conn())?; + if existing_epochs == validated_epochs { + return Ok(()); + } + + // epochs are out of date let tx = self.tx_begin()?; SortitionDB::validate_and_replace_epochs(&tx, epochs)?; tx.commit()?; From 33ec49af9906a97a7c820da92b84bd05f84a179b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 4 Sep 2024 13:45:14 -0400 Subject: [PATCH 0955/1400] Fix miners to not accept multiple messages from the same signer for the same block Signed-off-by: Jacinta Ferrant --- .../src/nakamoto_node/sign_coordinator.rs | 176 ++++++++++-------- 1 file changed, 96 insertions(+), 80 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index d2c4f2b390..8104d2ebd2 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -722,6 +722,7 @@ impl SignCoordinator { let mut total_weight_signed: u32 = 0; let mut total_reject_weight: u32 = 0; + let mut responded_signers = HashSet::new(); let mut gathered_signatures = BTreeMap::new(); info!("SignCoordinator: beginning to watch for block signatures OR posted blocks."; @@ -800,24 +801,108 @@ impl SignCoordinator { ); for (message, slot_id) in messages.into_iter().zip(slot_ids) { - let (response_hash, signature) = match message { + let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { + return Err(NakamotoNodeError::SignerSignatureError( + "Signer entry not found".into(), + )); + }; + let Ok(signer_pubkey) = StacksPublicKey::from_slice(&signer_entry.signing_key) + else { + return Err(NakamotoNodeError::SignerSignatureError( + "Failed to parse signer public key".into(), + )); + }; + + if responded_signers.contains(&signer_pubkey) { + debug!( + "Signer {slot_id} already responded for block {}. Ignoring {message:?}.", block.header.signer_signature_hash(); + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); + continue; + } + + match message { SignerMessageV0::BlockResponse(BlockResponse::Accepted(( response_hash, signature, - ))) => (response_hash, signature), - SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { - let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { - return Err(NakamotoNodeError::SignerSignatureError( - "Signer entry not found".into(), - )); + ))) => { + let block_sighash = block.header.signer_signature_hash(); + if block_sighash != response_hash { + warn!( + "Processed signature for a different block. Will try to continue."; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "response_hash" => %response_hash, + "slot_id" => slot_id, + "reward_cycle_id" => reward_cycle_id, + "response_hash" => %response_hash + ); + continue; + } + debug!("SignCoordinator: Received valid signature from signer"; "slot_id" => slot_id, "signature" => %signature); + let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) + else { + warn!("Got invalid signature from a signer. Ignoring."); + continue; }; - if rejected_data.signer_signature_hash - != block.header.signer_signature_hash() - { - debug!("Received rejected block response for a block besides my own. Ignoring."); + if !valid_sig { + warn!( + "Processed signature but didn't validate over the expected block. Ignoring"; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "slot_id" => slot_id, + ); + continue; + } + if !gathered_signatures.contains_key(&slot_id) { + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } + + if Self::fault_injection_ignore_signatures() { + warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => total_weight_signed, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); continue; } + info!("SignCoordinator: Signature Added to block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => total_weight_signed, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); + gathered_signatures.insert(slot_id, signature); + responded_signers.insert(signer_pubkey); + } + SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { + let rejected_pubkey = match rejected_data.recover_public_key() { + Ok(rejected_pubkey) => { + if rejected_pubkey != signer_pubkey { + warn!("Recovered public key from rejected data does not match signer's public key. Ignoring."); + continue; + } + rejected_pubkey + } + Err(e) => { + warn!("Failed to recover public key from rejected data: {e:?}. Ignoring."); + continue; + } + }; + responded_signers.insert(rejected_pubkey); debug!( "Signer {} rejected our block {}/{}", slot_id, @@ -858,75 +943,6 @@ impl SignCoordinator { continue; } }; - let block_sighash = block.header.signer_signature_hash(); - if block_sighash != response_hash { - warn!( - "Processed signature for a different block. Will try to continue."; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, - "response_hash" => %response_hash, - "slot_id" => slot_id, - "reward_cycle_id" => reward_cycle_id, - "response_hash" => %response_hash - ); - continue; - } - debug!("SignCoordinator: Received valid signature from signer"; "slot_id" => slot_id, "signature" => %signature); - let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { - return Err(NakamotoNodeError::SignerSignatureError( - "Signer entry not found".into(), - )); - }; - let Ok(signer_pubkey) = StacksPublicKey::from_slice(&signer_entry.signing_key) - else { - return Err(NakamotoNodeError::SignerSignatureError( - "Failed to parse signer public key".into(), - )); - }; - let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) else { - warn!("Got invalid signature from a signer. Ignoring."); - continue; - }; - if !valid_sig { - warn!( - "Processed signature but didn't validate over the expected block. Ignoring"; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, - "slot_id" => slot_id, - ); - continue; - } - if !gathered_signatures.contains_key(&slot_id) { - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); - } - - if Self::fault_injection_ignore_signatures() { - warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; - "block_signer_sighash" => %block_sighash, - "signer_pubkey" => signer_pubkey.to_hex(), - "signer_slot_id" => slot_id, - "signature" => %signature, - "signer_weight" => signer_entry.weight, - "total_weight_signed" => total_weight_signed, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - continue; - } - - info!("SignCoordinator: Signature Added to block"; - "block_signer_sighash" => %block_sighash, - "signer_pubkey" => signer_pubkey.to_hex(), - "signer_slot_id" => slot_id, - "signature" => %signature, - "signer_weight" => signer_entry.weight, - "total_weight_signed" => total_weight_signed, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - gathered_signatures.insert(slot_id, signature); } // After gathering all signatures, return them if we've hit the threshold From afdaaf6b04d72b1d3c58f42bf7644b975ef1ded1 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 4 Sep 2024 10:47:07 -0700 Subject: [PATCH 0956/1400] Add deadlock fix addtion to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2add5b99f6..5302fc60c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Fix block proposal rejection test (#5084) - Mock signing revamp (#5070) - Multi miner fixes jude (#5040) +- Remove spurious deadlock condition whenever the sortition DB is opened ## [2.5.0.0.6] From cd8d5d4c5969b2591431c490f13bc1f1b09ed6e3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 14:54:35 -0400 Subject: [PATCH 0957/1400] feat: add /v3/tenures/tip/{:consensus_hash} for getting the highest block header in a tenure --- stackslib/src/chainstate/stacks/db/mod.rs | 2 +- stackslib/src/net/api/gettenuretip.rs | 184 ++++++++++++++++++++++ stackslib/src/net/api/mod.rs | 2 + 3 files changed, 187 insertions(+), 1 deletion(-) create mode 100644 stackslib/src/net/api/gettenuretip.rs diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index e8b5c7bb41..ed3158c761 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -160,7 +160,7 @@ pub struct MinerPaymentSchedule { pub vtxindex: u32, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum StacksBlockHeaderTypes { Epoch2(StacksBlockHeader), Nakamoto(NakamotoBlockHeader), diff --git a/stackslib/src/net/api/gettenuretip.rs b/stackslib/src/net/api/gettenuretip.rs new file mode 100644 index 0000000000..328aafda4d --- /dev/null +++ b/stackslib/src/net/api/gettenuretip.rs @@ -0,0 +1,184 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use regex::{Captures, Regex}; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; +use {serde, serde_json}; + +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; +use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; +use crate::chainstate::stacks::Error as ChainError; +use crate::net::api::getblock_v3::NakamotoBlockStream; +use crate::net::http::{ + parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, + HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, +}; +use crate::net::httpcore::{ + request, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Clone)] +pub struct RPCNakamotoTenureTipRequestHandler { + consensus_hash: Option, +} + +impl RPCNakamotoTenureTipRequestHandler { + pub fn new() -> Self { + Self { + consensus_hash: None, + } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCNakamotoTenureTipRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v3/tenures/tip/(?P[0-9a-f]{40})$"#).unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v3/tenures/tip/:consensus_hash" + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + let consensus_hash = request::get_consensus_hash(captures, "consensus_hash")?; + self.consensus_hash = Some(consensus_hash); + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCNakamotoTenureTipRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.consensus_hash = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + _contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let consensus_hash = self + .consensus_hash + .take() + .ok_or(NetError::SendError("`consensus_hash` not set".into()))?; + + let tenure_tip_resp = node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + let header_info = match NakamotoChainState::get_highest_known_block_header_in_tenure(chainstate.db(), &consensus_hash) { + Ok(Some(header)) => header, + Ok(None) => { + let msg = format!( + "No blocks in tenure {}", + &consensus_hash + ); + debug!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(msg), + )); + } + Err(e) => { + let msg = format!( + "Failed to query tenure blocks by consensus '{}': {:?}", + consensus_hash, &e + ); + error!("{}", &msg); + return Err(StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(msg), + )); + } + }; + Ok(header_info.anchored_header) + }); + + let tenure_tip = match tenure_tip_resp { + Ok(tenure_tip) => tenure_tip, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + + let preamble = HttpResponsePreamble::ok_json(&preamble); + let body = HttpResponseContents::try_from_json(&tenure_tip)?; + Ok((preamble, body)) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCNakamotoTenureTipRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let tenure_tip: StacksBlockHeaderTypes = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(tenure_tip)?) + } +} + +impl StacksHttpRequest { + /// Make a new getinfo request to this endpoint + pub fn new_get_tenure_tip(host: PeerHost, consensus_hash: &ConsensusHash) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v3/tenures/tip/{}", consensus_hash), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_tenure_tip(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let tenure_tip: StacksBlockHeaderTypes = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(tenure_tip) + } +} diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 5bbc6281a2..0246ac3152 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -62,6 +62,7 @@ pub mod getstackers; pub mod getstxtransfercost; pub mod gettenure; pub mod gettenureinfo; +pub mod gettenuretip; pub mod gettransaction_unconfirmed; pub mod liststackerdbreplicas; pub mod postblock; @@ -120,6 +121,7 @@ impl StacksHttp { self.register_rpc_endpoint(getsortition::GetSortitionHandler::new()); self.register_rpc_endpoint(gettenure::RPCNakamotoTenureRequestHandler::new()); self.register_rpc_endpoint(gettenureinfo::RPCNakamotoTenureInfoRequestHandler::new()); + self.register_rpc_endpoint(gettenuretip::RPCNakamotoTenureTipRequestHandler::new()); self.register_rpc_endpoint(get_tenures_fork_info::GetTenuresForkInfo::default()); self.register_rpc_endpoint( gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(), From b675546d0a1fb93a8d41406cd14d017b61c5da4d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 4 Sep 2024 15:49:56 -0400 Subject: [PATCH 0958/1400] test: fix `follower_bootup` integration test --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 0eaca05245..4839bee3be 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3111,6 +3111,7 @@ fn follower_bootup() { wait_for_first_naka_block_commit(60, &commits_submitted); let mut follower_conf = naka_conf.clone(); + follower_conf.node.miner = false; follower_conf.events_observers.clear(); follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir); follower_conf.node.seed = vec![0x01; 32]; From f5f6b99f30ca6b576778c499817b197eb4c9f386 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 4 Sep 2024 16:03:51 -0400 Subject: [PATCH 0959/1400] test: fix mistake on last update to `multiple_miners` test --- testnet/stacks-node/src/tests/signer/v0.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a0a5082b28..4d6ba5b806 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1561,7 +1561,10 @@ fn multiple_miners() { let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); assert_eq!(peer_1_height, peer_2_height); - assert_eq!(peer_1_height, pre_nakamoto_peer_1_height + btc_blocks_mined); + assert_eq!( + peer_1_height, + pre_nakamoto_peer_1_height + btc_blocks_mined - 1 + ); assert_eq!( btc_blocks_mined, u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() From 0ec5688fcb2e92745badadff4b93eab24d9c816f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 4 Sep 2024 16:55:52 -0400 Subject: [PATCH 0960/1400] WIP: broken check_proposal reorg timing test Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 79 +++++++++++---- stacks-signer/src/client/mod.rs | 7 ++ stacks-signer/src/client/stacks_client.rs | 59 ++++++++++- stacks-signer/src/tests/chainstate.rs | 97 ++++++++++++++----- stacks-signer/src/v0/signer.rs | 3 +- .../src/tests/nakamoto_integrations.rs | 94 +++++++++++++++--- 6 files changed, 280 insertions(+), 59 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index adfea4900d..b7a8272040 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -27,7 +27,7 @@ use stacks_common::{info, warn}; use crate::client::{ClientError, StacksClient}; use crate::config::SignerConfig; -use crate::signerdb::SignerDb; +use crate::signerdb::{BlockState, SignerDb}; #[derive(thiserror::Error, Debug)] /// Error type for the signer chainstate module @@ -185,9 +185,10 @@ impl SortitionsView { pub fn check_proposal( &mut self, client: &StacksClient, - signer_db: &SignerDb, + signer_db: &mut SignerDb, block: &NakamotoBlock, block_pk: &StacksPublicKey, + reward_cycle: u64, ) -> Result { if self .cur_sortition @@ -284,6 +285,7 @@ impl SortitionsView { &proposed_by, tenure_change, block, + reward_cycle, signer_db, client, )? { @@ -434,21 +436,56 @@ impl SortitionsView { fn check_tenure_change_confirms_parent( tenure_change: &TenureChangePayload, block: &NakamotoBlock, - signer_db: &SignerDb, + reward_cycle: u64, + signer_db: &mut SignerDb, + client: &StacksClient, ) -> Result { - let Some(last_globally_accepted_block) = signer_db + // If the tenure change block confirms the expected parent block, it should confirm at least one more block than the last globally accepted block in the parent tenure. + let last_globally_accepted_block = signer_db .get_last_globally_accepted_block(&tenure_change.prev_tenure_consensus_hash) - .map_err(|e| ClientError::InvalidResponse(e.to_string()))? - else { - info!( - "Have no globally accepted blocks in the parent tenure, assuming block confirmation is correct"; - "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "tenure" => %block.header.consensus_hash, - ); - return Ok(true); + .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; + + if let Some(global_info) = last_globally_accepted_block { + if block.header.chain_length <= global_info.block.header.chain_length { + warn!( + "Miner's block proposal does not confirm as many blocks as we expect"; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_chain_length" => block.header.chain_length, + "expected_at_least" => global_info.block.header.chain_length + 1, + ); + return Ok(false); + } + } + + let tip = match client.get_tenure_tip(&tenure_change.prev_tenure_consensus_hash) { + Ok(tip) => tip, + Err(e) => { + warn!( + "Miner block proposal contains a tenure change, but failed to fetch the tenure tip for the parent tenure: {e:?}. Considering proposal invalid."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "parent_tenure" => %tenure_change.prev_tenure_consensus_hash, + ); + return Ok(false); + } }; - if block.header.chain_length > last_globally_accepted_block.block.header.chain_length { + if let Some(nakamoto_tip) = tip.as_stacks_nakamoto() { + // If we have seen this block already, make sure its state is updated to globally accepted + if let Ok(Some(mut block_info)) = + signer_db.block_lookup(reward_cycle, &nakamoto_tip.signer_signature_hash()) + { + if block_info.state != BlockState::GloballyAccepted { + if let Err(e) = block_info.mark_globally_accepted() { + warn!("Failed to update block info in db: {e}"); + } else if let Err(e) = signer_db.insert_block(&block_info) { + warn!("Failed to update block info in db: {e}"); + } + } + } + } + let tip_height = tip.height(); + if block.header.chain_length > tip_height { Ok(true) } else { warn!( @@ -456,7 +493,7 @@ impl SortitionsView { "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, - "expected_at_least" => last_globally_accepted_block.block.header.chain_length + 1, + "expected_at_least" => tip_height + 1, ); Ok(false) } @@ -471,12 +508,18 @@ impl SortitionsView { proposed_by: &ProposedBy, tenure_change: &TenureChangePayload, block: &NakamotoBlock, - signer_db: &SignerDb, + reward_cycle: u64, + signer_db: &mut SignerDb, client: &StacksClient, ) -> Result { // Ensure that the tenure change block confirms the expected parent block - let confirms_expected_parent = - Self::check_tenure_change_confirms_parent(tenure_change, block, signer_db)?; + let confirms_expected_parent = Self::check_tenure_change_confirms_parent( + tenure_change, + block, + reward_cycle, + signer_db, + client, + )?; if !confirms_expected_parent { return Ok(false); } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 32951d7990..b32f465b11 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -123,6 +123,7 @@ pub(crate) mod tests { use std::net::{SocketAddr, TcpListener}; use blockstack_lib::chainstate::stacks::boot::POX_4_NAME; + use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::net::api::getaccount::AccountEntryResponse; use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::{ @@ -597,4 +598,10 @@ pub(crate) mod tests { let clarity_value = ClarityValue::UInt(threshold as u128); build_read_only_response(&clarity_value) } + + pub fn build_get_tenure_tip_response(header_types: &StacksBlockHeaderTypes) -> String { + let response_json = + serde_json::to_string(header_types).expect("Failed to serialize tenure tip info"); + format!("HTTP/1.1 200 OK\n\n{response_json}") + } } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cd65f7914b..05c3b0f156 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -21,6 +21,7 @@ use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ NakamotoSignerEntry, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; +use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::chainstate::stacks::{ StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, TransactionPostConditionMode, @@ -139,6 +140,28 @@ impl StacksClient { &self.stacks_address } + /// Get the stacks tip header of the tenure given its consensus hash + pub fn get_tenure_tip( + &self, + consensus_hash: &ConsensusHash, + ) -> Result { + let send_request = || { + self.stacks_node_client + .get(self.tenure_tip_path(consensus_hash)) + .send() + .map_err(|e| { + warn!("Signer failed to request latest sortition"; "err" => ?e); + e + }) + }; + let response = send_request()?; + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let sortition_info = response.json()?; + Ok(sortition_info) + } + /// Retrieve the signer slots stored within the stackerdb contract pub fn get_stackerdb_signer_slots( &self, @@ -826,6 +849,10 @@ impl StacksClient { format!("{}/v2/fees/transaction", self.http_origin) } + fn tenure_tip_path(&self, consensus_hash: &ConsensusHash) -> String { + format!("{}/v3/tenures/tip/{}", self.http_origin, consensus_hash) + } + /// Helper function to create a stacks transaction for a modifying contract call #[allow(clippy::too_many_arguments)] pub fn build_unsigned_contract_call_transaction( @@ -893,12 +920,16 @@ mod tests { use blockstack_lib::chainstate::stacks::boot::{ NakamotoSignerEntry, PoxStartCycleInfo, RewardSet, }; + use clarity::types::chainstate::{StacksBlockId, TrieHash}; + use clarity::util::hash::Sha512Trunc256Sum; + use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::{ ListData, ListTypeData, ResponseData, SequenceData, TupleData, TupleTypeSignature, TypeSignature, }; use rand::thread_rng; use rand_core::RngCore; + use stacks_common::bitvec::BitVec; use stacks_common::consts::{CHAIN_ID_TESTNET, SIGNER_SLOTS_PER_USER}; use wsts::curve::scalar::Scalar; @@ -907,8 +938,9 @@ mod tests { build_account_nonce_response, build_get_approved_aggregate_key_response, build_get_last_round_response, build_get_medium_estimated_fee_ustx_response, build_get_peer_info_response, build_get_pox_data_response, build_get_round_info_response, - build_get_vote_for_aggregate_key_response, build_get_weight_threshold_response, - build_read_only_response, write_response, MockServerClient, + build_get_tenure_tip_response, build_get_vote_for_aggregate_key_response, + build_get_weight_threshold_response, build_read_only_response, write_response, + MockServerClient, }; #[test] @@ -1542,4 +1574,27 @@ mod tests { write_response(mock.server, response.as_bytes()); assert_eq!(h.join().unwrap().unwrap(), estimate); } + + #[test] + fn get_tenure_tip_should_succeed() { + let mock = MockServerClient::new(); + let consensus_hash = ConsensusHash([15; 20]); + let header = StacksBlockHeaderTypes::Nakamoto(NakamotoBlockHeader { + version: 1, + chain_length: 10, + burn_spent: 10, + consensus_hash: ConsensusHash([15; 20]), + parent_block_id: StacksBlockId([0; 32]), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: TrieHash([0; 32]), + timestamp: 3, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }); + let response = build_get_tenure_tip_response(&header); + let h = spawn(move || mock.client.get_tenure_tip(&consensus_hash)); + write_response(mock.server, response.as_bytes()); + assert_eq!(h.join().unwrap().unwrap(), header); + } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 1e12eeee5a..41f493ed57 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -18,6 +18,7 @@ use std::net::{Ipv4Addr, SocketAddrV4}; use std::time::{Duration, SystemTime}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::chainstate::stacks::{ CoinbasePayload, SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction, TenureChangeCause, TenureChangePayload, TransactionAnchorMode, TransactionAuth, @@ -124,33 +125,45 @@ fn setup_test_environment( #[test] fn check_proposal_units() { - let (stacks_client, signer_db, block_pk, mut view, block) = + let (stacks_client, mut signer_db, block_pk, mut view, block) = setup_test_environment("check_proposal_units"); assert!(!view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk,) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); view.last_sortition = None; assert!(!view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk,) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); } #[test] fn check_proposal_miner_pkh_mismatch() { - let (stacks_client, signer_db, _block_pk, mut view, mut block) = + let (stacks_client, mut signer_db, _block_pk, mut view, mut block) = setup_test_environment("miner_pkh_mismatch"); block.header.consensus_hash = view.cur_sortition.consensus_hash; let different_block_pk = StacksPublicKey::from_private(&StacksPrivateKey::from_seed(&[2, 3])); assert!(!view - .check_proposal(&stacks_client, &signer_db, &block, &different_block_pk) + .check_proposal( + &stacks_client, + &mut signer_db, + &block, + &different_block_pk, + 1 + ) .unwrap()); block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; assert!(!view - .check_proposal(&stacks_client, &signer_db, &block, &different_block_pk) + .check_proposal( + &stacks_client, + &mut signer_db, + &block, + &different_block_pk, + 1 + ) .unwrap()); } @@ -158,6 +171,7 @@ fn reorg_timing_testing( test_name: &str, first_proposal_burn_block_timing_secs: u64, sortition_timing_secs: u64, + check_tip: bool, ) -> Result { let (_stacks_client, mut signer_db, block_pk, mut view, mut block) = setup_test_environment(test_name); @@ -228,6 +242,7 @@ fn reorg_timing_testing( burn_height: 2, reward_cycle: 1, }; + let mut header_clone = block_proposal_1.block.header.clone(); let mut block_info_1 = BlockInfo::from(block_proposal_1); block_info_1.mark_locally_accepted().unwrap(); signer_db.insert_block(&block_info_1).unwrap(); @@ -238,13 +253,27 @@ fn reorg_timing_testing( .insert_burn_block(&view.cur_sortition.burn_block_hash, 3, &sortition_time) .unwrap(); - let MockServerClient { server, client, .. } = MockServerClient::new(); - let h = std::thread::spawn(move || view.check_proposal(&client, &signer_db, &block, &block_pk)); + let MockServerClient { + mut server, + client, + config, + } = MockServerClient::new(); + let h = std::thread::spawn(move || { + view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1) + }); crate::client::tests::write_response( server, format!("HTTP/1.1 200 Ok\n\n{}", serde_json::json!(expected_result)).as_bytes(), ); + if check_tip { + server = crate::client::tests::mock_server_from_config(&config); + header_clone.chain_length -= 1; + let response = crate::client::tests::build_get_tenure_tip_response( + &StacksBlockHeaderTypes::Nakamoto(header_clone), + ); + crate::client::tests::write_response(server, response.as_bytes()); + } let result = h.join().unwrap(); info!("Result: {result:?}"); @@ -253,32 +282,32 @@ fn reorg_timing_testing( #[test] fn check_proposal_reorg_timing_bad() { - let result = reorg_timing_testing("reorg_timing_bad", 30, 31); + let result = reorg_timing_testing("reorg_timing_bad", 30, 31, false); assert!(!result.unwrap(), "Proposal should not validate, because the reorg occurred in a block whose proposed time was long enough before the sortition"); } #[test] fn check_proposal_reorg_timing_ok() { - let result = reorg_timing_testing("reorg_timing_okay", 30, 30); + let result = reorg_timing_testing("reorg_timing_okay", 30, 30, true); assert!(result.unwrap(), "Proposal should validate okay, because the reorg occurred in a block whose proposed time was close to the sortition"); } #[test] fn check_proposal_invalid_status() { - let (stacks_client, signer_db, block_pk, mut view, mut block) = + let (stacks_client, mut signer_db, block_pk, mut view, mut block) = setup_test_environment("invalid_status"); block.header.consensus_hash = view.cur_sortition.consensus_hash; assert!(view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; assert!(!view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; assert!(!view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; @@ -289,7 +318,7 @@ fn check_proposal_invalid_status() { // parent blocks have been seen before, while the signer state checks are only reasoning about // stacks blocks seen by the signer, which may be a subset) assert!(view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); } @@ -328,7 +357,7 @@ fn make_tenure_change_tx(payload: TenureChangePayload) -> StacksTransaction { #[test] fn check_proposal_tenure_extend_invalid_conditions() { - let (stacks_client, signer_db, block_pk, mut view, mut block) = + let (stacks_client, mut signer_db, block_pk, mut view, mut block) = setup_test_environment("tenure_extend"); block.header.consensus_hash = view.cur_sortition.consensus_hash; let mut extend_payload = make_tenure_change_payload(); @@ -338,7 +367,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(!view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); let mut extend_payload = make_tenure_change_payload(); @@ -348,7 +377,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(view - .check_proposal(&stacks_client, &signer_db, &block, &block_pk) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) .unwrap()); } @@ -370,21 +399,45 @@ fn check_block_proposal_timeout() { .unwrap(); assert!(view - .check_proposal(&stacks_client, &signer_db, &curr_sortition_block, &block_pk) + .check_proposal( + &stacks_client, + &mut signer_db, + &curr_sortition_block, + &block_pk, + 1 + ) .unwrap()); assert!(!view - .check_proposal(&stacks_client, &signer_db, &last_sortition_block, &block_pk) + .check_proposal( + &stacks_client, + &mut signer_db, + &last_sortition_block, + &block_pk, + 1 + ) .unwrap()); // Sleep a bit to time out the block proposal std::thread::sleep(Duration::from_secs(5)); assert!(!view - .check_proposal(&stacks_client, &signer_db, &curr_sortition_block, &block_pk) + .check_proposal( + &stacks_client, + &mut signer_db, + &curr_sortition_block, + &block_pk, + 1 + ) .unwrap()); assert!(view - .check_proposal(&stacks_client, &signer_db, &last_sortition_block, &block_pk) + .check_proposal( + &stacks_client, + &mut signer_db, + &last_sortition_block, + &block_pk, + 1 + ) .unwrap()); } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 3f99860ae2..c71a2ff637 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -377,9 +377,10 @@ impl Signer { let block_response = if let Some(sortition_state) = sortition_state { match sortition_state.check_proposal( stacks_client, - &self.signer_db, + &mut self.signer_db, &block_proposal.block, miner_pubkey, + self.reward_cycle, ) { // Error validating block Err(e) => { diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 97b612747c..1b3bf397ab 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5427,6 +5427,13 @@ fn signer_chainstate() { ) .unwrap(); + let reward_cycle = burnchain + .block_height_to_reward_cycle( + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height, + ) + .unwrap(); // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), @@ -5441,7 +5448,13 @@ fn signer_chainstate() { last_tenures_proposals { let valid = sortitions_view - .check_proposal(&signer_client, &signer_db, prior_tenure_first, miner_pk) + .check_proposal( + &signer_client, + &mut signer_db, + prior_tenure_first, + miner_pk, + reward_cycle, + ) .unwrap(); assert!( !valid, @@ -5449,7 +5462,13 @@ fn signer_chainstate() { ); for block in prior_tenure_interims.iter() { let valid = sortitions_view - .check_proposal(&signer_client, &signer_db, block, miner_pk) + .check_proposal( + &signer_client, + &mut signer_db, + block, + miner_pk, + reward_cycle, + ) .unwrap(); assert!( !valid, @@ -5472,20 +5491,26 @@ fn signer_chainstate() { thread::sleep(Duration::from_secs(1)); }; + let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_block_height) + .unwrap(); let valid = sortitions_view - .check_proposal(&signer_client, &signer_db, &proposal.0, &proposal.1) + .check_proposal( + &signer_client, + &mut signer_db, + &proposal.0, + &proposal.1, + reward_cycle, + ) .unwrap(); assert!( valid, "Nakamoto integration test produced invalid block proposal" ); - let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .unwrap() - .block_height; - let reward_cycle = burnchain - .block_height_to_reward_cycle(burn_block_height) - .unwrap(); signer_db .insert_block(&BlockInfo { block: proposal.0.clone(), @@ -5531,9 +5556,10 @@ fn signer_chainstate() { let valid = sortitions_view .check_proposal( &signer_client, - &signer_db, + &mut signer_db, &proposal_interim.0, &proposal_interim.1, + reward_cycle, ) .unwrap(); @@ -5548,14 +5574,21 @@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), }; + let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_block_height) + .unwrap(); let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let valid = sortitions_view .check_proposal( &signer_client, - &signer_db, + &mut signer_db, &proposal_interim.0, &proposal_interim.1, + reward_cycle, ) .unwrap(); @@ -5618,10 +5651,21 @@ fn signer_chainstate() { block_proposal_timeout: Duration::from_secs(100), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); - + let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_block_height) + .unwrap(); assert!( !sortitions_view - .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .check_proposal( + &signer_client, + &mut signer_db, + &sibling_block, + &miner_pk, + reward_cycle + ) .unwrap(), "A sibling of a previously approved block must be rejected." ); @@ -5672,7 +5716,13 @@ fn signer_chainstate() { assert!( !sortitions_view - .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .check_proposal( + &signer_client, + &mut signer_db, + &sibling_block, + &miner_pk, + reward_cycle + ) .unwrap(), "A sibling of a previously approved block must be rejected." ); @@ -5729,7 +5779,13 @@ fn signer_chainstate() { assert!( !sortitions_view - .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .check_proposal( + &signer_client, + &mut signer_db, + &sibling_block, + &miner_pk, + reward_cycle + ) .unwrap(), "A sibling of a previously approved block must be rejected." ); @@ -5788,7 +5844,13 @@ fn signer_chainstate() { assert!( !sortitions_view - .check_proposal(&signer_client, &signer_db, &sibling_block, &miner_pk) + .check_proposal( + &signer_client, + &mut signer_db, + &sibling_block, + &miner_pk, + reward_cycle + ) .unwrap(), "A sibling of a previously approved block must be rejected." ); From 29d42a577417e23a96b1c6df97d9508e435708c7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 17:53:36 -0400 Subject: [PATCH 0961/1400] fix: serde requires an owned string for decoding a hex-encoded BitVec. Also, add serde round-trip test --- stacks-common/src/bitvec.rs | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index 792532e135..6602f62e5c 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -100,8 +100,8 @@ impl Serialize for BitVec { impl<'de, const MAX_SIZE: u16> Deserialize<'de> for BitVec { fn deserialize>(deserializer: D) -> Result { - let hex: &str = Deserialize::deserialize(deserializer)?; - let bytes = hex_bytes(hex).map_err(serde::de::Error::custom)?; + let hex: String = Deserialize::deserialize(deserializer)?; + let bytes = hex_bytes(hex.as_str()).map_err(serde::de::Error::custom)?; Self::consensus_deserialize(&mut bytes.as_slice()).map_err(serde::de::Error::custom) } } @@ -412,4 +412,21 @@ mod test { check_ok_vector(i.as_slice()); } } + + #[test] + fn test_serde() { + let mut bitvec_zero_10 = BitVec::<10>::zeros(10).unwrap(); + bitvec_zero_10.set(0, true).unwrap(); + bitvec_zero_10.set(5, true).unwrap(); + bitvec_zero_10.set(3, true).unwrap(); + assert_eq!( + bitvec_zero_10.binary_str(), + "1001010000", + "Binary string should be 1001010000" + ); + + let serde_bitvec_json = serde_json::to_string(&bitvec_zero_10).unwrap(); + let serde_bitvec: BitVec<10> = serde_json::from_str(&serde_bitvec_json).unwrap(); + assert_eq!(serde_bitvec, bitvec_zero_10); + } } From 39614674b1d72cea2aae67acad3d68d0bb6c9c2b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 17:54:48 -0400 Subject: [PATCH 0962/1400] fix: pub(crate) for testing --- stackslib/src/net/api/gettenuretip.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/gettenuretip.rs b/stackslib/src/net/api/gettenuretip.rs index 328aafda4d..188fe0dc51 100644 --- a/stackslib/src/net/api/gettenuretip.rs +++ b/stackslib/src/net/api/gettenuretip.rs @@ -43,7 +43,7 @@ use crate::util_lib::db::{DBConn, Error as DBError}; #[derive(Clone)] pub struct RPCNakamotoTenureTipRequestHandler { - consensus_hash: Option, + pub(crate) consensus_hash: Option, } impl RPCNakamotoTenureTipRequestHandler { From 42bce18c9109f60256e120791f29299d36c28aac Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 17:55:19 -0400 Subject: [PATCH 0963/1400] chore: unit tests for /v3/tenures/tip/:consensus_hash --- stackslib/src/net/api/tests/gettenuretip.rs | 142 ++++++++++++++++++++ stackslib/src/net/api/tests/mod.rs | 1 + 2 files changed, 143 insertions(+) create mode 100644 stackslib/src/net/api/tests/gettenuretip.rs diff --git a/stackslib/src/net/api/tests/gettenuretip.rs b/stackslib/src/net/api/tests/gettenuretip.rs new file mode 100644 index 0000000000..15ca3fcb61 --- /dev/null +++ b/stackslib/src/net/api/tests/gettenuretip.rs @@ -0,0 +1,142 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use serde_json; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::test_rpc; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::net::api::tests::TestRPC; +use crate::net::api::{gettenuretip, *}; +use crate::net::connection::ConnectionOptions; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::test::TestEventObserver; +use crate::net::{ProtocolFamily, TipRequest}; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + let request = StacksHttpRequest::new_get_tenure_tip(addr.into(), &ConsensusHash([0x01; 20])); + + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + + let mut handler = gettenuretip::RPCNakamotoTenureTipRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + assert_eq!(handler.consensus_hash, Some(ConsensusHash([0x01; 20]))); + + // parsed request consumes headers that would not be in a constructed reqeuest + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + assert_eq!(&preamble, request.preamble()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let test_observer = TestEventObserver::new(); + let mut rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); + + let nakamoto_chain_tip = rpc_test.canonical_tip.clone(); + let consensus_hash = rpc_test.consensus_hash.clone(); + + let mut requests = vec![]; + + // query existing, non-empty Nakamoto tenure + let request = StacksHttpRequest::new_get_tenure_tip(addr.clone().into(), &consensus_hash); + requests.push(request); + + // query existing epoch2 tenure + let all_sortitions = rpc_test.peer_1.sortdb().get_all_snapshots().unwrap(); + assert!(all_sortitions.len() > 30); + assert!(all_sortitions[30].sortition); + let epoch2_consensus_hash = all_sortitions[30].consensus_hash.clone(); + + let request = + StacksHttpRequest::new_get_tenure_tip(addr.clone().into(), &epoch2_consensus_hash); + requests.push(request); + + // query non-existant tenure + let request = + StacksHttpRequest::new_get_tenure_tip(addr.clone().into(), &ConsensusHash([0x01; 20])); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the Nakamoto tip + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_tenure_tip().unwrap(); + assert_eq!( + resp.as_stacks_nakamoto().unwrap().consensus_hash, + consensus_hash + ); + assert_eq!( + resp.as_stacks_nakamoto().unwrap().block_id(), + nakamoto_chain_tip + ); + + // got an epoch2 block + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let resp = response.decode_tenure_tip().unwrap(); + let block_header = resp.as_stacks_epoch2().unwrap(); + assert_eq!( + block_header.block_hash(), + all_sortitions[30].winning_stacks_block_hash + ); + + // got a failure + let response = responses.remove(0); + debug!( + "Response:\n{}\n", + std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() + ); + + let (preamble, body) = response.destruct(); + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index b02bb53bb8..ded0360555 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -79,6 +79,7 @@ mod getstackerdbmetadata; mod getstxtransfercost; mod gettenure; mod gettenureinfo; +mod gettenuretip; mod gettransaction_unconfirmed; mod liststackerdbreplicas; mod postblock; From f34c74144cf1089239de21571a4073d170813895 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 18:03:45 -0400 Subject: [PATCH 0964/1400] fix: fix block proposal integration test --- .../src/tests/nakamoto_integrations.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 9f30ea2908..dd5f84516e 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2501,13 +2501,23 @@ fn block_proposal_api_endpoint() { ), ("Must wait", sign(&proposal), HTTP_TOO_MANY, None), ( - "Corrupted (bit flipped after signing)", + "Non-canonical or absent tenure", (|| { let mut sp = sign(&proposal); sp.block.header.consensus_hash.0[3] ^= 0x07; sp })(), HTTP_ACCEPTED, + Some(Err(ValidateRejectCode::NonCanonicalTenure)), + ), + ( + "Corrupted (bit flipped after signing)", + (|| { + let mut sp = sign(&proposal); + sp.block.header.timestamp ^= 0x07; + sp + })(), + HTTP_ACCEPTED, Some(Err(ValidateRejectCode::ChainstateError)), ), ( @@ -2624,6 +2634,10 @@ fn block_proposal_api_endpoint() { .iter() .zip(proposal_responses.iter()) { + info!( + "Received response {:?}, expecting {:?}", + &response, &expected_response + ); match expected_response { Ok(_) => { assert!(matches!(response, BlockValidateResponse::Ok(_))); From 3047b5f10f9aa1e0ea0464433956ff6455574122 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 18:10:48 -0400 Subject: [PATCH 0965/1400] chore: address PR feedback --- libsigner/src/v0/messages.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index b82ee3bab2..d5f5fe63c4 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -926,7 +926,7 @@ mod test { Sha512Trunc256Sum([0u8; 32]), RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), &StacksPrivateKey::new(), - thread_rng().next_u32() % 2 == 0, + thread_rng().gen_bool(0.5), ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) @@ -937,7 +937,7 @@ mod test { Sha512Trunc256Sum([1u8; 32]), RejectCode::ConnectivityIssues, &StacksPrivateKey::new(), - thread_rng().next_u32() % 2 == 0, + thread_rng().gen_bool(0.5), ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) @@ -958,7 +958,7 @@ mod test { Sha512Trunc256Sum([1u8; 32]), RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), &StacksPrivateKey::new(), - thread_rng().next_u32() % 2 == 0, + thread_rng().gen_bool(0.5), )); let serialized_response = response.serialize_to_vec(); let deserialized_response = read_next::(&mut &serialized_response[..]) From 5487b6594001adcecb849d6152ea4894bca0c892 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 4 Sep 2024 21:12:43 -0400 Subject: [PATCH 0966/1400] chore: fix get_block_state() and add a unit test --- stacks-signer/src/chainstate.rs | 10 +++++++++ stacks-signer/src/signerdb.rs | 36 +++++++++++++++++++++++++++++++-- 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index b7a8272040..bbbe741d8b 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -433,6 +433,12 @@ impl SortitionsView { } /// Check if the tenure change block confirms the expected parent block (i.e., the last globally accepted block in the parent tenure) + /// It checks the local DB first, and if the block is not present in the local DB, it asks the + /// Stacks node for the highest processed block header in the given tenure (and then caches it + /// in the DB). + /// + /// The rationale here is that the signer DB can be out-of-sync with the node. For example, + /// the signer may have been added to an already-running node. fn check_tenure_change_confirms_parent( tenure_change: &TenureChangePayload, block: &NakamotoBlock, @@ -446,6 +452,10 @@ impl SortitionsView { .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; if let Some(global_info) = last_globally_accepted_block { + // N.B. this block might not be the last globally accepted block across the network; + // it's just the highest one in this tenure that we know about. If this given block is + // no higher than it, then it's definitely no higher than the last globally accepted + // block across the network, so we can do an early rejection here. if block.header.chain_length <= global_info.block.header.chain_length { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 98037d991a..e5a40ff9fb 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -157,6 +157,21 @@ impl Display for BlockState { } } +impl TryFrom<&str> for BlockState { + type Error = String; + fn try_from(value: &str) -> Result { + let state = match value { + "Unprocessed" => BlockState::Unprocessed, + "LocallyAccepted" => BlockState::LocallyAccepted, + "LocallyRejected" => BlockState::LocallyRejected, + "GloballyAccepted" => BlockState::GloballyAccepted, + "GloballyRejected" => BlockState::GloballyRejected, + _ => return Err("Unparsable block state".into()), + }; + Ok(state) + } +} + /// Additional Info about a proposed block #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct BlockInfo { @@ -812,8 +827,13 @@ impl SignerDb { ) -> Result, DBError> { let qry = "SELECT json_extract(block_info, '$.state') FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2 LIMIT 1"; let args = params![&u64_to_sql(reward_cycle)?, block_sighash]; - let state: Option = query_row(&self.db, qry, args)?; - try_deserialize(state) + let state_opt: Option = query_row(&self.db, qry, args)?; + let Some(state) = state_opt else { + return Ok(None); + }; + Ok(Some( + BlockState::try_from(state.as_str()).map_err(|_| DBError::Corruption)?, + )) } } @@ -907,11 +927,23 @@ mod tests { ) .unwrap(); assert!(block_info.is_none()); + + // test getting the block state + let block_state = db + .get_block_state( + reward_cycle, + &block_proposal.block.header.signer_signature_hash(), + ) + .unwrap() + .expect("Unable to get block state from db"); + + assert_eq!(block_state, BlockInfo::from(block_proposal.clone()).state); } #[test] fn test_basic_signer_db() { let db_path = tmp_db_path(); + eprintln!("db path is {}", &db_path.display()); test_basic_signer_db_with_path(db_path) } From 5988c65af8ebd92d1770687f1edfb9712f34969d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 5 Sep 2024 10:40:06 -0400 Subject: [PATCH 0967/1400] Add a log to block miner thread stopping Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/miner.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index fb79c6abc7..8ff9ca44d1 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -250,6 +250,10 @@ impl BlockMinerThread { globals: &Globals, prior_miner: JoinHandle>, ) -> Result<(), NakamotoNodeError> { + debug!( + "Stopping prior miner thread ID {:?}", + prior_miner.thread().id() + ); globals.block_miner(); let prior_miner_result = prior_miner .join() From 9afe92e5478a21d2b7506d91dc1d0a8421c0bde0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 10:56:09 -0400 Subject: [PATCH 0968/1400] chore: documentation --- stacks-signer/src/chainstate.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index bbbe741d8b..a017adf44f 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -481,7 +481,8 @@ impl SortitionsView { } }; if let Some(nakamoto_tip) = tip.as_stacks_nakamoto() { - // If we have seen this block already, make sure its state is updated to globally accepted + // If we have seen this block already, make sure its state is updated to globally accepted. + // Otherwise, don't worry about it. if let Ok(Some(mut block_info)) = signer_db.block_lookup(reward_cycle, &nakamoto_tip.signer_signature_hash()) { From 9e5e389a060ab50b99bfc430fa1c1d1fde6391d7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 10:56:27 -0400 Subject: [PATCH 0969/1400] chore: test_debug --> debug --- stackslib/src/net/relay.rs | 2 ++ stackslib/src/net/server.rs | 10 ++++++---- stackslib/src/net/stackerdb/sync.rs | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 1b08f5cd35..dde4e9bbd8 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1678,6 +1678,8 @@ impl Relayer { ); accepted_blocks.push(nakamoto_block); } else { + // TODO: this shouldn't be a warning if it's only because we + // already have the block warn!( "Rejected Nakamoto block {} ({}) from {}", &block_id, &nakamoto_block.header.consensus_hash, &neighbor_key, diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index a26fa2f7b4..3849b9b058 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -560,14 +560,14 @@ impl HttpPeer { let mut msgs = vec![]; for event_id in &poll_state.ready { if !self.sockets.contains_key(&event_id) { - test_debug!("Rogue socket event {}", event_id); + debug!("Rogue socket event {}", event_id); to_remove.push(*event_id); continue; } let client_sock_opt = self.sockets.get_mut(&event_id); if client_sock_opt.is_none() { - test_debug!("No such socket event {}", event_id); + debug!("No such socket event {}", event_id); to_remove.push(*event_id); continue; } @@ -576,7 +576,7 @@ impl HttpPeer { match self.peers.get_mut(event_id) { Some(ref mut convo) => { // activity on a http socket - test_debug!("Process HTTP data from {:?}", convo); + debug!("Process HTTP data from {:?}", convo); match HttpPeer::process_http_conversation( node_state, *event_id, @@ -585,11 +585,13 @@ impl HttpPeer { ) { Ok((alive, mut new_msgs)) => { if !alive { + debug!("HTTP convo {:?} is no longer alive", &convo); to_remove.push(*event_id); } msgs.append(&mut new_msgs); } - Err(_e) => { + Err(e) => { + debug!("Failed to process HTTP convo {:?}: {:?}", &convo, &e); to_remove.push(*event_id); continue; } diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 32d7a7e37e..fa94c5be55 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -808,7 +808,7 @@ impl StackerDBSync { ); let chunks_req = self.make_getchunkinv(&network.get_chain_view().rc_consensus_hash); if let Err(e) = self.comms.neighbor_send(network, &naddr, chunks_req) { - info!( + debug!( "{:?}: failed to send StackerDBGetChunkInv to {:?}: {:?}", network.get_local_peer(), &naddr, From 8f2b2e78da9803ec4a782f713dade80092fdd2f9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 10:57:07 -0400 Subject: [PATCH 0970/1400] chore: raise initiative on miner failure, in case it's due to a new sortition being processed --- testnet/stacks-node/src/nakamoto_node/miner.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index fb79c6abc7..1e415d87c7 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -344,6 +344,10 @@ impl BlockMinerThread { } Err(e) => { warn!("Failed to mine block: {e:?}"); + + // try again, in case a new sortition is pending + self.globals + .raise_initiative(format!("MiningFailure: {:?}", &e)); return Err(NakamotoNodeError::MiningFailure( ChainstateError::MinerAborted, )); From e1d7f6e73bf4af06110c23032c0c1628bd4e0e56 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 10:57:30 -0400 Subject: [PATCH 0971/1400] chore: log joined miner thread error --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 4701656587..b48d93db44 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -817,7 +817,14 @@ impl RelayerThread { let new_miner_handle = std::thread::Builder::new() .name(format!("miner.{parent_tenure_start}",)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) - .spawn(move || new_miner_state.run_miner(prior_tenure_thread)) + .spawn(move || { + if let Err(e) = new_miner_state.run_miner(prior_tenure_thread) { + info!("Miner thread failed: {:?}", &e); + Err(e) + } else { + Ok(()) + } + }) .map_err(|e| { error!("Relayer: Failed to start tenure thread: {:?}", &e); NakamotoNodeError::SpawnError(e) From bbdfc66eaca677b094d4726ca4719796f14f3c54 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 10:57:39 -0400 Subject: [PATCH 0972/1400] fix: abort signer waiting if the tenure changes, but only after a timeout has passed --- .../src/nakamoto_node/sign_coordinator.rs | 32 ++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 8104d2ebd2..6c54e50af8 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -15,7 +15,7 @@ use std::collections::BTreeMap; use std::sync::mpsc::Receiver; -use std::time::Duration; +use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0}; @@ -76,6 +76,7 @@ pub struct SignCoordinator { signer_entries: HashMap, weight_threshold: u32, total_weight: u32, + config: Config, pub next_signer_bitvec: BitVec<4000>, } @@ -305,6 +306,7 @@ impl SignCoordinator { signer_entries: signer_public_keys, weight_threshold: threshold, total_weight, + config: config.clone(), }; return Ok(sign_coordinator); } @@ -326,6 +328,7 @@ impl SignCoordinator { signer_entries: signer_public_keys, weight_threshold: threshold, total_weight, + config: config.clone(), }) } @@ -642,6 +645,19 @@ impl SignCoordinator { false } + /// Check if the tenure needs to change + fn check_burn_tip_changed(sortdb: &SortitionDB, consensus_hash: &ConsensusHash) -> bool { + let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if cur_burn_chain_tip.consensus_hash != *consensus_hash { + info!("SignCoordinator: Cancel signature aggregation; burnchain tip has changed"); + true + } else { + false + } + } + /// Start gathering signatures for a Nakamoto block. /// This function begins by sending a `BlockProposal` message /// to the signers, and then waits for the signers to respond @@ -729,6 +745,8 @@ impl SignCoordinator { "threshold" => self.weight_threshold, ); + let mut new_burn_tip_ts = None; + loop { // look in the nakamoto staging db -- a block can only get stored there if it has // enough signing weight to clear the threshold @@ -749,6 +767,18 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } + if new_burn_tip_ts.is_none() { + if Self::check_burn_tip_changed(&sortdb, &burn_tip.consensus_hash) { + new_burn_tip_ts = Some(Instant::now()); + } + } + if let Some(ref new_burn_tip_ts) = new_burn_tip_ts.as_ref() { + if new_burn_tip_ts.elapsed() >= self.config.miner.wait_on_interim_blocks { + debug!("SignCoordinator: Exiting due to new burnchain tip"); + return Err(NakamotoNodeError::BurnchainTipChanged); + } + } + // one of two things can happen: // * we get enough signatures from stackerdb from the signers, OR // * we see our block get processed in our chainstate (meaning, the signers broadcasted From 7f0a1f3640dcc4086ee4365070f8dfde0c24aac0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 11:58:37 -0400 Subject: [PATCH 0973/1400] fix: fix failing follower bootup test --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 18e3f6b1f3..a61713bd0f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3483,6 +3483,7 @@ fn follower_bootup_across_multiple_cycles() { follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir); follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; + follower_conf.node.miner = false; let mut rng = rand::thread_rng(); let mut buf = [0u8; 8]; From 343dc3162117a847f059675f77ddaa37b2aeaab2 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 5 Sep 2024 13:02:58 -0400 Subject: [PATCH 0974/1400] Fix check_proposal_reorg_ok test Signed-off-by: Jacinta Ferrant --- stacks-signer/src/tests/chainstate.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 41f493ed57..d8252a2c20 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -171,7 +171,6 @@ fn reorg_timing_testing( test_name: &str, first_proposal_burn_block_timing_secs: u64, sortition_timing_secs: u64, - check_tip: bool, ) -> Result { let (_stacks_client, mut signer_db, block_pk, mut view, mut block) = setup_test_environment(test_name); @@ -261,19 +260,17 @@ fn reorg_timing_testing( let h = std::thread::spawn(move || { view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1) }); + header_clone.chain_length -= 1; + let response = crate::client::tests::build_get_tenure_tip_response( + &StacksBlockHeaderTypes::Nakamoto(header_clone), + ); + crate::client::tests::write_response(server, response.as_bytes()); + server = crate::client::tests::mock_server_from_config(&config); crate::client::tests::write_response( server, format!("HTTP/1.1 200 Ok\n\n{}", serde_json::json!(expected_result)).as_bytes(), ); - if check_tip { - server = crate::client::tests::mock_server_from_config(&config); - header_clone.chain_length -= 1; - let response = crate::client::tests::build_get_tenure_tip_response( - &StacksBlockHeaderTypes::Nakamoto(header_clone), - ); - crate::client::tests::write_response(server, response.as_bytes()); - } let result = h.join().unwrap(); info!("Result: {result:?}"); @@ -282,13 +279,13 @@ fn reorg_timing_testing( #[test] fn check_proposal_reorg_timing_bad() { - let result = reorg_timing_testing("reorg_timing_bad", 30, 31, false); + let result = reorg_timing_testing("reorg_timing_bad", 30, 31); assert!(!result.unwrap(), "Proposal should not validate, because the reorg occurred in a block whose proposed time was long enough before the sortition"); } #[test] fn check_proposal_reorg_timing_ok() { - let result = reorg_timing_testing("reorg_timing_okay", 30, 30, true); + let result = reorg_timing_testing("reorg_timing_okay", 30, 30); assert!(result.unwrap(), "Proposal should validate okay, because the reorg occurred in a block whose proposed time was close to the sortition"); } From 5833a8f6961ca5766d54859c6c6a4b30fd23045e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 5 Sep 2024 14:46:11 -0400 Subject: [PATCH 0975/1400] add allow_reorg_locally_accepted_block_if_globally_rejected_succeeds integration tests Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + libsigner/src/v0/messages.rs | 14 +- stacks-signer/Cargo.toml | 3 +- stacks-signer/src/v0/signer.rs | 38 +++++- testnet/stacks-node/Cargo.toml | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 148 +++++++++++++++++++++ 6 files changed, 201 insertions(+), 5 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 14bd7ef023..e550b2b857 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -100,6 +100,7 @@ jobs: - tests::signer::v0::signers_broadcast_signed_blocks - tests::signer::v0::min_gap_between_blocks - tests::signer::v0::duplicate_signers + - tests::signer::v0::allow_reorg_locally_accepted_block_if_globally_rejected_succeeds - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index d5f5fe63c4..ae565207a7 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -526,7 +526,9 @@ RejectCodeTypePrefix { /// The block was rejected due to no sortition view NoSortitionView = 3, /// The block was rejected due to a mismatch with expected sortition view - SortitionViewMismatch = 4 + SortitionViewMismatch = 4, + /// The block was rejected due to a testing directive + TestingDirective = 5 }); impl TryFrom for RejectCodeTypePrefix { @@ -546,6 +548,7 @@ impl From<&RejectCode> for RejectCodeTypePrefix { RejectCode::RejectedInPriorRound => RejectCodeTypePrefix::RejectedInPriorRound, RejectCode::NoSortitionView => RejectCodeTypePrefix::NoSortitionView, RejectCode::SortitionViewMismatch => RejectCodeTypePrefix::SortitionViewMismatch, + RejectCode::TestingDirective => RejectCodeTypePrefix::TestingDirective, } } } @@ -563,6 +566,8 @@ pub enum RejectCode { RejectedInPriorRound, /// The block was rejected due to a mismatch with expected sortition view SortitionViewMismatch, + /// The block was rejected due to a testing directive + TestingDirective, } define_u8_enum!( @@ -812,7 +817,8 @@ impl StacksMessageCodec for RejectCode { RejectCode::ConnectivityIssues | RejectCode::RejectedInPriorRound | RejectCode::NoSortitionView - | RejectCode::SortitionViewMismatch => { + | RejectCode::SortitionViewMismatch + | RejectCode::TestingDirective => { // No additional data to serialize / deserialize } }; @@ -835,6 +841,7 @@ impl StacksMessageCodec for RejectCode { RejectCodeTypePrefix::RejectedInPriorRound => RejectCode::RejectedInPriorRound, RejectCodeTypePrefix::NoSortitionView => RejectCode::NoSortitionView, RejectCodeTypePrefix::SortitionViewMismatch => RejectCode::SortitionViewMismatch, + RejectCodeTypePrefix::TestingDirective => RejectCode::TestingDirective, }; Ok(code) } @@ -862,6 +869,9 @@ impl std::fmt::Display for RejectCode { "The block was rejected due to a mismatch with expected sortition view." ) } + RejectCode::TestingDirective => { + write!(f, "The block was rejected due to a testing directive.") + } } } } diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 1d1af6da78..64e3cd5ca9 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -62,4 +62,5 @@ version = "0.24.3" features = ["serde", "recovery"] [features] -monitoring_prom = ["libsigner/monitoring_prom", "prometheus", "tiny_http"] \ No newline at end of file +monitoring_prom = ["libsigner/monitoring_prom", "prometheus", "tiny_http"] +testing = [] \ No newline at end of file diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index c71a2ff637..c3cf204dd4 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -43,6 +43,12 @@ use crate::runloop::{RunLoopCommand, SignerResult}; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; +#[cfg(any(test, feature = "testing"))] +/// A global variable that can be used to reject all block proposals if the signer's public key is in the provided list +pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: std::sync::Mutex< + Option>, +> = std::sync::Mutex::new(None); + /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -324,6 +330,7 @@ impl Signer { ); return; } + // TODO: should add a check to ignore an old burn block height if we know its outdated. Would require us to store the burn block height we last saw on the side. // the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); @@ -427,9 +434,38 @@ impl Signer { )) }; + #[cfg(any(test, feature = "testing"))] + let block_response = match &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() { + Some(public_keys) => { + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private( + &self.private_key, + ), + ) { + // Do an extra check just so we don't log EVERY time. + warn!("{self}: Rejecting block proposal automatically due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::TestingDirective, + &self.private_key, + self.mainnet, + )) + } else { + None + } + } + None => block_response, + }; + if let Some(block_response) = block_response { // We know proposal is invalid. Send rejection message, do not do further validation - block_info.valid = Some(false); + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); let res = self .stackerdb diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 5128f17f03..19165db0a8 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -45,7 +45,7 @@ reqwest = { version = "0.11", default-features = false, features = ["blocking", clarity = { path = "../../clarity", features = ["default", "testing"]} stacks-common = { path = "../../stacks-common", features = ["default", "testing"] } stacks = { package = "stackslib", path = "../../stackslib", features = ["default", "testing"] } -stacks-signer = { path = "../../stacks-signer" } +stacks-signer = { path = "../../stacks-signer", features = ["testing"] } tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = {workspace = true} diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1a5baefef9..3ada35b0df 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -51,6 +51,7 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; +use stacks_signer::v0::signer::TEST_REJECT_ALL_BLOCK_PROPOSAL; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -2957,3 +2958,150 @@ fn duplicate_signers() { signer_test.shutdown(); } + +#[test] +#[ignore] +/// Test that signers that accept a block locally, but that was rejected globally will accept a subsequent attempt +/// by the miner to reorg their prior signed block. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by >30% of the signers. +/// The miner then attempts to mine N+1', and all signers accept the block. +/// +/// Test Assertion: +/// All signers sign all blocks successfully. +/// The chain advances 2 full reward cycles. +fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 2; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let long_timeout = Duration::from_secs(200); + let short_timeout = Duration::from_secs(20); + signer_test.boot_to_epoch_3(); + + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let start_time = Instant::now(); + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + sender_nonce += 1; + + info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); + // Make half of the signers reject the block proposal by the miner to ensure its marked globally rejected + let rejecting_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .take(num_signers / 2) + .collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(rejecting_signers.clone()); + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in Tenure A to mine block N+1"); + let start_time = Instant::now(); + let mut rejected_hash = None; + let blocks_before = mined_blocks.load(Ordering::SeqCst); + loop { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + let block_rejections = stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { + let rejected_pubkey = rejection + .recover_public_key() + .expect("Failed to recover public key from rejection"); + if let Some(rejected_hash) = &rejected_hash { + if rejection.signer_signature_hash != *rejected_hash { + return None; + } + } else { + rejected_hash = Some(rejection.signer_signature_hash); + } + if rejecting_signers.contains(&rejected_pubkey) + && rejection.reason_code == RejectCode::TestingDirective + { + Some(rejection) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + if block_rejections.len() == rejecting_signers.len() { + break; + } + assert!( + start_time.elapsed() < long_timeout, + "FAIL: Test timed out while waiting for block proposal rejections", + ); + } + + assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); + + info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------"); + let info_before = signer_test.stacks_client.get_peer_info().unwrap(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(Vec::new()); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + let blocks_after = mined_blocks.load(Ordering::SeqCst); + assert_eq!(blocks_after, blocks_before + 1); + + let info_after = signer_test.stacks_client.get_peer_info().unwrap(); + assert_eq!( + info_after.stacks_tip_height, + info_before.stacks_tip_height + 1 + ); +} From 7d6902f5d8f874a5a74a725e740f73e124877f6b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 5 Sep 2024 14:47:21 -0400 Subject: [PATCH 0976/1400] Fix test description Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3ada35b0df..f8bca0da97 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2973,8 +2973,7 @@ fn duplicate_signers() { /// The miner then attempts to mine N+1', and all signers accept the block. /// /// Test Assertion: -/// All signers sign all blocks successfully. -/// The chain advances 2 full reward cycles. +/// Stacks tip advances to N+1' fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; From e570031f1f05115553d2d6ecbcc52d2e732fcecd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 5 Sep 2024 14:50:18 -0400 Subject: [PATCH 0977/1400] test: fix flaky behavior in `miner_forking` --- testnet/stacks-node/src/tests/signer/v0.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4d6ba5b806..a6f975566f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1641,6 +1641,7 @@ fn miner_forking() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; + let first_proposal_burn_block_timing = 1; let btc_miner_1_seed = vec![1, 1, 1, 1]; let btc_miner_2_seed = vec![2, 2, 2, 2]; @@ -1673,7 +1674,8 @@ fn miner_forking() { // we're deliberately stalling proposals: don't punish this in this test! signer_config.block_proposal_timeout = Duration::from_secs(240); // make sure that we don't allow forking due to burn block timing - signer_config.first_proposal_burn_block_timing = Duration::from_secs(1); + signer_config.first_proposal_burn_block_timing = + Duration::from_secs(first_proposal_burn_block_timing); }, |config| { let localhost = "127.0.0.1"; @@ -1804,8 +1806,8 @@ fn miner_forking() { }) .unwrap(); - // sleep for 1 second to prevent the block timing from allowing a fork by the signer set - thread::sleep(Duration::from_secs(1)); + // sleep for 2*first_proposal_burn_block_timing to prevent the block timing from allowing a fork by the signer set + thread::sleep(Duration::from_secs(first_proposal_burn_block_timing * 2)); (sort_tip, true) }; @@ -1885,7 +1887,7 @@ fn miner_forking() { assert_eq!( peer_1_height - pre_nakamoto_peer_1_height, - u64::try_from(nakamoto_blocks_count).unwrap(), + u64::try_from(nakamoto_blocks_count).unwrap() - 1, // subtract 1 for the first Nakamoto block "There should be no forks in this test" ); From dff1fb286c620099f399c8d97ea3a50d048ef26a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 5 Sep 2024 14:58:54 -0400 Subject: [PATCH 0978/1400] test: fix `signer_set_rollover` test --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7c7412a6e4..b931441230 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2790,7 +2790,7 @@ fn signer_set_rollover() { .running_nodes .btc_regtest_controller .get_burnchain() - .reward_cycle_to_block_height(next_reward_cycle) + .nakamoto_first_block_of_cycle(next_reward_cycle) .saturating_add(1); info!("---- Mining to next reward set calculation -----"); From baee54e38b68adb9bc9f8f98452b5a968e2db8c7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 15:44:46 -0400 Subject: [PATCH 0979/1400] chore: fix potential deadlock condition by avoiding a transaction when opening the chainstate --- stackslib/src/chainstate/stacks/db/mod.rs | 123 +++++++++++++--------- 1 file changed, 75 insertions(+), 48 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index ed3158c761..23dc79a763 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1038,13 +1038,17 @@ impl StacksChainState { Ok(config.expect("BUG: no db_config installed")) } - fn apply_schema_migrations<'a>( - tx: &DBTx<'a>, + /// Do we need a schema migration? + /// Return Ok(true) if so + /// Return Ok(false) if not + /// Return Err(..) on DB errors, or if this DB is not consistent with `mainnet` or `chain_id` + fn need_schema_migrations( + conn: &Connection, mainnet: bool, - chain_id: u32, - ) -> Result<(), Error> { - let mut db_config = - StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); + chain_id: u32 + ) -> Result { + let db_config = + StacksChainState::load_db_config(conn).expect("CORRUPTION: no db_config found"); if db_config.mainnet != mainnet { error!( @@ -1062,55 +1066,68 @@ impl StacksChainState { return Err(Error::InvalidChainstateDB); } - if db_config.version != CHAINSTATE_VERSION { - while db_config.version != CHAINSTATE_VERSION { - match db_config.version.as_str() { - "1" => { - // migrate to 2 - info!("Migrating chainstate schema from version 1 to 2"); - for cmd in CHAINSTATE_SCHEMA_2.iter() { - tx.execute_batch(cmd)?; - } - } - "2" => { - // migrate to 3 - info!("Migrating chainstate schema from version 2 to 3"); - for cmd in CHAINSTATE_SCHEMA_3.iter() { - tx.execute_batch(cmd)?; - } + Ok(db_config.version != CHAINSTATE_VERSION) + } + + fn apply_schema_migrations<'a>( + tx: &DBTx<'a>, + mainnet: bool, + chain_id: u32, + ) -> Result<(), Error> { + if !Self::need_schema_migrations(tx, mainnet, chain_id)? { + return Ok(()); + } + + let mut db_config = + StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); + + while db_config.version != CHAINSTATE_VERSION { + match db_config.version.as_str() { + "1" => { + // migrate to 2 + info!("Migrating chainstate schema from version 1 to 2"); + for cmd in CHAINSTATE_SCHEMA_2.iter() { + tx.execute_batch(cmd)?; } - "3" => { - // migrate to nakamoto 1 - info!("Migrating chainstate schema from version 3 to 4: nakamoto support"); - for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_1.iter() { - tx.execute_batch(cmd)?; - } + } + "2" => { + // migrate to 3 + info!("Migrating chainstate schema from version 2 to 3"); + for cmd in CHAINSTATE_SCHEMA_3.iter() { + tx.execute_batch(cmd)?; } - "4" => { - // migrate to nakamoto 2 - info!("Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo"); - for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { - tx.execute_batch(cmd)?; - } + } + "3" => { + // migrate to nakamoto 1 + info!("Migrating chainstate schema from version 3 to 4: nakamoto support"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_1.iter() { + tx.execute_batch(cmd)?; } - "5" => { - // migrate to nakamoto 3 - info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); - for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { - tx.execute_batch(cmd)?; - } + } + "4" => { + // migrate to nakamoto 2 + info!("Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { + tx.execute_batch(cmd)?; } - _ => { - error!( - "Invalid chain state database: expected version = {}, got {}", - CHAINSTATE_VERSION, db_config.version - ); - return Err(Error::InvalidChainstateDB); + } + "5" => { + // migrate to nakamoto 3 + info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { + tx.execute_batch(cmd)?; } } - db_config = - StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); + _ => { + error!( + "Invalid chain state database: expected version = {}, got {}", + CHAINSTATE_VERSION, db_config.version + ); + return Err(Error::InvalidChainstateDB); + } } + db_config = + StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); } Ok(()) } @@ -1134,6 +1151,11 @@ impl StacksChainState { StacksChainState::instantiate_db(mainnet, chain_id, index_path, true) } else { let mut marf = StacksChainState::open_index(index_path)?; + if !Self::need_schema_migrations(marf.sqlite_conn(), mainnet, chain_id)? { + return Ok(marf); + } + + // need a migration let tx = marf.storage_tx()?; StacksChainState::apply_schema_migrations(&tx, mainnet, chain_id)?; StacksChainState::add_indexes(&tx)?; @@ -1155,6 +1177,11 @@ impl StacksChainState { StacksChainState::instantiate_db(mainnet, chain_id, index_path, false) } else { let mut marf = StacksChainState::open_index(index_path)?; + + // do we need to apply a schema change? + let db_config = + StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); + let tx = marf.storage_tx()?; StacksChainState::add_indexes(&tx)?; tx.commit()?; From db88eb8daa40e989b5b76336701c92b9bb28cef8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 15:46:41 -0400 Subject: [PATCH 0980/1400] chore: fmt --- stackslib/src/chainstate/stacks/db/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 23dc79a763..49ea557652 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1045,7 +1045,7 @@ impl StacksChainState { fn need_schema_migrations( conn: &Connection, mainnet: bool, - chain_id: u32 + chain_id: u32, ) -> Result { let db_config = StacksChainState::load_db_config(conn).expect("CORRUPTION: no db_config found"); @@ -1106,7 +1106,9 @@ impl StacksChainState { } "4" => { // migrate to nakamoto 2 - info!("Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo"); + info!( + "Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo" + ); for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { tx.execute_batch(cmd)?; } From 3fb981fa8985d5c967570f4e07c84cec8c109376 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 5 Sep 2024 16:39:19 -0400 Subject: [PATCH 0981/1400] chore: Remove unnecessary function `slice_partialeq()` --- stacks-common/src/util/mod.rs | 13 ------------- stackslib/src/chainstate/stacks/index/node.rs | 13 +++++-------- stackslib/src/chainstate/stacks/index/proofs.rs | 9 ++++----- 3 files changed, 9 insertions(+), 26 deletions(-) diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 8575fee283..13ab79dcb3 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -85,19 +85,6 @@ impl error::Error for HexError { } } -/// PartialEq helper method for slices of arbitrary length. -pub fn slice_partialeq(s1: &[T], s2: &[T]) -> bool { - if s1.len() != s2.len() { - return false; - } - for i in 0..s1.len() { - if s1[i] != s2[i] { - return false; - } - } - true -} - pub mod db_common { use std::{thread, time}; diff --git a/stackslib/src/chainstate/stacks/index/node.rs b/stackslib/src/chainstate/stacks/index/node.rs index 109dbaa8fc..19e8aa327f 100644 --- a/stackslib/src/chainstate/stacks/index/node.rs +++ b/stackslib/src/chainstate/stacks/index/node.rs @@ -27,7 +27,6 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; use stacks_common::util::hash::to_hex; -use stacks_common::util::slice_partialeq; use crate::chainstate::stacks::index::bits::{ get_path_byte_len, get_ptrs_byte_len, path_from_bytes, ptrs_from_bytes, write_path_to_bytes, @@ -597,7 +596,7 @@ impl TrieCursor { impl PartialEq for TrieLeaf { fn eq(&self, other: &TrieLeaf) -> bool { - self.path == other.path && slice_partialeq(self.data.as_bytes(), other.data.as_bytes()) + self.path == other.path && self.data.as_bytes() == other.data.as_bytes() } } @@ -730,9 +729,7 @@ impl fmt::Debug for TrieNode48 { impl PartialEq for TrieNode48 { fn eq(&self, other: &TrieNode48) -> bool { - self.path == other.path - && slice_partialeq(&self.ptrs, &other.ptrs) - && slice_partialeq(&self.indexes, &other.indexes) + self.path == other.path && self.ptrs == other.ptrs && self.indexes == other.indexes } } @@ -755,8 +752,8 @@ impl TrieNode48 { } TrieNode48 { path: node16.path.clone(), - indexes: indexes, - ptrs: ptrs, + indexes, + ptrs, } } } @@ -781,7 +778,7 @@ impl fmt::Debug for TrieNode256 { impl PartialEq for TrieNode256 { fn eq(&self, other: &TrieNode256) -> bool { - self.path == other.path && slice_partialeq(&self.ptrs, &other.ptrs) + self.path == other.path && self.ptrs == other.ptrs } } diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index 9348a8b4f9..815def9c91 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -28,7 +28,6 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, TrieHash, BLOCK_HEADER_HASH_ENCODED_SIZE, TRIEHASH_ENCODED_SIZE, }; use stacks_common::util::hash::to_hex; -use stacks_common::util::slice_partialeq; use crate::chainstate::stacks::index::bits::{ get_leaf_hash, get_node_hash, read_root_hash, write_path_to_bytes, @@ -118,19 +117,19 @@ impl PartialEq for TrieMerkleProofType { ( TrieMerkleProofType::Node4((ref chr, ref node, ref hashes)), TrieMerkleProofType::Node4((ref other_chr, ref other_node, ref other_hashes)), - ) => chr == other_chr && node == other_node && slice_partialeq(hashes, other_hashes), + ) => chr == other_chr && node == other_node && hashes == other_hashes, ( TrieMerkleProofType::Node16((ref chr, ref node, ref hashes)), TrieMerkleProofType::Node16((ref other_chr, ref other_node, ref other_hashes)), - ) => chr == other_chr && node == other_node && slice_partialeq(hashes, other_hashes), + ) => chr == other_chr && node == other_node && hashes == other_hashes, ( TrieMerkleProofType::Node48((ref chr, ref node, ref hashes)), TrieMerkleProofType::Node48((ref other_chr, ref other_node, ref other_hashes)), - ) => chr == other_chr && node == other_node && slice_partialeq(hashes, other_hashes), + ) => chr == other_chr && node == other_node && hashes == other_hashes, ( TrieMerkleProofType::Node256((ref chr, ref node, ref hashes)), TrieMerkleProofType::Node256((ref other_chr, ref other_node, ref other_hashes)), - ) => chr == other_chr && node == other_node && slice_partialeq(hashes, other_hashes), + ) => chr == other_chr && node == other_node && hashes == other_hashes, ( TrieMerkleProofType::Leaf((ref chr, ref node)), TrieMerkleProofType::Leaf((ref other_chr, ref other_node)), From 7217594ec82c76a425c46271d5cb15bd3b53d3f1 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 5 Sep 2024 18:16:09 -0400 Subject: [PATCH 0982/1400] Add locally_rejected_nlocks_overriden_by_global_acceptance test Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 213 ++++++++++++++++++++- 2 files changed, 213 insertions(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index e550b2b857..2269416940 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -101,6 +101,7 @@ jobs: - tests::signer::v0::min_gap_between_blocks - tests::signer::v0::duplicate_signers - tests::signer::v0::allow_reorg_locally_accepted_block_if_globally_rejected_succeeds + - tests::signer::v0::locally_rejected_blocks_overriden_by_global_acceptance - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f8bca0da97..edd4073f4f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3035,7 +3035,7 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() { let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in Tenure A to mine block N+1"); + info!("Submitted tx {tx} to mine block N+1"); let start_time = Instant::now(); let mut rejected_hash = None; let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -3104,3 +3104,214 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() { info_before.stacks_tip_height + 1 ); } + +#[test] +#[ignore] +/// Test that signers that reject a block locally, but that was accepted globally will accept +/// a subsequent block built on top of the accepted block +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by <30% of the signers. +/// The miner then attempts to mine N+2, and all signers accept the block. +/// +/// Test Assertion: +/// Stacks tip advances to N+2 +fn locally_rejected_blocks_overriden_by_global_acceptance() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 3; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let long_timeout = Duration::from_secs(200); + let short_timeout = Duration::from_secs(30); + signer_test.boot_to_epoch_3(); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let start_time = Instant::now(); + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + sender_nonce += 1; + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + let nmb_signatures = signer_test + .stacks_client + .get_tenure_tip(&info_after.stacks_tip_consensus_hash) + .expect("Failed to get tip") + .as_stacks_nakamoto() + .expect("Not a Nakamoto block") + .signer_signature + .len(); + assert_eq!(nmb_signatures, num_signers); + + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); + // Make less than 30% of the signers reject the block to ensure it is marked globally accepted + let rejecting_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .take(num_signers * 3 / 10) + .collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(rejecting_signers.clone()); + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N+1"); + let start_time = Instant::now(); + let mut rejected_hash = None; + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + loop { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + let block_rejections = stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { + let rejected_pubkey = rejection + .recover_public_key() + .expect("Failed to recover public key from rejection"); + if let Some(rejected_hash) = &rejected_hash { + if rejection.signer_signature_hash != *rejected_hash { + return None; + } + } else { + rejected_hash = Some(rejection.signer_signature_hash); + } + if rejecting_signers.contains(&rejected_pubkey) + && rejection.reason_code == RejectCode::TestingDirective + { + Some(rejection) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + if block_rejections.len() == rejecting_signers.len() { + break; + } + assert!( + start_time.elapsed() < long_timeout, + "FAIL: Test timed out while waiting for block proposal rejections", + ); + } + // Assert the block was mined + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_before + 1, mined_blocks.load(Ordering::SeqCst)); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + let nmb_signatures = signer_test + .stacks_client + .get_tenure_tip(&info_after.stacks_tip_consensus_hash) + .expect("Failed to get tip") + .as_stacks_nakamoto() + .expect("Not a Nakamoto block") + .signer_signature + .len(); + assert_eq!(nmb_signatures, num_signers - rejecting_signers.len()); + + info!("------------------------- Test Mine Nakamoto Block N+2' -------------------------"); + let info_before = signer_test.stacks_client.get_peer_info().unwrap(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(Vec::new()); + + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N+2"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + let blocks_after = mined_blocks.load(Ordering::SeqCst); + assert_eq!(blocks_after, blocks_before + 1); + + let info_after = signer_test.stacks_client.get_peer_info().unwrap(); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height, + ); + let nmb_signatures = signer_test + .stacks_client + .get_tenure_tip(&info_after.stacks_tip_consensus_hash) + .expect("Failed to get tip") + .as_stacks_nakamoto() + .expect("Not a Nakamoto block") + .signer_signature + .len(); + assert_eq!(nmb_signatures, num_signers); +} From cfd2c37e71fdb431372c400e3e000b5191dd28b5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 5 Sep 2024 18:22:26 -0400 Subject: [PATCH 0983/1400] Do not store blocks that fail the initial checks Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index c3cf204dd4..597ba19198 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -480,17 +480,18 @@ impl Signer { Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), } } else { - // We don't know if proposal is valid, submit to stacks-node for further checks + // We don't know if proposal is valid, submit to stacks-node for further checks and store it locally. + // Do not store invalid blocks as this could DOS the signer. We only store blocks that are valid or unknown. stacks_client .submit_block_for_validation(block_info.block.clone()) .unwrap_or_else(|e| { warn!("{self}: Failed to submit block for validation: {e:?}"); }); - } - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + } } /// Handle block response messages from a signer From 2be05f9a0aa66ad72950c2407b76ae02cf054a23 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 5 Sep 2024 20:40:16 -0400 Subject: [PATCH 0984/1400] Fix broken build from prior db change commit Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/stacks/db/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 49ea557652..a942ec7fd1 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1181,8 +1181,8 @@ impl StacksChainState { let mut marf = StacksChainState::open_index(index_path)?; // do we need to apply a schema change? - let db_config = - StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); + let db_config = StacksChainState::load_db_config(marf.sqlite_conn()) + .expect("CORRUPTION: no db_config found"); let tx = marf.storage_tx()?; StacksChainState::add_indexes(&tx)?; From 1a1b0764fa5541d4434f32fa8215762bdefaf7cb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 21:46:35 -0400 Subject: [PATCH 0985/1400] chore: remove .mined_blocks and replace it with .last_mined_block, and use the highest tenure block from the chainstate to deduce where we should build (since it's not possible anymore to keep .mined_blocks coherent with the node's chainstate) --- .../stacks-node/src/nakamoto_node/miner.rs | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 6f0f6f88ac..ba32122b6d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -138,8 +138,8 @@ pub struct BlockMinerThread { keychain: Keychain, /// burnchain configuration burnchain: Burnchain, - /// Set of blocks that we have mined - mined_blocks: Vec, + /// Last block mined + last_block_mined: Option, /// Copy of the node's registered VRF key registered_key: RegisteredKey, /// Burnchain block snapshot which elected this miner @@ -172,7 +172,7 @@ impl BlockMinerThread { globals: rt.globals.clone(), keychain: rt.keychain.clone(), burnchain: rt.burnchain.clone(), - mined_blocks: vec![], + last_block_mined: None, registered_key, burn_election_block, burn_block, @@ -402,7 +402,7 @@ impl BlockMinerThread { // update mined-block counters and mined-tenure counters self.globals.counters.bump_naka_mined_blocks(); - if self.mined_blocks.is_empty() { + if !self.last_block_mined.is_none() { // this is the first block of the tenure, bump tenure counter self.globals.counters.bump_naka_mined_tenures(); } @@ -411,8 +411,7 @@ impl BlockMinerThread { Self::fault_injection_block_announce_stall(&new_block); self.globals.coord().announce_new_stacks_block(); - // store mined block - self.mined_blocks.push(new_block); + self.last_block_mined = Some(new_block); } let Ok(sort_db) = SortitionDB::open( @@ -913,32 +912,42 @@ impl BlockMinerThread { burn_db: &mut SortitionDB, chain_state: &mut StacksChainState, ) -> Result { + // load up stacks chain tip + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(burn_db.conn()).map_err(|e| { + error!("Failed to load canonical Stacks tip: {:?}", &e); + NakamotoNodeError::ParentNotFound + })?; + + let stacks_tip_block_id = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + let tenure_tip_opt = NakamotoChainState::get_highest_block_header_in_tenure( + &mut chain_state.index_conn(), + &stacks_tip_block_id, + &self.burn_election_block.consensus_hash, + ) + .map_err(|e| { + error!( + "Could not query header info for tenure tip {} off of {}: {:?}", + &self.burn_election_block.consensus_hash, &stacks_tip_block_id, &e + ); + NakamotoNodeError::ParentNotFound + })?; + // The nakamoto miner must always build off of a chain tip that is the highest of: // 1. The highest block in the miner's current tenure // 2. The highest block in the current tenure's parent tenure + // // Where the current tenure's parent tenure is the tenure start block committed to in the current tenure's associated block commit. - let stacks_tip_header = if let Some(block) = self.mined_blocks.last() { - test_debug!( - "Stacks block parent ID is last mined block {}", - &block.block_id() + let stacks_tip_header = if let Some(tenure_tip) = tenure_tip_opt { + debug!( + "Stacks block parent ID is last block in tenure ID {}", + &tenure_tip.consensus_hash ); - let stacks_block_id = block.block_id(); - NakamotoChainState::get_block_header(chain_state.db(), &stacks_block_id) - .map_err(|e| { - error!( - "Could not query header info for last-mined block ID {}: {:?}", - &stacks_block_id, &e - ); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!("No header for parent tenure ID {}", &stacks_block_id); - NakamotoNodeError::ParentNotFound - })? + tenure_tip } else { - // no mined blocks yet - test_debug!( - "Stacks block parent ID is last block in parent tenure ID {}", + // This tenure is empty on the canonical fork, so mine the first tenure block. + debug!( + "Stacks block parent ID is last block in parent tenure tipped by {}", &self.parent_tenure_id ); @@ -957,18 +966,9 @@ impl BlockMinerThread { NakamotoNodeError::ParentNotFound })?; - // NOTE: this is the soon-to-be parent's block ID, since it's the tip we mine on top - // of. We're only interested in performing queries relative to the canonical tip. - let (stacks_tip_ch, stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(burn_db.conn()).map_err(|e| { - error!("Failed to load canonical Stacks tip: {:?}", &e); - NakamotoNodeError::ParentNotFound - })?; - - let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); let header_opt = NakamotoChainState::get_highest_block_header_in_tenure( &mut chain_state.index_conn(), - &stacks_tip, + &stacks_tip_block_id, &parent_tenure_header.consensus_hash, ) .map_err(|e| { @@ -1004,7 +1004,7 @@ impl BlockMinerThread { } }; - test_debug!( + debug!( "Miner: stacks tip parent header is {} {:?}", &stacks_tip_header.index_block_hash(), &stacks_tip_header @@ -1132,7 +1132,7 @@ impl BlockMinerThread { .make_vrf_proof() .ok_or_else(|| NakamotoNodeError::BadVrfConstruction)?; - if self.mined_blocks.is_empty() && parent_block_info.parent_tenure.is_none() { + if self.last_block_mined.is_none() && parent_block_info.parent_tenure.is_none() { warn!("Miner should be starting a new tenure, but failed to load parent tenure info"); return Err(NakamotoNodeError::ParentNotFound); }; From a19af95d20b368960429570079a4d385f767affc Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 5 Sep 2024 22:45:29 -0400 Subject: [PATCH 0986/1400] Add reorg_locally_accepted_blocks_across_tenures_succeeds integration test Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 3 +- stacks-signer/src/v0/signer.rs | 24 +- stackslib/src/chainstate/stacks/miner.rs | 4 +- testnet/stacks-node/src/event_dispatcher.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 271 +++++++++++++++++++- 5 files changed, 295 insertions(+), 9 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 2269416940..762563871c 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -100,8 +100,9 @@ jobs: - tests::signer::v0::signers_broadcast_signed_blocks - tests::signer::v0::min_gap_between_blocks - tests::signer::v0::duplicate_signers - - tests::signer::v0::allow_reorg_locally_accepted_block_if_globally_rejected_succeeds + - tests::signer::v0::locally_accepted_blocks_overriden_by_global_rejection - tests::signer::v0::locally_rejected_blocks_overriden_by_global_acceptance + - tests::signer::v0::reorg_locally_accepted_blocks_across_tenures_succeeds - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 597ba19198..4da514d1d5 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -49,6 +49,12 @@ pub static TEST_REJECT_ALL_BLOCK_PROPOSAL: std::sync::Mutex< Option>, > = std::sync::Mutex::new(None); +#[cfg(any(test, feature = "testing"))] +/// A global variable that can be used to ignore block proposals if the signer's public key is in the provided list +pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: std::sync::Mutex< + Option>, +> = std::sync::Mutex::new(None); + /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -148,6 +154,23 @@ impl SignerTrait for Signer { for message in messages { match message { SignerMessage::BlockProposal(block_proposal) => { + #[cfg(any(test, feature = "testing"))] + if let Some(public_keys) = + &*TEST_IGNORE_ALL_BLOCK_PROPOSALS.lock().unwrap() + { + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private( + &self.private_key, + ), + ) { + warn!("{self}: Ignoring block proposal due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + continue; + } + } self.handle_block_proposal( stacks_client, sortition_state, @@ -442,7 +465,6 @@ impl Signer { &self.private_key, ), ) { - // Do an extra check just so we don't log EVERY time. warn!("{self}: Rejecting block proposal automatically due to testing directive"; "block_id" => %block_proposal.block.block_id(), "height" => block_proposal.block.header.chain_length, diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index f0e4c96307..0195385d3b 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -321,7 +321,7 @@ pub struct TransactionSuccessEvent { } /// Represents an event for a failed transaction. Something went wrong when processing this transaction. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct TransactionErrorEvent { #[serde(deserialize_with = "hex_deserialize", serialize_with = "hex_serialize")] pub txid: Txid, @@ -378,7 +378,7 @@ pub enum TransactionResult { /// This struct is used to transmit data about transaction results through either the `mined_block` /// or `mined_microblock` event. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub enum TransactionEvent { /// Transaction has already succeeded. Success(TransactionSuccessEvent), diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 34e42501ac..7ad55a994b 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -142,7 +142,7 @@ pub struct MinedMicroblockEvent { pub anchor_block: BlockHeaderHash, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct MinedNakamotoBlockEvent { pub target_burn_height: u64, pub parent_block_id: String, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index edd4073f4f..6a6a0867c9 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -51,7 +51,7 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::v0::signer::TEST_REJECT_ALL_BLOCK_PROPOSAL; +use stacks_signer::v0::signer::{TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_REJECT_ALL_BLOCK_PROPOSAL}; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -2962,7 +2962,8 @@ fn duplicate_signers() { #[test] #[ignore] /// Test that signers that accept a block locally, but that was rejected globally will accept a subsequent attempt -/// by the miner to reorg their prior signed block. +/// by the miner essentially reorg their prior locally accepted/signed block, i.e. the globally rejected block overrides +/// their local view. /// /// Test Setup: /// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. @@ -2974,7 +2975,7 @@ fn duplicate_signers() { /// /// Test Assertion: /// Stacks tip advances to N+1' -fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() { +fn locally_accepted_blocks_overriden_by_global_rejection() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -3002,6 +3003,7 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() { signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let blocks_before = mined_blocks.load(Ordering::SeqCst); let start_time = Instant::now(); @@ -3019,6 +3021,14 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() { thread::sleep(Duration::from_secs(1)); } sender_nonce += 1; + let info_after = signer_test.stacks_client.get_peer_info().unwrap(); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); // Make half of the signers reject the block proposal by the miner to ensure its marked globally rejected @@ -3039,6 +3049,7 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() { let start_time = Instant::now(); let mut rejected_hash = None; let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test.stacks_client.get_peer_info().unwrap(); loop { let stackerdb_events = test_observer::get_stackerdb_chunks(); let block_rejections = stackerdb_events @@ -3081,6 +3092,12 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() { } assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); + let info_after = signer_test.stacks_client.get_peer_info().unwrap(); + assert_eq!(info_before, info_after); + // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1 = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_1, block_n); info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------"); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); @@ -3103,6 +3120,22 @@ fn allow_reorg_locally_accepted_block_if_globally_rejected_succeeds() { info_after.stacks_tip_height, info_before.stacks_tip_height + 1 ); + // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' + let start_time = Instant::now(); + while test_observer::get_mined_nakamoto_blocks().last().unwrap() == block_n_1 { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1_prime = nakamoto_blocks.last().unwrap(); + assert_eq!( + info_after.stacks_tip.to_string(), + block_n_1_prime.block_hash + ); + assert_ne!(block_n_1_prime, block_n_1); } #[test] @@ -3187,6 +3220,11 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .len(); assert_eq!(nmb_signatures, num_signers); + // Ensure that the block was accepted globally so the stacks tip has not advanced to N + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); // Make less than 30% of the signers reject the block to ensure it is marked globally accepted let rejecting_signers: Vec<_> = signer_test @@ -3278,7 +3316,13 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .len(); assert_eq!(nmb_signatures, num_signers - rejecting_signers.len()); - info!("------------------------- Test Mine Nakamoto Block N+2' -------------------------"); + // Ensure that the block was accepted globally so the stacks tip has advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1 = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); + assert_ne!(block_n_1, block_n); + + info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let blocks_before = mined_blocks.load(Ordering::SeqCst); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -3314,4 +3358,223 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .signer_signature .len(); assert_eq!(nmb_signatures, num_signers); + // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_2 = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); + assert_ne!(block_n_2, block_n_1); +} + +#[test] +#[ignore] +/// Test that signers that have accept a locally signed block N+1 built in tenure A can sign a block proposed during a +/// new tenure B built upon the last globally accepted block N, i.e. a reorg can occur at a tenure boundary. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but <30% accept it. The remaining signers +/// do not make a decision on the block. A new tenure begins and the miner proposes a new block N+1' which all signers accept. +/// +/// Test Assertion: +/// Stacks tip advances to N+1' +fn reorg_locally_accepted_blocks_across_tenures_succeeds() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 2; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = Duration::from_secs(30); + signer_test.boot_to_epoch_3(); + info!("------------------------- Test Mine Nakamoto Block N in Tenure B -------------------------"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let start_time = Instant::now(); + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + sender_nonce += 1; + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); + // Make more than >70% of the signers ignore the block proposal to ensure it it is not globally accepted/rejected + let ignoring_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .take(num_signers * 7 / 10) + .collect(); + TEST_IGNORE_ALL_BLOCK_PROPOSALS + .lock() + .unwrap() + .replace(ignoring_signers.clone()); + // Clear the stackerdb chunks + test_observer::clear(); + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to attempt to mine block N+1"); + let start_time = Instant::now(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let mut last_hash = None; + loop { + let ignored_signers = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { + // Only care about the last proposed block + if let Some(h) = &last_hash { + if h != &hash { + return None; + } + } else { + last_hash = Some(hash); + } + ignoring_signers + .iter() + .find(|key| key.verify(hash.bits(), &signature).is_ok()) + } + _ => None, + } + }) + .collect::>(); + if ignored_signers.len() + ignoring_signers.len() == num_signers { + break; + } + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block proposal acceptance", + ); + sleep_ms(1000); + } + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after, info_before); + // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1 = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_1, block_n); + assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); + + info!( + "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" + ); + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + TEST_IGNORE_ALL_BLOCK_PROPOSALS + .lock() + .unwrap() + .replace(Vec::new()); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let start_time = Instant::now(); + // submit a tx so that the miner will mine a stacks block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + let nmb_signatures = signer_test + .stacks_client + .get_tenure_tip(&info_after.stacks_tip_consensus_hash) + .expect("Failed to get tip") + .as_stacks_nakamoto() + .expect("Not a Nakamoto block") + .signer_signature + .len(); + assert_eq!(nmb_signatures, num_signers); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1_prime = nakamoto_blocks.last().unwrap(); + assert_eq!( + info_after.stacks_tip.to_string(), + block_n_1_prime.block_hash + ); + assert_ne!(block_n_1_prime, block_n); } From 2cee593ccaa212b74ec7fcb3bf835ad9d3a7e906 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Sep 2024 00:39:46 -0400 Subject: [PATCH 0987/1400] fix: correct logic in `validate_timestamp` and fix integration test --- .../stacks-node/src/nakamoto_node/miner.rs | 10 +- testnet/stacks-node/src/tests/signer/v0.rs | 103 +++++++++--------- 2 files changed, 60 insertions(+), 53 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index cd811a9346..6cb7a0ce65 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -45,7 +45,6 @@ use stacks::chainstate::stacks::{ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; -use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -1068,9 +1067,12 @@ impl BlockMinerThread { ); NakamotoNodeError::ParentNotFound })?; - let current_timestamp = get_epoch_time_secs(); - let time_since_parent_ms = - current_timestamp.saturating_sub(stacks_parent_header.burn_header_timestamp) * 1000; + let current_timestamp = x.header.timestamp; + let parent_timestamp = match stacks_parent_header.anchored_header.as_stacks_nakamoto() { + Some(naka_header) => naka_header.timestamp, + None => stacks_parent_header.burn_header_timestamp, + }; + let time_since_parent_ms = current_timestamp.saturating_sub(parent_timestamp) * 1000; if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms { debug!("Parent block mined {time_since_parent_ms} ms ago. Required minimum gap between blocks is {} ms", self.config.miner.min_time_between_blocks_ms; "current_timestamp" => current_timestamp, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a6f975566f..45ab322d01 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2895,68 +2895,73 @@ fn min_gap_between_blocks() { signer_test.boot_to_epoch_3(); - let proposals_before = signer_test + let blocks_before = signer_test .running_nodes - .nakamoto_blocks_proposed + .nakamoto_blocks_mined .load(Ordering::SeqCst); - let info_before = get_chain_info(&signer_test.running_nodes.conf); + info!("Ensure that the first Nakamoto block is mined after the gap is exceeded"); + let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + assert_eq!(blocks.len(), 1); + let first_block = blocks.last().unwrap(); + let blocks = test_observer::get_blocks(); + let parent = blocks + .iter() + .find(|b| b.get("block_height").unwrap() == first_block.stacks_block_height - 1) + .unwrap(); + let first_block_time = first_block + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .timestamp; + let parent_block_time = parent.get("burn_block_time").unwrap().as_u64().unwrap(); + assert!( + Duration::from_secs(first_block_time - parent_block_time) + >= Duration::from_millis(time_between_blocks_ms), + "First block proposed before gap was exceeded: {}s - {}s > {}ms", + first_block_time, + parent_block_time, + time_between_blocks_ms + ); - // submit a tx so that the miner will mine a block + // Submit a tx so that the miner will mine a block let sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); submit_tx(&http_origin, &transfer_tx); - info!("Submitted transfer tx and waiting for block proposal. Ensure it does not arrive before the gap is exceeded"); - let start_time = Instant::now(); - loop { - let blocks_proposed = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - if blocks_proposed > proposals_before { - assert!( - start_time.elapsed().as_millis() >= time_between_blocks_ms.into(), - "Block proposed before gap was exceeded" - ); - break; - } - std::thread::sleep(Duration::from_millis(100)); - } - - debug!("Ensure that the block is mined after the gap is exceeded"); - - let start = Instant::now(); - let duration = 30; - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - loop { - let blocks_mined = signer_test + info!("Submitted transfer tx and waiting for block to be processed. Ensure it does not arrive before the gap is exceeded"); + wait_for(60, || { + let blocks_processed = signer_test .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); + Ok(blocks_processed > blocks_before) + }) + .unwrap(); - let info = get_chain_info(&signer_test.running_nodes.conf); - if blocks_mined > blocks_before - && info.stacks_tip_height == info_before.stacks_tip_height + 1 - { - break; - } - - debug!( - "blocks_mined: {},{}, stacks_tip_height: {},{}", - blocks_mined, blocks_before, info_before.stacks_tip_height, info.stacks_tip_height - ); - - std::thread::sleep(Duration::from_millis(100)); - assert!( - start.elapsed() < Duration::from_secs(duration), - "Block not mined within timeout" - ); - } + // Verify that the second Nakamoto block is mined after the gap is exceeded + let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + let last_block = blocks.last().unwrap(); + let last_block_time = last_block + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .timestamp; + let penultimate_block = blocks.get(blocks.len() - 2).unwrap(); + let penultimate_block_time = penultimate_block + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .timestamp; + assert!( + Duration::from_secs(last_block_time - penultimate_block_time) + >= Duration::from_millis(time_between_blocks_ms), + "Block proposed before gap was exceeded: {}s - {}s > {}ms", + last_block_time, + penultimate_block_time, + time_between_blocks_ms + ); signer_test.shutdown(); } From 025cc0d4e84501589ed90518e7fb7fe8b0020ec9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Sep 2024 01:00:00 -0400 Subject: [PATCH 0988/1400] fix: ignore rejections for other blocks in sign coordinator --- .../stacks-node/src/nakamoto_node/sign_coordinator.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 6c54e50af8..beece7f99e 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -919,6 +919,17 @@ impl SignCoordinator { responded_signers.insert(signer_pubkey); } SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { + let block_sighash = block.header.signer_signature_hash(); + if block_sighash != rejected_data.signer_signature_hash { + warn!( + "Processed rejection for a different block. Will try to continue."; + "block_signer_signature_hash" => %block_sighash, + "rejected_data.signer_signature_hash" => %rejected_data.signer_signature_hash, + "slot_id" => slot_id, + "reward_cycle_id" => reward_cycle_id, + ); + continue; + } let rejected_pubkey = match rejected_data.recover_public_key() { Ok(rejected_pubkey) => { if rejected_pubkey != signer_pubkey { From 557a4ca43eb42132baaf800c5b849604cb93b5e8 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Sep 2024 12:22:23 -0400 Subject: [PATCH 0989/1400] fix: update `/v2/stacker_set/` to `/v3/stacker_set/` Matches latest node release, 2.5.0.0.7. --- stacks-signer/src/client/stacks_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index b6337364db..554a8361bd 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -792,7 +792,7 @@ impl StacksClient { } fn reward_set_path(&self, reward_cycle: u64) -> String { - format!("{}/v2/stacker_set/{reward_cycle}", self.http_origin) + format!("{}/v3/stacker_set/{reward_cycle}", self.http_origin) } fn fees_transaction_path(&self) -> String { From 66ee29632c2fff4373b3a3791608f27ad17b754a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Sep 2024 13:07:44 -0400 Subject: [PATCH 0990/1400] fix: `/v2/block_proposal` -> `/v3/block_proposal` --- stacks-signer/src/client/stacks_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 554a8361bd..ebc1398071 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -767,7 +767,7 @@ impl StacksClient { } fn block_proposal_path(&self) -> String { - format!("{}/v2/block_proposal", self.http_origin) + format!("{}/v3/block_proposal", self.http_origin) } fn sortition_info_path(&self) -> String { From 1f74c451af5a680b2d9a2be0611c214791fc595f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 6 Sep 2024 13:17:22 -0400 Subject: [PATCH 0991/1400] docs: update changelog --- stacks-signer/CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index dabe0b346a..aa2b87deb7 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,16 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +## [2.5.0.0.5.3] + +### Added + +### Changed + +- Update node endpoints to match stacks-core release 2.5.0.0.7 + - `/v2/block_proposal` -> `/v3/block_proposal` + - `/v2/stacker_set` -> `/v3/stacker_set` + ## [2.5.0.0.5.2] ### Added From 47c562a9d586a325a28629b5b9ede0203d0be8c5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 22:20:25 -0400 Subject: [PATCH 0992/1400] fix: make a stackerdb shrink if its signer list becomes smaller than that in the DB --- stackslib/src/net/stackerdb/db.rs | 11 +++ stackslib/src/net/stackerdb/mod.rs | 8 ++- stackslib/src/net/stackerdb/tests/db.rs | 96 +++++++++++++++++++++++++ 3 files changed, 114 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 1dab3f4052..2b735668ac 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -293,6 +293,15 @@ impl<'a> StackerDBTx<'a> { Ok(()) } + /// Shrink a StackerDB. Remove all slots at and beyond a particular slot ID. + fn shrink_stackerdb(&self, stackerdb_id: i64, first_slot_id: u32) -> Result<(), net_error> { + let qry = "DELETE FROM chunks WHERE stackerdb_id = ?1 AND slot_id >= ?2"; + let args = params![&stackerdb_id, &first_slot_id]; + let mut stmt = self.sql_tx.prepare(&qry)?; + stmt.execute(args)?; + Ok(()) + } + /// Update a database's storage slots, e.g. from new configuration state in its smart contract. /// Chunk data for slots that no longer exist will be dropped. /// Newly-created slots will be instantiated with empty data. @@ -343,6 +352,8 @@ impl<'a> StackerDBTx<'a> { stmt.execute(args)?; } } + debug!("Shrink {} to {} slots", smart_contract, total_slots_read); + self.shrink_stackerdb(stackerdb_id, total_slots_read)?; Ok(()) } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index d310998a19..ea01b1b22a 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -341,8 +341,14 @@ impl StackerDBs { &e ); } - } else if new_config != stackerdb_config && new_config.signers.len() > 0 { + } else if (new_config != stackerdb_config && new_config.signers.len() > 0) + || (new_config == stackerdb_config + && new_config.signers.len() + != self.get_slot_versions(&stackerdb_contract_id)?.len()) + { // only reconfigure if the config has changed + // (that second check on the length is needed in case the node is a victim of + // #5142, which was a bug whereby a stackerdb could never shrink) if let Err(e) = self.reconfigure_stackerdb(&stackerdb_contract_id, &new_config) { warn!( "Failed to create or reconfigure StackerDB {stackerdb_contract_id}: DB error {:?}", diff --git a/stackslib/src/net/stackerdb/tests/db.rs b/stackslib/src/net/stackerdb/tests/db.rs index 7371b6b9c5..9bcf800529 100644 --- a/stackslib/src/net/stackerdb/tests/db.rs +++ b/stackslib/src/net/stackerdb/tests/db.rs @@ -20,6 +20,7 @@ use std::path::Path; use clarity::vm::types::QualifiedContractIdentifier; use clarity::vm::ContractName; use libstackerdb::SlotMetadata; +use rusqlite::params; use stacks_common::address::{ AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, }; @@ -649,6 +650,16 @@ fn test_reconfigure_stackerdb() { initial_metadata.push((slot_metadata, chunk_data)); } + tx.commit().unwrap(); + + let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap(); + assert_eq!(db_slot_metadata.len(), pks.len()); + for (i, slot_md) in db_slot_metadata.iter().enumerate() { + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata, *slot_md); + } + + let tx = db.tx_begin(StackerDBConfig::noop()).unwrap(); let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let reconfigured_pks = vec![ // first five slots are unchanged @@ -722,6 +733,91 @@ fn test_reconfigure_stackerdb() { assert_eq!(chunk.len(), 0); } } + + let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap(); + assert_eq!(db_slot_metadata.len(), reconfigured_pks.len()); + for (i, slot_md) in db_slot_metadata.iter().enumerate() { + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata, *slot_md); + } + + // reconfigure with fewer slots + let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let reconfigured_pks = vec![ + // first five slots are unchanged + pks[0], pks[1], pks[2], pks[3], pks[4], + // next five slots are different, so their contents will be dropped and versions and write + // timestamps reset + new_pks[0], new_pks[1], new_pks[2], new_pks[3], + new_pks[4], + // slots 10-15 will disappear + ]; + let reconfigured_addrs: Vec<_> = reconfigured_pks + .iter() + .map(|pk| { + StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&pk)], + ) + .unwrap() + }) + .collect(); + + let tx = db.tx_begin(StackerDBConfig::noop()).unwrap(); + + // reconfigure + tx.reconfigure_stackerdb( + &sc, + &reconfigured_addrs + .clone() + .into_iter() + .map(|addr| (addr, 1)) + .collect::>(), + ) + .unwrap(); + + tx.commit().unwrap(); + + for (i, pk) in new_pks.iter().enumerate() { + if i < 5 { + // first five are unchanged + let chunk_data = StackerDBChunkData { + slot_id: i as u32, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![i as u8; 128], + }; + + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + let chunk = db.get_latest_chunk(&sc, i as u32).unwrap().unwrap(); + + assert_eq!(initial_metadata[i].0, slot_metadata); + assert_eq!(initial_metadata[i].1.data, chunk); + } else if i < 10 { + // next five are wiped + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata.slot_id, i as u32); + assert_eq!(slot_metadata.slot_version, 0); + assert_eq!(slot_metadata.data_hash, Sha512Trunc256Sum([0x00; 32])); + assert_eq!(slot_metadata.signature, MessageSignature::empty()); + + let chunk = db.get_latest_chunk(&sc, i as u32).unwrap().unwrap(); + assert_eq!(chunk.len(), 0); + } else { + // final five are gone + let slot_metadata_opt = db.get_slot_metadata(&sc, i as u32).unwrap(); + assert!(slot_metadata_opt.is_none()); + } + } + + let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap(); + assert_eq!(db_slot_metadata.len(), reconfigured_pks.len()); + for (i, slot_md) in db_slot_metadata.iter().enumerate() { + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata, *slot_md); + } } // TODO: max chunk size From ed3486c25c06024a9378b2d6c2a8cae09bae5e65 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 6 Sep 2024 14:26:16 -0400 Subject: [PATCH 0993/1400] Add miner_recovers_when_broadcast_block_delay_across_tenures_occurs Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + stacks-signer/src/client/mod.rs | 1 - stacks-signer/src/client/stacks_client.rs | 28 +- stacks-signer/src/config.rs | 5 - stacks-signer/src/runloop.rs | 1 - stacks-signer/src/signerdb.rs | 45 ++- stacks-signer/src/tests/chainstate.rs | 2 +- stacks-signer/src/v0/signer.rs | 82 +++-- testnet/stacks-node/src/tests/signer/v0.rs | 330 +++++++++++++++++++-- 9 files changed, 417 insertions(+), 78 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 762563871c..e38c862552 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -103,6 +103,7 @@ jobs: - tests::signer::v0::locally_accepted_blocks_overriden_by_global_rejection - tests::signer::v0::locally_rejected_blocks_overriden_by_global_acceptance - tests::signer::v0::reorg_locally_accepted_blocks_across_tenures_succeeds + - tests::signer::v0::miner_recovers_when_broadcast_block_delay_across_tenures_occurs - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index b32f465b11..5ce8706274 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -571,7 +571,6 @@ pub(crate) mod tests { db_path: config.db_path.clone(), first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, block_proposal_timeout: config.block_proposal_timeout, - broadcast_signed_blocks: true, } } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 05c3b0f156..85fa7fd34b 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -706,17 +706,23 @@ impl StacksClient { /// Returns `true` if the block was accepted or `false` if the block /// was rejected. pub fn post_block(&self, block: &NakamotoBlock) -> Result { - let response = self - .stacks_node_client - .post(format!( - "{}{}?broadcast=1", - self.http_origin, - postblock_v3::PATH - )) - .header("Content-Type", "application/octet-stream") - .header(AUTHORIZATION, self.auth_password.clone()) - .body(block.serialize_to_vec()) - .send()?; + let send_request = || { + self.stacks_node_client + .post(format!( + "{}{}?broadcast=1", + self.http_origin, + postblock_v3::PATH + )) + .header("Content-Type", "application/octet-stream") + .header(AUTHORIZATION, self.auth_password.clone()) + .body(block.serialize_to_vec()) + .send() + .map_err(|e| { + debug!("Failed to submit block to the Stacks node: {e:?}"); + backoff::Error::transient(e) + }) + }; + let response = retry_with_exponential_backoff(send_request)?; if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 037e8af773..66cf5a5f7d 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -157,8 +157,6 @@ pub struct SignerConfig { pub first_proposal_burn_block_timing: Duration, /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, - /// Broadcast a block to the node if we gather enough signatures from other signers - pub broadcast_signed_blocks: bool, } /// The parsed configuration for the signer @@ -203,8 +201,6 @@ pub struct GlobalConfig { pub first_proposal_burn_block_timing: Duration, /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, - /// Broadcast a block to the node if we gather enough signatures from other signers - pub broadcast_signed_blocks: bool, } /// Internal struct for loading up the config file @@ -361,7 +357,6 @@ impl TryFrom for GlobalConfig { metrics_endpoint, first_proposal_burn_block_timing, block_proposal_timeout, - broadcast_signed_blocks: true, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 86d8458e30..cb29221ba9 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -335,7 +335,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, - broadcast_signed_blocks: self.config.broadcast_signed_blocks, })) } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index e5a40ff9fb..6f5b6c6e06 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -225,16 +225,20 @@ impl BlockInfo { block_info } - /// Mark this block as locally accepted, valid, signed over, and records a timestamp in the block info if it wasn't + /// Mark this block as locally accepted, valid, signed over, and records either the self or group signed timestamp in the block info if it wasn't /// already set. - pub fn mark_locally_accepted(&mut self) -> Result<(), String> { + pub fn mark_locally_accepted(&mut self, group_signed: bool) -> Result<(), String> { self.valid = Some(true); self.signed_over = true; - self.signed_self.get_or_insert(get_epoch_time_secs()); + if group_signed { + self.signed_group.get_or_insert(get_epoch_time_secs()); + } else { + self.signed_self.get_or_insert(get_epoch_time_secs()); + } self.move_to(BlockState::LocallyAccepted) } - /// Mark this block as globally accepted, valid, signed over, and records a timestamp in the block info if it wasn't + /// Mark this block as valid, signed over, and records a group timestamp in the block info if it wasn't /// already set. pub fn mark_globally_accepted(&mut self) -> Result<(), String> { self.valid = Some(true); @@ -785,15 +789,20 @@ impl SignerDb { query_rows(&self.db, qry, args) } - /// Mark a block as having been broadcasted + /// Mark a block as having been broadcasted and therefore GloballyAccepted pub fn set_block_broadcasted( &self, reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, ts: u64, ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = ?1 WHERE reward_cycle = ?2 AND signer_signature_hash = ?3"; - let args = params![u64_to_sql(ts)?, u64_to_sql(reward_cycle)?, block_sighash]; + let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2) WHERE reward_cycle = ?3 AND signer_signature_hash = ?4"; + let args = params![ + u64_to_sql(ts)?, + BlockState::GloballyAccepted.to_string(), + u64_to_sql(reward_cycle)?, + block_sighash + ]; debug!("Marking block {} as broadcasted at {}", block_sighash, ts); self.db.execute(qry, args)?; @@ -1015,7 +1024,7 @@ mod tests { .is_none()); block_info - .mark_locally_accepted() + .mark_locally_accepted(false) .expect("Failed to mark block as locally accepted"); db.insert_block(&block_info).unwrap(); @@ -1175,12 +1184,32 @@ mod tests { ) .unwrap() .is_none()); + assert_eq!( + db.block_lookup( + block_info_1.reward_cycle, + &block_info_1.signer_signature_hash() + ) + .expect("Unable to get block from db") + .expect("Unable to get block from db") + .state, + BlockState::Unprocessed + ); db.set_block_broadcasted( block_info_1.reward_cycle, &block_info_1.signer_signature_hash(), 12345, ) .unwrap(); + assert_eq!( + db.block_lookup( + block_info_1.reward_cycle, + &block_info_1.signer_signature_hash() + ) + .expect("Unable to get block from db") + .expect("Unable to get block from db") + .state, + BlockState::GloballyAccepted + ); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index d8252a2c20..a13ab24a59 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -243,7 +243,7 @@ fn reorg_timing_testing( }; let mut header_clone = block_proposal_1.block.header.clone(); let mut block_info_1 = BlockInfo::from(block_proposal_1); - block_info_1.mark_locally_accepted().unwrap(); + block_info_1.mark_locally_accepted(false).unwrap(); signer_db.insert_block(&block_info_1).unwrap(); let sortition_time = SystemTime::UNIX_EPOCH diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 4da514d1d5..639ace66d2 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -55,6 +55,14 @@ pub static TEST_IGNORE_ALL_BLOCK_PROPOSALS: std::sync::Mutex< Option>, > = std::sync::Mutex::new(None); +#[cfg(any(test, feature = "testing"))] +/// Pause the block broadcast +pub static TEST_PAUSE_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); + +#[cfg(any(test, feature = "testing"))] +/// Skip broadcasting the block to the network +pub static TEST_SKIP_BLOCK_BROADCAST: std::sync::Mutex> = std::sync::Mutex::new(None); + /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -78,8 +86,6 @@ pub struct Signer { pub signer_db: SignerDb, /// Configuration for proposal evaluation pub proposal_config: ProposalEvalConfig, - /// Whether or not to broadcast signed blocks if we gather all signatures - pub broadcast_signed_blocks: bool, } impl std::fmt::Display for Signer { @@ -179,13 +185,23 @@ impl SignerTrait for Signer { ); } SignerMessage::BlockPushed(b) => { - let block_push_result = stacks_client.post_block(b); + // This will infinitely loop until the block is acknowledged by the node info!( "{self}: Got block pushed message"; "block_id" => %b.block_id(), "signer_sighash" => %b.header.signer_signature_hash(), - "push_result" => ?block_push_result, ); + loop { + match stacks_client.post_block(b) { + Ok(block_push_result) => { + debug!("{self}: Block pushed to stacks node: {block_push_result:?}"); + break; + } + Err(e) => { + warn!("{self}: Failed to push block to stacks node: {e}. Retrying..."); + } + }; + } } SignerMessage::MockProposal(mock_proposal) => { let epoch = match stacks_client.get_node_epoch() { @@ -306,7 +322,6 @@ impl From for Signer { reward_cycle: signer_config.reward_cycle, signer_db, proposal_config, - broadcast_signed_blocks: signer_config.broadcast_signed_blocks, } } } @@ -555,7 +570,7 @@ impl Signer { return None; } }; - if let Err(e) = block_info.mark_locally_accepted() { + if let Err(e) = block_info.mark_locally_accepted(false) { warn!("{self}: Failed to mark block as locally accepted: {e:?}",); return None; } @@ -876,10 +891,11 @@ impl Signer { warn!("{self}: No such block {block_hash}"); return; }; - // move block to globally accepted state. If this is not possible, we have a bug in our block handling logic. - if let Err(e) = block_info.mark_globally_accepted() { + // move block to LOCALLY accepted state. + // We only mark this GLOBALLY accepted if we manage to broadcast it... + if let Err(e) = block_info.mark_locally_accepted(true) { // Do not abort as we should still try to store the block signature threshold - warn!("{self}: Failed to mark block as globally accepted: {e:?}"); + warn!("{self}: Failed to mark block as locally accepted: {e:?}"); } let _ = self.signer_db.insert_block(&block_info).map_err(|e| { warn!( @@ -888,17 +904,24 @@ impl Signer { ); e }); - - if self.broadcast_signed_blocks { - self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); - } else { - debug!( - "{self}: Not broadcasting signed block {block_hash} since broadcast_signed_blocks is false"; - "stacks_block_id" => %block_info.block.block_id(), - "parent_block_id" => %block_info.block.header.parent_block_id, - "burnchain_consensus_hash" => %block_info.block.header.consensus_hash - ); + #[cfg(any(test, feature = "testing"))] + { + if *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Block broadcast is stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + while *TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block validation is no longer stalled due to testing directive."; + "block_id" => %block_info.block.block_id(), + "height" => block_info.block.header.chain_length, + ); + } } + self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); } fn broadcast_signed_block( @@ -918,11 +941,30 @@ impl Signer { block.header.signer_signature_hash(); block.header.signer_signature = signatures; + #[cfg(any(test, feature = "testing"))] + { + if *TEST_SKIP_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + warn!( + "{self}: Skipping block broadcast due to testing directive"; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + "consensus_hash" => %block.header.consensus_hash + ); + + if let Err(e) = self.signer_db.set_block_broadcasted( + self.reward_cycle, + &block_hash, + get_epoch_time_secs(), + ) { + warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); + } + return; + } + } debug!( "{self}: Broadcasting Stacks block {} to node", &block.block_id() ); - if let Err(e) = stacks_client.post_block(&block) { warn!( "{self}: Failed to post block {block_hash}: {e:?}"; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6a6a0867c9..4c83913fcc 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -51,7 +51,10 @@ use stacks_common::util::sleep_ms; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; -use stacks_signer::v0::signer::{TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_REJECT_ALL_BLOCK_PROPOSAL}; +use stacks_signer::v0::signer::{ + TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, + TEST_SKIP_BLOCK_BROADCAST, +}; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -923,7 +926,7 @@ fn forked_tenure_testing( config.first_proposal_burn_block_timing = proposal_limit; // don't allow signers to post signed blocks (limits the amount of fault injection we // need) - config.broadcast_signed_blocks = false; + TEST_SKIP_BLOCK_BROADCAST.lock().unwrap().replace(true); }, |_| {}, None, @@ -3042,12 +3045,12 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .lock() .unwrap() .replace(rejecting_signers.clone()); + test_observer::clear(); let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} to mine block N+1"); let start_time = Instant::now(); - let mut rejected_hash = None; let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); loop { @@ -3063,13 +3066,6 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let rejected_pubkey = rejection .recover_public_key() .expect("Failed to recover public key from rejection"); - if let Some(rejected_hash) = &rejected_hash { - if rejection.signer_signature_hash != *rejected_hash { - return None; - } - } else { - rejected_hash = Some(rejection.signer_signature_hash); - } if rejecting_signers.contains(&rejected_pubkey) && rejection.reason_code == RejectCode::TestingDirective { @@ -3237,13 +3233,13 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .lock() .unwrap() .replace(rejecting_signers.clone()); + test_observer::clear(); let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; info!("Submitted tx {tx} in to mine block N+1"); let start_time = Instant::now(); - let mut rejected_hash = None; let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client @@ -3269,13 +3265,6 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { let rejected_pubkey = rejection .recover_public_key() .expect("Failed to recover public key from rejection"); - if let Some(rejected_hash) = &rejected_hash { - if rejection.signer_signature_hash != *rejected_hash { - return None; - } - } else { - rejected_hash = Some(rejection.signer_signature_hash); - } if rejecting_signers.contains(&rejected_pubkey) && rejection.reason_code == RejectCode::TestingDirective { @@ -3405,7 +3394,8 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let short_timeout = Duration::from_secs(30); signer_test.boot_to_epoch_3(); - info!("------------------------- Test Mine Nakamoto Block N in Tenure B -------------------------"); + info!("------------------------- Starting Tenure A -------------------------"); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test @@ -3465,7 +3455,6 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); - let mut last_hash = None; loop { let ignored_signers = test_observer::get_stackerdb_chunks() .into_iter() @@ -3475,14 +3464,6 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .expect("Failed to deserialize SignerMessage"); match message { SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { - // Only care about the last proposed block - if let Some(h) = &last_hash { - if h != &hash { - return None; - } - } else { - last_hash = Some(hash); - } ignoring_signers .iter() .find(|key| key.verify(hash.bits(), &signature).is_ok()) @@ -3513,9 +3494,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { assert_ne!(block_n_1, block_n); assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); - info!( - "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" - ); + info!("------------------------- Starting Tenure B -------------------------"); let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and( @@ -3527,6 +3506,9 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }, ) .unwrap(); + info!( + "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" + ); TEST_IGNORE_ALL_BLOCK_PROPOSALS .lock() .unwrap() @@ -3578,3 +3560,289 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { ); assert_ne!(block_n_1_prime, block_n); } + +#[test] +#[ignore] +/// Test that when 70% of signers accept a block, mark it globally accepted, but a miner ends its tenure +/// before it receives these signatures, the miner can recover in the following tenure. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but >70% accept it. +/// The signers delay broadcasting the block and the miner ends its tenure before it receives these signatures. The +/// miner will propose an invalid block N+1' which all signers reject. The broadcast delay is removed and the miner +/// proposes a new block N+2 which all signers accept. +/// +/// Test Assertion: +/// Stacks tip advances to N+2 +fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 2; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = Duration::from_secs(30); + signer_test.boot_to_epoch_3(); + info!("------------------------- Starting Tenure A -------------------------"); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let start_time = Instant::now(); + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + sender_nonce += 1; + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); + // Propose a valid block, but force the miner to ignore the returned signatures and delay the block being + // broadcasted to the miner so it can end its tenure before block confirmation obtained + // Clear the stackerdb chunks + info!("Forcing miner to ignore block responses for block N+1"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + info!("Delaying signer block N+1 broadcasting to the miner"); + TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); + test_observer::clear(); + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to attempt to mine block N+1"); + let start_time = Instant::now(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let mut block = None; + loop { + if block.is_none() { + block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_before.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + } + if let Some(block) = &block { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(( + hash, + signature, + ))) => { + if block.header.signer_signature_hash() == hash { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + if signatures.len() == num_signers { + break; + } + } + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for signers signatures for first block proposal", + ); + sleep_ms(1000); + } + let block = block.unwrap(); + + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after, info_before); + // Ensure that the block was not yet broadcasted to the miner so the stacks tip has NOT advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_same = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_same, block_n); + assert_ne!(info_after.stacks_tip.to_string(), block_n_same.block_hash); + + info!("------------------------- Starting Tenure B -------------------------"); + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!( + "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" + ); + // Wait for the miner to propose a new invalid block N+1' + let start_time = Instant::now(); + let mut rejected_block = None; + while rejected_block.is_none() { + rejected_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash != block.header.consensus_hash { + assert!( + proposal.block.header.chain_length == block.header.chain_length + ); + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for N+1' block proposal", + ); + } + + info!("Allowing miner to accept block responses again. "); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + info!("Allowing singers to broadcast block N+1 to the miner"); + TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); + + // Assert the N+1' block was rejected + let rejected_block = rejected_block.unwrap(); + loop { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + let block_rejections = stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { + if rejection.signer_signature_hash + == rejected_block.header.signer_signature_hash() + { + Some(rejection) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + if block_rejections.len() == num_signers { + break; + } + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block proposal rejections", + ); + } + + info!("------------------------- Asserting a both N+1 and N+2 are accepted -------------------------"); + while mined_blocks.load(Ordering::SeqCst) <= blocks_before + 2 { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 2, + info_after.stacks_tip_height + ); + let nmb_signatures = signer_test + .stacks_client + .get_tenure_tip(&info_after.stacks_tip_consensus_hash) + .expect("Failed to get tip") + .as_stacks_nakamoto() + .expect("Not a Nakamoto block") + .signer_signature + .len(); + assert_eq!(nmb_signatures, num_signers); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_2 = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); + assert_ne!(block_n_2, block_n); +} From 55d2f32e06a0d8b7a7334ac2137c4c4317a3561a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 5 Sep 2024 22:20:25 -0400 Subject: [PATCH 0994/1400] fix: make a stackerdb shrink if its signer list becomes smaller than that in the DB --- stackslib/src/net/stackerdb/db.rs | 11 +++ stackslib/src/net/stackerdb/mod.rs | 8 ++- stackslib/src/net/stackerdb/tests/db.rs | 96 +++++++++++++++++++++++++ 3 files changed, 114 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 1dab3f4052..2b735668ac 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -293,6 +293,15 @@ impl<'a> StackerDBTx<'a> { Ok(()) } + /// Shrink a StackerDB. Remove all slots at and beyond a particular slot ID. + fn shrink_stackerdb(&self, stackerdb_id: i64, first_slot_id: u32) -> Result<(), net_error> { + let qry = "DELETE FROM chunks WHERE stackerdb_id = ?1 AND slot_id >= ?2"; + let args = params![&stackerdb_id, &first_slot_id]; + let mut stmt = self.sql_tx.prepare(&qry)?; + stmt.execute(args)?; + Ok(()) + } + /// Update a database's storage slots, e.g. from new configuration state in its smart contract. /// Chunk data for slots that no longer exist will be dropped. /// Newly-created slots will be instantiated with empty data. @@ -343,6 +352,8 @@ impl<'a> StackerDBTx<'a> { stmt.execute(args)?; } } + debug!("Shrink {} to {} slots", smart_contract, total_slots_read); + self.shrink_stackerdb(stackerdb_id, total_slots_read)?; Ok(()) } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index a2de124793..b022746d6a 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -341,8 +341,14 @@ impl StackerDBs { &e ); } - } else if new_config != stackerdb_config && new_config.signers.len() > 0 { + } else if (new_config != stackerdb_config && new_config.signers.len() > 0) + || (new_config == stackerdb_config + && new_config.signers.len() + != self.get_slot_versions(&stackerdb_contract_id)?.len()) + { // only reconfigure if the config has changed + // (that second check on the length is needed in case the node is a victim of + // #5142, which was a bug whereby a stackerdb could never shrink) if let Err(e) = self.reconfigure_stackerdb(&stackerdb_contract_id, &new_config) { warn!( "Failed to create or reconfigure StackerDB {stackerdb_contract_id}: DB error {:?}", diff --git a/stackslib/src/net/stackerdb/tests/db.rs b/stackslib/src/net/stackerdb/tests/db.rs index 7371b6b9c5..9bcf800529 100644 --- a/stackslib/src/net/stackerdb/tests/db.rs +++ b/stackslib/src/net/stackerdb/tests/db.rs @@ -20,6 +20,7 @@ use std::path::Path; use clarity::vm::types::QualifiedContractIdentifier; use clarity::vm::ContractName; use libstackerdb::SlotMetadata; +use rusqlite::params; use stacks_common::address::{ AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, }; @@ -649,6 +650,16 @@ fn test_reconfigure_stackerdb() { initial_metadata.push((slot_metadata, chunk_data)); } + tx.commit().unwrap(); + + let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap(); + assert_eq!(db_slot_metadata.len(), pks.len()); + for (i, slot_md) in db_slot_metadata.iter().enumerate() { + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata, *slot_md); + } + + let tx = db.tx_begin(StackerDBConfig::noop()).unwrap(); let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let reconfigured_pks = vec![ // first five slots are unchanged @@ -722,6 +733,91 @@ fn test_reconfigure_stackerdb() { assert_eq!(chunk.len(), 0); } } + + let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap(); + assert_eq!(db_slot_metadata.len(), reconfigured_pks.len()); + for (i, slot_md) in db_slot_metadata.iter().enumerate() { + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata, *slot_md); + } + + // reconfigure with fewer slots + let new_pks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); + let reconfigured_pks = vec![ + // first five slots are unchanged + pks[0], pks[1], pks[2], pks[3], pks[4], + // next five slots are different, so their contents will be dropped and versions and write + // timestamps reset + new_pks[0], new_pks[1], new_pks[2], new_pks[3], + new_pks[4], + // slots 10-15 will disappear + ]; + let reconfigured_addrs: Vec<_> = reconfigured_pks + .iter() + .map(|pk| { + StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_MAINNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&pk)], + ) + .unwrap() + }) + .collect(); + + let tx = db.tx_begin(StackerDBConfig::noop()).unwrap(); + + // reconfigure + tx.reconfigure_stackerdb( + &sc, + &reconfigured_addrs + .clone() + .into_iter() + .map(|addr| (addr, 1)) + .collect::>(), + ) + .unwrap(); + + tx.commit().unwrap(); + + for (i, pk) in new_pks.iter().enumerate() { + if i < 5 { + // first five are unchanged + let chunk_data = StackerDBChunkData { + slot_id: i as u32, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![i as u8; 128], + }; + + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + let chunk = db.get_latest_chunk(&sc, i as u32).unwrap().unwrap(); + + assert_eq!(initial_metadata[i].0, slot_metadata); + assert_eq!(initial_metadata[i].1.data, chunk); + } else if i < 10 { + // next five are wiped + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata.slot_id, i as u32); + assert_eq!(slot_metadata.slot_version, 0); + assert_eq!(slot_metadata.data_hash, Sha512Trunc256Sum([0x00; 32])); + assert_eq!(slot_metadata.signature, MessageSignature::empty()); + + let chunk = db.get_latest_chunk(&sc, i as u32).unwrap().unwrap(); + assert_eq!(chunk.len(), 0); + } else { + // final five are gone + let slot_metadata_opt = db.get_slot_metadata(&sc, i as u32).unwrap(); + assert!(slot_metadata_opt.is_none()); + } + } + + let db_slot_metadata = db.get_db_slot_metadata(&sc).unwrap(); + assert_eq!(db_slot_metadata.len(), reconfigured_pks.len()); + for (i, slot_md) in db_slot_metadata.iter().enumerate() { + let slot_metadata = db.get_slot_metadata(&sc, i as u32).unwrap().unwrap(); + assert_eq!(slot_metadata, *slot_md); + } } // TODO: max chunk size From bb40d1a141be23bd60e315e18d01e4b883449c8a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 6 Sep 2024 17:30:46 -0400 Subject: [PATCH 0995/1400] fix: get miner_recovers_when_broadcast_block_delay_across_tenures_occurs to pass --- testnet/stacks-node/src/tests/signer/v0.rs | 63 +++++++++++++++++++--- 1 file changed, 56 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4c83913fcc..1c1f4117ed 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3594,7 +3594,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let nmb_txs = 2; + let nmb_txs = 3; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new( num_signers, @@ -3603,6 +3603,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let short_timeout = Duration::from_secs(30); signer_test.boot_to_epoch_3(); + info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -3612,12 +3613,36 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .get_peer_info() .expect("Failed to get peer info"); let start_time = Instant::now(); + + // wait until we get a sortition. + // we might miss a block-commit at the start of epoch 3 + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + loop { + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + + sleep_ms(10_000); + + let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + if tip.sortition { + break; + } + } + // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); + + // a tenure has begun, so wait until we mine a block while mined_blocks.load(Ordering::SeqCst) <= blocks_before { assert!( start_time.elapsed() < short_timeout, @@ -3649,16 +3674,20 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Delaying signer block N+1 broadcasting to the miner"); TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); test_observer::clear(); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to attempt to mine block N+1"); - let start_time = Instant::now(); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); + + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + + let tx = submit_tx(&http_origin, &transfer_tx); + + info!("Submitted tx {tx} in to attempt to mine block N+1"); + let start_time = Instant::now(); let mut block = None; loop { if block.is_none() { @@ -3813,8 +3842,27 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { ); } + // Induce block N+2 to get mined + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to attempt to mine block N+2"); + info!("------------------------- Asserting a both N+1 and N+2 are accepted -------------------------"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before + 2 { + loop { + // N.B. have to use /v2/info because mined_blocks only increments if the miner's signing + // coordinator returns successfully (meaning, mined_blocks won't increment for block N+1) + let info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + if info_before.stacks_tip_height + 2 <= info.stacks_tip_height { + break; + } + assert!( start_time.elapsed() < short_timeout, "FAIL: Test timed out while waiting for block production", @@ -3826,6 +3874,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); + assert_eq!( info_before.stacks_tip_height + 2, info_after.stacks_tip_height From 17aa1c42c38f3da4c148f9eedd595766ad786433 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 7 Sep 2024 23:16:35 -0400 Subject: [PATCH 0996/1400] chore: a NetworkResult has data if it has an uploaded Nakamoto block --- stackslib/src/net/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index af391e03e8..2c9c9473b4 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1571,7 +1571,9 @@ impl NetworkResult { } pub fn has_nakamoto_blocks(&self) -> bool { - self.nakamoto_blocks.len() > 0 || self.pushed_nakamoto_blocks.len() > 0 + self.nakamoto_blocks.len() > 0 + || self.pushed_nakamoto_blocks.len() > 0 + || uploaded_nakamoto_blocks.len() > 0 } pub fn has_transactions(&self) -> bool { From 835e39bdee9d392564fcd8b81cd67a25fad64493 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 7 Sep 2024 23:38:38 -0400 Subject: [PATCH 0997/1400] fix: compiler error (forgot self.) --- stackslib/src/net/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 2c9c9473b4..7f8dea9329 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1573,7 +1573,7 @@ impl NetworkResult { pub fn has_nakamoto_blocks(&self) -> bool { self.nakamoto_blocks.len() > 0 || self.pushed_nakamoto_blocks.len() > 0 - || uploaded_nakamoto_blocks.len() > 0 + || self.uploaded_nakamoto_blocks.len() > 0 } pub fn has_transactions(&self) -> bool { From 37e27266f8fa8c81554077ea56a99b51a028bdae Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 9 Sep 2024 18:44:12 +0300 Subject: [PATCH 0998/1400] added test for fast blocks when epoch 3 is deployed --- .../src/tests/nakamoto_integrations.rs | 393 +++++++++++++++++- 1 file changed, 391 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 4839bee3be..c57b59dce6 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -100,8 +100,8 @@ use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ call_read_only, get_account, get_account_result, get_chain_info_result, get_neighbors, - get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, - wait_for_runloop, + get_pox_info, next_block_and_wait, next_block_and_wait_with_timeout, + run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, }; use crate::tests::{ get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, @@ -928,6 +928,158 @@ pub fn boot_to_epoch_3( info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); } +/// +/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order +/// for pox-4 to activate +pub fn boot_to_epoch_3_flash_blocks( + naka_conf: &Config, + blocks_processed: &Arc, + stacker_sks: &[StacksPrivateKey], + signer_sks: &[StacksPrivateKey], + self_signing: &mut Option<&mut TestSigners>, + btc_regtest_controller: &mut BitcoinRegtestController, +) { + assert_eq!(stacker_sks.len(), signer_sks.len()); + + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let current_height = btc_regtest_controller.get_headers_height(); + info!( + "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; + "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), + ); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + // first mined stacks block + next_block_and_wait(btc_regtest_controller, &blocks_processed); + + let start_time = Instant::now(); + loop { + if start_time.elapsed() > Duration::from_secs(20) { + panic!("Timed out waiting for the stacks height to increment") + } + let stacks_height = get_chain_info(&naka_conf).stacks_tip_height; + if stacks_height >= 1 { + break; + } + thread::sleep(Duration::from_millis(100)); + } + // stack enough to activate pox-4 + + let block_height = btc_regtest_controller.get_headers_height(); + let reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + + for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(&stacker_sk).bytes, + ); + let pox_addr_tuple: clarity::vm::Value = + pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &signer_sk, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + 12_u128, + u128::MAX, + 1, + ) + .unwrap() + .to_rsv(); + + let signer_pk = StacksPublicKey::from_private(signer_sk); + + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(block_height as u128), + clarity::vm::Value::UInt(12), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) + .unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + } + + // Update TestSigner with `signer_sks` if self-signing + if let Some(ref mut signers) = self_signing { + signers.signer_keys = signer_sks.to_vec(); + } + + let prepare_phase_start = btc_regtest_controller + .get_burnchain() + .pox_constants + .prepare_phase_start( + btc_regtest_controller.get_burnchain().first_block_height, + reward_cycle, + ); + + // Run until the prepare phase + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + prepare_phase_start, + &naka_conf, + ); + + // We need to vote on the aggregate public key if this test is self signing + if let Some(signers) = self_signing { + // Get the aggregate key + let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); + let aggregate_public_key = + clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) + .expect("Failed to serialize aggregate public key"); + let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); + let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); + // Vote on the aggregate public key + for signer_sk in signer_sks_unique.values() { + let signer_index = + get_signer_index(&signer_set, &Secp256k1PublicKey::from_private(signer_sk)) + .unwrap(); + let voting_tx = tests::make_contract_call( + signer_sk, + 0, + 300, + &StacksAddress::burn_address(false), + SIGNERS_VOTING_NAME, + SIGNERS_VOTING_FUNCTION_NAME, + &[ + clarity::vm::Value::UInt(u128::try_from(signer_index).unwrap()), + aggregate_public_key.clone(), + clarity::vm::Value::UInt(0), + clarity::vm::Value::UInt(reward_cycle as u128 + 1), + ], + ); + submit_tx(&http_origin, &voting_tx); + } + } + + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + epoch_3.start_height - 2, + &naka_conf, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); +} + fn get_signer_index( stacker_set: &GetStackersResponse, signer_key: &Secp256k1PublicKey, @@ -1519,6 +1671,243 @@ fn simple_neon_integration() { run_loop_thread.join().unwrap(); } +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, +/// having flash blocks when epoch updates and expects everything to work normally, +/// then switches to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// This test makes three assertions: +/// * 30 blocks are mined after 3.0 starts. This is enough to mine across 2 reward cycles +/// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 +/// * The final chain tip is a nakamoto block +fn simple_neon_integration_with_flash_blocks_on_epoch_3() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 100; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + send_amt * 2 + send_fee, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3_flash_blocks( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + // mine 3 blocks which should be the ones for setting up the miner + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 10); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 10); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 10); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + // query for prometheus metrics + #[cfg(feature = "monitoring_prom")] + { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); + assert!(res.contains(&expected_result)); + } + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // Mine 15 nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + } + + // Submit a TX + let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + let mut mempool = naka_conf + .connect_mempool_db() + .expect("Database failure opening mempool"); + + mempool + .submit_raw( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + transfer_tx.clone(), + &ExecutionCost::max_value(), + &StacksEpochId::Epoch30, + ) + .unwrap(); + + // Mine 15 more nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + // assert that the transfer tx was observed + let transfer_tx_included = test_observer::get_blocks() + .into_iter() + .find(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + .is_some() + }) + .is_some(); + + assert!( + transfer_tx_included, + "Nakamoto node failed to include the transfer tx" + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); + + // Check that we aren't missing burn blocks + let bhh = u64::from(tip.burn_header_height); + test_observer::contains_burn_block_range(220..=bhh).unwrap(); + + // make sure prometheus returns an updated height + #[cfg(feature = "monitoring_prom")] + { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); + assert!(res.contains(&expected_result)); + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + #[test] #[ignore] /// This test spins up a nakamoto-neon node. From 95196236927a7182fe7159ce32a3d072792029e4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:40:51 -0400 Subject: [PATCH 0999/1400] chore: react early to /shutdown --- libsigner/src/events.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 6dbc10110a..4fb6d7a507 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -314,6 +314,9 @@ impl EventReceiver for SignerEventReceiver { process_proposal_response(request) } else if request.url() == "/new_burn_block" { process_new_burn_block_event(request) + } else if request.url() == "/shutdown" { + event_receiver.stop_signal.store(true, Ordering::SeqCst); + return Err(EventError::Terminated); } else { let url = request.url().to_string(); // `/new_block` is expected, but not specifically handled. do not log. From 6788f487f11eeace4e1ecd86e7ea3af008e48e83 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:41:05 -0400 Subject: [PATCH 1000/1400] chore: move doc warnings to top of docstring --- stackslib/src/chainstate/nakamoto/mod.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 78baa2578b..cb515a860c 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2558,8 +2558,10 @@ impl NakamotoChainState { Ok(result.is_some()) } + /// DO NOT CALL IN CONSENSUS CODE, such as during Stacks block processing + /// (including during Clarity VM evaluation). This function returns the latest data + /// known to the node, which may not have been at the time of original block assembly. /// Load the canonical Stacks block header (either epoch-2 rules or Nakamoto) - /// DO NOT CALL during Stacks block processing (including during Clarity VM evaluation). This function returns the latest data known to the node, which may not have been at the time of original block assembly. pub fn get_canonical_block_header( chainstate_conn: &Connection, sortdb: &SortitionDB, @@ -2631,14 +2633,14 @@ impl NakamotoChainState { Self::get_block_header_nakamoto(chainstate_conn.sqlite(), &block_id) } + /// DO NOT USE IN CONSENSUS CODE. Different nodes can have different blocks for the same + /// tenure. + /// /// Get the highest block in a given tenure (identified by its consensus hash). /// Ties will be broken by timestamp. /// /// Used to verify that a signer-submitted block proposal builds atop the highest known block /// in the given tenure, regardless of which fork it's on. - /// - /// DO NOT USE IN CONSENSUS CODE. Different nodes can have different blocks for the same - /// tenure. pub fn get_highest_known_block_header_in_tenure( db: &Connection, consensus_hash: &ConsensusHash, @@ -4258,10 +4260,10 @@ impl NakamotoChainState { Ok(Some(slot_id_range)) } + /// DO NOT USE IN MAINNET /// Boot code instantiation for the aggregate public key. /// TODO: This should be removed once it's possible for stackers to vote on the aggregate /// public key - /// DO NOT USE IN MAINNET pub fn aggregate_public_key_bootcode(clarity_tx: &mut ClarityTx, apk: &Point) { let agg_pub_key = to_hex(&apk.compress().data); let contract_content = format!( From f98422c44e58135a0c99c9d7be99fc77edd5eb5c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:41:17 -0400 Subject: [PATCH 1001/1400] chore: copyright refinement --- stackslib/src/net/api/gettenuretip.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stackslib/src/net/api/gettenuretip.rs b/stackslib/src/net/api/gettenuretip.rs index 188fe0dc51..5bed2a6cc2 100644 --- a/stackslib/src/net/api/gettenuretip.rs +++ b/stackslib/src/net/api/gettenuretip.rs @@ -1,5 +1,4 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// Copyright (C) 2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by From 1820319c480363eba4480b4d537868334c53cee9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:41:32 -0400 Subject: [PATCH 1002/1400] feat: plumb through should_keep_running so the signcoordinator will exit on ctrl+c --- .../stacks-node/src/nakamoto_node/miner.rs | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ba32122b6d..3fa5ff9333 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -544,14 +544,17 @@ impl BlockMinerThread { return Ok((reward_set, Vec::new())); } - let mut coordinator = - SignCoordinator::new(&reward_set, miner_privkey_as_scalar, &self.config).map_err( - |e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to initialize the signing coordinator. Cannot mine! {e:?}" - )) - }, - )?; + let mut coordinator = SignCoordinator::new( + &reward_set, + miner_privkey_as_scalar, + &self.config, + self.globals.should_keep_running.clone(), + ) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to initialize the signing coordinator. Cannot mine! {e:?}" + )) + })?; let mut chain_state = neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { From e5af00151347ce59a5726ee2aaad31549e0d191f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:41:53 -0400 Subject: [PATCH 1003/1400] feat: react to globals.should_keep_running becoming true, so the node will cleanly shut down if it's in the middle of mining a block --- .../src/nakamoto_node/sign_coordinator.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index beece7f99e..5e7fe24e0e 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -14,7 +14,9 @@ // along with this program. If not, see . use std::collections::BTreeMap; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Receiver; +use std::sync::Arc; use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; @@ -77,6 +79,7 @@ pub struct SignCoordinator { weight_threshold: u32, total_weight: u32, config: Config, + keep_running: Arc, pub next_signer_bitvec: BitVec<4000>, } @@ -209,6 +212,7 @@ impl SignCoordinator { reward_set: &RewardSet, message_key: Scalar, config: &Config, + keep_running: Arc, ) -> Result { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { @@ -307,6 +311,7 @@ impl SignCoordinator { weight_threshold: threshold, total_weight, config: config.clone(), + keep_running, }; return Ok(sign_coordinator); } @@ -329,6 +334,7 @@ impl SignCoordinator { weight_threshold: threshold, total_weight, config: config.clone(), + keep_running, }) } @@ -795,6 +801,12 @@ impl SignCoordinator { } }; + // was the node asked to stop? + if !self.keep_running.load(Ordering::SeqCst) { + info!("SignerCoordinator: received node exit request. Aborting"); + return Err(NakamotoNodeError::ChannelClosed); + } + // check to see if this event we got is a signer event let is_signer_event = event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); From 648aa5bd9bbd71d90161a3a1c8c3d8a2f6fbf5d1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:42:18 -0400 Subject: [PATCH 1004/1400] feat: integration test for retrying a block if signers reject it --- testnet/stacks-node/src/tests/signer/v0.rs | 137 +++++++++++++++++++++ 1 file changed, 137 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1c1f4117ed..89d7781192 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2059,6 +2059,143 @@ fn end_of_tenure() { signer_test.shutdown(); } +#[test] +#[ignore] +/// This test checks that the miner will retry when enough signers reject the block. +fn retry_on_rejection() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let short_timeout = Duration::from_secs(30); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), (send_amt + send_fee) * 3)], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + + // wait until we get a sortition. + // we might miss a block-commit at the start of epoch 3 + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + loop { + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + + sleep_ms(10_000); + + let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + if tip.sortition { + break; + } + } + + // mine a nakamoto block + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let start_time = Instant::now(); + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine the first Nakamoto block"); + + // a tenure has begun, so wait until we mine a block + while mined_blocks.load(Ordering::SeqCst) <= blocks_before { + assert!( + start_time.elapsed() < short_timeout, + "FAIL: Test timed out while waiting for block production", + ); + thread::sleep(Duration::from_secs(1)); + } + + // make all signers reject the block + let rejecting_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .take(num_signers) + .collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(rejecting_signers.clone()); + + let proposals_before = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + // submit a tx so that the miner will mine a block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal"); + loop { + let blocks_proposed = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + if blocks_proposed > proposals_before { + break; + } + std::thread::sleep(Duration::from_millis(100)); + } + + info!("Block proposed, verifying that it is not processed"); + // Wait 10 seconds to be sure that the timeout has occurred + std::thread::sleep(Duration::from_secs(10)); + assert_eq!( + signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst), + blocks_before + ); + + // resume signing + info!("Disable unconditional rejection and wait for the block to be processed"); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(vec![]); + loop { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + if blocks_mined > blocks_before { + break; + } + std::thread::sleep(Duration::from_millis(100)); + } + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks that the signers will broadcast a block once they receive enough signatures. From d2bc6a1a8cd0f01d9d3fc34e86b25e907208a14a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 9 Sep 2024 17:42:52 -0400 Subject: [PATCH 1005/1400] chore: add retry_on_rejection integration test --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index e38c862552..6c3aca0e14 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -100,6 +100,7 @@ jobs: - tests::signer::v0::signers_broadcast_signed_blocks - tests::signer::v0::min_gap_between_blocks - tests::signer::v0::duplicate_signers + - tests::signer::v0::retry_on_rejection - tests::signer::v0::locally_accepted_blocks_overriden_by_global_rejection - tests::signer::v0::locally_rejected_blocks_overriden_by_global_acceptance - tests::signer::v0::reorg_locally_accepted_blocks_across_tenures_succeeds From 46dd56879c047cf6f799a8870ab6f8d1a5ca0dec Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 9 Sep 2024 14:50:59 -0700 Subject: [PATCH 1006/1400] update hardcoded v2 references to v3 --- docs/rpc-endpoints.md | 2 +- docs/rpc/openapi.yaml | 4 ++-- stackslib/src/net/api/getstackers.rs | 6 +++--- stackslib/src/net/api/postblock_proposal.rs | 6 +++--- stackslib/src/net/api/poststackerdbchunk.rs | 2 +- stackslib/src/net/api/tests/postblock_proposal.rs | 8 ++++---- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 6 +++--- testnet/stacks-node/src/tests/neon_integrations.rs | 2 +- 8 files changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index 6163f27b75..eea916a781 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -427,7 +427,7 @@ Determine whether a given trait is implemented within the specified contract (ei See OpenAPI [spec](./rpc/openapi.yaml) for details. -### POST /v2/block_proposal +### POST /v3/block_proposal Used by miner to validate a proposed Stacks block using JSON encoding. diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index f33e0dca73..d12e800c32 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -568,7 +568,7 @@ paths: description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest known tip (includes unconfirmed state). - /v2/block_proposal: + /v3/block_proposal: post: summary: Validate a proposed Stacks block tags: @@ -600,7 +600,7 @@ paths: example: $ref: ./api/core-node/post-block-proposal-req.example.json - /v2/stacker_set/{cycle_number}: + /v3/stacker_set/{cycle_number}: get: summary: Fetch the stacker and signer set information for a given cycle. tags: diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 4546b66fc9..c6291096d8 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -96,11 +96,11 @@ impl HttpRequest for GetStackersRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v2/stacker_set/(?P[0-9]{1,20})$"#).unwrap() + Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,20})$"#).unwrap() } fn metrics_identifier(&self) -> &str { - "/v2/stacker_set/:cycle_num" + "/v3/stacker_set/:cycle_num" } /// Try to decode this request. @@ -211,7 +211,7 @@ impl StacksHttpRequest { StacksHttpRequest::new_for_peer( host, "GET".into(), - format!("/v2/stacker_set/{cycle_num}"), + format!("/v3/stacker_set/{cycle_num}"), HttpRequestContents::new().for_tip(tip_req), ) .expect("FATAL: failed to construct request from infallible data") diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 6c1d5526b5..043c316565 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -163,7 +163,7 @@ impl From> for BlockValidateRespons } } -/// Represents a block proposed to the `v2/block_proposal` endpoint for validation +/// Represents a block proposed to the `v3/block_proposal` endpoint for validation #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct NakamotoBlockProposal { /// Proposed block @@ -431,11 +431,11 @@ impl HttpRequest for RPCBlockProposalRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v2/block_proposal$"#).unwrap() + Regex::new(r#"^/v3/block_proposal$"#).unwrap() } fn metrics_identifier(&self) -> &str { - "/v2/block_proposal" + "/v3/block_proposal" } /// Try to decode this request. diff --git a/stackslib/src/net/api/poststackerdbchunk.rs b/stackslib/src/net/api/poststackerdbchunk.rs index b3c9420602..388f35ccee 100644 --- a/stackslib/src/net/api/poststackerdbchunk.rs +++ b/stackslib/src/net/api/poststackerdbchunk.rs @@ -83,7 +83,7 @@ impl HttpRequest for RPCPostStackerDBChunkRequestHandler { } fn metrics_identifier(&self) -> &str { - "/v2/block_proposal/:principal/:contract_name/chunks" + "/v3/block_proposal/:principal/:contract_name/chunks" } /// Try to decode this request. diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 6ab465a683..391afc949f 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -69,7 +69,7 @@ fn test_try_parse_request() { let mut request = StacksHttpRequest::new_for_peer( addr.into(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); @@ -320,7 +320,7 @@ fn test_try_make_response() { let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); @@ -340,7 +340,7 @@ fn test_try_make_response() { let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); @@ -360,7 +360,7 @@ fn test_try_make_response() { let mut request = StacksHttpRequest::new_for_peer( rpc_test.peer_1.to_peer_host(), "POST".into(), - "/v2/block_proposal".into(), + "/v3/block_proposal".into(), HttpRequestContents::new().payload_json(serde_json::to_value(proposal).unwrap()), ) .expect("failed to construct request"); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 13de8a350c..56e8b513bc 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -231,7 +231,7 @@ impl TestSigningChannel { pub fn get_stacker_set(http_origin: &str, cycle: u64) -> GetStackersResponse { let client = reqwest::blocking::Client::new(); - let path = format!("{http_origin}/v2/stacker_set/{cycle}"); + let path = format!("{http_origin}/v3/stacker_set/{cycle}"); let res = client .get(&path) .send() @@ -2234,7 +2234,7 @@ fn correct_burn_outs() { run_loop_thread.join().unwrap(); } -/// Test `/v2/block_proposal` API endpoint +/// Test `/v3/block_proposal` API endpoint /// /// This endpoint allows miners to propose Nakamoto blocks to a node, /// and test if they would be accepted or rejected @@ -2471,7 +2471,7 @@ fn block_proposal_api_endpoint() { .expect("Failed to build `reqwest::Client`"); // Build URL let http_origin = format!("http://{}", &conf.node.rpc_bind); - let path = format!("{http_origin}/v2/block_proposal"); + let path = format!("{http_origin}/v3/block_proposal"); let mut hold_proposal_mutex = Some(test_observer::PROPOSAL_RESPONSES.lock().unwrap()); for (ix, (test_description, block_proposal, expected_http_code, _)) in diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index ac6a3ea978..f4340df347 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1394,7 +1394,7 @@ pub fn get_contract_src( pub fn get_stacker_set(http_origin: &str, reward_cycle: u64) -> GetStackersResponse { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/stacker_set/{}", http_origin, reward_cycle); + let path = format!("{}/v3/stacker_set/{}", http_origin, reward_cycle); let res = client.get(&path).send().unwrap(); info!("Got stacker_set response {:?}", &res); From 109d3cb5986ad065825082012932872463899f0d Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:30:54 -0700 Subject: [PATCH 1007/1400] fix: unused imports Added a cfg to warn about unused imports in `net/mod.rs`, which exposed a few unused imports that I've removed. I've also updated other parts of the code that had imports that were only used for testing. In those cases, I've updated the import to only be used in `cfg(test)`. --- contrib/tools/relay-server/src/http.rs | 2 ++ stacks-common/Cargo.toml | 1 + stackslib/src/net/connection.rs | 2 -- stackslib/src/net/mod.rs | 10 ++++++---- .../src/burnchains/bitcoin_regtest_controller.rs | 7 ++++--- .../stacks-node/src/nakamoto_node/sign_coordinator.rs | 7 ++++++- 6 files changed, 19 insertions(+), 10 deletions(-) diff --git a/contrib/tools/relay-server/src/http.rs b/contrib/tools/relay-server/src/http.rs index c84f833bee..f7bd1e6f89 100644 --- a/contrib/tools/relay-server/src/http.rs +++ b/contrib/tools/relay-server/src/http.rs @@ -8,7 +8,9 @@ use crate::to_io_result::ToIoResult; pub struct Request { pub method: String, pub url: String, + #[allow(dead_code)] pub protocol: String, + #[allow(dead_code)] pub headers: HashMap, pub content: Vec, } diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index d5bfeb44e9..75692d83c6 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -78,6 +78,7 @@ testing = ["canonical"] serde = [] bech32_std = [] bech32_strict = [] +strason = [] [target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 78c15e0833..878be15d60 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -25,8 +25,6 @@ use std::{io, net}; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::BOUND_VALUE_SERIALIZATION_HEX; -use mio; -use mio::net as mio_net; use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; use stacks_common::types::net::PeerAddress; use stacks_common::util::hash::to_hex; diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 3ba4292f1c..865fcd89ba 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -14,8 +14,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::borrow::Borrow; -use std::collections::{HashMap, HashSet}; +#[warn(unused_imports)] +use std::collections::HashMap; +#[cfg(any(test, feature = "testing"))] +use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::io::prelude::*; use std::io::{Read, Write}; @@ -110,7 +112,7 @@ pub mod atlas; /// Other functionality includes (but is not limited to): /// * set up & tear down of sessions /// * dealing with and responding to invalid messages -/// * rate limiting messages +/// * rate limiting messages pub mod chat; /// Implements serialization and deserialization for `StacksMessage` types. /// Also has functionality to sign, verify, and ensure well-formedness of messages. @@ -118,7 +120,7 @@ pub mod codec; pub mod connection; pub mod db; /// Implements `DNSResolver`, a simple DNS resolver state machine. Also implements `DNSClient`, -/// which serves as an API for `DNSResolver`. +/// which serves as an API for `DNSResolver`. pub mod dns; pub mod download; pub mod http; diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 145e73a389..12210d230e 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -74,10 +74,11 @@ use url::Url; use super::super::operations::BurnchainOpSigner; use super::super::Config; use super::{BurnchainController, BurnchainTip, Error as BurnchainControllerError}; +use crate::config::BurnchainConfig; +#[cfg(test)] use crate::config::{ - BurnchainConfig, OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, - OP_TX_PRE_STACKS_ESTIM_SIZE, OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, - OP_TX_VOTE_AGG_ESTIM_SIZE, + OP_TX_ANY_ESTIM_SIZE, OP_TX_DELEGATE_STACKS_ESTIM_SIZE, OP_TX_PRE_STACKS_ESTIM_SIZE, + OP_TX_STACK_STX_ESTIM_SIZE, OP_TX_TRANSFER_STACKS_ESTIM_SIZE, OP_TX_VOTE_AGG_ESTIM_SIZE, }; /// The number of bitcoin blocks that can have diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 6810afbb6b..af73ae26db 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -28,7 +28,9 @@ use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoC use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::events::StackerDBChunksEvent; -use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; +use stacks::chainstate::stacks::Error as ChainstateError; +#[cfg(any(test, feature = "testing"))] +use stacks::chainstate::stacks::ThresholdSignature; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::stackerdb::StackerDBs; use stacks::types::PublicKey; @@ -39,6 +41,7 @@ use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; use wsts::common::PolyCommitment; +#[cfg(any(test, feature = "testing"))] use wsts::curve::ecdsa; use wsts::curve::point::Point; use wsts::curve::scalar::Scalar; @@ -70,6 +73,7 @@ pub struct SignCoordinator { coordinator: FireCoordinator, receiver: Option>, message_key: Scalar, + #[cfg(any(test, feature = "testing"))] wsts_public_keys: PublicKeys, is_mainnet: bool, miners_session: StackerDBSession, @@ -321,6 +325,7 @@ impl SignCoordinator { coordinator, message_key, receiver: Some(receiver), + #[cfg(any(test, feature = "testing"))] wsts_public_keys, is_mainnet, miners_session, From 2dce84a9f3df144a3659c43e14a1bb9e9c99a735 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:33:48 -0700 Subject: [PATCH 1008/1400] fix: only update `schema_version` if lower Previously, because `apply_schema_8` is called after things like `apply_schema_9`, this would override the schema version to be 8. Also removed some trailing whitespace. --- stackslib/src/chainstate/burn/db/sortdb.rs | 29 ++++++++++++++-------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 3cf13a8a55..90cf60ace1 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -574,7 +574,7 @@ const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ block_height INTEGER NOT NULL, burn_header_hash TEXT NOT NULL, sortition_id TEXT NOT NULL, - + consensus_hash TEXT NOT NULL, public_key TEXT NOT NULL, memo TEXT, @@ -619,7 +619,7 @@ const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ stacked_ustx TEXT NOT NULL, num_cycles INTEGER NOT NULL, - -- The primary key here is (txid, burn_header_hash) because + -- The primary key here is (txid, burn_header_hash) because -- this transaction will be accepted regardless of which sortition -- history it is in. PRIMARY KEY(txid,burn_header_hash) @@ -636,7 +636,7 @@ const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ transfered_ustx TEXT NOT NULL, memo TEXT NOT NULL, - -- The primary key here is (txid, burn_header_hash) because + -- The primary key here is (txid, burn_header_hash) because -- this transaction will be accepted regardless of which sortition -- history it is in. PRIMARY KEY(txid,burn_header_hash) @@ -2261,7 +2261,7 @@ impl<'a> SortitionHandleConn<'a> { /// Get a block commit by txid. In the event of a burnchain fork, this may not be unique. /// this function simply returns one of those block commits: only use data that is - /// immutable across burnchain/pox forks, e.g., parent block ptr, + /// immutable across burnchain/pox forks, e.g., parent block ptr, pub fn get_block_commit_by_txid( &self, sort_id: &SortitionId, @@ -3352,6 +3352,11 @@ impl SortitionDB { ) -> Result<(), db_error> { let canonical_tip = SortitionDB::get_canonical_burn_chain_tip(self.conn())?; + let schema_version = SortitionDB::get_schema_version(self.conn())? + .unwrap_or("0".to_string()) + .parse::() + .unwrap_or(0); + // port over `stacks_chain_tips` table info!("Instantiating `stacks_chain_tips` table..."); self.apply_schema_8_stacks_chain_tips(&canonical_tip)?; @@ -3365,12 +3370,14 @@ impl SortitionDB { info!("No migrator implementation given; `preprocessed_reward_sets` will not be prepopulated"); } - let tx = self.tx_begin()?; - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["8"], - )?; - tx.commit()?; + if schema_version < 8 { + let tx = self.tx_begin()?; + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["8"], + )?; + tx.commit()?; + } Ok(()) } @@ -5017,7 +5024,7 @@ impl SortitionDB { conn: &Connection, sortition: &SortitionId, ) -> Result, db_error> { - let qry = "SELECT vtxindex FROM block_commits WHERE sortition_id = ?1 + let qry = "SELECT vtxindex FROM block_commits WHERE sortition_id = ?1 AND txid = ( SELECT winning_block_txid FROM snapshots WHERE sortition_id = ?2 LIMIT 1) LIMIT 1"; let args = params![sortition, sortition]; From 20e0a042fc2cc9d92a9da7aa2aea7555533b713b Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:35:11 -0700 Subject: [PATCH 1009/1400] fix: update log when validating pox treatment I've updated the log line to be a `warn`. More investigation needed on whether we should add explicit validation here to the length of the BitVec. --- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 28ba89d59d..ee6ac12aee 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -3573,7 +3573,7 @@ impl NakamotoChainState { .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?; let bitvec_value = block_bitvec.get(ix) .unwrap_or_else(|| { - info!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1"); + warn!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1"); true }); Ok(bitvec_value) From 32cd617f6abeffd621ff0f5e53590b62608de95d Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:37:45 -0700 Subject: [PATCH 1010/1400] fix: use consistent error handling when writing to SignerDB Previously, there were some cases where we'd `panic`, and some where we'd just handle the error with a log. This updates the logic to always panic. --- stacks-signer/src/v0/signer.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 53a288b7f5..828996c560 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -194,12 +194,13 @@ impl SignerTrait for Signer { self.signer_db .insert_burn_block(burn_header_hash, *burn_height, received_time) { - warn!( + error!( "Failed to write burn block event to signerdb"; "err" => ?e, "burn_header_hash" => %burn_header_hash, "burn_height" => burn_height ); + panic!("{self} Failed to write burn block event to signerdb: {e}"); } *sortition_state = None; } @@ -679,13 +680,13 @@ impl Signer { // record time at which we reached the threshold block_info.signed_group = Some(get_epoch_time_secs()); - let _ = self.signer_db.insert_block(&block_info).map_err(|e| { - warn!( + if let Err(e) = self.signer_db.insert_block(&block_info) { + error!( "Failed to set group threshold signature timestamp for {}: {:?}", block_hash, &e ); - e - }); + panic!("{self} Failed to write block to signerdb: {e}"); + }; // collect signatures for the block let signatures: Vec<_> = self From 84c9ed4c69f98e201e32694cb7f493ecd1d6e742 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:39:04 -0700 Subject: [PATCH 1011/1400] fix: typo in `NAKAMOTO_TENURES_SCHEMA_1` --- stackslib/src/chainstate/nakamoto/tenure.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 81380cc93d..059da96b7a 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -132,7 +132,7 @@ pub static NAKAMOTO_TENURES_SCHEMA_1: &'static str = r#" burn_view_consensus_hash TEXT NOT NULL, -- whether or not this tenure was triggered by a sortition (as opposed to a tenure-extension). -- this is equal to the `cause` field in a TenureChange - cause INETGER NOT NULL, + cause INTEGER NOT NULL, -- block hash of start-tenure block block_hash TEXT NOT NULL, -- block ID of this start block (this is the StacksBlockId of the above tenure_id_consensus_hash and block_hash) From 48195afa44a39870d9b9f65f793b8f815f3f5894 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:39:58 -0700 Subject: [PATCH 1012/1400] Fix: allow block timestamps exactly 15 seconds in the future Previously, we'd only allow timestamps _less than_ 15 seconds away, but our docs state that the timestamp can be _no more than_ 15 seconds away. --- stackslib/src/net/api/postblock_proposal.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 6112ea0fae..19b556604f 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -283,7 +283,7 @@ impl NakamotoBlockProposal { }); } } - if self.block.header.timestamp > get_epoch_time_secs() + 15 { + if self.block.header.timestamp >= get_epoch_time_secs() + 15 { warn!( "Rejected block proposal"; "reason" => "Block timestamp is too far into the future", From 944184900d5fda18478618830e7e5258a0a47bae Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:41:02 -0700 Subject: [PATCH 1013/1400] fix: update processed_time when replacing block --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 931a00777b..1f49bccaf3 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -593,8 +593,15 @@ impl<'a> NakamotoStagingBlocksTx<'a> { signing_weight: u32, obtain_method: NakamotoBlockObtainMethod, ) -> Result<(), ChainstateError> { - self.execute("UPDATE nakamoto_staging_blocks SET data = ?1, signing_weight = ?2, obtain_method = ?3 WHERE consensus_hash = ?4 AND block_hash = ?5", - params![&block.serialize_to_vec(), &signing_weight, &obtain_method.to_string(), &block.header.consensus_hash, &block.header.block_hash()])?; + self.execute("UPDATE nakamoto_staging_blocks SET data = ?1, signing_weight = ?2, obtain_method = ?3, processed_time = ?4 WHERE consensus_hash = ?5 AND block_hash = ?6", + params![ + &block.serialize_to_vec(), + &signing_weight, + &obtain_method.to_string(), + u64_to_sql(get_epoch_time_secs())?, + &block.header.consensus_hash, + &block.header.block_hash(), + ])?; Ok(()) } } From 61be12e4c38b899548b19a6c1e119c1babb46a77 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:42:32 -0700 Subject: [PATCH 1014/1400] fix: return correct highest schema version In `get_nakamoto_staging_blocks_db_version`, in the case of an error, the function would default to version 1, which could cause an overwrite of tables. This updates the function to return the correct highest version, which I've also moved to a constant. --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 1f49bccaf3..b83deebac0 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -88,7 +88,7 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[ -- block data, including its header data BLOB NOT NULL, - + PRIMARY KEY(block_hash,consensus_hash) );"#, r#"CREATE INDEX nakamoto_staging_blocks_by_index_block_hash ON nakamoto_staging_blocks(index_block_hash);"#, @@ -136,7 +136,7 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_2: &'static [&'static str] = &[ -- block data, including its header data BLOB NOT NULL, - + PRIMARY KEY(block_hash,consensus_hash) );"#, r#"CREATE INDEX nakamoto_staging_blocks_by_index_block_hash ON nakamoto_staging_blocks(index_block_hash);"#, @@ -149,6 +149,8 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_2: &'static [&'static str] = &[ r#"INSERT INTO db_version (version) VALUES (2)"#, ]; +pub const NAKAMOTO_STAGING_DB_SCHEMA_LATEST: u32 = 2; + pub struct NakamotoStagingBlocksConn(rusqlite::Connection); impl Deref for NakamotoStagingBlocksConn { @@ -527,7 +529,7 @@ impl<'a> NakamotoStagingBlocksTx<'a> { processed_time, obtain_method, signing_weight, - + data ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)", params![ @@ -671,7 +673,7 @@ impl StacksChainState { Ok(x) => x, Err(e) => { debug!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); - return Ok(1); + return Ok(NAKAMOTO_STAGING_DB_SCHEMA_LATEST); } }; From a0ad6864d50c46fbd0e3d65eb5798a1c7cbfd741 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:43:25 -0700 Subject: [PATCH 1015/1400] fix: return error if unable to announce stacks block In `net::relay`, there are many cases where we return an error if unable to announce a new Stacks block, but there was one case where the error was ignored. This updates that case to also return an error. --- stackslib/src/net/relay.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index d022148b3a..35627d9dd4 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -2049,9 +2049,11 @@ impl Relayer { } } if !http_uploaded_blocks.is_empty() { - coord_comms.inspect(|comm| { - comm.announce_new_stacks_block(); - }); + if let Some(comm) = coord_comms { + if !comm.announce_new_stacks_block() { + return Err(net_error::CoordinatorClosed); + } + }; } accepted_nakamoto_blocks_and_relayers.extend(pushed_blocks_and_relayers); From e882ec371e131fa5cd2c54e771dde9a2b74f4528 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:44:07 -0700 Subject: [PATCH 1016/1400] fix: log error when failing to push a block This adds more verbose and explicit logs when the signer is unable to post a block when handling a `BlockPushed` event. --- stacks-signer/src/v0/signer.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 828996c560..fd5bbab400 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -151,6 +151,13 @@ impl SignerTrait for Signer { } SignerMessage::BlockPushed(b) => { let block_push_result = stacks_client.post_block(b); + if let Err(ref e) = &block_push_result { + warn!( + "{self}: Failed to post block {} (id {}): {e:?}", + &b.header.signer_signature_hash(), + &b.block_id() + ); + }; info!( "{self}: Got block pushed message"; "block_id" => %b.block_id(), From ca22422c42a2182f166805390636e3e17e19cb3d Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:45:20 -0700 Subject: [PATCH 1017/1400] fix: typo in SORTITION_DB_SCHEMA_4 --- stackslib/src/chainstate/burn/db/sortdb.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 90cf60ace1..f545c53c8c 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -687,11 +687,11 @@ const SORTITION_DB_SCHEMA_4: &'static [&'static str] = &[ delegated_ustx TEXT NOT NULL, until_burn_height INTEGER, - PRIMARY KEY(txid,burn_header_Hash) + PRIMARY KEY(txid,burn_header_hash) );"#, r#" CREATE TABLE ast_rule_heights ( - ast_rule_id INTEGER PRIMAR KEY NOT NULL, + ast_rule_id INTEGER PRIMARY KEY NOT NULL, block_height INTEGER NOT NULL );"#, ]; From cdf4727c1bcae8c721972a2907a8f8e61d36ee77 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 9 Sep 2024 17:46:22 -0700 Subject: [PATCH 1018/1400] fix: more description warning log in NakamotoBlockBuilder Previously, two different error cases had the same log message. This updates one of them to have a more description message about when the error occurred. --- stackslib/src/chainstate/nakamoto/miner.rs | 4 ++-- testnet/stacks-node/src/nakamoto_node/miner.rs | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 66aa6cc1d9..70298db74c 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -108,7 +108,7 @@ impl NakamotoTenureInfo { } pub struct NakamotoBlockBuilder { - /// If there's a parent (i.e., not a genesis), this is Some(parent_header) + /// If there's a parent (i.e., not a genesis), this is Some(parent_header) parent_header: Option, /// Signed coinbase tx, if starting a new tenure coinbase_tx: Option, @@ -280,7 +280,7 @@ impl NakamotoBlockBuilder { &self.header.parent_block_id, ).map_err(|e| { warn!( - "Cannot process Nakamoto block: could not load reward set that elected the block"; + "Cannot process Nakamoto block: could not retrieve coinbase POX height of the elected block"; "err" => ?e, ); Error::NoSuchBlockError diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index cd811a9346..1669ee9484 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1250,7 +1250,11 @@ impl BlockMinerThread { &parent_block_id, ) .map_err(NakamotoNodeError::MiningFailure)?; - debug!("Miner: Extending tenure"; "burn_view_consensus_hash" => %burn_view_consensus_hash, "parent_block_id" => %parent_block_id, "num_blocks_so_far" => num_blocks_so_far); + debug!("Miner: Extending tenure"; + "burn_view_consensus_hash" => %burn_view_consensus_hash, + "parent_block_id" => %parent_block_id, + "num_blocks_so_far" => num_blocks_so_far, + ); payload = payload.extend( *burn_view_consensus_hash, parent_block_id, From a4166be18b21f113e99f19baae3a4fa9e5f9fc5f Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 10 Sep 2024 16:04:05 +0300 Subject: [PATCH 1019/1400] add test for no fee tx anchored directly and failure to submit it to mempool --- .../stacks/tests/block_construction.rs | 219 ++++++++++++++++++ 1 file changed, 219 insertions(+) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 4194207840..3699710535 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -1536,6 +1536,225 @@ fn test_build_anchored_blocks_skip_too_expensive() { } } +#[test] +fn test_build_anchored_blocks_mempool_fee_transaction_too_low() { + let privk = StacksPrivateKey::from_hex( + "42faca653724860da7a41bfcef7e6ba78db55146f6900de8cb2a9f760ffac70c01", + ) + .unwrap(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk)], + ) + .unwrap(); + + let mut peer_config = TestPeerConfig::new(function_name!(), 2032, 2033); + peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.burnchain.clone(); + + let mut peer = TestPeer::new(peer_config); + + let chainstate_path = peer.chainstate_path.clone(); + + let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; + let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + + let (burn_ops, stacks_block, microblocks) = peer.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let coinbase_tx = make_coinbase(miner, 0); + + // Create a zero-fee transaction + let zero_fee_tx = make_user_stacks_transfer( + &privk, + 0, + 0, // Set fee to 0 + &recipient.to_account_principal(), + 1000, + ); + + let result = mempool.submit( + chainstate, + sortdb, + &parent_consensus_hash, + &parent_header_hash, + &zero_fee_tx, + None, + &ExecutionCost::max_value(), + &StacksEpochId::Epoch20, + ); + + match result { + Ok(_) => panic!("Expected FeeTooLow error but transaction was accepted"), + Err(e) => match e { + MemPoolRejection::FeeTooLow(actual, required) => { + assert_eq!(actual, 0); + assert_eq!(required, 180); + } + _ => panic!("Unexpected error: {:?}", e), + }, + }; + + let anchored_block = StacksBlockBuilder::build_anchored_block( + chainstate, + &sortdb.index_handle_at_tip(), + &mut mempool, + &parent_tip, + tip.total_burn, + vrf_proof, + Hash160([0 as u8; 20]), + &coinbase_tx, + BlockBuilderSettings::max_value(), + None, + &burnchain, + ) + .unwrap(); + + (anchored_block.0, vec![]) + }, + ); + + peer.next_burnchain_block(burn_ops.clone()); + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + + // Check that the block contains only coinbase transactions (coinbase) + assert_eq!(stacks_block.txs.len(), 1); +} + +#[test] +fn test_build_anchored_blocks_zero_fee_transaction() { + let privk = StacksPrivateKey::from_hex( + "42faca653724860da7a41bfcef7e6ba78db55146f6900de8cb2a9f760ffac70c01", + ) + .unwrap(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk)], + ) + .unwrap(); + + let mut peer_config = TestPeerConfig::new(function_name!(), 2032, 2033); + peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; + let burnchain = peer_config.burnchain.clone(); + + let mut peer = TestPeer::new(peer_config); + + let chainstate_path = peer.chainstate_path.clone(); + + let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; + let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); + + let tip = + SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); + + let (burn_ops, stacks_block, microblocks) = peer.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let coinbase_tx = make_coinbase(miner, 0); + + // Create a zero-fee transaction + let zero_fee_tx = make_user_stacks_transfer( + &privk, + 0, + 0, // Set fee to 0 + &recipient.to_account_principal(), + 1000, + ); + + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &burnchain, + &parent_tip, + vrf_proof, + tip.total_burn, + Hash160([0 as u8; 20]), + ) + .unwrap(); + + let anchored_block = StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_handle_at_tip(), + vec![coinbase_tx, zero_fee_tx], + ) + .unwrap(); + + (anchored_block.0, vec![]) + }, + ); + + peer.next_burnchain_block(burn_ops.clone()); + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + + // Check that the block contains 2 transactions (coinbase + zero-fee transaction) + assert_eq!(stacks_block.txs.len(), 2); + + // Verify that the zero-fee transaction is in the block + let zero_fee_tx = &stacks_block.txs[1]; + assert_eq!(zero_fee_tx.get_tx_fee(), 0); +} + #[test] fn test_build_anchored_blocks_multiple_chaintips() { let mut privks = vec![]; From f1435430325b8b9e9f394bd0946b90211191af0e Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Tue, 10 Sep 2024 17:46:59 +0300 Subject: [PATCH 1020/1400] better naming flash blocks functions and add remove waiting for blocks --- .../src/tests/nakamoto_integrations.rs | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c57b59dce6..17f068f474 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -928,10 +928,13 @@ pub fn boot_to_epoch_3( info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); } +/// Boot the chain to just before the Epoch 3.0 boundary to allow for flash blocks +/// This function is similar to `boot_to_epoch_3`, but it stops at epoch 3 start height - 2, +/// allowing for flash blocks to occur when the epoch changes. /// -/// * `stacker_sks` - must be a private key for sending a large `stack-stx` transaction in order -/// for pox-4 to activate -pub fn boot_to_epoch_3_flash_blocks( +/// * `stacker_sks` - private keys for sending large `stack-stx` transactions to activate pox-4 +/// * `signer_sks` - corresponding signer keys for the stackers +pub fn boot_to_pre_epoch_3_boundary( naka_conf: &Config, blocks_processed: &Arc, stacker_sks: &[StacksPrivateKey], @@ -1077,7 +1080,7 @@ pub fn boot_to_epoch_3_flash_blocks( &naka_conf, ); - info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); + info!("Bootstrapped to one block before Epoch 3.0 boundary, Epoch 2.x miner should continue for one more block"); } fn get_signer_index( @@ -1737,7 +1740,7 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); wait_for_runloop(&blocks_processed); - boot_to_epoch_3_flash_blocks( + boot_to_pre_epoch_3_boundary( &naka_conf, &blocks_processed, &[stacker_sk], @@ -1746,10 +1749,10 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { &mut btc_regtest_controller, ); - // mine 3 blocks which should be the ones for setting up the miner - next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 10); - next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 10); - next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 10); + // Mine 3 Bitcoin blocks quickly without waiting for Stacks blocks to be processed + for _ in 0..3 { + btc_regtest_controller.build_next_block(1); + } info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); From e56cef4b7c66914d96704076cd6571c47b9c508c Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 10 Sep 2024 09:49:25 -0700 Subject: [PATCH 1021/1400] fix: update timestamp validation Keep it as "max 15 seconds", and update the documentation comments. --- clarity/src/vm/docs/mod.rs | 2 +- stackslib/src/chainstate/nakamoto/mod.rs | 6 +++--- stackslib/src/net/api/postblock_proposal.rs | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 6bf577b680..65b08e3102 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -1831,7 +1831,7 @@ and block times are accurate only to within two hours. See [BIP113](https://gith For a block mined after epoch 3.0, this timestamp comes from the Stacks block header. **Note**: this is the time, according to the miner, when the mining of this block started, but is not guaranteed to be accurate. This time will be validated by the signers to be: - Greater than the timestamp of the previous block - - Less than 15 seconds into the future (according to their own local clocks) + - At most 15 seconds into the future (according to their own local clocks) ", example: "(get-stacks-block-info? time u0) ;; Returns (some u1557860301) (get-stacks-block-info? header-hash u0) ;; Returns (some 0x374708fff7719dd5979ec875d56cd2286f6d3cf7ec317a3b25632aab28ec37bb) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index ee6ac12aee..6e775fba56 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -613,7 +613,7 @@ pub struct NakamotoBlockHeader { /// A Unix time timestamp of when this block was mined, according to the miner. /// For the signers to consider a block valid, this timestamp must be: /// * Greater than the timestamp of its parent block - /// * Less than 15 seconds into the future + /// * At most 15 seconds into the future pub timestamp: u64, /// Recoverable ECDSA signature from the tenure's miner. pub miner_signature: MessageSignature, @@ -1877,7 +1877,7 @@ impl NakamotoChainState { "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? @@ -1889,7 +1889,7 @@ impl NakamotoChainState { "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; if connected_sort_id != parent_burn_view_sn.sortition_id { warn!( diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 19b556604f..35410b280b 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -266,7 +266,7 @@ impl NakamotoBlockProposal { // Validate the block's timestamp. It must be: // - Greater than the parent block's timestamp - // - Less than 15 seconds into the future + // - At most 15 seconds into the future if let StacksBlockHeaderTypes::Nakamoto(parent_nakamoto_header) = &parent_stacks_header.anchored_header { @@ -283,7 +283,7 @@ impl NakamotoBlockProposal { }); } } - if self.block.header.timestamp >= get_epoch_time_secs() + 15 { + if self.block.header.timestamp > get_epoch_time_secs() + 15 { warn!( "Rejected block proposal"; "reason" => "Block timestamp is too far into the future", From d40a3e541500f9af546e666a421774cab837c619 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 10 Sep 2024 09:49:40 -0700 Subject: [PATCH 1022/1400] fix: better warn message --- stackslib/src/chainstate/nakamoto/miner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 70298db74c..c4ac2b428c 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -280,7 +280,7 @@ impl NakamotoBlockBuilder { &self.header.parent_block_id, ).map_err(|e| { warn!( - "Cannot process Nakamoto block: could not retrieve coinbase POX height of the elected block"; + "Cannot process Nakamoto block: could not find height at which the PoX reward set was calculated"; "err" => ?e, ); Error::NoSuchBlockError From 30e50d5d44d3021ef9072f4183c8966bd38b181c Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 10 Sep 2024 09:51:08 -0700 Subject: [PATCH 1023/1400] fix: more idiomatic error handling when inserting block --- stacks-signer/src/v0/signer.rs | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index fd5bbab400..926c69fc33 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -197,18 +197,17 @@ impl SignerTrait for Signer { received_time, } => { info!("{self}: Received a new burn block event for block height {burn_height}"); - if let Err(e) = - self.signer_db - .insert_burn_block(burn_header_hash, *burn_height, received_time) - { - error!( - "Failed to write burn block event to signerdb"; - "err" => ?e, - "burn_header_hash" => %burn_header_hash, - "burn_height" => burn_height - ); - panic!("{self} Failed to write burn block event to signerdb: {e}"); - } + self.signer_db + .insert_burn_block(burn_header_hash, *burn_height, received_time) + .unwrap_or_else(|e| { + error!( + "Failed to write burn block event to signerdb"; + "err" => ?e, + "burn_header_hash" => %burn_header_hash, + "burn_height" => burn_height + ); + panic!("{self} Failed to write burn block event to signerdb: {e}"); + }); *sortition_state = None; } } From ab7e18ff1261b8272ed2a60386bc04f707428ec9 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 10 Sep 2024 09:53:48 -0700 Subject: [PATCH 1024/1400] fix: add more key/val pairs to error log --- stackslib/src/chainstate/nakamoto/miner.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index c4ac2b428c..1d267b047f 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -282,6 +282,9 @@ impl NakamotoBlockBuilder { warn!( "Cannot process Nakamoto block: could not find height at which the PoX reward set was calculated"; "err" => ?e, + "stacks_tip" => %self.header.parent_block_id, + "elected_height" => elected_height, + "elected_cycle" => elected_in_cycle ); Error::NoSuchBlockError })?; From 1eb20c88edba39a3820675d1794a851304b772f7 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 10 Sep 2024 14:00:43 -0400 Subject: [PATCH 1025/1400] fix: disregard tx count if the stacks tip has changed Fixes #5157 --- testnet/stacks-node/src/neon_node.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index d627e081b2..153eb9361e 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1715,6 +1715,10 @@ impl BlockMinerThread { info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); best_attempt = cmp::max(best_attempt, prev_block.attempt); + // Since the chain tip has changed, we should try to mine a new block, even + // if it has less transactions than the previous block we mined, since that + // previous block would now be a reorg. + max_txs = 0; } else { info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); From 47436e7ffed5dadff50739cd5ffbdcd962f2cb84 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Tue, 3 Sep 2024 17:17:35 -0400 Subject: [PATCH 1026/1400] fix: Improve logging in rusqlite busy handler, and fail eventually if probable deadlock detected --- stackslib/src/util_lib/db.rs | 54 ++++++++++++++++++++++-------------- 1 file changed, 33 insertions(+), 21 deletions(-) diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index f54a9c97ec..be09de2556 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -655,31 +655,44 @@ impl<'a, C: Clone, T: MarfTrieId> DerefMut for IndexDBTx<'a, C, T> { } } +/// Called by `rusqlite` if we are waiting too long on a database lock pub fn tx_busy_handler(run_count: i32) -> bool { - let mut sleep_count = 2; - if run_count > 0 { - sleep_count = 2u64.saturating_pow(run_count as u32); + const TIMEOUT: Duration = Duration::from_secs(60); + const AVG_SLEEP_TIME_MS: u64 = 100; + + // First, check if this is taking unreasonably long. If so, it's probably a deadlock + let run_count = run_count.unsigned_abs(); + let approx_time_elapsed = + Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count))); + if approx_time_elapsed > TIMEOUT { + error!("Probable deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs(); + "run_count" => run_count, + "backtrace" => ?Backtrace::capture() + ); + return false; } - sleep_count = sleep_count.saturating_add(thread_rng().gen::() % sleep_count); - if sleep_count > 100 { - let jitter = thread_rng().gen::() % 20; - sleep_count = 100 - jitter; + let mut sleep_time_ms = 2u64.saturating_pow(run_count); + + sleep_time_ms = sleep_time_ms.saturating_add(thread_rng().gen_range(0..sleep_time_ms)); + + if sleep_time_ms > AVG_SLEEP_TIME_MS { + let bound = 10; + let jitter = thread_rng().gen_range(0..bound * 2); + sleep_time_ms = (AVG_SLEEP_TIME_MS - bound) + jitter; } - debug!( - "Database is locked; sleeping {}ms and trying again", - &sleep_count; - "backtrace" => ?{ - if run_count > 10 && run_count % 10 == 0 { - Some(Backtrace::capture()) - } else { - None - } - }, - ); + let msg = format!("Database is locked; sleeping {sleep_time_ms}ms and trying again"); + if run_count > 10 && run_count % 10 == 0 { + warn!("{msg}"; + "run_count" => run_count, + "backtrace" => ?Backtrace::capture() + ); + } else { + debug!("{msg}"); + } - sleep_ms(sleep_count); + sleep_ms(sleep_time_ms); true } @@ -696,8 +709,7 @@ pub fn tx_begin_immediate<'a>(conn: &'a mut Connection) -> Result, Erro /// Sames as `tx_begin_immediate` except that it returns a rusqlite error. pub fn tx_begin_immediate_sqlite<'a>(conn: &'a mut Connection) -> Result, sqlite_error> { conn.busy_handler(Some(tx_busy_handler))?; - let tx = Transaction::new(conn, TransactionBehavior::Immediate)?; - Ok(tx) + Transaction::new(conn, TransactionBehavior::Immediate) } #[cfg(feature = "profile-sqlite")] From 3b24bd34c15f00ae0549a69717d5e2624ea816e0 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 4 Sep 2024 13:24:07 -0400 Subject: [PATCH 1027/1400] chore: Address PR comment from Brice --- stackslib/src/util_lib/db.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index be09de2556..4728b83e73 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -677,9 +677,9 @@ pub fn tx_busy_handler(run_count: i32) -> bool { sleep_time_ms = sleep_time_ms.saturating_add(thread_rng().gen_range(0..sleep_time_ms)); if sleep_time_ms > AVG_SLEEP_TIME_MS { - let bound = 10; - let jitter = thread_rng().gen_range(0..bound * 2); - sleep_time_ms = (AVG_SLEEP_TIME_MS - bound) + jitter; + let jitter = 10; + sleep_time_ms = + thread_rng().gen_range((AVG_SLEEP_TIME_MS - jitter)..(AVG_SLEEP_TIME_MS + jitter)); } let msg = format!("Database is locked; sleeping {sleep_time_ms}ms and trying again"); From 8dcfadf8c205c720a3b16086c195c9c2617bcf4e Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 5 Sep 2024 08:56:27 -0400 Subject: [PATCH 1028/1400] chore: Update the `tx_busy_handler()` in stacks-common also --- clarity/src/vm/database/sqlite.rs | 2 +- stacks-common/src/util/db.rs | 64 +++++++++++++++++++++++++++++++ stacks-common/src/util/mod.rs | 27 +------------ stackslib/src/util_lib/db.rs | 38 +----------------- 4 files changed, 67 insertions(+), 64 deletions(-) create mode 100644 stacks-common/src/util/db.rs diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index dc3ad4f5bd..7d2af59eb5 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -21,7 +21,7 @@ use rusqlite::{ }; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; use stacks_common::types::sqlite::NO_PARAMS; -use stacks_common::util::db_common::tx_busy_handler; +use stacks_common::util::db::tx_busy_handler; use stacks_common::util::hash::Sha512Trunc256Sum; use super::clarity_store::{make_contract_hash_key, ContractCommitment}; diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs new file mode 100644 index 0000000000..29ebdd0bb0 --- /dev/null +++ b/stacks-common/src/util/db.rs @@ -0,0 +1,64 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::backtrace::Backtrace; +use std::time::Duration; + +use rand::{thread_rng, Rng}; + +use crate::util::sleep_ms; + +/// Called by `rusqlite` if we are waiting too long on a database lock +/// If called too many times, will fail to avoid deadlocks +pub fn tx_busy_handler(run_count: i32) -> bool { + const TIMEOUT: Duration = Duration::from_secs(60); + const AVG_SLEEP_TIME_MS: u64 = 100; + + // First, check if this is taking unreasonably long. If so, it's probably a deadlock + let run_count = run_count.unsigned_abs(); + let approx_time_elapsed = + Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count))); + if approx_time_elapsed > TIMEOUT { + error!("Probable deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs(); + "run_count" => run_count, + "backtrace" => ?Backtrace::capture() + ); + return false; + } + + let mut sleep_time_ms = 2u64.saturating_pow(run_count); + + sleep_time_ms = sleep_time_ms.saturating_add(thread_rng().gen_range(0..sleep_time_ms)); + + if sleep_time_ms > AVG_SLEEP_TIME_MS { + let jitter = 10; + sleep_time_ms = + thread_rng().gen_range((AVG_SLEEP_TIME_MS - jitter)..(AVG_SLEEP_TIME_MS + jitter)); + } + + let msg = format!("Database is locked; sleeping {sleep_time_ms}ms and trying again"); + if run_count > 10 && run_count % 10 == 0 { + warn!("{msg}"; + "run_count" => run_count, + "backtrace" => ?Backtrace::capture() + ); + } else { + debug!("{msg}"); + } + + sleep_ms(sleep_time_ms); + true +} diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 13ab79dcb3..a9dfc47806 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -19,6 +19,7 @@ pub mod log; #[macro_use] pub mod macros; pub mod chunked_encoding; +pub mod db; pub mod hash; pub mod pair; pub mod pipe; @@ -85,32 +86,6 @@ impl error::Error for HexError { } } -pub mod db_common { - use std::{thread, time}; - - use rand::{thread_rng, Rng}; - - pub fn tx_busy_handler(run_count: i32) -> bool { - let mut sleep_count = 10; - if run_count > 0 { - sleep_count = 2u64.saturating_pow(run_count as u32); - } - sleep_count = sleep_count.saturating_add(thread_rng().gen::() % sleep_count); - - if sleep_count > 5000 { - sleep_count = 5000; - } - - debug!( - "Database is locked; sleeping {}ms and trying again", - &sleep_count - ); - - thread::sleep(time::Duration::from_millis(sleep_count)); - true - } -} - /// Write any `serde_json` object directly to a file pub fn serialize_json_to_file(json: &J, path: P) -> Result<(), std::io::Error> where diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index 4728b83e73..a0496d3bfc 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -657,43 +657,7 @@ impl<'a, C: Clone, T: MarfTrieId> DerefMut for IndexDBTx<'a, C, T> { /// Called by `rusqlite` if we are waiting too long on a database lock pub fn tx_busy_handler(run_count: i32) -> bool { - const TIMEOUT: Duration = Duration::from_secs(60); - const AVG_SLEEP_TIME_MS: u64 = 100; - - // First, check if this is taking unreasonably long. If so, it's probably a deadlock - let run_count = run_count.unsigned_abs(); - let approx_time_elapsed = - Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count))); - if approx_time_elapsed > TIMEOUT { - error!("Probable deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs(); - "run_count" => run_count, - "backtrace" => ?Backtrace::capture() - ); - return false; - } - - let mut sleep_time_ms = 2u64.saturating_pow(run_count); - - sleep_time_ms = sleep_time_ms.saturating_add(thread_rng().gen_range(0..sleep_time_ms)); - - if sleep_time_ms > AVG_SLEEP_TIME_MS { - let jitter = 10; - sleep_time_ms = - thread_rng().gen_range((AVG_SLEEP_TIME_MS - jitter)..(AVG_SLEEP_TIME_MS + jitter)); - } - - let msg = format!("Database is locked; sleeping {sleep_time_ms}ms and trying again"); - if run_count > 10 && run_count % 10 == 0 { - warn!("{msg}"; - "run_count" => run_count, - "backtrace" => ?Backtrace::capture() - ); - } else { - debug!("{msg}"); - } - - sleep_ms(sleep_time_ms); - true + stacks_common::util::db::tx_busy_handler(run_count) } /// Begin an immediate-mode transaction, and handle busy errors with exponential backoff. From 79455d4949902fc26f89a16400fb8fc27599834d Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:12:20 +0200 Subject: [PATCH 1029/1400] Delete unused `open-api` action --- .github/actions/open-api/Dockerfile.open-api-validate | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 .github/actions/open-api/Dockerfile.open-api-validate diff --git a/.github/actions/open-api/Dockerfile.open-api-validate b/.github/actions/open-api/Dockerfile.open-api-validate deleted file mode 100644 index 4ff6187be0..0000000000 --- a/.github/actions/open-api/Dockerfile.open-api-validate +++ /dev/null @@ -1,10 +0,0 @@ -FROM node:lts-alpine as build - -WORKDIR /src - -COPY . . - -RUN npx redoc-cli@0.10.3 bundle -o /build/open-api-docs.html ./docs/rpc/openapi.yaml - -FROM scratch AS export-stage -COPY --from=build /build/open-api-docs.html / From f0b118d6b22ccf70feaf4ef8a5e56bbee917ff7a Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:13:25 +0200 Subject: [PATCH 1030/1400] Remove non-existent `burn_ops` endpoint --- .../get-burn-ops-peg-in.example.json | 14 -------- .../get-burn-ops-peg-out-fulfill.example.json | 15 -------- .../get-burn-ops-peg-out-request.example.json | 16 --------- docs/rpc/openapi.yaml | 36 ------------------- 4 files changed, 81 deletions(-) delete mode 100644 docs/rpc/api/core-node/get-burn-ops-peg-in.example.json delete mode 100644 docs/rpc/api/core-node/get-burn-ops-peg-out-fulfill.example.json delete mode 100644 docs/rpc/api/core-node/get-burn-ops-peg-out-request.example.json diff --git a/docs/rpc/api/core-node/get-burn-ops-peg-in.example.json b/docs/rpc/api/core-node/get-burn-ops-peg-in.example.json deleted file mode 100644 index 5302a3b624..0000000000 --- a/docs/rpc/api/core-node/get-burn-ops-peg-in.example.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "peg_in": [ - { - "amount": 1337, - "block_height": 218, - "burn_header_hash": "3292a7d2a7e941499b5c0dcff2a5656c159010718450948a60c2be9e1c221dc4", - "memo": "", - "peg_wallet_address": "tb1pqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqkgkkf5", - "recipient": "S0000000000000000000002AA028H.awesome_contract", - "txid": "d81bec73a0ea0bdcf9bc011f567944eb1eae5889bf002bf7ae641d7096157771", - "vtxindex": 2 - } - ], -} diff --git a/docs/rpc/api/core-node/get-burn-ops-peg-out-fulfill.example.json b/docs/rpc/api/core-node/get-burn-ops-peg-out-fulfill.example.json deleted file mode 100644 index 45fca8a329..0000000000 --- a/docs/rpc/api/core-node/get-burn-ops-peg-out-fulfill.example.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "peg_out_fulfill": [ - { - "chain_tip": "0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e", - "amount": 1337, - "recipient": "1BixGeiRyKT7NTkJAHpWuP197KXUNqhCU9", - "request_ref": "e81bec73a0ea0bdcf9bc011f567944eb1eae5889bf002bf7ae641d7096157772", - "burn_header_hash": "3292a7d2a7e941499b5c0dcff2a5656c159010718450948a60c2be9e1c221dc4", - "txid": "d81bec73a0ea0bdcf9bc011f567944eb1eae5889bf002bf7ae641d7096157771", - "block_height": 218, - "vtxindex": 2, - "memo": "00010203" - } - ] -} diff --git a/docs/rpc/api/core-node/get-burn-ops-peg-out-request.example.json b/docs/rpc/api/core-node/get-burn-ops-peg-out-request.example.json deleted file mode 100644 index 0e6efa958b..0000000000 --- a/docs/rpc/api/core-node/get-burn-ops-peg-out-request.example.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "peg_out_request": [ - { - "amount": 1337, - "recipient": "1BixGeiRyKT7NTkJAHpWuP197KXUNqhCU9", - "block_height": 218, - "burn_header_hash": "3292a7d2a7e941499b5c0dcff2a5656c159010718450948a60c2be9e1c221dc4", - "peg_wallet_address": "tb1qqvpsxqcrqvpsxqcrqvpsxqcrqvpsxqcrqvpsxqcrqvpsxqcrqvps3f3cyq", - "txid": "d81bec73a0ea0bdcf9bc011f567944eb1eae5889bf002bf7ae641d7096157771", - "vtxindex": 2, - "signature": "0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d", - "fulfillment_fee": 0, - "memo": "00010203" - } - ], -} diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index f33e0dca73..741556da8e 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -41,42 +41,6 @@ paths: example: $ref: ./api/transaction/post-core-node-transactions-error.example.json - /v2/burn_ops/{burn_height}/{op_type}: - get: - summary: Get burn operations - description: Get all burn operations of type `op_type` successfully read at `burn_height`. Valid `op_type`s are `peg_in`, `peg_out_request` and `peg_out_fulfill`. - tags: - - Info - operationId: get_burn_ops - parameters: - - name: burn_height - in: path - required: true - description: height of the burnchain (Bitcoin) - schema: - type: integer - - name: op_type - in: path - required: true - description: name of the burnchain operation type - schema: - type: string - responses: - 200: - description: Burn operations list - content: - application/json: - examples: - peg_in: - value: - $ref: ./api/core-node/get-burn-ops-peg-in.example.json - peg_out_request: - value: - $ref: ./api/core-node/get-burn-ops-peg-out-request.example.json - peg_out_fulfill: - value: - $ref: ./api/core-node/get-burn-ops-peg-out-fulfill.example.json - /v2/contracts/interface/{contract_address}/{contract_name}: get: summary: Get contract interface From 7128cc1abe1e4adc37e9ae6870e0fe1ce1605788 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:13:59 +0200 Subject: [PATCH 1031/1400] Fix wrong required parameters --- .../core-node/post-fee-transaction-response.schema.json | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/rpc/api/core-node/post-fee-transaction-response.schema.json b/docs/rpc/api/core-node/post-fee-transaction-response.schema.json index 8a08591349..af84276b0b 100644 --- a/docs/rpc/api/core-node/post-fee-transaction-response.schema.json +++ b/docs/rpc/api/core-node/post-fee-transaction-response.schema.json @@ -4,7 +4,12 @@ "title": "TransactionFeeEstimateResponse", "type": "object", "additionalProperties": false, - "required": ["estimated_cost", "estimated_cost_scalar", "estimated_fee_rates", "estimated_fees"], + "required": [ + "estimated_cost", + "estimated_cost_scalar", + "cost_scalar_change_by_byte", + "estimations" + ], "properties": { "estimated_cost_scalar": { "type": "integer" From 42566a1591fa85b087f6c7551b529234d8b0b35e Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:14:12 +0200 Subject: [PATCH 1032/1400] Fix comma leading to expected additional params --- docs/rpc/api/trait/get-is-trait-implemented.schema.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/rpc/api/trait/get-is-trait-implemented.schema.json b/docs/rpc/api/trait/get-is-trait-implemented.schema.json index a8b1b65faf..30cb3fa486 100644 --- a/docs/rpc/api/trait/get-is-trait-implemented.schema.json +++ b/docs/rpc/api/trait/get-is-trait-implemented.schema.json @@ -8,6 +8,6 @@ "properties": { "is_implemented": { "type": "boolean" - }, + } } } From 4b275639aa10ff6f908e3059b798ddf62873ad3c Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:14:48 +0200 Subject: [PATCH 1033/1400] Bump `openapi` version and fix license --- docs/rpc/openapi.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 741556da8e..2a269117ec 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -1,13 +1,14 @@ -openapi: 3.0.2 +openapi: 3.1.0 servers: - url: http://localhost:20443 description: Local info: - title: Stacks 2.0+ RPC API + title: Stacks 3.0+ RPC API version: '1.0.0' description: | This is the documentation for the `stacks-node` RPC interface. - license: CC-0 + license: + name: CC-0 paths: /v2/transactions: From b5e1222344b2b2352c8e1ff4f82ff1b7ced58ca8 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:15:42 +0200 Subject: [PATCH 1034/1400] HTTP status codes MUST be enclosed in quotes https://spec.openapis.org/oas/latest.html#patterned-fields-0 --- docs/rpc/openapi.yaml | 56 +++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 2a269117ec..abcc92e982 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -26,14 +26,14 @@ paths: format: binary example: binary format of 00000000010400bed38c2aadffa348931bcb542880ff79d607afec000000000000000000000000000000c800012b0b1fff6cccd0974966dcd665835838f0985be508e1322e09fb3d751eca132c492bda720f9ef1768d14fdabed6127560ba52d5e3ac470dcb60b784e97dc88c9030200000000000516df0ba3e79792be7be5e50a370289accfc8c9e032000000000000303974657374206d656d6f00000000000000000000000000000000000000000000000000 responses: - 200: - description: Transaction id of successful post of a raw tx to the node's mempool + "200": + description: Transaction ID of successful post of a raw tx to the node's mempool content: text/plain: schema: type: string example: '"e161978626f216b2141b156ade10501207ae535fa365a13ef5d7a7c9310a09f2"' - 400: + "400": description: Rejections result in a 400 error content: application/json: @@ -50,7 +50,7 @@ paths: - Smart Contracts operationId: get_contract_interface responses: - 200: + "200": description: Contract interface content: application/json: @@ -90,7 +90,7 @@ paths: In the response, `data` is the hex serialization of the map response. Note that map responses are Clarity option types, for non-existent values, this is a serialized none, and for all other responses, it is a serialized (some ...) object. responses: - 200: + "200": description: Success content: application/json: @@ -98,7 +98,7 @@ paths: $ref: ./api/core-node/get-contract-data-map-entry.schema.json example: $ref: ./api/core-node/get-contract-data-map-entry.example.json - 400: + "400": description: Failed loading data map parameters: - name: contract_address @@ -147,7 +147,7 @@ paths: operationId: get_contract_source description: Returns the Clarity source code of a given contract, along with the block height it was published in, and the MARF proof for the data responses: - 200: + "200": description: Success content: application/json: @@ -192,7 +192,7 @@ paths: The smart contract and function are specified using the URL path. The arguments and the simulated tx-sender are supplied via the POST body in the following JSON format: responses: - 200: + "200": description: Success content: application/json: @@ -273,7 +273,7 @@ paths: description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest known tip (includes unconfirmed state). responses: - 200: + "200": description: Success content: application/json: @@ -363,7 +363,7 @@ paths: example: $ref: ./api/core-node/post-fee-transaction.example.json responses: - 200: + "200": description: Estimated fees for the transaction content: application/json: @@ -380,7 +380,7 @@ paths: operationId: get_fee_transfer description: Get an estimated fee rate for STX transfer transactions. This a a fee rate / byte, and is returned as a JSON integer responses: - 200: + "200": description: Success content: application/json: @@ -397,7 +397,7 @@ paths: - Info operationId: get_core_api_info responses: - 200: + "200": description: Success content: application/json: @@ -414,7 +414,7 @@ paths: - Info operationId: get_pox_info responses: - 200: + "200": description: Success content: application/json: @@ -438,7 +438,7 @@ paths: - Smart Contracts operationId: get_is_trait_implemented responses: - 200: + "200": description: Success content: application/json: @@ -484,7 +484,7 @@ paths: description: | The Stacks chain tip to query from. If tip == "latest", the query will be run from the latest known tip (includes unconfirmed state). - If the tip is left unspecified, the stacks chain tip will be selected (only includes confirmed state). + If the tip is left unspecified, the stacks chain tip will be selected (only includes confirmed state). /v2/constant_val/{contract_address}/{contract_name}/{constant_name}: post: @@ -497,7 +497,7 @@ paths: In the response, `data` is the hex serialization of the constant value. responses: - 200: + "200": description: Success content: application/json: @@ -505,7 +505,7 @@ paths: $ref: ./api/core-node/get-constant-val.schema.json example: $ref: ./api/core-node/get-constant-val.example.json - 400: + "400": description: Failed to retrieve constant value from contract parameters: - name: contract_address @@ -544,15 +544,15 @@ paths: **This API endpoint requires a basic Authorization header.** responses: - 202: + "202": description: Block proposal has been accepted for processing. The result will be returned via the event observer. content: application/json: example: $ref: ./api/core-node/post-block-proposal-response.example.json - 403: + "403": description: Request not over loopback interface - 429: + "429": description: There is an ongoing proposal validation being processed, the new request cannot be accepted until the prior request has been processed. content: @@ -583,13 +583,13 @@ paths: schema: type: integer responses: - 200: + "200": description: Information for the given reward cycle content: application/json: example: $ref: ./api/core-node/get_stacker_set.example.json - 400: + "400": description: Could not fetch the given reward set content: application/json: @@ -598,7 +598,7 @@ paths: /v3/blocks/{block_id}: get: - summary: Fetch a Nakamoto block + summary: Fetch a Nakamoto block tags: - Blocks operationId: get_block_v3 @@ -612,14 +612,14 @@ paths: schema: type: string responses: - 200: + "200": description: The raw SIP-003-encoded block will be returned. content: application/octet-stream: schema: type: string format: binary - 404: + "404": description: The block could not be found content: application/text-plain: {} @@ -633,13 +633,13 @@ paths: description: Fetch metadata about the ongoing Nakamoto tenure. This information is sufficient to obtain and authenticate the highest complete tenure, as well as obtain new tenure blocks. responses: - 200: + "200": description: Metadata about the ongoing tenure content: application/json: example: $ref: ./api/core-node/get_tenure_info.json - + /v3/tenures/{block_id}: get: summary: Fetch a sequence of Nakamoto blocks in a tenure @@ -649,7 +649,7 @@ paths: description: Fetch a sequence of Nakamoto blocks in a tenure. The blocks will be served in order from highest to lowest. The blocks will be encoded in their SIP-003 wire format, and concatenated together. responses: - 200: + "200": description: SIP-003-encoded Nakamoto blocks, concatenated together content: application/octet-stream: From a9b0afe6d3d1bca96f177b1fd5fa7b62dc4293f1 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 16:21:06 +0200 Subject: [PATCH 1035/1400] Wrap examples in `value` --- .../api/contract/post-call-read-only-fn-fail.example.json | 6 ++++-- .../contract/post-call-read-only-fn-success.example.json | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/rpc/api/contract/post-call-read-only-fn-fail.example.json b/docs/rpc/api/contract/post-call-read-only-fn-fail.example.json index 5680a225ca..9017085887 100644 --- a/docs/rpc/api/contract/post-call-read-only-fn-fail.example.json +++ b/docs/rpc/api/contract/post-call-read-only-fn-fail.example.json @@ -1,4 +1,6 @@ { - "okay": false, - "cause": "Unchecked(PublicFunctionNotReadOnly(..." + "value": { + "okay": false, + "cause": "Unchecked(PublicFunctionNotReadOnly(..." + } } diff --git a/docs/rpc/api/contract/post-call-read-only-fn-success.example.json b/docs/rpc/api/contract/post-call-read-only-fn-success.example.json index cc94dccd1d..c2f5d845f1 100644 --- a/docs/rpc/api/contract/post-call-read-only-fn-success.example.json +++ b/docs/rpc/api/contract/post-call-read-only-fn-success.example.json @@ -1,4 +1,6 @@ { - "okay": true, - "result": "0x111..." + "value": { + "okay": true, + "result": "0x111..." + } } From 31b5360b69735b19666348eba6ae3ab683389cc3 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 11 Sep 2024 17:45:35 +0200 Subject: [PATCH 1036/1400] Update docs for `/v3/block_proposal` --- docs/rpc/openapi.yaml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index abcc92e982..6fc4985967 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -533,7 +533,7 @@ paths: description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest known tip (includes unconfirmed state). - /v2/block_proposal: + /v3/block_proposal: post: summary: Validate a proposed Stacks block tags: @@ -545,16 +545,19 @@ paths: **This API endpoint requires a basic Authorization header.** responses: "202": - description: Block proposal has been accepted for processing. The result will be returned via the event observer. + description: Block proposal has been accepted for processing. + The result will be returned via the event observer. content: application/json: example: $ref: ./api/core-node/post-block-proposal-response.example.json - "403": - description: Request not over loopback interface + "400": + description: Endpoint not enabled. + "401": + description: Unauthorized. "429": - description: There is an ongoing proposal validation being processed, the new request cannot be accepted - until the prior request has been processed. + description: There is an ongoing proposal validation being processed, + the new request cannot be accepted until the prior request has been processed. content: application/json: example: From 021ab583d2bbfde9e8e93de5b6674a7bc62d6091 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 11 Sep 2024 12:31:31 -0400 Subject: [PATCH 1037/1400] chore: Add `LOCK_TABLE` to print out all DB locks if deadlock detected --- stacks-common/src/util/db.rs | 29 ++++++++++++++++++++++++----- stackslib/src/util_lib/db.rs | 10 ++++++++-- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs index 29ebdd0bb0..fff39a3297 100644 --- a/stacks-common/src/util/db.rs +++ b/stacks-common/src/util/db.rs @@ -15,16 +15,32 @@ // along with this program. If not, see . use std::backtrace::Backtrace; -use std::time::Duration; +use std::sync::{LazyLock, Mutex}; +use std::thread; +use std::time::{Duration, Instant}; +use hashbrown::HashMap; use rand::{thread_rng, Rng}; use crate::util::sleep_ms; +/// Keep track of DB locks, for deadlock debugging +/// - **key:** `rusqlite::Connection` debug print +/// - **value:** Lock holder (thread name + timestamp) +/// +/// This uses a `Mutex` inside of `LazyLock` because: +/// - Using `Mutex` alone, it can't be statically initialized because `HashMap::new()` isn't `const` +/// - Using `LazyLock` alone doesn't allow interior mutability +pub static LOCK_TABLE: LazyLock>> = + LazyLock::new(|| Mutex::new(HashMap::new())); +/// Generate timestanps for use in `LOCK_TABLE` +/// `Instant` is preferable to `SystemTime` because it uses `CLOCK_MONOTONIC` and is not affected by NTP adjustments +pub static LOCK_TABLE_TIMER: LazyLock = LazyLock::new(Instant::now); + /// Called by `rusqlite` if we are waiting too long on a database lock -/// If called too many times, will fail to avoid deadlocks +/// If called too many times, will assume a deadlock and panic pub fn tx_busy_handler(run_count: i32) -> bool { - const TIMEOUT: Duration = Duration::from_secs(60); + const TIMEOUT: Duration = Duration::from_secs(300); const AVG_SLEEP_TIME_MS: u64 = 100; // First, check if this is taking unreasonably long. If so, it's probably a deadlock @@ -32,11 +48,14 @@ pub fn tx_busy_handler(run_count: i32) -> bool { let approx_time_elapsed = Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count))); if approx_time_elapsed > TIMEOUT { - error!("Probable deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs(); + error!("Deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs(); "run_count" => run_count, "backtrace" => ?Backtrace::capture() ); - return false; + for (k, v) in LOCK_TABLE.lock().unwrap().iter() { + error!("Database '{k}' last locked by {v}"); + } + panic!("Deadlock in thread {:?}", thread::current().name()); } let mut sleep_time_ms = 2u64.saturating_pow(run_count); diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index a0496d3bfc..70850d372c 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -18,7 +18,7 @@ use std::backtrace::Backtrace; use std::io::Error as IOError; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; -use std::time::Duration; +use std::time::{Duration, SystemTime}; use std::{error, fmt, fs, io}; use clarity::vm::types::QualifiedContractIdentifier; @@ -32,6 +32,7 @@ use serde_json::Error as serde_error; use stacks_common::types::chainstate::{SortitionId, StacksAddress, StacksBlockId, TrieHash}; use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::Address; +use stacks_common::util::db::{LOCK_TABLE, LOCK_TABLE_TIMER}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; @@ -673,7 +674,12 @@ pub fn tx_begin_immediate<'a>(conn: &'a mut Connection) -> Result, Erro /// Sames as `tx_begin_immediate` except that it returns a rusqlite error. pub fn tx_begin_immediate_sqlite<'a>(conn: &'a mut Connection) -> Result, sqlite_error> { conn.busy_handler(Some(tx_busy_handler))?; - Transaction::new(conn, TransactionBehavior::Immediate) + let tx = Transaction::new(conn, TransactionBehavior::Immediate)?; + let time = LOCK_TABLE_TIMER.elapsed().as_millis(); + let k = format!("{:?}", tx.deref()); + let v = format!("{:?}@{time}", std::thread::current().name()); + LOCK_TABLE.lock().unwrap().insert(k, v); + Ok(tx) } #[cfg(feature = "profile-sqlite")] From 67d0f308edd39a7270ea327c92c510fe59fd27ff Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 11 Sep 2024 12:45:16 -0400 Subject: [PATCH 1038/1400] refactor: Move logic for updating `LOCK_TABLE` into stacks-common --- stacks-common/src/util/db.rs | 14 ++++++++++++-- stackslib/src/util_lib/db.rs | 7 ++----- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs index fff39a3297..257a98aab9 100644 --- a/stacks-common/src/util/db.rs +++ b/stacks-common/src/util/db.rs @@ -21,6 +21,7 @@ use std::time::{Duration, Instant}; use hashbrown::HashMap; use rand::{thread_rng, Rng}; +use rusqlite::Connection; use crate::util::sleep_ms; @@ -31,11 +32,20 @@ use crate::util::sleep_ms; /// This uses a `Mutex` inside of `LazyLock` because: /// - Using `Mutex` alone, it can't be statically initialized because `HashMap::new()` isn't `const` /// - Using `LazyLock` alone doesn't allow interior mutability -pub static LOCK_TABLE: LazyLock>> = +static LOCK_TABLE: LazyLock>> = LazyLock::new(|| Mutex::new(HashMap::new())); /// Generate timestanps for use in `LOCK_TABLE` /// `Instant` is preferable to `SystemTime` because it uses `CLOCK_MONOTONIC` and is not affected by NTP adjustments -pub static LOCK_TABLE_TIMER: LazyLock = LazyLock::new(Instant::now); +static LOCK_TABLE_TIMER: LazyLock = LazyLock::new(Instant::now); + +/// Call when using an operation which locks a database +/// Updates `LOCK_TABLE` +pub fn update_lock_table(conn: &Connection) { + let timestamp = LOCK_TABLE_TIMER.elapsed().as_millis(); + let k = format!("{conn:?}"); + let v = format!("{:?}@{timestamp}", std::thread::current().name()); + LOCK_TABLE.lock().unwrap().insert(k, v); +} /// Called by `rusqlite` if we are waiting too long on a database lock /// If called too many times, will assume a deadlock and panic diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index 70850d372c..53f597daa2 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -32,7 +32,7 @@ use serde_json::Error as serde_error; use stacks_common::types::chainstate::{SortitionId, StacksAddress, StacksBlockId, TrieHash}; use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::Address; -use stacks_common::util::db::{LOCK_TABLE, LOCK_TABLE_TIMER}; +use stacks_common::util::db::update_lock_table; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; @@ -675,10 +675,7 @@ pub fn tx_begin_immediate<'a>(conn: &'a mut Connection) -> Result, Erro pub fn tx_begin_immediate_sqlite<'a>(conn: &'a mut Connection) -> Result, sqlite_error> { conn.busy_handler(Some(tx_busy_handler))?; let tx = Transaction::new(conn, TransactionBehavior::Immediate)?; - let time = LOCK_TABLE_TIMER.elapsed().as_millis(); - let k = format!("{:?}", tx.deref()); - let v = format!("{:?}@{time}", std::thread::current().name()); - LOCK_TABLE.lock().unwrap().insert(k, v); + update_lock_table(tx.deref()); Ok(tx) } From 0d44ca4bc4d262f9d06cbbddd7edcdb10d272e55 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 11 Sep 2024 08:40:44 -0400 Subject: [PATCH 1039/1400] feat: use `timeout` param from burnchain config --- .../stacks-node/src/burnchains/bitcoin_regtest_controller.rs | 2 +- testnet/stacks-node/src/config.rs | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 145e73a389..2593c79002 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2803,7 +2803,7 @@ impl BitcoinRPCRequest { fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { let request = BitcoinRPCRequest::build_rpc_request(&config, &payload); - let timeout = Duration::from_secs(60); + let timeout = Duration::from_secs(u64::from(config.burnchain.timeout)); let host = request.preamble().host.hostname(); let port = request.preamble().host.port(); diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index d1b115d9cf..73ebf23176 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1416,6 +1416,7 @@ pub struct BurnchainConfig { pub rpc_ssl: bool, pub username: Option, pub password: Option, + /// Timeout, in seconds, for communication with bitcoind pub timeout: u32, pub magic_bytes: MagicBytes, pub local_mining_public_key: Option, @@ -1457,7 +1458,7 @@ impl BurnchainConfig { rpc_ssl: false, username: None, password: None, - timeout: 300, + timeout: 5, magic_bytes: BLOCKSTACK_MAGIC_MAINNET.clone(), local_mining_public_key: None, process_exit_at_block_height: None, @@ -1551,6 +1552,7 @@ pub struct BurnchainConfigFile { pub rpc_ssl: Option, pub username: Option, pub password: Option, + /// Timeout, in seconds, for communication with bitcoind pub timeout: Option, pub magic_bytes: Option, pub local_mining_public_key: Option, From 292cd8902286c3b9224d31c2050cff2de7049774 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 11 Sep 2024 12:50:04 -0400 Subject: [PATCH 1040/1400] chore: Add comment --- stacks-common/src/util/db.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs index 257a98aab9..89fe4677c7 100644 --- a/stacks-common/src/util/db.rs +++ b/stacks-common/src/util/db.rs @@ -42,8 +42,9 @@ static LOCK_TABLE_TIMER: LazyLock = LazyLock::new(Instant::now); /// Updates `LOCK_TABLE` pub fn update_lock_table(conn: &Connection) { let timestamp = LOCK_TABLE_TIMER.elapsed().as_millis(); + // The debug format for `Connection` includes the path let k = format!("{conn:?}"); - let v = format!("{:?}@{timestamp}", std::thread::current().name()); + let v = format!("{:?}@{timestamp}", thread::current().name()); LOCK_TABLE.lock().unwrap().insert(k, v); } From f9f23fb3e7a41f0a147ab0ff91a00c3e91f30e1f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 11 Sep 2024 10:39:17 -0700 Subject: [PATCH 1041/1400] Check if we are the sortition winner before attempting to mock sign Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + .../src/nakamoto_node/sign_coordinator.rs | 13 +- testnet/stacks-node/src/neon_node.rs | 79 +++--- testnet/stacks-node/src/tests/signer/v0.rs | 255 +++++++++++++++--- 4 files changed, 277 insertions(+), 71 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 6241c521e1..e9f3b7735f 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -94,6 +94,7 @@ jobs: - tests::signer::v0::bitcoind_forking_test - tests::signer::v0::multiple_miners - tests::signer::v0::mock_sign_epoch_25 + - tests::signer::v0::multiple_miners_mock_sign_epoch_25 - tests::signer::v0::signer_set_rollover - tests::signer::v0::miner_forking - tests::signer::v0::reloads_signer_set_in diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 6810afbb6b..40d3e96937 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -406,13 +406,14 @@ impl SignCoordinator { match miners_session.put_chunk(&chunk) { Ok(ack) => { - debug!("Wrote message to stackerdb: {ack:?}"); - Ok(()) - } - Err(e) => { - warn!("Failed to write message to stackerdb {e:?}"); - Err("Failed to write message to stackerdb".into()) + if ack.accepted { + debug!("Wrote message to stackerdb: {ack:?}"); + Ok(()) + } else { + Err(format!("{ack:?}")) + } } + Err(e) => Err(format!("{e:?}")), } } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index d627e081b2..acec391ba0 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2340,17 +2340,12 @@ impl BlockMinerThread { } /// Read any mock signatures from stackerdb and respond to them - pub fn send_mock_miner_messages(&mut self) -> Result<(), ChainstateError> { - let miner_config = self.config.get_miner_config(); - if !miner_config.pre_nakamoto_mock_signing { - debug!("Pre-Nakamoto mock signing is disabled"); - return Ok(()); - } - + pub fn send_mock_miner_messages(&mut self) -> Result<(), String> { let burn_db_path = self.config.get_burn_db_file_path(); let burn_db = SortitionDB::open(&burn_db_path, false, self.burnchain.pox_constants.clone()) .expect("FATAL: could not open sortition DB"); - let epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height)? + let epoch_id = SortitionDB::get_stacks_epoch(burn_db.conn(), self.burn_block.block_height) + .map_err(|e| e.to_string())? .expect("FATAL: no epoch defined") .epoch_id; if epoch_id != StacksEpochId::Epoch25 { @@ -2360,6 +2355,12 @@ impl BlockMinerThread { return Ok(()); } + let miner_config = self.config.get_miner_config(); + if !miner_config.pre_nakamoto_mock_signing { + debug!("Pre-Nakamoto mock signing is disabled"); + return Ok(()); + } + let mining_key = miner_config .mining_key .expect("Cannot mock sign without mining key"); @@ -2374,25 +2375,31 @@ impl BlockMinerThread { } // find out which slot we're in. If we are not the latest sortition winner, we should not be sending anymore messages anyway - let stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), false)?; - let (_, miners_info) = - NakamotoChainState::make_miners_stackerdb_config(&burn_db, &self.burn_block)?; - let idx = miners_info.get_latest_winner_index(); - let sortitions = miners_info.get_sortitions(); - let election_sortition = *sortitions - .get(idx as usize) - .expect("FATAL: latest winner index out of bounds"); - - let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); - let mut miners_stackerdb = - StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); - + let ih = burn_db.index_handle(&self.burn_block.sortition_id); + let last_winner_snapshot = ih + .get_last_snapshot_with_sortition(self.burn_block.block_height) + .map_err(|e| e.to_string())?; + + if last_winner_snapshot.miner_pk_hash + != Some(Hash160::from_node_public_key( + &StacksPublicKey::from_private(&mining_key), + )) + { + return Ok(()); + } + let election_sortition = last_winner_snapshot.consensus_hash; let mock_proposal = MockProposal::new(peer_info, self.config.burnchain.chain_id, &mining_key); info!("Sending mock proposal to stackerdb: {mock_proposal:?}"); - if let Err(e) = SignCoordinator::send_miners_message( + let stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), false) + .map_err(|e| e.to_string())?; + let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet()); + let mut miners_stackerdb = + StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); + + SignCoordinator::send_miners_message( &mining_key, &burn_db, &self.burn_block, @@ -2402,15 +2409,17 @@ impl BlockMinerThread { self.config.is_mainnet(), &mut miners_stackerdb, &election_sortition, - ) { - warn!("Failed to send mock proposal to stackerdb: {:?}", &e); - return Ok(()); - } + ) + .map_err(|e| { + warn!("Failed to write mock proposal to stackerdb."); + e + })?; // Retrieve any MockSignatures from stackerdb info!("Waiting for mock signatures..."); - let mock_signatures = - self.wait_for_mock_signatures(&mock_proposal, &stackerdbs, Duration::from_secs(10))?; + let mock_signatures = self + .wait_for_mock_signatures(&mock_proposal, &stackerdbs, Duration::from_secs(10)) + .map_err(|e| e.to_string())?; let mock_block = MockBlock { mock_proposal, @@ -2418,8 +2427,8 @@ impl BlockMinerThread { }; info!("Sending mock block to stackerdb: {mock_block:?}"); - if let Err(e) = SignCoordinator::send_miners_message( - &miner_config.mining_key.expect("BUG: no mining key"), + SignCoordinator::send_miners_message( + &mining_key, &burn_db, &self.burn_block, &stackerdbs, @@ -2428,9 +2437,11 @@ impl BlockMinerThread { self.config.is_mainnet(), &mut miners_stackerdb, &election_sortition, - ) { - warn!("Failed to send mock block to stackerdb: {:?}", &e); - } + ) + .map_err(|e| { + warn!("Failed to write mock block to stackerdb."); + e + })?; Ok(()) } @@ -3795,7 +3806,7 @@ impl RelayerThread { } let Some(mut miner_thread_state) = - self.create_block_miner(registered_key, last_burn_block, issue_timestamp_ms) + self.create_block_miner(registered_key, last_burn_block.clone(), issue_timestamp_ms) else { return false; }; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b931441230..6cfc4b0399 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -39,7 +39,7 @@ use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::MerkleHashFunc; +use stacks::util::hash::{Hash160, MerkleHashFunc}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -63,8 +63,9 @@ use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, - wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_mine_commit, + setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, + POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -163,26 +164,17 @@ impl SignerTest { .get_burnchain() .pox_constants .reward_cycle_length as u64; - let prepare_phase_len = self - .running_nodes - .conf - .get_burnchain() - .pox_constants - .prepare_length as u64; let epoch_25_reward_cycle_boundary = epoch_25_start_height.saturating_sub(epoch_25_start_height % reward_cycle_len); - let epoch_25_reward_set_calculation_boundary = epoch_25_reward_cycle_boundary - .saturating_sub(prepare_phase_len) - .wrapping_add(reward_cycle_len) - .wrapping_add(1); - let next_reward_cycle_boundary = epoch_25_reward_cycle_boundary.wrapping_add(reward_cycle_len); + let target_height = next_reward_cycle_boundary - 1; + info!("Advancing to burn block height {target_height}...",); run_until_burnchain_height( &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.blocks_processed, - epoch_25_reward_set_calculation_boundary, + target_height, &self.running_nodes.conf, ); debug!("Waiting for signer set calculation."); @@ -210,6 +202,7 @@ impl SignerTest { debug!("Signer set calculated"); // Manually consume one more block to ensure signers refresh their state debug!("Waiting for signers to initialize."); + info!("Advancing to the first full Epoch 2.5 reward cycle boundary..."); next_block_and_wait( &mut self.running_nodes.btc_regtest_controller, &self.running_nodes.blocks_processed, @@ -217,14 +210,6 @@ impl SignerTest { self.wait_for_registered(30); debug!("Signers initialized"); - info!("Advancing to the first full Epoch 2.5 reward cycle boundary..."); - run_until_burnchain_height( - &mut self.running_nodes.btc_regtest_controller, - &self.running_nodes.blocks_processed, - next_reward_cycle_boundary, - &self.running_nodes.conf, - ); - let current_burn_block_height = self .running_nodes .btc_regtest_controller @@ -1492,9 +1477,10 @@ fn multiple_miners() { let mut miner_1_tenures = 0; let mut miner_2_tenures = 0; while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { - if btc_blocks_mined > max_nakamoto_tenures { - panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); - } + assert!( + max_nakamoto_tenures >= btc_blocks_mined, + "Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting" + ); let info_1 = get_chain_info(&conf); let info_2 = get_chain_info(&conf_node_2); @@ -1811,14 +1797,13 @@ fn miner_forking() { // (a) its the first nakamoto tenure // (b) the prior sortition didn't have a tenure (because by this time RL2 will have up-to-date block processing) let mut expects_miner_2_to_be_valid = true; - + let max_sortitions = 20; // due to the random nature of mining sortitions, the way this test is structured // is that keeps track of two scenarios that we want to cover, and once enough sortitions // have been produced to cover those scenarios, it stops and checks the results at the end. while !(won_by_miner_2_but_no_tenure && won_by_miner_1_after_tenureless_miner_2) { - if sortitions_seen.len() >= 20 { - panic!("Produced 20 sortitions, but didn't cover the test scenarios, aborting"); - } + let nmb_sortitions_seen = sortitions_seen.len(); + assert!(max_sortitions >= nmb_sortitions_seen, "Produced {nmb_sortitions_seen} sortitions, but didn't cover the test scenarios, aborting"); let (sortition_data, had_tenure) = run_sortition(); sortitions_seen.push((sortition_data.clone(), had_tenure)); @@ -2537,7 +2522,6 @@ fn mock_sign_epoch_25() { }; if mock_block.mock_proposal.peer_info.burn_block_height == current_burn_block_height { - assert_eq!(mock_block.mock_signatures.len(), num_signers); mock_block .mock_signatures .iter() @@ -2567,6 +2551,215 @@ fn mock_sign_epoch_25() { } } +#[test] +#[ignore] +fn multiple_miners_mock_sign_epoch_25() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let mut node_2_listeners = Vec::new(); + + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + let localhost = "127.0.0.1"; + config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); + config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); + config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); + config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + config.miner.pre_nakamoto_mock_signing = true; + let epochs = config.burnchain.epochs.as_mut().unwrap(); + for epoch in epochs.iter_mut() { + if epoch.epoch_id == StacksEpochId::Epoch25 { + epoch.end_height = 251; + } + if epoch.epoch_id == StacksEpochId::Epoch30 { + epoch.start_height = 251; + } + } + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); + conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + let epochs = signer_test + .running_nodes + .conf + .burnchain + .epochs + .clone() + .unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary + + signer_test.boot_to_epoch_25_reward_cycle(); + + info!("------------------------- Reached Epoch 2.5 Reward Cycle-------------------------"); + + // Mine until epoch 3.0 and ensure that no more mock signatures are received + let reward_cycle = signer_test.get_current_reward_cycle(); + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + let signer_keys = signer_test.get_signer_public_keys(reward_cycle); + let signer_public_keys: Vec<_> = signer_keys.signers.into_values().collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + + let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); + + // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. + while signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height() + < epoch_3_boundary + { + let mut mock_block_mesage = None; + let mock_poll_time = Instant::now(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + let current_burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); + while mock_block_mesage.is_none() { + std::thread::sleep(Duration::from_millis(100)); + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks + .into_iter() + .filter_map(|chunk| { + if chunk.contract_id != miners_stackerdb_contract { + return None; + } + Some(chunk.modified_slots) + }) + .flatten() + { + if chunk.data.is_empty() { + continue; + } + let SignerMessage::MockBlock(mock_block) = + SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage") + else { + continue; + }; + if mock_block.mock_proposal.peer_info.burn_block_height == current_burn_block_height + { + mock_block + .mock_signatures + .iter() + .for_each(|mock_signature| { + assert!(signer_public_keys.iter().any(|signer| { + mock_signature + .verify( + &StacksPublicKey::from_slice(signer.to_bytes().as_slice()) + .unwrap(), + ) + .expect("Failed to verify mock signature") + })); + }); + mock_block_mesage = Some(mock_block); + break; + } + } + assert!( + mock_poll_time.elapsed() <= Duration::from_secs(15), + "Failed to find mock miner message within timeout" + ); + } + } +} + #[test] #[ignore] /// This test asserts that signer set rollover works as expected. From b7e29b073e4b1e45471cb9f3af1bf0c4d4b37f87 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 11 Sep 2024 11:05:19 -0700 Subject: [PATCH 1042/1400] Fix build issues during cherry pick failure Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6cfc4b0399..9b04206975 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -39,7 +39,7 @@ use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::{Hash160, MerkleHashFunc}; +use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -63,9 +63,8 @@ use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_mine_commit, - setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, + wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, @@ -2629,7 +2628,8 @@ fn multiple_miners_mock_sign_epoch_25() { false }) }, - &[btc_miner_1_pk.clone(), btc_miner_2_pk.clone()], + Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + None, ); let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); From 8462f901d4cbd258e99b5176e8f23d3e320077f7 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 11 Sep 2024 11:10:17 -0700 Subject: [PATCH 1043/1400] fix: revert change to update processed time --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index b83deebac0..be904395c2 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -595,7 +595,7 @@ impl<'a> NakamotoStagingBlocksTx<'a> { signing_weight: u32, obtain_method: NakamotoBlockObtainMethod, ) -> Result<(), ChainstateError> { - self.execute("UPDATE nakamoto_staging_blocks SET data = ?1, signing_weight = ?2, obtain_method = ?3, processed_time = ?4 WHERE consensus_hash = ?5 AND block_hash = ?6", + self.execute("UPDATE nakamoto_staging_blocks SET data = ?1, signing_weight = ?2, obtain_method = ?3 WHERE consensus_hash = ?4 AND block_hash = ?5", params![ &block.serialize_to_vec(), &signing_weight, From 2ffa044f91014e6707e6125ff99ddd651ab25d94 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 11 Sep 2024 12:08:01 -0700 Subject: [PATCH 1044/1400] fix: apply schema 8 migration before 9 --- stackslib/src/chainstate/burn/db/sortdb.rs | 30 ++++++++++------------ 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index f545c53c8c..eb156cbb96 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2862,7 +2862,7 @@ impl SortitionDB { sql_pragma(self.conn(), "journal_mode", &"WAL")?; sql_pragma(self.conn(), "foreign_keys", &true)?; - let mut db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; + let db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; // create first (sentinel) snapshot debug!("Make first snapshot"); @@ -2888,6 +2888,12 @@ impl SortitionDB { SortitionDB::apply_schema_6(&db_tx, epochs_ref)?; SortitionDB::apply_schema_7(&db_tx, epochs_ref)?; SortitionDB::apply_schema_8_tables(&db_tx, epochs_ref)?; + // `apply_schema_8_migration` creates new transactions, so + // commit this first. + db_tx.commit()?; + // NOTE: we don't need to provide a migrator here because we're not migrating + self.apply_schema_8_migration(None)?; + let mut db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; SortitionDB::apply_schema_9(&db_tx, epochs_ref)?; db_tx.instantiate_index()?; @@ -2906,9 +2912,6 @@ impl SortitionDB { db_tx.commit()?; - // NOTE: we don't need to provide a migrator here because we're not migrating - self.apply_schema_8_migration(None)?; - self.add_indexes()?; debug!("Instantiated SortDB"); @@ -3352,11 +3355,6 @@ impl SortitionDB { ) -> Result<(), db_error> { let canonical_tip = SortitionDB::get_canonical_burn_chain_tip(self.conn())?; - let schema_version = SortitionDB::get_schema_version(self.conn())? - .unwrap_or("0".to_string()) - .parse::() - .unwrap_or(0); - // port over `stacks_chain_tips` table info!("Instantiating `stacks_chain_tips` table..."); self.apply_schema_8_stacks_chain_tips(&canonical_tip)?; @@ -3370,14 +3368,12 @@ impl SortitionDB { info!("No migrator implementation given; `preprocessed_reward_sets` will not be prepopulated"); } - if schema_version < 8 { - let tx = self.tx_begin()?; - tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &["8"], - )?; - tx.commit()?; - } + let tx = self.tx_begin()?; + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["8"], + )?; + tx.commit()?; Ok(()) } From 5574d118e835482cb628b164501c5c21e85ee254 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 11 Sep 2024 12:10:34 -0700 Subject: [PATCH 1045/1400] fix: add mutant skip to `make_tenure_start_info` --- testnet/stacks-node/src/nakamoto_node/miner.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 1669ee9484..3de7d2e512 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1206,6 +1206,7 @@ impl BlockMinerThread { Ok(block) } + #[cfg_attr(test, mutants::skip)] /// Create the tenure start info for the block we're going to build fn make_tenure_start_info( &self, From 120348ac38175e549c901ab6d0184293e9ead089 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 11 Sep 2024 15:43:07 -0400 Subject: [PATCH 1046/1400] chore: avoid warning from duplicate block-commit This change ended up being a lot larger than I'd hoped, but with a recent investigation into some networking failures when communicating with the bitcoin node, these warnings were annoying. I'm just trying to reduce false-positive warning logs. The problematic case shows up like this in the logs before this change: ``` INFO [1725975417.732025] [testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs:1634] [miner-block-http://127.0.0.1:35738] Abort attempt to re-submit identical LeaderBlockCommit WARN [1725975417.732029] [testnet/stacks-node/src/neon_node.rs:2759] [miner-block-http://127.0.0.1:35738] Relayer: Failed to submit Bitcoin transaction ``` --- .../burnchains/bitcoin_regtest_controller.rs | 143 ++++++++++-------- .../src/burnchains/mocknet_controller.rs | 4 +- testnet/stacks-node/src/burnchains/mod.rs | 14 +- .../stacks-node/src/nakamoto_node/relayer.rs | 10 +- testnet/stacks-node/src/neon_node.rs | 27 ++-- testnet/stacks-node/src/run_loop/neon.rs | 2 +- testnet/stacks-node/src/tests/epoch_205.rs | 2 +- testnet/stacks-node/src/tests/epoch_21.rs | 10 +- .../src/tests/nakamoto_integrations.rs | 12 +- .../src/tests/neon_integrations.rs | 20 +-- 10 files changed, 135 insertions(+), 109 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 145e73a389..1990487253 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -845,7 +845,7 @@ impl BitcoinRegtestController { payload: LeaderKeyRegisterOp, signer: &mut BurnchainOpSigner, _attempt: u64, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); // reload the config to find satoshis_per_byte changes @@ -890,7 +890,7 @@ impl BitcoinRegtestController { &mut utxos, signer, true, // key register op requires change output to exist - )?; + ); increment_btc_ops_sent_counter(); @@ -899,7 +899,7 @@ impl BitcoinRegtestController { public_key.to_hex() ); - Some(tx) + Ok(tx) } #[cfg(not(test))] @@ -909,7 +909,7 @@ impl BitcoinRegtestController { _payload: TransferStxOp, _signer: &mut BurnchainOpSigner, _utxo: Option, - ) -> Option { + ) -> Result { unimplemented!() } @@ -920,7 +920,7 @@ impl BitcoinRegtestController { _payload: DelegateStxOp, _signer: &mut BurnchainOpSigner, _utxo: Option, - ) -> Option { + ) -> Result { unimplemented!() } @@ -931,7 +931,7 @@ impl BitcoinRegtestController { operation: BlockstackOperationType, op_signer: &mut BurnchainOpSigner, utxo: Option, - ) -> Option { + ) -> Result { let transaction = match operation { BlockstackOperationType::LeaderBlockCommit(_) | BlockstackOperationType::LeaderKeyRegister(_) @@ -950,11 +950,7 @@ impl BitcoinRegtestController { let ser_transaction = SerializedTx::new(transaction.clone()); - if self.send_transaction(ser_transaction).is_some() { - Some(transaction) - } else { - None - } + self.send_transaction(ser_transaction).map(|_| transaction) } #[cfg(test)] @@ -970,7 +966,7 @@ impl BitcoinRegtestController { payload: TransferStxOp, signer: &mut BurnchainOpSigner, utxo_to_use: Option, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); let max_tx_size = OP_TX_TRANSFER_STACKS_ESTIM_SIZE; let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { @@ -1000,7 +996,9 @@ impl BitcoinRegtestController { // Serialize the payload let op_bytes = { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); - payload.consensus_serialize(&mut bytes).ok()?; + payload + .consensus_serialize(&mut bytes) + .map_err(|_| BurnchainControllerError::SerializerError)?; bytes }; @@ -1028,7 +1026,7 @@ impl BitcoinRegtestController { &mut utxos, signer, false, - )?; + ); increment_btc_ops_sent_counter(); @@ -1037,7 +1035,7 @@ impl BitcoinRegtestController { public_key.to_hex() ); - Some(tx) + Ok(tx) } #[cfg(test)] @@ -1053,7 +1051,7 @@ impl BitcoinRegtestController { payload: DelegateStxOp, signer: &mut BurnchainOpSigner, utxo_to_use: Option, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); let max_tx_size = OP_TX_DELEGATE_STACKS_ESTIM_SIZE; @@ -1084,7 +1082,9 @@ impl BitcoinRegtestController { // Serialize the payload let op_bytes = { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); - payload.consensus_serialize(&mut bytes).ok()?; + payload + .consensus_serialize(&mut bytes) + .map_err(|_| BurnchainControllerError::SerializerError)?; bytes }; @@ -1112,7 +1112,7 @@ impl BitcoinRegtestController { &mut utxos, signer, false, - )?; + ); increment_btc_ops_sent_counter(); @@ -1121,7 +1121,7 @@ impl BitcoinRegtestController { public_key.to_hex() ); - Some(tx) + Ok(tx) } #[cfg(test)] @@ -1132,7 +1132,7 @@ impl BitcoinRegtestController { payload: VoteForAggregateKeyOp, signer: &mut BurnchainOpSigner, utxo_to_use: Option, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); let max_tx_size = OP_TX_VOTE_AGG_ESTIM_SIZE; @@ -1163,7 +1163,9 @@ impl BitcoinRegtestController { // Serialize the payload let op_bytes = { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); - payload.consensus_serialize(&mut bytes).ok()?; + payload + .consensus_serialize(&mut bytes) + .map_err(|_| BurnchainControllerError::SerializerError)?; bytes }; @@ -1187,7 +1189,7 @@ impl BitcoinRegtestController { &mut utxos, signer, false, - )?; + ); increment_btc_ops_sent_counter(); @@ -1196,7 +1198,7 @@ impl BitcoinRegtestController { public_key.to_hex() ); - Some(tx) + Ok(tx) } #[cfg(not(test))] @@ -1207,7 +1209,7 @@ impl BitcoinRegtestController { _payload: VoteForAggregateKeyOp, _signer: &mut BurnchainOpSigner, _utxo_to_use: Option, - ) -> Option { + ) -> Result { unimplemented!() } @@ -1217,7 +1219,7 @@ impl BitcoinRegtestController { _epoch_id: StacksEpochId, _payload: PreStxOp, _signer: &mut BurnchainOpSigner, - ) -> Option { + ) -> Result { unimplemented!() } @@ -1227,7 +1229,7 @@ impl BitcoinRegtestController { epoch_id: StacksEpochId, payload: PreStxOp, signer: &mut BurnchainOpSigner, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); let max_tx_size = OP_TX_PRE_STACKS_ESTIM_SIZE; @@ -1266,7 +1268,7 @@ impl BitcoinRegtestController { &mut utxos, signer, false, - )?; + ); increment_btc_ops_sent_counter(); @@ -1275,7 +1277,7 @@ impl BitcoinRegtestController { public_key.to_hex() ); - Some(tx) + Ok(tx) } #[cfg_attr(test, mutants::skip)] @@ -1286,7 +1288,7 @@ impl BitcoinRegtestController { _payload: StackStxOp, _signer: &mut BurnchainOpSigner, _utxo_to_use: Option, - ) -> Option { + ) -> Result { unimplemented!() } @@ -1297,7 +1299,7 @@ impl BitcoinRegtestController { payload: StackStxOp, signer: &mut BurnchainOpSigner, utxo_to_use: Option, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); let max_tx_size = OP_TX_STACK_STX_ESTIM_SIZE; @@ -1328,7 +1330,9 @@ impl BitcoinRegtestController { // Serialize the payload let op_bytes = { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); - payload.consensus_serialize(&mut bytes).ok()?; + payload + .consensus_serialize(&mut bytes) + .map_err(|_| BurnchainControllerError::SerializerError)?; bytes }; @@ -1354,7 +1358,7 @@ impl BitcoinRegtestController { &mut utxos, signer, false, - )?; + ); increment_btc_ops_sent_counter(); @@ -1363,7 +1367,7 @@ impl BitcoinRegtestController { public_key.to_hex() ); - Some(tx) + Ok(tx) } fn magic_bytes(&self) -> Vec { @@ -1389,9 +1393,14 @@ impl BitcoinRegtestController { utxos_to_exclude: Option, previous_fees: Option, previous_txids: &Vec, - ) -> Option { + ) -> Result { let _ = self.sortdb_mut(); - let burn_chain_tip = self.burnchain_db.as_ref()?.get_canonical_chain_tip().ok()?; + let burn_chain_tip = self + .burnchain_db + .as_ref() + .ok_or(BurnchainControllerError::BurnchainError)? + .get_canonical_chain_tip() + .map_err(|_| BurnchainControllerError::BurnchainError)?; let estimated_fees = match previous_fees { Some(fees) => fees.fees_from_previous_tx(&payload, &self.config), None => LeaderBlockCommitFees::estimated_fees_from_payload(&payload, &self.config), @@ -1419,7 +1428,7 @@ impl BitcoinRegtestController { mut estimated_fees: LeaderBlockCommitFees, previous_txids: &Vec, burnchain_block_height: u64, - ) -> Option { + ) -> Result { let public_key = signer.get_public_key(); let (mut tx, mut utxos) = self.prepare_tx( epoch_id, @@ -1467,7 +1476,7 @@ impl BitcoinRegtestController { &mut utxos, signer, true, // block commit op requires change output to exist - )?; + ); let serialized_tx = SerializedTx::new(tx.clone()); @@ -1500,7 +1509,7 @@ impl BitcoinRegtestController { increment_btc_ops_sent_counter(); - Some(tx) + Ok(tx) } fn build_leader_block_commit_tx( @@ -1509,7 +1518,7 @@ impl BitcoinRegtestController { payload: LeaderBlockCommitOp, signer: &mut BurnchainOpSigner, _attempt: u64, - ) -> Option { + ) -> Result { // Are we currently tracking an operation? if self.ongoing_block_commit.is_none() || !self.allow_rbf { // Good to go, let's build the transaction and send it. @@ -1560,7 +1569,9 @@ impl BitcoinRegtestController { // Did a re-org occur since we fetched our UTXOs, or are the UTXOs so stale that they should be abandoned? let mut traversal_depth = 0; - let mut burn_chain_tip = burnchain_db.get_canonical_chain_tip().ok()?; + let mut burn_chain_tip = burnchain_db + .get_canonical_chain_tip() + .map_err(|_| BurnchainControllerError::BurnchainError)?; let mut found_last_mined_at = false; while traversal_depth < UTXO_CACHE_STALENESS_LIMIT { if &burn_chain_tip.block_hash == &ongoing_op.utxos.bhh { @@ -1572,7 +1583,7 @@ impl BitcoinRegtestController { &burnchain_db.conn(), &burn_chain_tip.parent_block_hash, ) - .ok()?; + .map_err(|_| BurnchainControllerError::BurnchainError)?; burn_chain_tip = parent.header; traversal_depth += 1; @@ -1604,7 +1615,7 @@ impl BitcoinRegtestController { get_max_rbf(&self.config) ); self.ongoing_block_commit = Some(ongoing_op); - return None; + return Err(BurnchainControllerError::MaxFeeRateExceeded); } // An ongoing operation is in the mempool and we received a new block. The desired behaviour is the following: @@ -1619,7 +1630,7 @@ impl BitcoinRegtestController { if payload == ongoing_op.payload { info!("Abort attempt to re-submit identical LeaderBlockCommit"); self.ongoing_block_commit = Some(ongoing_op); - return None; + return Err(BurnchainControllerError::IdenticalOperation); } // Let's proceed and early return 2) i) @@ -1649,7 +1660,7 @@ impl BitcoinRegtestController { ) }; - if res.is_none() { + if res.is_ok() { self.ongoing_block_commit = Some(ongoing_op); } @@ -1688,7 +1699,7 @@ impl BitcoinRegtestController { utxos_to_include: Option, utxos_to_exclude: Option, block_height: u64, - ) -> Option<(Transaction, UTXOSet)> { + ) -> Result<(Transaction, UTXOSet), BurnchainControllerError> { let utxos = if let Some(utxos) = utxos_to_include { // in RBF, you have to consume the same UTXOs utxos @@ -1710,7 +1721,7 @@ impl BitcoinRegtestController { &addr2str(&addr), epoch_id ); - return None; + return Err(BurnchainControllerError::NoUtxos); } }; utxos @@ -1724,7 +1735,7 @@ impl BitcoinRegtestController { lock_time: 0, }; - Some((transaction, utxos)) + Ok((transaction, utxos)) } fn finalize_tx( @@ -1738,7 +1749,7 @@ impl BitcoinRegtestController { utxos_set: &mut UTXOSet, signer: &mut BurnchainOpSigner, force_change_output: bool, - ) -> Option<()> { + ) { // spend UTXOs in order by confirmations. Spend the least-confirmed UTXO first, and in the // event of a tie, spend the smallest-value UTXO first. utxos_set.utxos.sort_by(|u1, u2| { @@ -1788,7 +1799,6 @@ impl BitcoinRegtestController { force_change_output, ); signer.dispose(); - Some(()) } /// Sign and serialize a tx, consuming the UTXOs in utxo_set and spending total_to_spend @@ -1918,22 +1928,21 @@ impl BitcoinRegtestController { /// Send a serialized tx to the Bitcoin node. Return Some(txid) on successful send; None on /// failure. - pub fn send_transaction(&self, transaction: SerializedTx) -> Option { - debug!("Send raw transaction: {}", transaction.to_hex()); - let result = BitcoinRPCRequest::send_raw_transaction(&self.config, transaction.to_hex()); - match result { - Ok(_) => { - debug!("Sent transaction {}", &transaction.txid); - Some(transaction.txid()) - } - Err(e) => { - error!( - "Bitcoin RPC failure: transaction submission failed - {:?}", - e - ); - None - } - } + pub fn send_transaction( + &self, + transaction: SerializedTx, + ) -> Result { + debug!("Sending raw transaction: {}", transaction.to_hex()); + + BitcoinRPCRequest::send_raw_transaction(&self.config, transaction.to_hex()) + .map(|_| { + debug!("Transaction {} sent successfully", &transaction.txid()); + transaction.txid() + }) + .map_err(|e| { + error!("Bitcoin RPC error: transaction submission failed - {:?}", e); + BurnchainControllerError::TransactionSubmissionFailed + }) } /// wait until the ChainsCoordinator has processed sortitions up to @@ -2066,7 +2075,7 @@ impl BitcoinRegtestController { operation: BlockstackOperationType, op_signer: &mut BurnchainOpSigner, attempt: u64, - ) -> Option { + ) -> Result { let transaction = match operation { BlockstackOperationType::LeaderBlockCommit(payload) => { self.build_leader_block_commit_tx(epoch_id, payload, op_signer, attempt) @@ -2263,7 +2272,7 @@ impl BurnchainController for BitcoinRegtestController { operation: BlockstackOperationType, op_signer: &mut BurnchainOpSigner, attempt: u64, - ) -> Option { + ) -> Result { let transaction = self.make_operation_tx(epoch_id, operation, op_signer, attempt)?; self.send_transaction(transaction) } diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index 6bb958e070..d518f5bdea 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -168,10 +168,10 @@ impl BurnchainController for MocknetController { operation: BlockstackOperationType, _op_signer: &mut BurnchainOpSigner, _attempt: u64, - ) -> Option { + ) -> Result { let txid = operation.txid(); self.queued_operations.push_back(operation); - Some(txid) + Ok(txid) } fn sync( diff --git a/testnet/stacks-node/src/burnchains/mod.rs b/testnet/stacks-node/src/burnchains/mod.rs index 5506cf6231..d6706a0e1c 100644 --- a/testnet/stacks-node/src/burnchains/mod.rs +++ b/testnet/stacks-node/src/burnchains/mod.rs @@ -19,6 +19,12 @@ use super::operations::BurnchainOpSigner; pub enum Error { CoordinatorClosed, IndexerError(burnchains::Error), + BurnchainError, + MaxFeeRateExceeded, + IdenticalOperation, + NoUtxos, + TransactionSubmissionFailed, + SerializerError, } impl fmt::Display for Error { @@ -26,6 +32,12 @@ impl fmt::Display for Error { match self { Error::CoordinatorClosed => write!(f, "ChainsCoordinator closed"), Error::IndexerError(ref e) => write!(f, "Indexer error: {:?}", e), + Error::BurnchainError => write!(f, "Burnchain error"), + Error::MaxFeeRateExceeded => write!(f, "Max fee rate exceeded"), + Error::IdenticalOperation => write!(f, "Identical operation, not submitting"), + Error::NoUtxos => write!(f, "No UTXOs available"), + Error::TransactionSubmissionFailed => write!(f, "Transaction submission failed"), + Error::SerializerError => write!(f, "Serializer error"), } } } @@ -45,7 +57,7 @@ pub trait BurnchainController { operation: BlockstackOperationType, op_signer: &mut BurnchainOpSigner, attempt: u64, - ) -> Option; + ) -> Result; fn sync(&mut self, target_block_height_opt: Option) -> Result<(BurnchainTip, u64), Error>; fn sortdb_ref(&self) -> &SortitionDB; fn sortdb_mut(&mut self) -> &mut SortitionDB; diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 435305472a..9ae219659c 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -530,9 +530,9 @@ impl RelayerThread { let op = Self::make_key_register_op(vrf_pk, burnchain_tip_consensus_hash, &miner_pkh); let mut op_signer = self.keychain.generate_op_signer(); - if let Some(txid) = - self.bitcoin_controller - .submit_operation(cur_epoch, op, &mut op_signer, 1) + if let Ok(txid) = self + .bitcoin_controller + .submit_operation(cur_epoch, op, &mut op_signer, 1) { // advance key registration state self.last_vrf_key_burn_height = Some(burn_block.block_height); @@ -1048,8 +1048,8 @@ impl RelayerThread { &mut op_signer, 1, ) - .ok_or_else(|| { - warn!("Failed to submit block-commit bitcoin transaction"); + .map_err(|e| { + warn!("Failed to submit block-commit bitcoin transaction: {}", e); NakamotoNodeError::BurnchainSubmissionFailed })?; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index d627e081b2..b7e2843ece 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -213,7 +213,7 @@ use super::{BurnchainController, Config, EventDispatcher, Keychain}; use crate::burnchains::bitcoin_regtest_controller::{ addr2str, burnchain_params_from_config, BitcoinRegtestController, OngoingBlockCommit, }; -use crate::burnchains::make_bitcoin_indexer; +use crate::burnchains::{make_bitcoin_indexer, Error as BurnchainControllerError}; use crate::chain_data::MinerStats; use crate::config::NodeConfig; use crate::globals::{NeonGlobals as Globals, RelayerDirective}; @@ -2753,16 +2753,21 @@ impl BlockMinerThread { } = self.config.get_node_config(false); let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); - if res.is_none() { - self.failed_to_submit_last_attempt = true; - if !mock_mining { - warn!("Relayer: Failed to submit Bitcoin transaction"); - return None; + self.failed_to_submit_last_attempt = match res { + Ok(_) => false, + Err(BurnchainControllerError::IdenticalOperation) => { + info!("Relayer: Block-commit already submitted"); + true } - debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); - } else { - self.failed_to_submit_last_attempt = false; - } + Err(_) if mock_mining => { + debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); + true + } + Err(e) => { + warn!("Relayer: Failed to submit Bitcoin transaction: {:?}", e); + true + } + }; let assembled_block = AssembledAnchorBlock { parent_consensus_hash: parent_block_info.parent_consensus_hash, @@ -3620,7 +3625,7 @@ impl RelayerThread { ); let mut one_off_signer = self.keychain.generate_op_signer(); - if let Some(txid) = + if let Ok(txid) = self.bitcoin_controller .submit_operation(cur_epoch, op, &mut one_off_signer, 1) { diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 36777c4912..b1fa0ff53b 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -497,7 +497,7 @@ impl RunLoop { return burnchain_error::ShutdownInitiated; } } - Error::IndexerError(_) => {} + _ => {} } error!("Burnchain controller stopped: {}", e); panic!(); diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 68f37b4fb8..2006abb05e 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -629,7 +629,7 @@ fn transition_empty_blocks() { &mut op_signer, 1, ); - assert!(res.is_some(), "Failed to submit block-commit"); + assert!(res.is_ok(), "Failed to submit block-commit"); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index bb168b28b9..2f74ffa770 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -678,7 +678,7 @@ fn transition_fixes_bitcoin_rigidity() { &mut miner_signer, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -713,7 +713,7 @@ fn transition_fixes_bitcoin_rigidity() { &mut spender_signer, 1 ) - .is_some(), + .is_ok(), "Transfer operation should submit successfully" ); @@ -835,7 +835,7 @@ fn transition_fixes_bitcoin_rigidity() { &mut miner_signer, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -866,7 +866,7 @@ fn transition_fixes_bitcoin_rigidity() { &mut spender_signer, 1 ) - .is_some(), + .is_ok(), "Transfer operation should submit successfully" ); @@ -1946,7 +1946,7 @@ fn transition_empty_blocks() { let mut op_signer = keychain.generate_op_signer(); let res = bitcoin_controller.submit_operation(StacksEpochId::Epoch21, op, &mut op_signer, 1); - assert!(res.is_some(), "Failed to submit block-commit"); + assert!(res.is_ok(), "Failed to submit block-commit"); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 4839bee3be..fe5d594221 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2876,7 +2876,7 @@ fn vote_for_aggregate_key_burn_op() { &mut miner_signer, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -2952,7 +2952,7 @@ fn vote_for_aggregate_key_burn_op() { &mut signer_burnop_signer, 1 ) - .is_some(), + .is_ok(), "Vote for aggregate key operation should submit successfully" ); @@ -3433,7 +3433,7 @@ fn stack_stx_burn_op_integration_test() { &mut miner_signer_1, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -3463,7 +3463,7 @@ fn stack_stx_burn_op_integration_test() { &mut miner_signer_2, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); info!("Submitted 2 pre-stx ops at block {block_height}, mining a few blocks..."); @@ -3604,7 +3604,7 @@ fn stack_stx_burn_op_integration_test() { &mut signer_burnop_signer_1, 1 ) - .is_some(), + .is_ok(), "Stack STX operation should submit successfully" ); @@ -3631,7 +3631,7 @@ fn stack_stx_burn_op_integration_test() { &mut signer_burnop_signer_2, 1 ) - .is_some(), + .is_ok(), "Stack STX operation should submit successfully" ); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 3dc9669a9e..cd568bc438 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1934,7 +1934,7 @@ fn stx_transfer_btc_integration_test() { &mut miner_signer, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -1964,7 +1964,7 @@ fn stx_transfer_btc_integration_test() { &mut spender_signer, 1 ) - .is_some(), + .is_ok(), "Transfer operation should submit successfully" ); // should be elected in the same block as the transfer, so balances should be unchanged. @@ -2215,7 +2215,7 @@ fn stx_delegate_btc_integration_test() { &mut miner_signer, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -2244,7 +2244,7 @@ fn stx_delegate_btc_integration_test() { &mut spender_signer, 1 ) - .is_some(), + .is_ok(), "Delegate operation should submit successfully" ); @@ -2507,7 +2507,7 @@ fn stack_stx_burn_op_test() { &mut miner_signer_1, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -2528,7 +2528,7 @@ fn stack_stx_burn_op_test() { &mut miner_signer_2, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); info!("Submitted 2 pre-stx ops at block {block_height}, mining a few blocks..."); @@ -2614,7 +2614,7 @@ fn stack_stx_burn_op_test() { &mut spender_signer_1, 1 ) - .is_some(), + .is_ok(), "Stack STX operation with some signer key should submit successfully" ); @@ -2642,7 +2642,7 @@ fn stack_stx_burn_op_test() { &mut spender_signer_2, 1 ) - .is_some(), + .is_ok(), "Stack STX operation with no signer key should submit successfully" ); @@ -2949,7 +2949,7 @@ fn vote_for_aggregate_key_burn_op_test() { &mut miner_signer, 1 ) - .is_some(), + .is_ok(), "Pre-stx operation should submit successfully" ); @@ -3006,7 +3006,7 @@ fn vote_for_aggregate_key_burn_op_test() { &mut spender_signer, 1 ) - .is_some(), + .is_ok(), "Vote for aggregate key operation should submit successfully" ); From 3a0bc7ae6e838079879b8eb27720507c8b2872e5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 11 Sep 2024 16:23:27 -0400 Subject: [PATCH 1047/1400] chore: have block acceptance return a BlockAcceptResult instead of a bool --- .../chainstate/nakamoto/coordinator/tests.rs | 8 +- .../src/chainstate/nakamoto/tests/node.rs | 10 +-- stackslib/src/net/api/postblock.rs | 14 +-- stackslib/src/net/api/postblock_v3.rs | 18 ++-- stackslib/src/net/relay.rs | 86 +++++++++++++------ stackslib/src/net/rpc.rs | 3 +- stackslib/src/net/tests/mod.rs | 6 +- stackslib/src/net/tests/relay/epoch2x.rs | 24 +++++- 8 files changed, 110 insertions(+), 59 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 188e2e5a3e..ddeea51573 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -65,7 +65,7 @@ use crate::chainstate::stacks::{ }; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::StacksEpochExtension; -use crate::net::relay::Relayer; +use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::stackerdb::StackerDBConfig; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; use crate::net::tests::NakamotoBootPlan; @@ -338,8 +338,10 @@ fn replay_reward_cycle( None, NakamotoBlockObtainMethod::Pushed, ) - .unwrap_or(false); - if accepted { + .unwrap_or(BlockAcceptResponse::Rejected( + "encountered error on acceptance".into(), + )); + if matches!(BlockAcceptResponse::Accepted, accepted) { test_debug!("Accepted Nakamoto block {block_id}"); peer.coord.handle_new_nakamoto_stacks_block().unwrap(); } else { diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index bd12072a01..4377f74876 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -70,7 +70,7 @@ use crate::chainstate::stacks::{ use crate::core::{BOOT_BLOCK_HASH, STACKS_EPOCH_3_0_MARKER}; use crate::cost_estimates::metrics::UnitMetric; use crate::cost_estimates::UnitEstimator; -use crate::net::relay::Relayer; +use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::test::{TestPeer, TestPeerConfig, *}; use crate::util_lib::boot::boot_code_addr; use crate::util_lib::db::Error as db_error; @@ -822,9 +822,9 @@ impl TestStacksNode { } } } else { - false + BlockAcceptResponse::Rejected("try_to_process is false".into()) }; - if accepted { + if matches!(BlockAcceptResponse::Accepted, accepted) { test_debug!("Accepted Nakamoto block {}", &block_to_store.block_id()); coord.handle_new_nakamoto_stacks_block().unwrap(); processed_blocks.push(block_to_store.clone()); @@ -1247,7 +1247,7 @@ impl<'a> TestPeer<'a> { None, NakamotoBlockObtainMethod::Pushed, )?; - if !accepted { + if !matches!(BlockAcceptResponse::Accepted, accepted) { return Ok(false); } let sort_tip = SortitionDB::get_canonical_sortition_tip(self.sortdb().conn()).unwrap(); @@ -1491,7 +1491,7 @@ impl<'a> TestPeer<'a> { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if accepted { + if matches!(BlockAcceptResponse::Accepted, accepted) { test_debug!("Accepted Nakamoto block {}", &block_id); self.coord.handle_new_nakamoto_stacks_block().unwrap(); diff --git a/stackslib/src/net/api/postblock.rs b/stackslib/src/net/api/postblock.rs index 46612a2f8d..4fc50244f9 100644 --- a/stackslib/src/net/api/postblock.rs +++ b/stackslib/src/net/api/postblock.rs @@ -47,7 +47,7 @@ use crate::net::httpcore::{ StacksHttpRequest, StacksHttpResponse, }; use crate::net::p2p::PeerNetwork; -use crate::net::relay::Relayer; +use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::{ Attachment, BlocksData, BlocksDatum, Error as NetError, StacksMessageType, StacksNodeState, }; @@ -177,16 +177,10 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { ) { Ok(accepted) => { debug!( - "{} Stacks block {}/{}", - if accepted { - "Accepted" - } else { - "Did not accept" - }, - &consensus_hash, - &block_hash, + "Received POSTed Stacks block {}/{}: {:?}", + &consensus_hash, &block_hash, &accepted ); - return Ok(accepted); + return Ok(BlockAcceptResponse::Accepted == accepted); } Err(e) => { let msg = format!( diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index 39ff26087f..602e307fd4 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -29,7 +29,7 @@ use crate::net::http::{ use crate::net::httpcore::{ HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, }; -use crate::net::relay::Relayer; +use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::{Error as NetError, NakamotoBlocksData, StacksMessageType, StacksNodeState}; pub static PATH: &'static str = "/v3/blocks/upload/"; @@ -179,10 +179,18 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { }); let data_resp = match response { - Ok(accepted) => StacksBlockAcceptedData { - accepted, - stacks_block_id: block.block_id(), - }, + Ok(accepted) => { + debug!( + "Received POSTed Nakamoto block {}/{}: {:?}", + &block.header.consensus_hash, + &block.header.block_hash(), + &accepted + ); + StacksBlockAcceptedData { + accepted: matches!(accepted, BlockAcceptResponse::Accepted), + stacks_block_id: block.block_id(), + } + } Err(e) => { return e.try_into_contents().map_err(NetError::from); } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 1b08f5cd35..123f78f422 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -517,6 +517,17 @@ pub struct AcceptedNakamotoBlocks { pub blocks: Vec, } +/// Block processed result +#[derive(Debug, Clone, PartialEq)] +pub enum BlockAcceptResponse { + /// block was accepted to the staging DB + Accepted, + /// we already had this block + AlreadyStored, + /// block was rejected for some reason + Rejected(String), +} + impl Relayer { pub fn new( handle: NetworkHandle, @@ -735,7 +746,7 @@ impl Relayer { consensus_hash: &ConsensusHash, block: &StacksBlock, download_time: u64, - ) -> Result { + ) -> Result { info!( "Handle incoming block {}/{}", consensus_hash, @@ -748,7 +759,9 @@ impl Relayer { if chainstate.fault_injection.hide_blocks && Self::fault_injection_is_block_hidden(&block.header, block_sn.block_height) { - return Ok(false); + return Ok(BlockAcceptResponse::Rejected( + "Fault injection: block is hidden".into(), + )); } // find the snapshot of the parent of this block @@ -758,7 +771,9 @@ impl Relayer { Some(sn) => sn, None => { // doesn't correspond to a PoX-valid sortition - return Ok(false); + return Ok(BlockAcceptResponse::Rejected( + "Block does not correspond to a known sortition".into(), + )); } }; @@ -790,7 +805,7 @@ impl Relayer { "sortition_height" => block_sn.block_height, "ast_rules" => ?ast_rules, ); - return Ok(false); + return Ok(BlockAcceptResponse::Rejected("Block is problematic".into())); } let res = chainstate.preprocess_anchored_block( @@ -806,8 +821,10 @@ impl Relayer { consensus_hash, &block.block_hash() ); + return Ok(BlockAcceptResponse::Accepted); + } else { + return Ok(BlockAcceptResponse::AlreadyStored); } - Ok(res) } /// Wrapper around inner_process_new_nakamoto_block @@ -820,7 +837,7 @@ impl Relayer { block: &NakamotoBlock, coord_comms: Option<&CoordinatorChannels>, obtained_method: NakamotoBlockObtainMethod, - ) -> Result { + ) -> Result { Self::process_new_nakamoto_block_ext( burnchain, sortdb, @@ -856,7 +873,7 @@ impl Relayer { coord_comms: Option<&CoordinatorChannels>, obtained_method: NakamotoBlockObtainMethod, force_broadcast: bool, - ) -> Result { + ) -> Result { info!( "Handle incoming Nakamoto block {}/{} obtained via {}", &block.header.consensus_hash, @@ -882,13 +899,13 @@ impl Relayer { // it's possible that the signer sent this block to us, in which case, we should // broadcast it debug!( - "Already have Nakamoto block {}, but broadcasting anyway", + "Already have Nakamoto block {}, but treating a new anyway so we can broadcast it", &block.header.block_id() ); - return Ok(true); + return Ok(BlockAcceptResponse::Accepted); } else { debug!("Already have Nakamoto block {}", &block.header.block_id()); - return Ok(false); + return Ok(BlockAcceptResponse::AlreadyStored); } } @@ -931,7 +948,9 @@ impl Relayer { "burn_height" => block.header.chain_length, "sortition_height" => block_sn.block_height, ); - return Ok(false); + return Ok(BlockAcceptResponse::Rejected( + "Nakamoto block is problematic".into(), + )); } let accept_msg = format!( @@ -1006,11 +1025,11 @@ impl Relayer { return Err(chainstate_error::NetError(net_error::CoordinatorClosed)); } } + return Ok(BlockAcceptResponse::Accepted); } else { info!("{}", &reject_msg); + return Ok(BlockAcceptResponse::AlreadyStored); } - - Ok(accepted) } #[cfg_attr(test, mutants::skip)] @@ -1046,7 +1065,7 @@ impl Relayer { continue; } }; - if accept { + if BlockAcceptResponse::Accepted == accept { accepted.push(block); } } @@ -1163,8 +1182,8 @@ impl Relayer { block, *download_time, ) { - Ok(accepted) => { - if accepted { + Ok(accept_response) => { + if BlockAcceptResponse::Accepted == accept_response { debug!( "Accepted downloaded block {}/{}", consensus_hash, @@ -1173,9 +1192,10 @@ impl Relayer { new_blocks.insert((*consensus_hash).clone(), block.clone()); } else { debug!( - "Rejected downloaded block {}/{}", + "Rejected downloaded block {}/{}: {:?}", consensus_hash, - &block.block_hash() + &block.block_hash(), + &accept_response ); } } @@ -1302,8 +1322,8 @@ impl Relayer { block, 0, ) { - Ok(accepted) => { - if accepted { + Ok(accept_response) => { + if BlockAcceptResponse::Accepted == accept_response { debug!( "Accepted block {}/{} from {}", &consensus_hash, &bhh, &neighbor_key @@ -1311,8 +1331,8 @@ impl Relayer { new_blocks.insert(consensus_hash.clone(), block.clone()); } else { debug!( - "Rejected block {}/{} from {}", - &consensus_hash, &bhh, &neighbor_key + "Rejected block {}/{} from {}: {:?}", + &consensus_hash, &bhh, &neighbor_key, &accept_response ); } } @@ -1670,20 +1690,30 @@ impl Relayer { coord_comms, NakamotoBlockObtainMethod::Pushed, ) { - Ok(accepted) => { - if accepted { + Ok(accept_response) => match accept_response { + BlockAcceptResponse::Accepted => { debug!( "Accepted Nakamoto block {} ({}) from {}", &block_id, &nakamoto_block.header.consensus_hash, neighbor_key ); accepted_blocks.push(nakamoto_block); - } else { - warn!( - "Rejected Nakamoto block {} ({}) from {}", + } + BlockAcceptResponse::AlreadyStored => { + debug!( + "Rejected Nakamoto block {} ({}) from {}: already stored", &block_id, &nakamoto_block.header.consensus_hash, &neighbor_key, ); } - } + BlockAcceptResponse::Rejected(msg) => { + warn!( + "Rejected Nakamoto block {} ({}) from {}: {:?}", + &block_id, + &nakamoto_block.header.consensus_hash, + &neighbor_key, + &msg + ); + } + }, Err(chainstate_error::InvalidStacksBlock(msg)) => { warn!("Invalid pushed Nakamoto block {}: {}", &block_id, msg); bad_neighbors.push((*neighbor_key).clone()); diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index 78b1ff096b..3c1fec15c8 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -559,7 +559,8 @@ impl ConversationHttp { "processing_time_ms" => start_time.elapsed().as_millis(), "latency_ms" => latency, "conn_id" => self.conn_id, - "peer_addr" => &self.peer_addr); + "peer_addr" => &self.peer_addr, + "p2p_msg" => ?msg_opt); if let Some(msg) = msg_opt { ret.push(msg); diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 4d7cdac375..d8ee197f42 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -62,7 +62,7 @@ use crate::chainstate::stacks::{ }; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::{StacksEpoch, StacksEpochExtension}; -use crate::net::relay::Relayer; +use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::stackerdb::StackerDBConfig; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; use crate::util_lib::boot::boot_code_id; @@ -256,7 +256,7 @@ impl NakamotoBootPlan { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if accepted { + if matches!(BlockAcceptResponse::Accepted, accepted) { test_debug!("Accepted Nakamoto block {block_id} to other peer {}", i); peer.coord.handle_new_nakamoto_stacks_block().unwrap(); } else { @@ -293,7 +293,7 @@ impl NakamotoBootPlan { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if accepted { + if matches!(BlockAcceptResponse::Accepted, accepted) { test_debug!( "Accepted malleablized Nakamoto block {block_id} to other peer {}", i diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index b234460ddc..23d1dd60a8 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -3315,7 +3315,11 @@ fn test_block_pay_to_contract_gated_at_v210() { 123, ) { Ok(x) => { - assert!(x, "Failed to process valid pay-to-contract block"); + assert_eq!( + x, + BlockAcceptResponse::Accepted, + "Failed to process valid pay-to-contract block" + ); } Err(e) => { panic!("Got unexpected error {:?}", &e); @@ -3491,7 +3495,11 @@ fn test_block_versioned_smart_contract_gated_at_v210() { 123, ) { Ok(x) => { - assert!(x, "Failed to process valid versioned smart contract block"); + assert_eq!( + x, + BlockAcceptResponse::Accepted, + "Failed to process valid versioned smart contract block" + ); } Err(e) => { panic!("Got unexpected error {:?}", &e); @@ -3649,7 +3657,11 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { 123, ) { Ok(x) => { - assert!(x, "Did not accept valid block"); + assert_eq!( + x, + BlockAcceptResponse::Accepted, + "Did not accept valid block" + ); } Err(e) => { panic!("Got unexpected error {:?}", &e); @@ -3702,7 +3714,11 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { 123, ) { Ok(x) => { - assert!(x, "Failed to process valid versioned smart contract block"); + assert_eq!( + x, + BlockAcceptResponse::Accepted, + "Failed to process valid versioned smart contract block" + ); } Err(e) => { panic!("Got unexpected error {:?}", &e); From ca885d07e101fd7bbe8db0c4b6ca62cc23dc1033 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 11 Sep 2024 16:22:02 -0400 Subject: [PATCH 1048/1400] chore: address review comments --- .../src/burnchains/bitcoin_regtest_controller.rs | 4 ++-- testnet/stacks-node/src/burnchains/mod.rs | 10 ++++++---- testnet/stacks-node/src/nakamoto_node.rs | 3 ++- testnet/stacks-node/src/nakamoto_node/relayer.rs | 2 +- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 1990487253..631651c907 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1721,7 +1721,7 @@ impl BitcoinRegtestController { &addr2str(&addr), epoch_id ); - return Err(BurnchainControllerError::NoUtxos); + return Err(BurnchainControllerError::NoUTXOs); } }; utxos @@ -1941,7 +1941,7 @@ impl BitcoinRegtestController { }) .map_err(|e| { error!("Bitcoin RPC error: transaction submission failed - {:?}", e); - BurnchainControllerError::TransactionSubmissionFailed + BurnchainControllerError::TransactionSubmissionFailed(format!("{:?}", e)) }) } diff --git a/testnet/stacks-node/src/burnchains/mod.rs b/testnet/stacks-node/src/burnchains/mod.rs index d6706a0e1c..7cfde884ec 100644 --- a/testnet/stacks-node/src/burnchains/mod.rs +++ b/testnet/stacks-node/src/burnchains/mod.rs @@ -22,8 +22,8 @@ pub enum Error { BurnchainError, MaxFeeRateExceeded, IdenticalOperation, - NoUtxos, - TransactionSubmissionFailed, + NoUTXOs, + TransactionSubmissionFailed(String), SerializerError, } @@ -35,8 +35,10 @@ impl fmt::Display for Error { Error::BurnchainError => write!(f, "Burnchain error"), Error::MaxFeeRateExceeded => write!(f, "Max fee rate exceeded"), Error::IdenticalOperation => write!(f, "Identical operation, not submitting"), - Error::NoUtxos => write!(f, "No UTXOs available"), - Error::TransactionSubmissionFailed => write!(f, "Transaction submission failed"), + Error::NoUTXOs => write!(f, "No UTXOs available"), + Error::TransactionSubmissionFailed(e) => { + write!(f, "Transaction submission failed: {e}") + } Error::SerializerError => write!(f, "Serializer error"), } } diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 6e57b8023e..7cda49e10d 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -33,6 +33,7 @@ use stacks_common::types::StacksEpochId; use super::{Config, EventDispatcher, Keychain}; use crate::burnchains::bitcoin_regtest_controller::addr2str; +use crate::burnchains::Error as BurnchainsError; use crate::neon_node::{LeaderKeyRegistrationState, StacksNode as NeonNode}; use crate::run_loop::boot_nakamoto::Neon2NakaData; use crate::run_loop::nakamoto::{Globals, RunLoop}; @@ -93,7 +94,7 @@ pub enum Error { /// Something unexpected happened (e.g., hash mismatches) UnexpectedChainState, /// A burnchain operation failed when submitting it to the burnchain - BurnchainSubmissionFailed, + BurnchainSubmissionFailed(BurnchainsError), /// A new parent has been discovered since mining started NewParentDiscovered, /// A failure occurred while constructing a VRF Proof diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 9ae219659c..5b04eb6ff6 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1050,7 +1050,7 @@ impl RelayerThread { ) .map_err(|e| { warn!("Failed to submit block-commit bitcoin transaction: {}", e); - NakamotoNodeError::BurnchainSubmissionFailed + NakamotoNodeError::BurnchainSubmissionFailed(e) })?; info!( From d85c52e040ae704c41f948812d06d38d9a64be63 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 11 Sep 2024 16:31:37 -0400 Subject: [PATCH 1049/1400] chore: propagate codec error --- .../src/burnchains/bitcoin_regtest_controller.rs | 8 ++++---- testnet/stacks-node/src/burnchains/mod.rs | 5 +++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 631651c907..c4718cc68a 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -998,7 +998,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|_| BurnchainControllerError::SerializerError)?; + .map_err(|e| BurnchainControllerError::SerializerError(e))?; bytes }; @@ -1084,7 +1084,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|_| BurnchainControllerError::SerializerError)?; + .map_err(|e| BurnchainControllerError::SerializerError(e))?; bytes }; @@ -1165,7 +1165,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|_| BurnchainControllerError::SerializerError)?; + .map_err(|e| BurnchainControllerError::SerializerError(e))?; bytes }; @@ -1332,7 +1332,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|_| BurnchainControllerError::SerializerError)?; + .map_err(|e| BurnchainControllerError::SerializerError(e))?; bytes }; diff --git a/testnet/stacks-node/src/burnchains/mod.rs b/testnet/stacks-node/src/burnchains/mod.rs index 7cfde884ec..0c9446304d 100644 --- a/testnet/stacks-node/src/burnchains/mod.rs +++ b/testnet/stacks-node/src/burnchains/mod.rs @@ -10,6 +10,7 @@ use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::BlockstackOperationType; use stacks::chainstate::burn::BlockSnapshot; use stacks::core::{StacksEpoch, StacksEpochId}; +use stacks_common::codec::Error as CodecError; pub use self::bitcoin_regtest_controller::{make_bitcoin_indexer, BitcoinRegtestController}; pub use self::mocknet_controller::MocknetController; @@ -24,7 +25,7 @@ pub enum Error { IdenticalOperation, NoUTXOs, TransactionSubmissionFailed(String), - SerializerError, + SerializerError(CodecError), } impl fmt::Display for Error { @@ -39,7 +40,7 @@ impl fmt::Display for Error { Error::TransactionSubmissionFailed(e) => { write!(f, "Transaction submission failed: {e}") } - Error::SerializerError => write!(f, "Serializer error"), + Error::SerializerError(e) => write!(f, "Serializer error: {e}"), } } } From 1c13216f40e101695442d889d53b65984c6d8e9c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 11 Sep 2024 16:33:03 -0400 Subject: [PATCH 1050/1400] chore: formatting --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 5b04eb6ff6..707237f7c6 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1049,7 +1049,7 @@ impl RelayerThread { 1, ) .map_err(|e| { - warn!("Failed to submit block-commit bitcoin transaction: {}", e); + warn!("Failed to submit block-commit bitcoin transaction: {e}"); NakamotoNodeError::BurnchainSubmissionFailed(e) })?; From eb52ad90d9603c7d6556a45856543807213baeab Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 12 Sep 2024 04:43:58 +0300 Subject: [PATCH 1051/1400] fix flash block integration test --- .../src/tests/nakamoto_integrations.rs | 80 ++++++++++--------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 17f068f474..6aca29b315 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -100,8 +100,8 @@ use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ call_read_only, get_account, get_account_result, get_chain_info_result, get_neighbors, - get_pox_info, next_block_and_wait, next_block_and_wait_with_timeout, - run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, + get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, + wait_for_runloop, }; use crate::tests::{ get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, @@ -1749,13 +1749,43 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { &mut btc_regtest_controller, ); - // Mine 3 Bitcoin blocks quickly without waiting for Stacks blocks to be processed - for _ in 0..3 { + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let block_height_before_mining = tip.block_height; + + // Mine 3 Bitcoin blocks rapidly without waiting for Stacks blocks to be processed. + // These blocks won't be considered "mined" until the next_block_and_wait call. + for _i in 0..3 { btc_regtest_controller.build_next_block(1); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Verify that the canonical burn chain tip hasn't advanced yet + assert_eq!( + tip.block_height, + btc_regtest_controller.get_headers_height() - 1 + ); + assert_eq!(tip.block_height, block_height_before_mining); } info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + // Mine a new block and wait for it to be processed. + // This should update the canonical burn chain tip to include all 4 new blocks. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // Verify that the burn chain tip has advanced by 4 blocks + assert_eq!( + tip.block_height, + block_height_before_mining + 4, + "Burn chain tip should have advanced by 4 blocks" + ); + + assert_eq!( + tip.block_height, + btc_regtest_controller.get_headers_height() - 1 + ); + let burnchain = naka_conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); let (mut chainstate, _) = StacksChainState::open( @@ -1772,21 +1802,6 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { .unwrap() .stacks_block_height; - // query for prometheus metrics - #[cfg(feature = "monitoring_prom")] - { - let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); - assert!(res.contains(&expected_result)); - } - info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); @@ -1883,24 +1898,17 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); - // Check that we aren't missing burn blocks + // Check that we have the expected burn blocks + // We expect to have blocks 220-230 and 234 onwards, with a gap for the flash blocks let bhh = u64::from(tip.burn_header_height); - test_observer::contains_burn_block_range(220..=bhh).unwrap(); - - // make sure prometheus returns an updated height - #[cfg(feature = "monitoring_prom")] - { - let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); - assert!(res.contains(&expected_result)); - } + test_observer::contains_burn_block_range(220..=230).unwrap(); + test_observer::contains_burn_block_range(234..=bhh).unwrap(); + // Verify that we're missing the expected flash blocks + assert!( + test_observer::contains_burn_block_range(231..=233).is_err(), + "Expected to be missing burn blocks 231-233 due to flash blocks" + ); + info!("Verified burn block ranges, including expected gap for flash blocks"); coord_channel .lock() From 4d3a200cb128a580426438b7dca9a930033dd5a4 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 12 Sep 2024 11:48:51 -0500 Subject: [PATCH 1052/1400] test: assert block times increasing Co-authored-by: Brice Dobry --- .../src/tests/nakamoto_integrations.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5bd9ba87e7..4f09a7a102 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2271,16 +2271,18 @@ fn correct_burn_outs() { "Blocks should be sorted by cycle number already" ); - let block_times: Vec = new_blocks_with_reward_set - .iter() - .filter_map(|block| block.get("block_time").and_then(|cn| cn.as_u64())) - .collect(); - // Assert that block_times are all greater than 0 - assert!(block_times.iter().all(|&t| t > 0)); - + let mut last_block_time = None; for block in new_blocks_with_reward_set.iter() { let cycle_number = block["cycle_number"].as_u64().unwrap(); let reward_set = block["reward_set"].as_object().unwrap(); + if let Some(block_time) = block["block_time"].as_u64() { + if let Some(last) = last_block_time { + assert!(block_time > last, "Block times should be increasing"); + } + last_block_time = Some(block_time); + } + let cycle_number = block["cycle_number"].as_u64().unwrap(); + let reward_set = block["reward_set"].as_object().unwrap(); if cycle_number < first_epoch_3_cycle { assert!( From df6eae3aef916b9a9a748726bf90b6bb42ef9765 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 12 Sep 2024 13:15:36 -0400 Subject: [PATCH 1053/1400] chore: add signer logs --- stacks-signer/src/chainstate.rs | 8 ++++++++ stacks-signer/src/v0/signer.rs | 1 + 2 files changed, 9 insertions(+) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index c35ceb67e0..49cf237a03 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -193,10 +193,18 @@ impl SortitionsView { .cur_sortition .is_timed_out(self.config.block_proposal_timeout, signer_db)? { + info!( + "Current miner timed out, marking as invalid."; + "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, + ); self.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; } if let Some(last_sortition) = self.last_sortition.as_mut() { if last_sortition.is_timed_out(self.config.block_proposal_timeout, signer_db)? { + info!( + "Last miner timed out, marking as invalid."; + "last_sortition_consensus_hash" => ?last_sortition.consensus_hash, + ); last_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 53a288b7f5..82e6d78ea5 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -353,6 +353,7 @@ impl Signer { "{self}: received a block proposal for a new block. Submit block for validation. "; "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), + "burn_height" => block_proposal.burn_height, ); crate::monitoring::increment_block_proposals_received(); let mut block_info = BlockInfo::from(block_proposal.clone()); From 11c312eda80eac7023a0919a88487ab906ad84c0 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 12 Sep 2024 13:15:48 -0400 Subject: [PATCH 1054/1400] chore: change default `nakamoto_attempt_time_ms` to 5s --- testnet/stacks-node/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index d1b115d9cf..5ab9396ff4 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2370,7 +2370,7 @@ impl Default for MinerConfig { first_attempt_time_ms: 10, subsequent_attempt_time_ms: 120_000, microblock_attempt_time_ms: 30_000, - nakamoto_attempt_time_ms: 20_000, + nakamoto_attempt_time_ms: 5_000, probability_pick_no_estimate_tx: 25, block_reward_recipient: None, segwit: false, From bfdb6e42f9343f489680b158059ed9ecd1d2452e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 12 Sep 2024 12:53:32 -0500 Subject: [PATCH 1055/1400] fix: matches macro usage in block acceptance tests --- stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 2 +- stackslib/src/chainstate/nakamoto/tests/node.rs | 6 +++--- stackslib/src/net/api/postblock_v3.rs | 4 ++-- stackslib/src/net/relay.rs | 7 +++++++ stackslib/src/net/tests/mod.rs | 4 ++-- 5 files changed, 15 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index ddeea51573..469c72e777 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -341,7 +341,7 @@ fn replay_reward_cycle( .unwrap_or(BlockAcceptResponse::Rejected( "encountered error on acceptance".into(), )); - if matches!(BlockAcceptResponse::Accepted, accepted) { + if accepted.is_accepted() { test_debug!("Accepted Nakamoto block {block_id}"); peer.coord.handle_new_nakamoto_stacks_block().unwrap(); } else { diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 4377f74876..aa00430f89 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -824,7 +824,7 @@ impl TestStacksNode { } else { BlockAcceptResponse::Rejected("try_to_process is false".into()) }; - if matches!(BlockAcceptResponse::Accepted, accepted) { + if accepted.is_accepted() { test_debug!("Accepted Nakamoto block {}", &block_to_store.block_id()); coord.handle_new_nakamoto_stacks_block().unwrap(); processed_blocks.push(block_to_store.clone()); @@ -1247,7 +1247,7 @@ impl<'a> TestPeer<'a> { None, NakamotoBlockObtainMethod::Pushed, )?; - if !matches!(BlockAcceptResponse::Accepted, accepted) { + if !accepted.is_accepted() { return Ok(false); } let sort_tip = SortitionDB::get_canonical_sortition_tip(self.sortdb().conn()).unwrap(); @@ -1491,7 +1491,7 @@ impl<'a> TestPeer<'a> { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if matches!(BlockAcceptResponse::Accepted, accepted) { + if accepted.is_accepted() { test_debug!("Accepted Nakamoto block {}", &block_id); self.coord.handle_new_nakamoto_stacks_block().unwrap(); diff --git a/stackslib/src/net/api/postblock_v3.rs b/stackslib/src/net/api/postblock_v3.rs index 602e307fd4..9bd174d322 100644 --- a/stackslib/src/net/api/postblock_v3.rs +++ b/stackslib/src/net/api/postblock_v3.rs @@ -29,7 +29,7 @@ use crate::net::http::{ use crate::net::httpcore::{ HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, }; -use crate::net::relay::{BlockAcceptResponse, Relayer}; +use crate::net::relay::Relayer; use crate::net::{Error as NetError, NakamotoBlocksData, StacksMessageType, StacksNodeState}; pub static PATH: &'static str = "/v3/blocks/upload/"; @@ -187,7 +187,7 @@ impl RPCRequestHandler for RPCPostBlockRequestHandler { &accepted ); StacksBlockAcceptedData { - accepted: matches!(accepted, BlockAcceptResponse::Accepted), + accepted: accepted.is_accepted(), stacks_block_id: block.block_id(), } } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 123f78f422..750651ec73 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -528,6 +528,13 @@ pub enum BlockAcceptResponse { Rejected(String), } +impl BlockAcceptResponse { + /// Does this response indicate that the block was accepted to the staging DB + pub fn is_accepted(&self) -> bool { + matches!(self, Self::Accepted) + } +} + impl Relayer { pub fn new( handle: NetworkHandle, diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index d8ee197f42..09d150d893 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -256,7 +256,7 @@ impl NakamotoBootPlan { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if matches!(BlockAcceptResponse::Accepted, accepted) { + if accepted.is_accepted() { test_debug!("Accepted Nakamoto block {block_id} to other peer {}", i); peer.coord.handle_new_nakamoto_stacks_block().unwrap(); } else { @@ -293,7 +293,7 @@ impl NakamotoBootPlan { NakamotoBlockObtainMethod::Pushed, ) .unwrap(); - if matches!(BlockAcceptResponse::Accepted, accepted) { + if accepted.is_accepted() { test_debug!( "Accepted malleablized Nakamoto block {block_id} to other peer {}", i From 16f127cb80a19d41cad75102e210f04b52b8e9a2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 12 Sep 2024 14:21:12 -0400 Subject: [PATCH 1056/1400] chore: change default `burnchain.timeout` to 60s This matches the hard-coded timeout what was previously in the code. --- testnet/stacks-node/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 5ab9396ff4..611bd7cdaf 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1457,7 +1457,7 @@ impl BurnchainConfig { rpc_ssl: false, username: None, password: None, - timeout: 300, + timeout: 60, magic_bytes: BLOCKSTACK_MAGIC_MAINNET.clone(), local_mining_public_key: None, process_exit_at_block_height: None, From f065a189fce5b4320dc5292db5512b1769fc6d0f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 12 Sep 2024 11:27:33 -0700 Subject: [PATCH 1057/1400] Add state machine fixes to enable moving from the same state into the same state and add tests Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/stacks_client.rs | 8 +-- stacks-signer/src/runloop.rs | 8 +-- stacks-signer/src/signerdb.rs | 61 +++++++++++++++++++---- stacks-signer/src/v0/signer.rs | 32 +++++------- 4 files changed, 70 insertions(+), 39 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 85fa7fd34b..5afec3f76e 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -564,13 +564,13 @@ impl StacksClient { warn!("Failed to parse the GetStackers error response: {e}"); backoff::Error::permanent(e.into()) })?; - if &error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { - return Err(backoff::Error::transient(ClientError::NoSortitionOnChain)); + if error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { + Err(backoff::Error::transient(ClientError::NoSortitionOnChain)) } else { warn!("Got error response ({status}): {}", error_data.err_msg); - return Err(backoff::Error::permanent(ClientError::RequestFailure( + Err(backoff::Error::permanent(ClientError::RequestFailure( status, - ))); + ))) } }; let stackers_response = diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index cb29221ba9..0ce706b3c4 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -430,10 +430,10 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo if !Self::is_configured_for_cycle(&self.stacks_signers, current_reward_cycle) { self.refresh_signer_config(current_reward_cycle); } - if is_in_next_prepare_phase { - if !Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle) { - self.refresh_signer_config(next_reward_cycle); - } + if is_in_next_prepare_phase + && !Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle) + { + self.refresh_signer_config(next_reward_cycle); } self.cleanup_stale_signers(current_reward_cycle); diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 6f5b6c6e06..5e47f09193 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -267,10 +267,11 @@ impl BlockInfo { /// Check if the block state transition is valid fn check_state(&self, state: BlockState) -> bool { let prev_state = &self.state; + if *prev_state == state { + return true; + } match state { - BlockState::Unprocessed => { - matches!(prev_state, BlockState::Unprocessed) - } + BlockState::Unprocessed => false, BlockState::LocallyAccepted => { matches!(prev_state, BlockState::Unprocessed) } @@ -687,7 +688,7 @@ impl SignerDb { .vote .as_ref() .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" }); - let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, &hash)?; + let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, hash)?; debug!("Inserting block_info."; "reward_cycle" => %block_info.reward_cycle, "burn_block_height" => %block_info.burn_block_height, @@ -736,7 +737,7 @@ impl SignerDb { let qry = "INSERT OR REPLACE INTO block_signatures (signer_signature_hash, signature) VALUES (?1, ?2);"; let args = params![ block_sighash, - serde_json::to_string(signature).map_err(|e| DBError::SerializationError(e))? + serde_json::to_string(signature).map_err(DBError::SerializationError)? ]; debug!("Inserting block signature."; @@ -825,7 +826,7 @@ impl SignerDb { if broadcasted == 0 { return Ok(None); } - Ok(u64::try_from(broadcasted).ok()) + Ok(Some(broadcasted)) } /// Get the current state of a given block in the database @@ -1152,15 +1153,12 @@ mod tests { assert_eq!(db.get_block_signatures(&block_id).unwrap(), vec![]); db.add_block_signature(&block_id, &sig1).unwrap(); - assert_eq!( - db.get_block_signatures(&block_id).unwrap(), - vec![sig1.clone()] - ); + assert_eq!(db.get_block_signatures(&block_id).unwrap(), vec![sig1]); db.add_block_signature(&block_id, &sig2).unwrap(); assert_eq!( db.get_block_signatures(&block_id).unwrap(), - vec![sig1.clone(), sig2.clone()] + vec![sig1, sig2] ); } @@ -1223,4 +1221,45 @@ mod tests { 12345 ); } + #[test] + fn state_machine() { + let (mut block, _) = create_block(); + assert_eq!(block.state, BlockState::Unprocessed); + assert!(block.check_state(BlockState::Unprocessed)); + assert!(block.check_state(BlockState::LocallyAccepted)); + assert!(block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + + block.move_to(BlockState::LocallyAccepted).unwrap(); + assert_eq!(block.state, BlockState::LocallyAccepted); + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(block.check_state(BlockState::LocallyAccepted)); + assert!(!block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + + block.move_to(BlockState::GloballyAccepted).unwrap(); + assert_eq!(block.state, BlockState::GloballyAccepted); + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(!block.check_state(BlockState::LocallyAccepted)); + assert!(!block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(!block.check_state(BlockState::GloballyRejected)); + + // Must manually override as will not be able to move from GloballyAccepted to LocallyAccepted + block.state = BlockState::LocallyRejected; + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(!block.check_state(BlockState::LocallyAccepted)); + assert!(block.check_state(BlockState::LocallyRejected)); + assert!(block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + + block.move_to(BlockState::GloballyRejected).unwrap(); + assert!(!block.check_state(BlockState::Unprocessed)); + assert!(!block.check_state(BlockState::LocallyAccepted)); + assert!(!block.check_state(BlockState::LocallyRejected)); + assert!(!block.check_state(BlockState::GloballyAccepted)); + assert!(block.check_state(BlockState::GloballyRejected)); + } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 639ace66d2..d47f84d9fe 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -289,7 +289,7 @@ impl From for Signer { .signer_entries .signer_ids .iter() - .map(|(addr, id)| (*id, addr.clone())) + .map(|(addr, id)| (*id, *addr)) .collect(); let signer_addresses: Vec<_> = signer_ids_and_addrs.into_values().collect(); @@ -307,7 +307,7 @@ impl From for Signer { signer_id, addr ); }; - (addr.clone(), key_ids.len()) + (*addr, key_ids.len()) }) .collect(); @@ -671,7 +671,7 @@ impl Signer { addrs: impl Iterator, ) -> u32 { let signing_weight = addrs.fold(0usize, |signing_weight, stacker_address| { - let stacker_weight = self.signer_weights.get(&stacker_address).unwrap_or(&0); + let stacker_weight = self.signer_weights.get(stacker_address).unwrap_or(&0); signing_weight.saturating_add(*stacker_weight) }); u32::try_from(signing_weight) @@ -727,14 +727,10 @@ impl Signer { let signer_address = StacksAddress::p2pkh(self.mainnet, &public_key); // authenticate the signature -- it must be signed by one of the stacking set - let is_valid_sig = self - .signer_addresses - .iter() - .find(|addr| { - // it only matters that the address hash bytes match - signer_address.bytes == addr.bytes - }) - .is_some(); + let is_valid_sig = self.signer_addresses.iter().any(|addr| { + // it only matters that the address hash bytes match + signer_address.bytes == addr.bytes + }); if !is_valid_sig { debug!("{self}: Receive block rejection with an invalid signature. Will not store."; @@ -822,16 +818,12 @@ impl Signer { }; // authenticate the signature -- it must be signed by one of the stacking set - let is_valid_sig = self - .signer_addresses - .iter() - .find(|addr| { - let stacker_address = StacksAddress::p2pkh(self.mainnet, &public_key); + let is_valid_sig = self.signer_addresses.iter().any(|addr| { + let stacker_address = StacksAddress::p2pkh(self.mainnet, &public_key); - // it only matters that the address hash bytes match - stacker_address.bytes == addr.bytes - }) - .is_some(); + // it only matters that the address hash bytes match + stacker_address.bytes == addr.bytes + }); if !is_valid_sig { debug!("{self}: Receive invalid signature {signature}. Will not store."); From 9883957ed105887efb228e9121dfe55241455e9f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 12 Sep 2024 11:30:14 -0700 Subject: [PATCH 1058/1400] Do not modify self if fail to move to the appropriate state Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 5e47f09193..b9604726ce 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -228,6 +228,7 @@ impl BlockInfo { /// Mark this block as locally accepted, valid, signed over, and records either the self or group signed timestamp in the block info if it wasn't /// already set. pub fn mark_locally_accepted(&mut self, group_signed: bool) -> Result<(), String> { + self.move_to(BlockState::LocallyAccepted)?; self.valid = Some(true); self.signed_over = true; if group_signed { @@ -235,28 +236,31 @@ impl BlockInfo { } else { self.signed_self.get_or_insert(get_epoch_time_secs()); } - self.move_to(BlockState::LocallyAccepted) + Ok(()) } /// Mark this block as valid, signed over, and records a group timestamp in the block info if it wasn't /// already set. pub fn mark_globally_accepted(&mut self) -> Result<(), String> { + self.move_to(BlockState::GloballyAccepted)?; self.valid = Some(true); self.signed_over = true; self.signed_group.get_or_insert(get_epoch_time_secs()); - self.move_to(BlockState::GloballyAccepted) + Ok(()) } /// Mark the block as locally rejected and invalid pub fn mark_locally_rejected(&mut self) -> Result<(), String> { + self.move_to(BlockState::LocallyRejected); self.valid = Some(false); - self.move_to(BlockState::LocallyRejected) + Ok(()) } /// Mark the block as globally rejected and invalid pub fn mark_globally_rejected(&mut self) -> Result<(), String> { + self.move_to(BlockState::GloballyRejected); self.valid = Some(false); - self.move_to(BlockState::GloballyRejected) + Ok(()) } /// Return the block's signer signature hash From ba1ce0c7bcdc52836f54d960f8e9d777a4d6be01 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 12 Sep 2024 15:09:27 -0500 Subject: [PATCH 1059/1400] fix merge artifact --- stackslib/src/net/relay.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index e1498327c6..aebb2b7609 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -934,7 +934,7 @@ impl Relayer { ); if fault_injection::ignore_block(block.header.chain_length, &burnchain.working_dir) { - return Ok(false); + return Ok(BlockAcceptResponse::Rejected("Fault injection: ignoring block".into())) } // do we have this block? don't lock the DB needlessly if so. From 80c069f3eab8858be0be8ff6c0436509db11ee5e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 12 Sep 2024 15:18:39 -0500 Subject: [PATCH 1060/1400] cargo fmt --- stackslib/src/net/relay.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index aebb2b7609..21699ad14c 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -934,7 +934,9 @@ impl Relayer { ); if fault_injection::ignore_block(block.header.chain_length, &burnchain.working_dir) { - return Ok(BlockAcceptResponse::Rejected("Fault injection: ignoring block".into())) + return Ok(BlockAcceptResponse::Rejected( + "Fault injection: ignoring block".into(), + )); } // do we have this block? don't lock the DB needlessly if so. From d0bdfe3cb2c5bf90b043103d105fd54eba846986 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 12 Sep 2024 16:21:01 -0400 Subject: [PATCH 1061/1400] chore: change default `timeout` to 60s This matches the actual timeout that was hard-coded before this change. --- testnet/stacks-node/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 73ebf23176..f2f366dedd 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1458,7 +1458,7 @@ impl BurnchainConfig { rpc_ssl: false, username: None, password: None, - timeout: 5, + timeout: 60, magic_bytes: BLOCKSTACK_MAGIC_MAINNET.clone(), local_mining_public_key: None, process_exit_at_block_height: None, From b8f8b141255618b6755ca8f303726e1c52919a74 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 6 Sep 2024 11:17:10 -0400 Subject: [PATCH 1062/1400] chore: Add `signer_stats` table to chainstate DB --- stackslib/src/chainstate/nakamoto/mod.rs | 20 +++++ stackslib/src/chainstate/stacks/db/mod.rs | 95 +++++++++++++---------- 2 files changed, 72 insertions(+), 43 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 4ffa76c1a7..e53c81f5dd 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -268,6 +268,26 @@ lazy_static! { ADD COLUMN height_in_tenure; "#.into(), ]; + + pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_4: [&'static str; 2] = [ + r#" + UPDATE db_config SET version = "7"; + "#, + // Add a `signer_stats` table to keep track of how many blocks have been signed by each signer + r#" + -- Table for signer stats + CREATE TABLE signer_stats ( + -- Signers public key + public_key TEXT NOT NULL, + -- Stacking rewards cycle ID + reward_cycle INTEGER NOT NULL, + -- Number of blocks signed during reward cycle + blocks_signed INTEGER DEFAULT 0 NOT NULL, + + PRIMARY KEY(public_key,reward_cycle) + ); + "#, + ]; } #[cfg(test)] diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index e8b5c7bb41..198de0354e 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -55,7 +55,7 @@ use crate::chainstate::burn::{ConsensusHash, ConsensusHashExtensions}; use crate::chainstate::nakamoto::{ HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, NAKAMOTO_CHAINSTATE_SCHEMA_2, - NAKAMOTO_CHAINSTATE_SCHEMA_3, + NAKAMOTO_CHAINSTATE_SCHEMA_3, NAKAMOTO_CHAINSTATE_SCHEMA_4, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -679,7 +679,7 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &'static str = "6"; +pub const CHAINSTATE_VERSION: &'static str = "7"; const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[ "PRAGMA foreign_keys = ON;", @@ -1062,55 +1062,64 @@ impl StacksChainState { return Err(Error::InvalidChainstateDB); } - if db_config.version != CHAINSTATE_VERSION { - while db_config.version != CHAINSTATE_VERSION { - match db_config.version.as_str() { - "1" => { - // migrate to 2 - info!("Migrating chainstate schema from version 1 to 2"); - for cmd in CHAINSTATE_SCHEMA_2.iter() { - tx.execute_batch(cmd)?; - } + while db_config.version != CHAINSTATE_VERSION { + match db_config.version.as_str() { + "1" => { + // migrate to 2 + info!("Migrating chainstate schema from version 1 to 2"); + for cmd in CHAINSTATE_SCHEMA_2.iter() { + tx.execute_batch(cmd)?; } - "2" => { - // migrate to 3 - info!("Migrating chainstate schema from version 2 to 3"); - for cmd in CHAINSTATE_SCHEMA_3.iter() { - tx.execute_batch(cmd)?; - } + } + "2" => { + // migrate to 3 + info!("Migrating chainstate schema from version 2 to 3"); + for cmd in CHAINSTATE_SCHEMA_3.iter() { + tx.execute_batch(cmd)?; } - "3" => { - // migrate to nakamoto 1 - info!("Migrating chainstate schema from version 3 to 4: nakamoto support"); - for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_1.iter() { - tx.execute_batch(cmd)?; - } + } + "3" => { + // migrate to nakamoto 1 + info!("Migrating chainstate schema from version 3 to 4: nakamoto support"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_1.iter() { + tx.execute_batch(cmd)?; } - "4" => { - // migrate to nakamoto 2 - info!("Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo"); - for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { - tx.execute_batch(cmd)?; - } + } + "4" => { + // migrate to nakamoto 2 + info!( + "Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo" + ); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_2.iter() { + tx.execute_batch(cmd)?; } - "5" => { - // migrate to nakamoto 3 - info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); - for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { - tx.execute_batch(cmd)?; - } + } + "5" => { + // migrate to nakamoto 3 + info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { + tx.execute_batch(cmd)?; } - _ => { - error!( - "Invalid chain state database: expected version = {}, got {}", - CHAINSTATE_VERSION, db_config.version - ); - return Err(Error::InvalidChainstateDB); + } + "6" => { + // migrate to nakamoto 3 + info!( + "Migrating chainstate schema from version 6 to 7: adds signer_stats table" + ); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_4.iter() { + tx.execute_batch(cmd)?; } } - db_config = - StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); + _ => { + error!( + "Invalid chain state database: expected version = {}, got {}", + CHAINSTATE_VERSION, db_config.version + ); + return Err(Error::InvalidChainstateDB); + } } + db_config = + StacksChainState::load_db_config(tx).expect("CORRUPTION: no db_config found"); } Ok(()) } From c454b35cabda0a6c1068e81e2e6fdcd8d3b830b0 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 6 Sep 2024 15:20:50 -0400 Subject: [PATCH 1063/1400] chore: Record block signers in `append_block()` --- stackslib/src/chainstate/nakamoto/mod.rs | 55 ++++++++++++++++-------- 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e53c81f5dd..2450bac3c6 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -14,13 +14,12 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::HashMap; use std::fs; use std::ops::{Deref, DerefMut, Range}; use std::path::PathBuf; -use clarity::types::PublicKey; -use clarity::util::secp256k1::{secp256k1_recover, Secp256k1PublicKey}; +use clarity::util::secp256k1::Secp256k1PublicKey; use clarity::vm::ast::ASTRules; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::database::{BurnStateDB, ClarityDatabase}; @@ -282,7 +281,7 @@ lazy_static! { -- Stacking rewards cycle ID reward_cycle INTEGER NOT NULL, -- Number of blocks signed during reward cycle - blocks_signed INTEGER DEFAULT 0 NOT NULL, + blocks_signed INTEGER DEFAULT 1 NOT NULL, PRIMARY KEY(public_key,reward_cycle) ); @@ -3276,6 +3275,24 @@ impl NakamotoChainState { .map_err(ChainstateError::from) } + /// Keep track of how many blocks each signer is signing + fn record_block_signers( + tx: &mut ChainstateTx, + block: &NakamotoBlock, + reward_cycle: u64, + ) -> Result<(), ChainstateError> { + let signer_sighash = block.header.signer_signature_hash(); + for signer_signature in &block.header.signer_signature { + let signer_pubkey = + StacksPublicKey::recover_to_pubkey(signer_sighash.bits(), &signer_signature) + .map_err(|e| ChainstateError::InvalidStacksBlock(e.to_string()))?; + let sql = "INSERT INTO signer_stats(public_key,reward_cycle) VALUES(?1,?2) ON CONFLICT(public_key,reward_cycle) DO UPDATE SET blocks_signed=blocks_signed+1"; + let args = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle]; + tx.execute(sql, args)?; + } + Ok(()) + } + /// Begin block-processing and return all of the pre-processed state within a /// `SetupBlockResult`. /// @@ -4059,6 +4076,11 @@ impl NakamotoChainState { let new_block_id = new_tip.index_block_hash(); chainstate_tx.log_transactions_processed(&new_block_id, &tx_receipts); + let reward_cycle = pox_constants.block_height_to_reward_cycle( + first_block_height.into(), + chain_tip_burn_header_height.into(), + ); + // store the reward set calculated during this block if it happened // NOTE: miner and proposal evaluation should not invoke this because // it depends on knowing the StacksBlockId. @@ -4067,19 +4089,12 @@ impl NakamotoChainState { if let Some(signer_calculation) = signer_set_calc { Self::write_reward_set(chainstate_tx, &new_block_id, &signer_calculation.reward_set)?; - let cycle_number = if let Some(cycle) = pox_constants.reward_cycle_of_prepare_phase( - first_block_height.into(), - chain_tip_burn_header_height.into(), - ) { - Some(cycle) - } else { - pox_constants - .block_height_to_reward_cycle( - first_block_height.into(), - chain_tip_burn_header_height.into(), - ) - .map(|cycle| cycle + 1) - }; + let cycle_number = pox_constants + .reward_cycle_of_prepare_phase( + first_block_height.into(), + chain_tip_burn_header_height.into(), + ) + .or_else(|| reward_cycle.map(|cycle| cycle + 1)); if let Some(cycle) = cycle_number { reward_set_data = Some(RewardSetData::new( @@ -4089,6 +4104,12 @@ impl NakamotoChainState { } } + if let Some(reward_cycle) = reward_cycle { + Self::record_block_signers(chainstate_tx, block, reward_cycle)?; + } else { + warn!("No reward cycle found, skipping record_block_signers()"); + } + monitoring::set_last_block_transaction_count(u64::try_from(block.txs.len()).unwrap()); monitoring::set_last_execution_cost_observed(&block_execution_cost, &block_limit); From 32e74fb382c40b49fdec2b0b2477bdab41895599 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 6 Sep 2024 17:17:55 -0400 Subject: [PATCH 1064/1400] chore: Add `/v3/signer/` endpoint --- stackslib/src/net/api/getsigner.rs | 268 +++++++++++++++++++++++++++++ stackslib/src/net/api/mod.rs | 1 + 2 files changed, 269 insertions(+) create mode 100644 stackslib/src/net/api/getsigner.rs diff --git a/stackslib/src/net/api/getsigner.rs b/stackslib/src/net/api/getsigner.rs new file mode 100644 index 0000000000..46a0e4229f --- /dev/null +++ b/stackslib/src/net/api/getsigner.rs @@ -0,0 +1,268 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +use clarity::util::secp256k1::Secp256k1PublicKey; +use regex::{Captures, Regex}; +use serde_json::json; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::net::PeerHost; +use stacks_common::util::hash::Sha256Sum; + +use crate::burnchains::Burnchain; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::coordinator::OnChainRewardSetProvider; +use crate::chainstate::stacks::boot::{ + PoxVersions, RewardSet, POX_1_NAME, POX_2_NAME, POX_3_NAME, POX_4_NAME, +}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::core::mempool::MemPoolDB; +use crate::net::http::{ + parse_json, Error, HttpBadRequest, HttpNotFound, HttpRequest, HttpRequestContents, + HttpRequestPreamble, HttpResponse, HttpResponseContents, HttpResponsePayload, + HttpResponsePreamble, HttpServerError, +}; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, StacksHttpResponse, +}; +use crate::net::p2p::PeerNetwork; +use crate::net::{Error as NetError, StacksNodeState, TipRequest}; +use crate::util_lib::boot::boot_code_id; +use crate::util_lib::db::Error as DBError; + +#[derive(Clone, Default)] +pub struct GetSignerRequestHandler { + signer_pubkey: Option, + reward_cycle: Option, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct GetSignerResponse { + pub blocks_signed: u64, +} + +pub enum GetSignerErrors { + NotAvailableYet(crate::chainstate::coordinator::Error), + Other(String), +} + +impl GetSignerErrors { + pub const NOT_AVAILABLE_ERR_TYPE: &'static str = "not_available_try_again"; + pub const OTHER_ERR_TYPE: &'static str = "other"; + + pub fn error_type_string(&self) -> &'static str { + match self { + Self::NotAvailableYet(_) => Self::NOT_AVAILABLE_ERR_TYPE, + Self::Other(_) => Self::OTHER_ERR_TYPE, + } + } +} + +impl From<&str> for GetSignerErrors { + fn from(value: &str) -> Self { + GetSignerErrors::Other(value.into()) + } +} + +impl std::fmt::Display for GetSignerErrors { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GetSignerErrors::NotAvailableYet(e) => write!(f, "Could not read reward set. Prepare phase may not have started for this cycle yet. Err = {e:?}"), + GetSignerErrors::Other(msg) => write!(f, "{msg}") + } + } +} + +/// Decode the HTTP request +impl HttpRequest for GetSignerRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new( + r#"^/v3/stacker_set/(?P[0-9a-f]{66})/(?P[0-9]{1,10})$"#, + ) + .unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v3/signer/:signer_pubkey/:cycle_num" + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".into(), + )); + } + + let Some(cycle_num_str) = captures.name("cycle_num") else { + return Err(Error::DecodeError( + "Missing in request path: `cycle_num`".into(), + )); + }; + let Some(signer_pubkey_str) = captures.name("signer_pubkey") else { + return Err(Error::DecodeError( + "Missing in request path: `signer_pubkey`".into(), + )); + }; + + let cycle_num = u64::from_str_radix(cycle_num_str.into(), 10) + .map_err(|e| Error::DecodeError(format!("Failed to parse cycle number: {e}")))?; + + let signer_pubkey = Secp256k1PublicKey::from_hex(signer_pubkey_str.into()) + .map_err(|e| Error::DecodeError(format!("Failed to signer public key: {e}")))?; + + self.signer_pubkey = Some(signer_pubkey); + self.reward_cycle = Some(cycle_num); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for GetSignerRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.signer_pubkey = None; + self.reward_cycle = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let signer_pubkey = self + .signer_pubkey + .take() + .ok_or(NetError::SendError("Missing `signer_pubkey`".into()))?; + + let reward_cycle = self + .reward_cycle + .take() + .ok_or(NetError::SendError("Missing `reward_cycle`".into()))?; + + let result = node.with_node_state(|_network, _sortdb, _chainstate, _mempool, _rpc_args| { + // TODO + if true { + Ok(0u64) + } else { + Err("Something went wrong") + } + }); + + let response = match result { + Ok(response) => response, + Err(error) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(error.to_string()), + ) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let mut preamble = HttpResponsePreamble::ok_json(&preamble); + preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); + let body = HttpResponseContents::try_from_json(&response)?; + Ok((preamble, body)) + } +} + +impl HttpResponse for GetSignerRequestHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + let response: GetSignerResponse = parse_json(preamble, body)?; + Ok(HttpResponsePayload::try_from_json(response)?) + } +} + +impl StacksHttpRequest { + /// Make a new getinfo request to this endpoint + pub fn new_getsigner( + host: PeerHost, + signer_pubkey: &Secp256k1PublicKey, + cycle_num: u64, + tip_req: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v3/signer/{}/{cycle_num}", signer_pubkey.to_hex()), + HttpRequestContents::new().for_tip(tip_req), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} + +impl StacksHttpResponse { + pub fn decode_signer(self) -> Result { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let response: GetSignerResponse = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; + Ok(response) + } +} + +#[cfg(test)] +mod test { + use super::GetSignerErrors; + + #[test] + // Test the formatting and error type strings of GetSignerErrors + fn get_signer_errors() { + let not_available_err = GetSignerErrors::NotAvailableYet( + crate::chainstate::coordinator::Error::PoXNotProcessedYet, + ); + let other_err = GetSignerErrors::Other("foo".into()); + + assert_eq!( + not_available_err.error_type_string(), + GetSignerErrors::NOT_AVAILABLE_ERR_TYPE + ); + assert_eq!( + other_err.error_type_string(), + GetSignerErrors::OTHER_ERR_TYPE + ); + + assert!(not_available_err + .to_string() + .starts_with("Could not read reward set")); + assert_eq!(other_err.to_string(), "foo".to_string()); + } +} diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 5bbc6281a2..6eff1a1c53 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -55,6 +55,7 @@ pub mod getmicroblocks_indexed; pub mod getmicroblocks_unconfirmed; pub mod getneighbors; pub mod getpoxinfo; +pub mod getsigner; pub mod getsortition; pub mod getstackerdbchunk; pub mod getstackerdbmetadata; From 66ee0e1c4cfbb9e9e8fbea08ac4b34fc8b2cb88e Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 9 Sep 2024 10:38:42 -0400 Subject: [PATCH 1065/1400] chore: Implement DB read for `/v3/signer/` endpoint --- stackslib/src/chainstate/nakamoto/mod.rs | 30 +++++++- stackslib/src/net/api/getsigner.rs | 89 +++--------------------- 2 files changed, 39 insertions(+), 80 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 2450bac3c6..aa3722505b 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -30,6 +30,7 @@ use lazy_static::{__Deref, lazy_static}; use rusqlite::blob::Blob; use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput}; use rusqlite::{params, Connection, OpenFlags, OptionalExtension}; +use sha2::digest::typenum::Integer; use sha2::{Digest as Sha2Digest, Sha512_256}; use stacks_common::bitvec::BitVec; use stacks_common::codec::{ @@ -3287,12 +3288,37 @@ impl NakamotoChainState { StacksPublicKey::recover_to_pubkey(signer_sighash.bits(), &signer_signature) .map_err(|e| ChainstateError::InvalidStacksBlock(e.to_string()))?; let sql = "INSERT INTO signer_stats(public_key,reward_cycle) VALUES(?1,?2) ON CONFLICT(public_key,reward_cycle) DO UPDATE SET blocks_signed=blocks_signed+1"; - let args = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle]; - tx.execute(sql, args)?; + let params = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle]; + tx.execute(sql, params)?; } Ok(()) } + /// Fetch number of blocks signed for a given signer and reward cycle + /// This is the data tracked by `record_block_signers()` + pub fn get_signer_block_count( + chainstate_db: &Connection, + signer_pubkey: &Secp256k1PublicKey, + reward_cycle: u64, + ) -> Result, ChainstateError> { + let sql = + "SELECT blocks_signed FROM signer_stats WHERE public_key = ?1 AND reward_cycle = ?2"; + let params = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle]; + chainstate_db + .query_row(sql, params, |row| { + let value: String = row.get(2)?; + value.parse::().map_err(|e| { + rusqlite::Error::FromSqlConversionFailure( + size_of::(), + rusqlite::types::Type::Integer, + e.into(), + ) + }) + }) + .optional() + .map_err(ChainstateError::from) + } + /// Begin block-processing and return all of the pre-processed state within a /// `SetupBlockResult`. /// diff --git a/stackslib/src/net/api/getsigner.rs b/stackslib/src/net/api/getsigner.rs index 46a0e4229f..7a9b418cc3 100644 --- a/stackslib/src/net/api/getsigner.rs +++ b/stackslib/src/net/api/getsigner.rs @@ -22,6 +22,7 @@ use stacks_common::util::hash::Sha256Sum; use crate::burnchains::Burnchain; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::coordinator::OnChainRewardSetProvider; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::boot::{ PoxVersions, RewardSet, POX_1_NAME, POX_2_NAME, POX_3_NAME, POX_4_NAME, }; @@ -53,38 +54,6 @@ pub struct GetSignerResponse { pub blocks_signed: u64, } -pub enum GetSignerErrors { - NotAvailableYet(crate::chainstate::coordinator::Error), - Other(String), -} - -impl GetSignerErrors { - pub const NOT_AVAILABLE_ERR_TYPE: &'static str = "not_available_try_again"; - pub const OTHER_ERR_TYPE: &'static str = "other"; - - pub fn error_type_string(&self) -> &'static str { - match self { - Self::NotAvailableYet(_) => Self::NOT_AVAILABLE_ERR_TYPE, - Self::Other(_) => Self::OTHER_ERR_TYPE, - } - } -} - -impl From<&str> for GetSignerErrors { - fn from(value: &str) -> Self { - GetSignerErrors::Other(value.into()) - } -} - -impl std::fmt::Display for GetSignerErrors { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - GetSignerErrors::NotAvailableYet(e) => write!(f, "Could not read reward set. Prepare phase may not have started for this cycle yet. Err = {e:?}"), - GetSignerErrors::Other(msg) => write!(f, "{msg}") - } - } -} - /// Decode the HTTP request impl HttpRequest for GetSignerRequestHandler { fn verb(&self) -> &'static str { @@ -128,12 +97,12 @@ impl HttpRequest for GetSignerRequestHandler { )); }; - let cycle_num = u64::from_str_radix(cycle_num_str.into(), 10) - .map_err(|e| Error::DecodeError(format!("Failed to parse cycle number: {e}")))?; - let signer_pubkey = Secp256k1PublicKey::from_hex(signer_pubkey_str.into()) .map_err(|e| Error::DecodeError(format!("Failed to signer public key: {e}")))?; + let cycle_num = u64::from_str_radix(cycle_num_str.into(), 10) + .map_err(|e| Error::DecodeError(format!("Failed to parse cycle number: {e}")))?; + self.signer_pubkey = Some(signer_pubkey); self.reward_cycle = Some(cycle_num); @@ -152,16 +121,9 @@ impl RPCRequestHandler for GetSignerRequestHandler { fn try_handle_request( &mut self, preamble: HttpRequestPreamble, - contents: HttpRequestContents, + _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let tip = match node.load_stacks_chain_tip(&preamble, &contents) { - Ok(tip) => tip, - Err(error_resp) => { - return error_resp.try_into_contents().map_err(NetError::from); - } - }; - let signer_pubkey = self .signer_pubkey .take() @@ -172,13 +134,12 @@ impl RPCRequestHandler for GetSignerRequestHandler { .take() .ok_or(NetError::SendError("Missing `reward_cycle`".into()))?; - let result = node.with_node_state(|_network, _sortdb, _chainstate, _mempool, _rpc_args| { - // TODO - if true { - Ok(0u64) - } else { - Err("Something went wrong") - } + let result = node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + NakamotoChainState::get_signer_block_count( + &chainstate.index_conn(), + &signer_pubkey, + reward_cycle, + ) }); let response = match result { @@ -238,31 +199,3 @@ impl StacksHttpResponse { Ok(response) } } - -#[cfg(test)] -mod test { - use super::GetSignerErrors; - - #[test] - // Test the formatting and error type strings of GetSignerErrors - fn get_signer_errors() { - let not_available_err = GetSignerErrors::NotAvailableYet( - crate::chainstate::coordinator::Error::PoXNotProcessedYet, - ); - let other_err = GetSignerErrors::Other("foo".into()); - - assert_eq!( - not_available_err.error_type_string(), - GetSignerErrors::NOT_AVAILABLE_ERR_TYPE - ); - assert_eq!( - other_err.error_type_string(), - GetSignerErrors::OTHER_ERR_TYPE - ); - - assert!(not_available_err - .to_string() - .starts_with("Could not read reward set")); - assert_eq!(other_err.to_string(), "foo".to_string()); - } -} From ec195f907992bc20c4a8da38580f7ff4bb405fea Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 9 Sep 2024 13:48:52 -0400 Subject: [PATCH 1066/1400] docs: Add `/v3/signer/` --- CHANGELOG.md | 1 + docs/rpc-endpoints.md | 5 ++++ docs/rpc/openapi.yaml | 30 ++++++++++++++++++++++++ stackslib/src/chainstate/nakamoto/mod.rs | 3 ++- 4 files changed, 38 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb6061cb9d..add9240af3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-stacks-block-info?` added - `get-tenure-info?` added - `get-block-info?` removed +- Added `/v3/signer/` endpoint ## [2.5.0.0.5] diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index eea916a781..9f0e09fd20 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -533,3 +533,8 @@ highest sortition), `reward_cycle` identifies the reward cycle number of this tenure, `tip_block_id` identifies the highest-known block in this tenure, and `tip_height` identifies that block's height. +### GET /v3/signer/[Signer Pubkey]/[Reward Cycle] + +Get number of blocks signed by signer during a given reward cycle + +Returns a non-negative integer diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 6fc4985967..9492fd785d 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -675,3 +675,33 @@ paths: schema: type: string + /v3/signer/{signer}/{cycle_number}: + get: + summary: Get number of blocks signed by signer during a given reward cycle + tags: + - Blocks + - Signers + operationId: get_signer + description: Get number of blocks signed by signer during a given reward cycle + parameters: + - name: signer + in: path + required: true + description: Hex-encoded compressed Secp256k1 public key of signer + schema: + type: string + parameters: + - name: cycle_number + in: path + required: true + description: Reward cycle number + schema: + type: integer + responses: + 200: + description: Number of blocks signed + content: + text/plain: + schema: + type: integer + example: 7 \ No newline at end of file diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index aa3722505b..ff38a47691 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -3300,7 +3300,7 @@ impl NakamotoChainState { chainstate_db: &Connection, signer_pubkey: &Secp256k1PublicKey, reward_cycle: u64, - ) -> Result, ChainstateError> { + ) -> Result { let sql = "SELECT blocks_signed FROM signer_stats WHERE public_key = ?1 AND reward_cycle = ?2"; let params = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle]; @@ -3316,6 +3316,7 @@ impl NakamotoChainState { }) }) .optional() + .map(Option::unwrap_or_default) // It's fine to map `NONE` to `0`, because it's impossible to have `Some(0)` .map_err(ChainstateError::from) } From 99f40cf944c844e4bb83e7187c7d5ecde3fb7829 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 12 Sep 2024 14:57:47 -0400 Subject: [PATCH 1067/1400] test: Add integration test for `/v3/signer` endpoint --- .github/workflows/bitcoin-tests.yml | 1 + .../src/tests/nakamoto_integrations.rs | 130 ++++++++++++++++++ 2 files changed, 131 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a5710c7c66..713ab5a986 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -113,6 +113,7 @@ jobs: - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover + - tests::nakamoto_integrations::v3_signer_api_endpoint - tests::signer::v0::multiple_miners_with_nakamoto_blocks - tests::signer::v0::partial_tenure_fork # Do not run this one until we figure out why it fails in CI diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b9b9c9844d..8296c153d4 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7910,3 +7910,133 @@ fn utxo_check_on_startup_recover() { run_loop_stopper.store(false, Ordering::SeqCst); run_loop_thread.join().unwrap(); } + +/// Test `/v3/signer` API endpoint +/// +/// This endpoint returns a count of how many blocks a signer has signed during a given reward cycle +#[test] +#[ignore] +fn v3_signer_api_endpoint() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = naka_neon_integration_conf(None); + let password = "12345".to_string(); + conf.connection_options.auth_token = Some(password.clone()); + let stacker_sk = setup_stacker(&mut conf); + let signer_sk = Secp256k1PrivateKey::new(); + let signer_addr = tests::to_addr(&signer_sk); + let signer_pubkey = Secp256k1PublicKey::from_private(&signer_sk); + conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); + + // only subscribe to the block proposal events + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::BlockProposal], + }); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + let mut signers = TestSigners::new(vec![signer_sk.clone()]); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &conf, + &blocks_processed, + &[stacker_sk], + &[signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + blind_signer(&conf, &signers, proposals_submitted); + wait_for_first_naka_block_commit(60, &commits_submitted); + + // TODO (hack) instantiate the sortdb in the burnchain + _ = btc_regtest_controller.sortdb_mut(); + + info!("------------------------- Setup finished, run test -------------------------"); + + let naka_tenures = 20; + let pre_naka_reward_cycle = 1; + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let get_v3_signer = |pubkey: &Secp256k1PublicKey, reward_cycle: u64| { + let url = format!( + "{http_origin}/v3/signer/{pk}/{reward_cycle}", + pk = pubkey.to_hex() + ); + info!("Sending GET {url}"); + reqwest::blocking::get(url) + .unwrap_or_else(|e| panic!("GET request failed: {e}")) + .text() + .expect("Empty response") + .parse::() + .unwrap_or_else(|e| panic!("Failed to parse response as `u64`: {e}")) + }; + + // Check reward cycle 1, should be 0 (pre-nakamoto) + let blocks_signed_pre_naka = get_v3_signer(&signer_pubkey, pre_naka_reward_cycle); + assert_eq!(blocks_signed_pre_naka, 0); + + // Keep track of reward cycles encountered + let mut reward_cycles = HashSet::new(); + + // Mine some nakamoto tenures + for _ in 0..naka_tenures { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + let block_height = btc_regtest_controller.get_headers_height(); + let reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + reward_cycles.insert(reward_cycle); + } + + // Make sure we got a couple cycles + assert!(reward_cycles.len() > 1); + assert!(!reward_cycles.contains(&pre_naka_reward_cycle)); + + // Since we have only one signer, it must be signing at least 1 block per reward cycle + for reward_cycle in reward_cycles.into_iter() { + let blocks_signed = get_v3_signer(&signer_pubkey, reward_cycle); + assert_ne!(blocks_signed, 0); + } + + info!("------------------------- Test finished, clean up -------------------------"); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} From 25008560b6f8a1969e6aaa1749d8ca2e8aa518b3 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 12 Sep 2024 17:52:03 -0400 Subject: [PATCH 1068/1400] fix: Varoius fixes so that `/v3/signer/` endpoint works now --- stackslib/src/chainstate/nakamoto/mod.rs | 11 +---------- stackslib/src/chainstate/stacks/db/mod.rs | 16 ++++++++-------- stackslib/src/net/api/getsigner.rs | 6 ++---- stackslib/src/net/api/mod.rs | 1 + .../src/tests/nakamoto_integrations.rs | 5 +++-- 5 files changed, 15 insertions(+), 24 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index ff38a47691..821af99ed0 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -3305,16 +3305,7 @@ impl NakamotoChainState { "SELECT blocks_signed FROM signer_stats WHERE public_key = ?1 AND reward_cycle = ?2"; let params = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle]; chainstate_db - .query_row(sql, params, |row| { - let value: String = row.get(2)?; - value.parse::().map_err(|e| { - rusqlite::Error::FromSqlConversionFailure( - size_of::(), - rusqlite::types::Type::Integer, - e.into(), - ) - }) - }) + .query_row(sql, params, |row| row.get("blocks_signed")) .optional() .map(Option::unwrap_or_default) // It's fine to map `NONE` to `0`, because it's impossible to have `Some(0)` .map_err(ChainstateError::from) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 198de0354e..e829a8c030 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -298,14 +298,14 @@ impl DBConfig { }); match epoch_id { StacksEpochId::Epoch10 => true, - StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 6, - StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 6, - StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 6, - StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 6, - StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 6, - StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 6, - StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 6, - StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 6, + StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 7, + StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 7, + StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 7, + StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 7, + StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 7, + StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 7, + StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 7, + StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 7, } } } diff --git a/stackslib/src/net/api/getsigner.rs b/stackslib/src/net/api/getsigner.rs index 7a9b418cc3..1231e195c1 100644 --- a/stackslib/src/net/api/getsigner.rs +++ b/stackslib/src/net/api/getsigner.rs @@ -61,10 +61,8 @@ impl HttpRequest for GetSignerRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new( - r#"^/v3/stacker_set/(?P[0-9a-f]{66})/(?P[0-9]{1,10})$"#, - ) - .unwrap() + Regex::new(r#"^/v3/signer/(?P[0-9a-f]{66})/(?P[0-9]{1,10})$"#) + .unwrap() } fn metrics_identifier(&self) -> &str { diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 6eff1a1c53..eac4876115 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -125,6 +125,7 @@ impl StacksHttp { self.register_rpc_endpoint( gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(), ); + self.register_rpc_endpoint(getsigner::GetSignerRequestHandler::default()); self.register_rpc_endpoint( liststackerdbreplicas::RPCListStackerDBReplicasRequestHandler::new(), ); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 8296c153d4..e241c08326 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7983,14 +7983,15 @@ fn v3_signer_api_endpoint() { let http_origin = format!("http://{}", &conf.node.rpc_bind); let get_v3_signer = |pubkey: &Secp256k1PublicKey, reward_cycle: u64| { - let url = format!( + let url = &format!( "{http_origin}/v3/signer/{pk}/{reward_cycle}", pk = pubkey.to_hex() ); - info!("Sending GET {url}"); + info!("Send request: GET {url}"); reqwest::blocking::get(url) .unwrap_or_else(|e| panic!("GET request failed: {e}")) .text() + .inspect(|response| info!("Recieved response: GET {url} -> {response}")) .expect("Empty response") .parse::() .unwrap_or_else(|e| panic!("Failed to parse response as `u64`: {e}")) From 6945aa8a7a68b1f3107e95d602149730741939ad Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 12 Sep 2024 20:48:13 -0400 Subject: [PATCH 1069/1400] chore: address PR feedback -- the signer coordinator should exit immediately on burncahin tip change --- stacks-signer/src/signerdb.rs | 5 ++++- .../src/nakamoto_node/sign_coordinator.rs | 20 ++++--------------- 2 files changed, 8 insertions(+), 17 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 6f5b6c6e06..06b1e2efbe 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -272,7 +272,10 @@ impl BlockInfo { matches!(prev_state, BlockState::Unprocessed) } BlockState::LocallyAccepted => { - matches!(prev_state, BlockState::Unprocessed) + matches!( + prev_state, + BlockState::Unprocessed | BlockState::LocallyAccepted + ) } BlockState::LocallyRejected => { matches!(prev_state, BlockState::Unprocessed) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index e86bb724e5..29a64cfb27 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -17,7 +17,7 @@ use std::collections::BTreeMap; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Receiver; use std::sync::Arc; -use std::time::{Duration, Instant}; +use std::time::Duration; use hashbrown::{HashMap, HashSet}; use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0}; @@ -78,7 +78,6 @@ pub struct SignCoordinator { signer_entries: HashMap, weight_threshold: u32, total_weight: u32, - config: Config, keep_running: Arc, pub next_signer_bitvec: BitVec<4000>, } @@ -310,7 +309,6 @@ impl SignCoordinator { signer_entries: signer_public_keys, weight_threshold: threshold, total_weight, - config: config.clone(), keep_running, }; return Ok(sign_coordinator); @@ -333,7 +331,6 @@ impl SignCoordinator { signer_entries: signer_public_keys, weight_threshold: threshold, total_weight, - config: config.clone(), keep_running, }) } @@ -752,8 +749,6 @@ impl SignCoordinator { "threshold" => self.weight_threshold, ); - let mut new_burn_tip_ts = None; - loop { // look in the nakamoto staging db -- a block can only get stored there if it has // enough signing weight to clear the threshold @@ -774,16 +769,9 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } - if new_burn_tip_ts.is_none() { - if Self::check_burn_tip_changed(&sortdb, &burn_tip.consensus_hash) { - new_burn_tip_ts = Some(Instant::now()); - } - } - if let Some(ref new_burn_tip_ts) = new_burn_tip_ts.as_ref() { - if new_burn_tip_ts.elapsed() >= self.config.miner.wait_on_interim_blocks { - debug!("SignCoordinator: Exiting due to new burnchain tip"); - return Err(NakamotoNodeError::BurnchainTipChanged); - } + if Self::check_burn_tip_changed(&sortdb, &burn_tip.consensus_hash) { + debug!("SignCoordinator: Exiting due to new burnchain tip"); + return Err(NakamotoNodeError::BurnchainTipChanged); } // one of two things can happen: From c77c024db6192cf8bbbac99249fd985418d246d5 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 13 Sep 2024 10:52:47 -0400 Subject: [PATCH 1070/1400] chore: add info to signature gathering failure logs --- .../stacks-node/src/nakamoto_node/miner.rs | 40 ++++++++++++------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index c3a1baea60..ef2a5e609d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -358,22 +358,32 @@ impl BlockMinerThread { &mut attempts, ) { Ok(x) => x, - Err(e) => { - match e { - NakamotoNodeError::StacksTipChanged => { - info!("Stacks tip changed while waiting for signatures"); - return Err(e); - } - NakamotoNodeError::BurnchainTipChanged => { - info!("Burnchain tip changed while waiting for signatures"); - return Err(e); - } - _ => { - error!("Error while gathering signatures: {e:?}. Will try mining again."); - continue; - } + Err(e) => match e { + NakamotoNodeError::StacksTipChanged => { + info!("Stacks tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); } - } + NakamotoNodeError::BurnchainTipChanged => { + info!("Burnchain tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + _ => { + error!("Error while gathering signatures: {e:?}. Will try mining again."; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + continue; + } + }, }; new_block.header.signer_signature = signer_signature; From d8ac38b270e8f168e1f4fff81ef49feebeb85e1e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 13 Sep 2024 11:31:09 -0500 Subject: [PATCH 1071/1400] test: fix nakamoto_integrations::mock_mining() --- .../src/tests/nakamoto_integrations.rs | 65 ++++++++++++------- 1 file changed, 40 insertions(+), 25 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 583fea4309..d696c53688 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -99,9 +99,9 @@ use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ - call_read_only, get_account, get_account_result, get_chain_info_result, get_neighbors, - get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, - wait_for_runloop, + call_read_only, get_account, get_account_result, get_chain_info_opt, get_chain_info_result, + get_neighbors, get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, + test_observer, wait_for_runloop, }; use crate::tests::{ get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, @@ -7723,7 +7723,21 @@ fn mock_mining() { .spawn(move || follower_run_loop.start(None, 0)) .unwrap(); - debug!("Booted follower-thread"); + info!("Booting follower-thread, waiting for the follower to sync to the chain tip"); + + wait_for(120, || { + let Some(miner_node_info) = get_chain_info_opt(&naka_conf) else { + return Ok(false); + }; + let Some(follower_node_info) = get_chain_info_opt(&follower_conf) else { + return Ok(false); + }; + Ok(miner_node_info.stacks_tip_height == follower_node_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + let miner_node_info = get_chain_info(&naka_conf); + let follower_node_info = get_chain_info(&follower_conf); + info!("Node heights"; "miner" => miner_node_info.stacks_tip_height, "follower" => follower_node_info.stacks_tip_height); // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { @@ -7767,25 +7781,26 @@ fn mock_mining() { last_tip_height = info.stacks_tip_height; } - let mock_miner_timeout = Instant::now(); - while follower_naka_mined_blocks.load(Ordering::SeqCst) <= follower_naka_mined_blocks_before - { - if mock_miner_timeout.elapsed() >= Duration::from_secs(60) { - panic!( - "Timed out waiting for mock miner block {}", - follower_naka_mined_blocks_before + 1 - ); - } - thread::sleep(Duration::from_millis(100)); - } + let miner_node_info = get_chain_info(&naka_conf); + let follower_node_info = get_chain_info(&follower_conf); + info!("Node heights"; "miner" => miner_node_info.stacks_tip_height, "follower" => follower_node_info.stacks_tip_height); - let start_time = Instant::now(); - while commits_submitted.load(Ordering::SeqCst) <= commits_before { - if start_time.elapsed() >= Duration::from_secs(20) { - panic!("Timed out waiting for block-commit"); - } - thread::sleep(Duration::from_millis(100)); - } + wait_for(60, || { + Ok(follower_naka_mined_blocks.load(Ordering::SeqCst) + > follower_naka_mined_blocks_before) + }) + .expect(&format!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + )); + + wait_for(20, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .expect(&format!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + )); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -7809,9 +7824,9 @@ fn mock_mining() { // Check follower's mock miner let mock_mining_blocks_end = follower_naka_mined_blocks.load(Ordering::SeqCst); let blocks_mock_mined = mock_mining_blocks_end - mock_mining_blocks_start; - assert_eq!( - blocks_mock_mined, tenure_count, - "Should have mock mined `tenure_count` nakamoto blocks" + assert!( + blocks_mock_mined > tenure_count, + "Should have mock mined at least `tenure_count` nakamoto blocks" ); // wait for follower to reach the chain tip From 16bad25cd6667c506eb8364c78f0aabcf7954695 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 13 Sep 2024 13:08:57 -0500 Subject: [PATCH 1072/1400] test: fix signer::v0::end_of_tenure() --- testnet/stacks-node/src/tests/signer/v0.rs | 38 ++++++---------------- 1 file changed, 10 insertions(+), 28 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 229a99fe6b..5a1b508228 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1926,17 +1926,13 @@ fn end_of_tenure() { // give the system a chance to mine a Nakamoto block // But it doesn't have to mine one for this test to succeed? - let start = Instant::now(); - while start.elapsed() <= short_timeout { + wait_for(short_timeout.as_secs(), || { let mined_blocks = signer_test .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - if mined_blocks > blocks_before { - break; - } - sleep_ms(100); - } + Ok(mined_blocks > blocks_before) + }); info!("------------------------- Test Mine to Next Reward Cycle Boundary -------------------------"); signer_test.run_until_burnchain_height_nakamoto( @@ -1957,10 +1953,7 @@ fn end_of_tenure() { .running_nodes .nakamoto_blocks_proposed .load(Ordering::SeqCst); - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); + let blocks_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; let info = get_chain_info(&signer_test.running_nodes.conf); let start_height = info.stacks_tip_height; @@ -2020,29 +2013,18 @@ fn end_of_tenure() { info!("Block proposed and burn blocks consumed. Verifying that stacks block is still not processed"); assert_eq!( - signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst), + get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height, blocks_before ); info!("Unpausing block validation and waiting for block to be processed"); // Disable the stall and wait for the block to be processed TEST_VALIDATE_STALL.lock().unwrap().replace(false); - let start_time = Instant::now(); - while signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst) - <= blocks_before - { - assert!( - start_time.elapsed() <= short_timeout, - "Timed out waiting for block to be mined" - ); - std::thread::sleep(Duration::from_millis(100)); - } + wait_for(short_timeout.as_secs(), || { + let processed_now = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + Ok(processed_now > blocks_before) + }) + .expect("Timed out waiting for block to be mined"); let info = get_chain_info(&signer_test.running_nodes.conf); assert_eq!(info.stacks_tip_height, start_height + 1); From 12a06ead0b138dd14db9fa55a8eeeb0f164cb04f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 13 Sep 2024 14:28:28 -0400 Subject: [PATCH 1073/1400] chore: add debug logs before all `stacks_node_client` use --- stacks-signer/src/client/stacks_client.rs | 28 +++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cd65f7914b..e8ad18fd87 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -219,6 +219,7 @@ impl StacksClient { &self, tx: &StacksTransaction, ) -> Result { + debug!("stacks_node_client: Getting estimated fee..."); let request = FeeRateEstimateRequestBody { estimated_len: Some(tx.tx_len()), transaction_payload: to_hex(&tx.payload.serialize_to_vec()), @@ -283,6 +284,11 @@ impl StacksClient { /// Submit the block proposal to the stacks node. The block will be validated and returned via the HTTP endpoint for Block events. pub fn submit_block_for_validation(&self, block: NakamotoBlock) -> Result<(), ClientError> { + debug!("stacks_node_client: Submitting block for validation..."; + "signer_sighash" => %block.header.signer_signature_hash(), + "block_id" => %block.header.block_id(), + "block_height" => %block.header.chain_length, + ); let block_proposal = NakamotoBlockProposal { block, chain_id: self.chain_id, @@ -416,6 +422,10 @@ impl StacksClient { chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, ) -> Result, ClientError> { + debug!("stacks_node_client: Getting tenure forking info..."; + "chosen_parent" => %chosen_parent, + "last_sortition" => %last_sortition, + ); let send_request = || { self.stacks_node_client .get(self.tenure_forking_info_path(chosen_parent, last_sortition)) @@ -433,6 +443,7 @@ impl StacksClient { /// Get the sortition information for the latest sortition pub fn get_latest_sortition(&self) -> Result { + debug!("stacks_node_client: Getting latest sortition..."); let send_request = || { self.stacks_node_client .get(self.sortition_info_path()) @@ -452,6 +463,7 @@ impl StacksClient { /// Get the sortition information for a given sortition pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { + debug!("stacks_node_client: Getting sortition with consensus hash {ch}..."); let send_request = || { self.stacks_node_client .get(format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex())) @@ -471,7 +483,7 @@ impl StacksClient { /// Get the current peer info data from the stacks node pub fn get_peer_info(&self) -> Result { - debug!("Getting stacks node info..."); + debug!("stacks_node_client: Getting peer info..."); let timer = crate::monitoring::new_rpc_call_timer(&self.core_info_path(), &self.http_origin); let send_request = || { @@ -521,6 +533,7 @@ impl StacksClient { &self, reward_cycle: u64, ) -> Result>, ClientError> { + debug!("stacks_node_client: Getting reward set signers for reward cycle {reward_cycle}..."); let timer = crate::monitoring::new_rpc_call_timer( &self.reward_set_path(reward_cycle), &self.http_origin, @@ -558,7 +571,7 @@ impl StacksClient { /// Retrieve the current pox data from the stacks node pub fn get_pox_data(&self) -> Result { - debug!("Getting pox data..."); + debug!("stacks_node_client: Getting pox data..."); #[cfg(feature = "monitoring_prom")] let timer = crate::monitoring::new_rpc_call_timer(&self.pox_path(), &self.http_origin); let send_request = || { @@ -606,7 +619,7 @@ impl StacksClient { &self, address: &StacksAddress, ) -> Result { - debug!("Getting account info..."); + debug!("stacks_node_client: Getting account info..."); let timer = crate::monitoring::new_rpc_call_timer(&self.accounts_path(address), &self.http_origin); let send_request = || { @@ -683,6 +696,10 @@ impl StacksClient { /// Returns `true` if the block was accepted or `false` if the block /// was rejected. pub fn post_block(&self, block: &NakamotoBlock) -> Result { + debug!("stacks_node_client: Posting block to the stacks node..."; + "block_id" => block.header.block_id(), + "block_height" => block.header.chain_length, + ); let response = self .stacks_node_client .post(format!( @@ -705,6 +722,9 @@ impl StacksClient { pub fn submit_transaction(&self, tx: &StacksTransaction) -> Result { let txid = tx.txid(); let tx = tx.serialize_to_vec(); + debug!("stacks_node_client: Submitting transaction to the stacks node..."; + "txid" => %txid, + ); let timer = crate::monitoring::new_rpc_call_timer(&self.transaction_path(), &self.http_origin); let send_request = || { @@ -734,7 +754,7 @@ impl StacksClient { function_name: &ClarityName, function_args: &[ClarityValue], ) -> Result { - debug!("Calling read-only function {function_name} with args {function_args:?}..."); + debug!("stacks_node_client: Calling read-only function {function_name} with args {function_args:?}..."); let args = function_args .iter() .filter_map(|arg| arg.serialize_to_hex().ok()) From ebd4234da987371b515b80791a9f7dc8b051ff8b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 13 Sep 2024 15:16:34 -0400 Subject: [PATCH 1074/1400] chore: fix typo in new logs --- stacks-signer/src/client/stacks_client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index e8ad18fd87..4bd844b181 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -697,8 +697,8 @@ impl StacksClient { /// was rejected. pub fn post_block(&self, block: &NakamotoBlock) -> Result { debug!("stacks_node_client: Posting block to the stacks node..."; - "block_id" => block.header.block_id(), - "block_height" => block.header.chain_length, + "block_id" => %block.header.block_id(), + "block_height" => %block.header.chain_length, ); let response = self .stacks_node_client From e89746a55c8932825e5989a1505ab7076656024e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 13 Sep 2024 14:47:23 -0500 Subject: [PATCH 1075/1400] test: fix signer::v0::miner_forking() --- testnet/stacks-node/src/tests/signer/v0.rs | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5a1b508228..ecda37c66e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1742,6 +1742,10 @@ fn miner_forking() { TEST_BROADCAST_STALL.lock().unwrap().replace(true); let rl2_commits_before = second_miner_commits_submitted.load(Ordering::SeqCst); + let rl1_commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); signer_test .running_nodes @@ -1755,6 +1759,15 @@ fn miner_forking() { Ok(commits_count > rl2_commits_before) }) .unwrap(); + // wait until a commit is submitted by run_loop_1 + wait_for(60, || { + let commits_count = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits_count > rl1_commits_before) + }) + .unwrap(); // fetch the current sortition info let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); @@ -1815,7 +1828,10 @@ fn miner_forking() { let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) .into_iter() - .map(|header| (header.consensus_hash.clone(), header)) + .map(|header| { + info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %sortition_data.consensus_hash); + (header.consensus_hash.clone(), header) + }) .collect(); if had_tenure { From fcc24986c44c30e90bb6bd273d62c716825bb5f3 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 13 Sep 2024 13:45:58 -0700 Subject: [PATCH 1076/1400] Do not have an overflow occur in the test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a2d7be2bac..af7d646204 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3141,6 +3141,7 @@ fn min_gap_between_blocks() { .as_stacks_nakamoto() .unwrap() .timestamp; + assert!(blocks.len() >= 2, "Expected at least 2 mined blocks"); let penultimate_block = blocks.get(blocks.len() - 2).unwrap(); let penultimate_block_time = penultimate_block .anchored_header From 02cc0c5583bea83eb5129af25800bb3791be16aa Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 13 Sep 2024 16:14:08 -0500 Subject: [PATCH 1077/1400] test: fix signer::v0::partial_tenure_fork() --- testnet/stacks-node/src/tests/signer/v0.rs | 114 ++++++++++++++++++--- 1 file changed, 100 insertions(+), 14 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index ecda37c66e..687b06c2a5 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -72,8 +72,8 @@ use crate::tests::nakamoto_integrations::{ wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ - get_account, get_chain_info, next_block_and_wait, run_until_burnchain_height, submit_tx, - submit_tx_fallible, test_observer, + get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, + run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, }; use crate::tests::{self, make_stacks_transfer}; use crate::{nakamoto_node, BurnchainController, Config, Keychain}; @@ -1948,7 +1948,8 @@ fn end_of_tenure() { .nakamoto_blocks_mined .load(Ordering::SeqCst); Ok(mined_blocks > blocks_before) - }); + }) + .unwrap(); info!("------------------------- Test Mine to Next Reward Cycle Boundary -------------------------"); signer_test.run_until_burnchain_height_nakamoto( @@ -3591,12 +3592,7 @@ fn partial_tenure_fork() { (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, )], |signer_config| { - let node_host = if signer_config.endpoint.port() % 2 == 0 { - &node_1_rpc_bind - } else { - &node_2_rpc_bind - }; - signer_config.node_host = node_host.to_string(); + signer_config.node_host = node_1_rpc_bind.clone(); }, |config| { let localhost = "127.0.0.1"; @@ -3656,6 +3652,17 @@ fn partial_tenure_fork() { signer_test.boot_to_epoch_3(); let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + info!("------------------------- Reached Epoch 3.0 -------------------------"); // due to the random nature of mining sortitions, the way this test is structured @@ -3680,8 +3687,26 @@ fn partial_tenure_fork() { let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + let proposed_before_1 = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); sleep_ms(1000); + + info!( + "Next tenure checking"; + "fork_initiated?" => fork_initiated, + "miner_1_tenures" => miner_1_tenures, + "miner_2_tenures" => miner_2_tenures, + "min_miner_1_tenures" => min_miner_2_tenures, + "min_miner_2_tenures" => min_miner_2_tenures, + "proposed_before_1" => proposed_before_1, + "proposed_before_2" => proposed_before_2, + "mined_before_1" => mined_before_1, + "mined_before_2" => mined_before_2, + ); + next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -3695,7 +3720,32 @@ fn partial_tenure_fork() { || mined_2 > mined_before_2) }, ) - .unwrap(); + .unwrap_or_else(|_| { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_1 = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + error!( + "Next tenure failed to tick"; + "fork_initiated?" => fork_initiated, + "miner_1_tenures" => miner_1_tenures, + "miner_2_tenures" => miner_2_tenures, + "min_miner_1_tenures" => min_miner_2_tenures, + "min_miner_2_tenures" => min_miner_2_tenures, + "proposed_before_1" => proposed_before_1, + "proposed_before_2" => proposed_before_2, + "mined_before_1" => mined_before_1, + "mined_before_2" => mined_before_2, + "mined_1" => mined_1, + "mined_2" => mined_2, + "proposed_1" => proposed_1, + "proposed_2" => proposed_2, + ); + panic!(); + }); btc_blocks_mined += 1; let mined_1 = blocks_mined1.load(Ordering::SeqCst); @@ -3720,12 +3770,23 @@ fn partial_tenure_fork() { } // mine (or attempt to mine) the interim blocks - info!("Mining interim blocks"); for interim_block_ix in 0..inter_blocks_per_tenure { let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + info!( + "Mining interim blocks"; + "fork_initiated?" => fork_initiated, + "miner_1_tenures" => miner_1_tenures, + "miner_2_tenures" => miner_2_tenures, + "min_miner_1_tenures" => min_miner_2_tenures, + "min_miner_2_tenures" => min_miner_2_tenures, + "proposed_before_2" => proposed_before_2, + "mined_before_1" => mined_before_1, + "mined_before_2" => mined_before_2, + ); + // submit a tx so that the miner will mine an extra block let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; let transfer_tx = @@ -3743,7 +3804,32 @@ fn partial_tenure_fork() { || mined_1 > mined_before_1 || mined_2 > mined_before_2) }) - .unwrap(); + .unwrap_or_else(|_| { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + let proposed_1 = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); + error!( + "Next tenure failed to tick"; + "fork_initiated?" => fork_initiated, + "miner_1_tenures" => miner_1_tenures, + "miner_2_tenures" => miner_2_tenures, + "min_miner_1_tenures" => min_miner_2_tenures, + "min_miner_2_tenures" => min_miner_2_tenures, + "proposed_before_1" => proposed_before_1, + "proposed_before_2" => proposed_before_2, + "mined_before_1" => mined_before_1, + "mined_before_2" => mined_before_2, + "mined_1" => mined_1, + "mined_2" => mined_2, + "proposed_1" => proposed_1, + "proposed_2" => proposed_2, + ); + panic!(); + }); } Err(e) => { if e.to_string().contains("TooMuchChaining") { @@ -3766,8 +3852,8 @@ fn partial_tenure_fork() { miner_2_tenures += 1; } info!( - "Miner 1 tenures: {}, Miner 2 tenures: {}", - miner_1_tenures, miner_2_tenures + "Miner 1 tenures: {}, Miner 2 tenures: {}, Miner 1 before: {}, Miner 2 before: {}", + miner_1_tenures, miner_2_tenures, mined_before_1, mined_before_2, ); let mined_1 = blocks_mined1.load(Ordering::SeqCst); From ff5b49d2fd9dc51fc75ad5e66f1afc9b0647464a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 13 Sep 2024 16:32:30 -0500 Subject: [PATCH 1078/1400] reduce some flakiness in partial_tenure_fork() --- testnet/stacks-node/src/tests/signer/v0.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 687b06c2a5..66aad1a00f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3582,7 +3582,6 @@ fn partial_tenure_fork() { let node_2_p2p = 51025; let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); - let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); // All signers are listening to node 1 let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( @@ -3644,12 +3643,13 @@ fn partial_tenure_fork() { naka_proposed_blocks: blocks_proposed2, .. } = run_loop_2.counters(); + + signer_test.boot_to_epoch_3(); let _run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); - signer_test.boot_to_epoch_3(); let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; wait_for(120, || { From 8c077d5104766c102733a4006efdff47e9f0193a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 13 Sep 2024 18:10:54 -0700 Subject: [PATCH 1079/1400] Fix confusion over what configured for cycle means by adding is_registered_for_cycle Signed-off-by: Jacinta Ferrant --- stacks-signer/src/runloop.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 9e1083047b..ad29ded57a 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -421,7 +421,9 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo "reward_cycle_before_refresh" => reward_cycle_before_refresh, "current_reward_cycle" => current_reward_cycle, "configured_for_current" => Self::is_configured_for_cycle(&self.stacks_signers, current_reward_cycle), + "registered_for_current" => Self::is_registered_for_cycle(&self.stacks_signers, current_reward_cycle), "configured_for_next" => Self::is_configured_for_cycle(&self.stacks_signers, next_reward_cycle), + "registered_for_next" => Self::is_registered_for_cycle(&self.stacks_signers, next_reward_cycle), "is_in_next_prepare_phase" => is_in_next_prepare_phase, ); @@ -456,6 +458,17 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo signer.reward_cycle() == reward_cycle } + fn is_registered_for_cycle( + stacks_signers: &HashMap>, + reward_cycle: u64, + ) -> bool { + let Some(signer) = stacks_signers.get(&(reward_cycle % 2)) else { + return false; + }; + signer.reward_cycle() == reward_cycle + && matches!(signer, ConfiguredSigner::RegisteredSigner(_)) + } + fn cleanup_stale_signers(&mut self, current_reward_cycle: u64) { let mut to_delete = Vec::new(); for (idx, signer) in &mut self.stacks_signers { From df5623aec39a1591f2f54b80b0b69590514ec56f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 13 Sep 2024 20:36:04 -0500 Subject: [PATCH 1080/1400] test: fix CI flake in signer::v0::min_gap_between_blocks() --- testnet/stacks-node/src/tests/signer/v0.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 66aad1a00f..0e62e0f05d 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3127,11 +3127,6 @@ fn min_gap_between_blocks() { signer_test.boot_to_epoch_3(); - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - info!("Ensure that the first Nakamoto block is mined after the gap is exceeded"); let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); assert_eq!(blocks.len(), 1); @@ -3164,11 +3159,8 @@ fn min_gap_between_blocks() { info!("Submitted transfer tx and waiting for block to be processed. Ensure it does not arrive before the gap is exceeded"); wait_for(60, || { - let blocks_processed = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - Ok(blocks_processed > blocks_before) + let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + Ok(blocks.len() >= 2) }) .unwrap(); From 9822ef15484acdf04d2f364d5175e96ab4228a6e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 14 Sep 2024 08:41:11 -0500 Subject: [PATCH 1081/1400] test: attempt to fix signers::v0::signer_set_rollover() in CI --- testnet/stacks-node/src/tests/signer/v0.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 0e62e0f05d..c4b1ae7d55 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -26,7 +26,6 @@ use libsigner::v0::messages::{ BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; -use rand::RngCore; use stacks::address::AddressHashMode; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; @@ -2850,11 +2849,8 @@ fn signer_set_rollover() { initial_balances.push((sender_addr.clone(), (send_amt + send_fee) * 4)); let run_stamp = rand::random(); - let mut rng = rand::thread_rng(); - let mut buf = [0u8; 2]; - rng.fill_bytes(&mut buf); - let rpc_port = u16::from_be_bytes(buf.try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + let rpc_port = 51024; let rpc_bind = format!("127.0.0.1:{}", rpc_port); // Setup the new signers that will take over From ec89da91a178d3f507c72cd04298b0a0532e7234 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 14 Sep 2024 13:14:19 -0500 Subject: [PATCH 1082/1400] CI: disable signer_set_rollover and mine_2_nakamoto_reward_cycles * these tests work fine locally, but in CI, they just timeout after 30 minutes without any logging --- .github/workflows/bitcoin-tests.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index db108d85f0..40afc15f3e 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -86,7 +86,6 @@ jobs: - tests::nakamoto_integrations::nakamoto_attempt_time - tests::signer::v0::block_proposal_rejection - tests::signer::v0::miner_gather_signatures - - tests::signer::v0::mine_2_nakamoto_reward_cycles - tests::signer::v0::end_of_tenure - tests::signer::v0::forked_tenure_okay - tests::signer::v0::forked_tenure_invalid @@ -95,7 +94,6 @@ jobs: - tests::signer::v0::multiple_miners - tests::signer::v0::mock_sign_epoch_25 - tests::signer::v0::multiple_miners_mock_sign_epoch_25 - - tests::signer::v0::signer_set_rollover - tests::signer::v0::miner_forking - tests::signer::v0::reloads_signer_set_in - tests::signer::v0::signers_broadcast_signed_blocks @@ -120,6 +118,11 @@ jobs: - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::signer::v0::multiple_miners_with_nakamoto_blocks - tests::signer::v0::partial_tenure_fork + # The following 2 tests work locally, but fail on CI. + # Locally, they both run consistently quite quickly, but on + # CI, they timeout without any logging. Disabling in CI for now. + # - tests::signer::v0::mine_2_nakamoto_reward_cycles + # - tests::signer::v0::signer_set_rollover # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower From b09ffc6ee42ce54c68e2085bc6a2bb85ac467b3b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 14 Sep 2024 14:06:39 -0500 Subject: [PATCH 1083/1400] chore: address PR review, uncomment CI tests --- .github/workflows/bitcoin-tests.yml | 10 ++++------ stackslib/src/net/api/postblock_proposal.rs | 4 +++- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 40afc15f3e..69878ce403 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -45,7 +45,8 @@ jobs: - tests::neon_integrations::liquid_ustx_integration - tests::neon_integrations::microblock_fork_poison_integration_test - tests::neon_integrations::microblock_integration_test - - tests::neon_integrations::microblock_large_tx_integration_test_FLAKY + # Disable this flaky test. Microblocks are no longer supported anyways. + # - tests::neon_integrations::microblock_large_tx_integration_test_FLAKY - tests::neon_integrations::microblock_limit_hit_integration_test - tests::neon_integrations::miner_submit_twice - tests::neon_integrations::mining_events_integration_test @@ -118,11 +119,8 @@ jobs: - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::signer::v0::multiple_miners_with_nakamoto_blocks - tests::signer::v0::partial_tenure_fork - # The following 2 tests work locally, but fail on CI. - # Locally, they both run consistently quite quickly, but on - # CI, they timeout without any logging. Disabling in CI for now. - # - tests::signer::v0::mine_2_nakamoto_reward_cycles - # - tests::signer::v0::signer_set_rollover + - tests::signer::v0::mine_2_nakamoto_reward_cycles + - tests::signer::v0::signer_set_rollover # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 9c5ab712c3..6c54b05342 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -196,11 +196,13 @@ impl NakamotoBlockProposal { }) } + /// DO NOT CALL FROM CONSENSUS CODE + /// /// Check to see if a block builds atop the highest block in a given tenure. /// That is: /// - its parent must exist, and /// - its parent must be as high as the highest block in the given tenure. - pub(crate) fn check_block_builds_on_highest_block_in_tenure( + fn check_block_builds_on_highest_block_in_tenure( chainstate: &StacksChainState, tenure_id: &ConsensusHash, parent_block_id: &StacksBlockId, From 35f866be8c8e3415b9fc95a25fc514d99ef3b4c8 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sat, 14 Sep 2024 13:07:13 -0700 Subject: [PATCH 1084/1400] Do not have an infinite loop inside mine_and_verify_confirmed_naka_block Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c4b1ae7d55..3f08b71f73 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -311,6 +311,11 @@ impl SignerTest { let mut signer_index = 0; let mut signature_index = 0; let mut signing_keys = HashSet::new(); + let start = Instant::now(); + debug!( + "Validating {} signatures against {num_signers} signers", + signature.len() + ); let validated = loop { // Since we've already checked `signature.len()`, this means we've // validated all the signatures in this loop @@ -341,6 +346,11 @@ impl SignerTest { signer_index += 1; signature_index += 1; } + // Shouldn't really ever timeout, but do this in case there is some sort of overflow/underflow happening. + assert!( + start.elapsed() < timeout, + "Timed out waiting to confirm block signatures" + ); }; assert!(validated); From 0fc9509d19b4e535a0ce9a607b38f73ac9d78fa7 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sun, 15 Sep 2024 11:47:10 -0700 Subject: [PATCH 1085/1400] Fix unhandled result in block state machine move_to calls Signed-off-by: Jacinta Ferrant --- stacks-signer/src/signerdb.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index b424c6e7d6..1b8a57abbb 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -251,14 +251,14 @@ impl BlockInfo { /// Mark the block as locally rejected and invalid pub fn mark_locally_rejected(&mut self) -> Result<(), String> { - self.move_to(BlockState::LocallyRejected); + self.move_to(BlockState::LocallyRejected)?; self.valid = Some(false); Ok(()) } /// Mark the block as globally rejected and invalid pub fn mark_globally_rejected(&mut self) -> Result<(), String> { - self.move_to(BlockState::GloballyRejected); + self.move_to(BlockState::GloballyRejected)?; self.valid = Some(false); Ok(()) } From ec3aea8ceed0bf48ba70b9dc638084672cc4155e Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sun, 15 Sep 2024 12:55:57 -0700 Subject: [PATCH 1086/1400] fix: add missing prometheus timers to stacks_client --- stacks-signer/src/client/stacks_client.rs | 44 +++++++++++------------ 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 5afec3f76e..75790dc54e 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -439,13 +439,16 @@ impl StacksClient { chosen_parent: &ConsensusHash, last_sortition: &ConsensusHash, ) -> Result, ClientError> { + let path = self.tenure_forking_info_path(chosen_parent, last_sortition); + let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { self.stacks_node_client - .get(self.tenure_forking_info_path(chosen_parent, last_sortition)) + .get(&path) .send() .map_err(backoff::Error::transient) }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -456,16 +459,16 @@ impl StacksClient { /// Get the sortition information for the latest sortition pub fn get_latest_sortition(&self) -> Result { + let path = self.sortition_info_path(); + let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { - self.stacks_node_client - .get(self.sortition_info_path()) - .send() - .map_err(|e| { - warn!("Signer failed to request latest sortition"; "err" => ?e); - e - }) + self.stacks_node_client.get(&path).send().map_err(|e| { + warn!("Signer failed to request latest sortition"; "err" => ?e); + e + }) }; let response = send_request()?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -475,16 +478,16 @@ impl StacksClient { /// Get the sortition information for a given sortition pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { + let path = format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex()); + let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { - self.stacks_node_client - .get(format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex())) - .send() - .map_err(|e| { - warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); - e - }) + self.stacks_node_client.get(&path).send().map_err(|e| { + warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); + e + }) }; let response = send_request()?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -582,7 +585,6 @@ impl StacksClient { /// Retrieve the current pox data from the stacks node pub fn get_pox_data(&self) -> Result { debug!("Getting pox data..."); - #[cfg(feature = "monitoring_prom")] let timer = crate::monitoring::new_rpc_call_timer(&self.pox_path(), &self.http_origin); let send_request = || { self.stacks_node_client @@ -591,7 +593,6 @@ impl StacksClient { .map_err(backoff::Error::transient) }; let response = retry_with_exponential_backoff(send_request)?; - #[cfg(feature = "monitoring_prom")] timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); @@ -706,13 +707,11 @@ impl StacksClient { /// Returns `true` if the block was accepted or `false` if the block /// was rejected. pub fn post_block(&self, block: &NakamotoBlock) -> Result { + let path = format!("{}{}?broadcast=1", self.http_origin, postblock_v3::PATH); + let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { self.stacks_node_client - .post(format!( - "{}{}?broadcast=1", - self.http_origin, - postblock_v3::PATH - )) + .post(&path) .header("Content-Type", "application/octet-stream") .header(AUTHORIZATION, self.auth_password.clone()) .body(block.serialize_to_vec()) @@ -723,6 +722,7 @@ impl StacksClient { }) }; let response = retry_with_exponential_backoff(send_request)?; + timer.stop_and_record(); if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } From 8af3d2ef9ff1f378710bb0823a65da616615f903 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sun, 15 Sep 2024 14:08:06 -0700 Subject: [PATCH 1087/1400] Fix bug and add percentage missing Signed-off-by: Jacinta Ferrant --- stacks-signer/src/main.rs | 44 +++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 64e5c48a96..513382e843 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -220,6 +220,7 @@ struct RewardCycleState { signers_slots: HashMap, signers_keys: HashMap, signers_addresses: HashMap, + signers_weights: HashMap, slot_ids: Vec, /// Reward cycle is not known until the first successful call to the node reward_cycle: Option, @@ -257,12 +258,6 @@ impl SignerMonitor { self.cycle_state.signers_slots = self.stacks_client.get_parsed_signer_slots(reward_cycle)?; - self.cycle_state.slot_ids = self - .cycle_state - .signers_slots - .values() - .map(|value| value.0) - .collect(); let entries = self .stacks_client @@ -277,6 +272,9 @@ impl SignerMonitor { self.cycle_state .signers_keys .insert(stacks_address, public_key); + self.cycle_state + .signers_weights + .insert(stacks_address, entry.weight); } for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { self.cycle_state @@ -284,9 +282,6 @@ impl SignerMonitor { .insert(*slot_id, *signer_address); } - self.cycle_state.signers_slots = - self.stacks_client.get_parsed_signer_slots(reward_cycle)?; - for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { self.cycle_state .signers_addresses @@ -318,8 +313,14 @@ impl SignerMonitor { }) .collect::>() .join(", "); + let missing_weight = missing_signers + .iter() + .map(|addr| self.cycle_state.signers_weights.get(addr).unwrap()) + .sum::(); + let total_weight = self.cycle_state.signers_weights.values().sum::(); + let percentage_missing = missing_weight as f64 / total_weight as f64 * 100.00; warn!( - "Missing messages for {} of {} signer(s). ", missing_signers.len(), self.cycle_state.signers_addresses.len(); + "Missing messages for {} of {} signer(s). Missing {percentage_missing:.2}% of signing weight ({missing_weight}/{total_weight})", missing_signers.len(), self.cycle_state.signers_addresses.len(); "signer_addresses" => formatted_signers, "signer_keys" => formatted_keys ); @@ -442,18 +443,21 @@ impl SignerMonitor { chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) }) .collect(); - for ((signer_address, slot_id), signer_message_opt) in self - .cycle_state - .signers_slots - .clone() - .into_iter() - .zip(new_messages) + + for (signer_message_opt, slot_id) in + new_messages.into_iter().zip(&self.cycle_state.slot_ids) { + let signer_slot_id = SignerSlotID(*slot_id); + let signer_address = *self + .cycle_state + .signers_addresses + .get(&signer_slot_id) + .expect("BUG: missing signer address for given slot id"); let Some(signer_message) = signer_message_opt else { missing_signers.push(signer_address); continue; }; - if let Some(last_message) = last_messages.get(&slot_id) { + if let Some(last_message) = last_messages.get(&signer_slot_id) { if last_message == &signer_message { continue; } @@ -467,11 +471,11 @@ impl SignerMonitor { || (epoch > StacksEpochId::Epoch25 && !matches!(signer_message, SignerMessage::BlockResponse(_))) { - unexpected_messages.insert(signer_address, (signer_message, slot_id)); + unexpected_messages.insert(signer_address, (signer_message, signer_slot_id)); continue; } - last_messages.insert(slot_id, signer_message); - last_updates.insert(slot_id, std::time::Instant::now()); + last_messages.insert(signer_slot_id, signer_message); + last_updates.insert(signer_slot_id, std::time::Instant::now()); } for (slot_id, last_update_time) in last_updates.iter() { if last_update_time.elapsed().as_secs() > self.args.max_age { From 572fb815cdba65175c84b41992cdcad748be7269 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 21:09:25 -0400 Subject: [PATCH 1088/1400] chore: stdext and rlimit dev-dependencies --- Cargo.lock | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 10b65eb745..e56e4400b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2795,6 +2795,15 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" +[[package]] +name = "rlimit" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7043b63bd0cd1aaa628e476b80e6d4023a3b50eb32789f2728908107bd0c793a" +dependencies = [ + "libc", +] + [[package]] name = "rstest" version = "0.11.0" @@ -3411,6 +3420,9 @@ dependencies = [ "regex", "reqwest", "ring 0.16.20", + "rlimit", + "rstest 0.17.0", + "rstest_reuse 0.5.0", "rusqlite", "serde", "serde_derive", @@ -3419,6 +3431,7 @@ dependencies = [ "stacks-common", "stacks-signer", "stackslib", + "stdext", "stx-genesis", "tikv-jemallocator", "tiny_http", From 95b72cda4d8d4313746b12a91dd85579bb5fd3d2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 21:10:11 -0400 Subject: [PATCH 1089/1400] bugfix: always save new neighbor data when we get it --- stackslib/src/net/neighbors/db.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/neighbors/db.rs b/stackslib/src/net/neighbors/db.rs index c0e65a6f85..0289875f11 100644 --- a/stackslib/src/net/neighbors/db.rs +++ b/stackslib/src/net/neighbors/db.rs @@ -557,9 +557,12 @@ impl NeighborWalkDB for PeerDBNeighborWalk { if let Some(data) = new_data { cur_neighbor.handshake_update(&tx, &data.handshake)?; - if let Some(db_data) = new_db_data { - cur_neighbor.save_update(&tx, Some(db_data.smart_contracts.as_slice()))?; - } + } + + if let Some(db_data) = new_db_data { + cur_neighbor.save_update(&tx, Some(db_data.smart_contracts.as_slice()))?; + } else { + cur_neighbor.save_update(&tx, None)?; } tx.commit()?; From b602455aed2a67c5a12fdb69ba4a697568dd1042 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 15 Sep 2024 22:13:08 -0400 Subject: [PATCH 1090/1400] test: increase open file descriptor limit in CI --- .github/workflows/bitcoin-tests.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 69878ce403..9465b90e29 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -140,6 +140,11 @@ jobs: with: btc-version: "25.0" + ## Increase open file descriptors limit + - name: Increase Open File Descriptors + run: | + sudo prlimit --nofile=4096:4096 + ## Run test matrix using restored cache of archive file ## - Test will timeout after env.TEST_TIMEOUT minutes - name: Run Tests From 69ecf2e9be5557b489791ecb702631068edeac9a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:20:24 -0400 Subject: [PATCH 1091/1400] chore: reduce some debug noise, and log connection name for bandwidth-exceeded conditions --- stackslib/src/net/chat.rs | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 926340d7fe..5949db0bbf 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -510,7 +510,7 @@ impl Neighbor { } }; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] { // setting BLOCKSTACK_NEIGHBOR_TEST_${PORTNUMBER} will let us select an organization // for this peer @@ -1433,6 +1433,8 @@ impl ConversationP2P { // get neighbors at random as long as they're fresh, and as long as they're compatible with // the current system epoch. + // Alternate at random between serving public-only and public/private-mixed IPs, since for + // the time being, the remote peer has no way of asking for a particular subset. let mut neighbors = PeerDB::get_fresh_random_neighbors( peer_dbconn, self.network_id, @@ -1441,6 +1443,7 @@ impl ConversationP2P { MAX_NEIGHBORS_DATA_LEN, chain_view.burn_block_height, false, + thread_rng().gen(), ) .map_err(net_error::DBError)?; @@ -1917,10 +1920,12 @@ impl ConversationP2P { /// Generates a Nack if we don't have this DB, or if the request's consensus hash is invalid. fn make_stacker_db_getchunkinv_response( network: &PeerNetwork, + naddr: NeighborAddress, chainstate: &mut StacksChainState, getchunkinv: &StackerDBGetChunkInvData, ) -> Result { Ok(network.make_StackerDBChunksInv_or_Nack( + naddr, chainstate, &getchunkinv.contract_id, &getchunkinv.rc_consensus_hash, @@ -1938,6 +1943,7 @@ impl ConversationP2P { ) -> Result { let response = ConversationP2P::make_stacker_db_getchunkinv_response( network, + self.to_neighbor_address(), chainstate, getchunkinv, )?; @@ -2120,7 +2126,8 @@ impl ConversationP2P { > (self.connection.options.max_block_push_bandwidth as f64) { debug!( - "Neighbor {:?} exceeded max block-push bandwidth of {} bytes/sec (currently at {})", + "{:?}: Neighbor {:?} exceeded max block-push bandwidth of {} bytes/sec (currently at {})", + &self, &self.to_neighbor_key(), self.connection.options.max_block_push_bandwidth, self.stats.get_block_push_bandwidth() @@ -2162,7 +2169,7 @@ impl ConversationP2P { && self.stats.get_microblocks_push_bandwidth() > (self.connection.options.max_microblocks_push_bandwidth as f64) { - debug!("Neighbor {:?} exceeded max microblocks-push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_microblocks_push_bandwidth, self.stats.get_microblocks_push_bandwidth()); + debug!("{:?}: Neighbor {:?} exceeded max microblocks-push bandwidth of {} bytes/sec (currently at {})", self, &self.to_neighbor_key(), self.connection.options.max_microblocks_push_bandwidth, self.stats.get_microblocks_push_bandwidth()); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) .and_then(|handle| Ok(Some(handle))); @@ -2199,7 +2206,7 @@ impl ConversationP2P { && self.stats.get_transaction_push_bandwidth() > (self.connection.options.max_transaction_push_bandwidth as f64) { - debug!("Neighbor {:?} exceeded max transaction-push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_transaction_push_bandwidth, self.stats.get_transaction_push_bandwidth()); + debug!("{:?}: Neighbor {:?} exceeded max transaction-push bandwidth of {} bytes/sec (currently at {})", self, &self.to_neighbor_key(), self.connection.options.max_transaction_push_bandwidth, self.stats.get_transaction_push_bandwidth()); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) .and_then(|handle| Ok(Some(handle))); @@ -2237,7 +2244,7 @@ impl ConversationP2P { && self.stats.get_stackerdb_push_bandwidth() > (self.connection.options.max_stackerdb_push_bandwidth as f64) { - debug!("Neighbor {:?} exceeded max stackerdb-push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_stackerdb_push_bandwidth, self.stats.get_stackerdb_push_bandwidth()); + debug!("{:?}: Neighbor {:?} exceeded max stackerdb-push bandwidth of {} bytes/sec (currently at {})", self, &self.to_neighbor_key(), self.connection.options.max_stackerdb_push_bandwidth, self.stats.get_stackerdb_push_bandwidth()); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) .and_then(|handle| Ok(Some(handle))); @@ -2276,7 +2283,7 @@ impl ConversationP2P { && self.stats.get_nakamoto_block_push_bandwidth() > (self.connection.options.max_nakamoto_block_push_bandwidth as f64) { - debug!("Neighbor {:?} exceeded max Nakamoto block push bandwidth of {} bytes/sec (currently at {})", &self.to_neighbor_key(), self.connection.options.max_nakamoto_block_push_bandwidth, self.stats.get_nakamoto_block_push_bandwidth()); + debug!("{:?}: Neighbor {:?} exceeded max Nakamoto block push bandwidth of {} bytes/sec (currently at {})", self, &self.to_neighbor_key(), self.connection.options.max_nakamoto_block_push_bandwidth, self.stats.get_nakamoto_block_push_bandwidth()); return self .reply_nack(local_peer, chain_view, preamble, NackErrorCodes::Throttled) .and_then(|handle| Ok(Some(handle))); @@ -2415,11 +2422,11 @@ impl ConversationP2P { Ok(num_recved) => { total_recved += num_recved; if num_recved > 0 { - debug!("{:?}: received {} bytes", self, num_recved); + test_debug!("{:?}: received {} bytes", self, num_recved); self.stats.last_recv_time = get_epoch_time_secs(); self.stats.bytes_rx += num_recved as u64; } else { - debug!("{:?}: received {} bytes, stopping", self, num_recved); + test_debug!("{:?}: received {} bytes, stopping", self, num_recved); break; } } @@ -2436,7 +2443,7 @@ impl ConversationP2P { } } } - debug!("{:?}: received {} bytes", self, total_recved); + test_debug!("{:?}: received {} bytes", self, total_recved); Ok(total_recved) } @@ -2464,7 +2471,7 @@ impl ConversationP2P { } } } - debug!("{:?}: sent {} bytes", self, total_sent); + test_debug!("{:?}: sent {} bytes", self, total_sent); Ok(total_sent) } @@ -3051,8 +3058,9 @@ impl ConversationP2P { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { + #![allow(unused)] use std::fs; use std::io::prelude::*; use std::io::{Read, Write}; From 62bdacd6359fa5ff502a481027cb24e946a472da Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:20:54 -0400 Subject: [PATCH 1092/1400] chore: document neighbor walk connection opts, and add `log_neighbors_Freq` and `walk_seed_probability` options --- stackslib/src/net/connection.rs | 36 ++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 0d4d5aafd6..db50c46333 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -43,7 +43,7 @@ use crate::net::inv::{INV_REWARD_CYCLES, INV_SYNC_INTERVAL}; use crate::net::neighbors::{ MAX_NEIGHBOR_AGE, NEIGHBOR_REQUEST_TIMEOUT, NEIGHBOR_WALK_INTERVAL, NUM_INITIAL_WALKS, WALK_MAX_DURATION, WALK_MIN_DURATION, WALK_RESET_INTERVAL, WALK_RESET_PROB, WALK_RETRY_COUNT, - WALK_STATE_TIMEOUT, + WALK_SEED_PROBABILITY, WALK_STATE_TIMEOUT, }; use crate::net::{ Error as net_error, MessageSequence, Preamble, ProtocolFamily, RelayData, StacksHttp, StacksP2P, @@ -235,9 +235,10 @@ impl NetworkReplyHandle

{ None } else { // still have data to send, or we will send more. - debug!( + test_debug!( "Still have data to send, drop_on_success = {}, ret = {}", - drop_on_success, ret + drop_on_success, + ret ); Some(fd) } @@ -345,15 +346,37 @@ pub struct ConnectionOptions { pub max_http_clients: u64, pub neighbor_request_timeout: u64, pub max_neighbor_age: u64, + /// How many walk steps to take when the node has booted up. This influences how quickly the + /// node will find new peers on start-up. This describes the maximum length of such walks. pub num_initial_walks: u64, + /// How many walk state-machine restarts to take when the node has boote dup. This influences + /// how quickly the node will find new peers on start-up. This describes the maximum number of + /// such walk state-machine run-throughs. pub walk_retry_count: u64, + /// How often, in seconds, to run the walk state machine. pub walk_interval: u64, + /// The regularity of doing an inbound neighbor walk (as opposed to an outbound neighbor walk). + /// Every `walk_inbound_ratio + 1`-th walk will be an inbound neighbor walk. pub walk_inbound_ratio: u64, + /// Minimum number of steps a walk will run until it can be reset. pub walk_min_duration: u64, + /// Maximum number of steps a walk will run until forcibly reset. pub walk_max_duration: u64, + /// Probabiility that the walk will be reset once `walk_min_duration` steps are taken. pub walk_reset_prob: f64, + /// Maximum number of seconds a walk can last before being reset. pub walk_reset_interval: u64, + /// Maximum number of seconds a walk can remain in the same state before being reset. pub walk_state_timeout: u64, + /// If the node is booting up, or if the node is not connected to an always-allowed peer and + /// there are one or more such peers in the peers DB, then this controls the probability that + /// the node will attempt to start a walk to an always-allowed peer. It's good to have this + /// close to, but not equal to 1.0, so that if the node can't reach any always-allowed peer for + /// some reason but can reach other neighbors, then neighbor walks can continue. + pub walk_seed_probability: f64, + /// How often, if ever, to log our neighbors via DEBG. + /// Units are milliseconds. A value of 0 means "never". + pub log_neighbors_freq: u64, pub inv_sync_interval: u64, pub inv_reward_cycles: u64, pub download_interval: u64, @@ -494,6 +517,8 @@ impl std::default::Default for ConnectionOptions { walk_reset_prob: WALK_RESET_PROB, walk_reset_interval: WALK_RESET_INTERVAL, walk_state_timeout: WALK_STATE_TIMEOUT, + walk_seed_probability: WALK_SEED_PROBABILITY, + log_neighbors_freq: 60_000, inv_sync_interval: INV_SYNC_INTERVAL, // how often to synchronize block inventories inv_reward_cycles: INV_REWARD_CYCLES, // how many reward cycles of blocks to sync in a non-full inventory sync download_interval: BLOCK_DOWNLOAD_INTERVAL, // how often to scan for blocks to download @@ -1024,7 +1049,7 @@ impl ConnectionInbox

{ total_read += num_read; if num_read > 0 || total_read > 0 { - debug!("read {} bytes; {} total", num_read, total_read); + test_debug!("read {} bytes; {} total", num_read, total_read); } if num_read > 0 { @@ -1479,8 +1504,9 @@ pub type ReplyHandleP2P = NetworkReplyHandle; pub type ConnectionHttp = NetworkConnection; pub type ReplyHandleHttp = NetworkReplyHandle; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { + #![allow(unused)] use std::io::prelude::*; use std::io::{Read, Write}; use std::sync::{Arc, Mutex}; From 71bdd843f2dab1bea44284c652cc661de6ae2215 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:21:26 -0400 Subject: [PATCH 1093/1400] fix: if we're not in IBD and we dno't have a seed node connection (which can happen due to misconfiguration), then still occasionally attempt to walk to non-seed nodes (fixes #5159) --- stackslib/src/net/neighbors/mod.rs | 68 +++++++++++++++++++++++------- 1 file changed, 53 insertions(+), 15 deletions(-) diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index 6447a6ec00..28355d0e1a 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -42,9 +42,9 @@ pub use db::{NeighborReplacements, NeighborWalkDB, PeerDBNeighborWalk}; pub use walk::{NeighborPingback, NeighborWalk, NeighborWalkResult}; /// How often we can contact other neighbors, at a minimim -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const NEIGHBOR_MINIMUM_CONTACT_INTERVAL: u64 = 0; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const NEIGHBOR_MINIMUM_CONTACT_INTERVAL: u64 = 600; /// Default number of seconds to wait for a reply from a neighbor @@ -79,33 +79,37 @@ pub const WALK_STATE_TIMEOUT: u64 = 60; /// Total number of seconds for which a particular walk can exist. It will be reset if it exceeds /// this age. -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const WALK_RESET_INTERVAL: u64 = 60; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const WALK_RESET_INTERVAL: u64 = 600; /// How often the node will consider pruning neighbors from its neighbor set. The node will prune /// neighbors from over-represented hosts and IP ranges in order to maintain connections to a /// diverse set of neighbors. -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const PRUNE_FREQUENCY: u64 = 0; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const PRUNE_FREQUENCY: u64 = 43200; /// Not all neighbors discovered will have an up-to-date chain tip. This value is the highest /// discrepancy between the local burnchain block height and the remote node's burnchain block /// height for which the neighbor will be considered as a worthwhile peer to remember. -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const MAX_NEIGHBOR_BLOCK_DELAY: u64 = 25; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const MAX_NEIGHBOR_BLOCK_DELAY: u64 = 288; /// How often to kick off neighbor walks. -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const NEIGHBOR_WALK_INTERVAL: u64 = 0; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const NEIGHBOR_WALK_INTERVAL: u64 = 120; // seconds +/// Probability that we begin an always-allowed peer walk if we're either in IBD or if we're not +/// connected to at least one always-allowed node +pub const WALK_SEED_PROBABILITY: f64 = 0.9; + impl PeerNetwork { /// Begin an outbound walk or a pingback walk, depending on whether or not we have pingback /// state. @@ -115,6 +119,10 @@ impl PeerNetwork { &self, ) -> Result, net_error> { if self.get_walk_pingbacks().len() == 0 { + debug!( + "{:?}: no walk pingbacks, so instantiate a normal neighbor walk", + self.get_local_peer() + ); // unconditionally do an outbound walk return NeighborWalk::instantiate_walk( self.get_neighbor_walk_db(), @@ -127,6 +135,10 @@ impl PeerNetwork { // If one fails, then try the other let do_outbound = thread_rng().gen::(); if do_outbound { + debug!( + "{:?}: instantiate a normal neighbor walk", + self.get_local_peer() + ); match NeighborWalk::instantiate_walk( self.get_neighbor_walk_db(), self.get_neighbor_comms(), @@ -148,6 +160,10 @@ impl PeerNetwork { } } } else { + debug!( + "{:?}: instantiate a pingback neighbor walk", + self.get_local_peer() + ); match NeighborWalk::instantiate_walk_from_pingback( self.get_neighbor_walk_db(), self.get_neighbor_comms(), @@ -216,9 +232,17 @@ impl PeerNetwork { .unwrap_or((0, 0)); // always ensure we're connected to always-allowed outbound peers other than ourselves - let walk_res = if ibd || (num_always_connected == 0 && total_always_connected > 0) { + let walk_seed = + thread_rng().gen::() < self.get_connection_opts().walk_seed_probability; + let walk_res = if ibd + || (num_always_connected == 0 && total_always_connected > 0 && walk_seed) + { // always connect to bootstrap peers if in IBD, or if we're not connected to an // always-allowed peer already + debug!("{:?}: Instantiate walk to always allowed", self.get_local_peer(); + "num_always_connected" => num_always_connected, + "total_always_connected" => total_always_connected, + "ibd" => ibd); NeighborWalk::instantiate_walk_to_always_allowed( self.get_neighbor_walk_db(), self.get_neighbor_comms(), @@ -226,12 +250,26 @@ impl PeerNetwork { ibd, ) } else if self.walk_attempts % (self.connection_opts.walk_inbound_ratio + 1) == 0 { - // not IBD. Time to try an inbound neighbor + // not IBD, or not walk_seed, or connected to an always-allowed peer, or no always-allowed. + // Time to try an inbound neighbor + debug!("{:?}: Instantiate walk to inbound neigbor", self.get_local_peer(); + "walk_attempts" => self.walk_attempts, + "walk_inbound_ratio" => self.connection_opts.walk_inbound_ratio, + "num_always_connected" => num_always_connected, + "total_always_connected" => total_always_connected, + "walk_seed" => walk_seed); + self.new_maybe_inbound_walk() } else { - // not IBD, and not time to try an inbound neighbor. + // no need to walk to an always-allowed peer, and not time to try an inbound neighbor. // Either do an outbound walk, or do a pingback walk. // If one fails, then try the other. + debug!("{:?}: Instantiate walk to either outbound or pingback neighbor", self.get_local_peer(); + "walk_attempts" => self.walk_attempts, + "walk_inbound_ratio" => self.connection_opts.walk_inbound_ratio, + "num_always_connected" => num_always_connected, + "total_always_connected" => total_always_connected, + "walk_seed" => walk_seed); self.new_outbound_or_pingback_walk() }; @@ -329,7 +367,7 @@ impl PeerNetwork { return true; } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn print_walk_diagnostics(&mut self) { let (mut inbound, mut outbound) = self.dump_peer_table(); @@ -359,7 +397,7 @@ impl PeerNetwork { debug!("{:?}: Walk finished ===================", &self.local_peer); } - #[cfg(not(test))] + #[cfg(not(any(test, feature = "testing")))] fn print_walk_diagnostics(&self) {} /// Update the state of our peer graph walk. From 6c56f71961c3b802da70581f2272db6b2a8ef771 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:22:27 -0400 Subject: [PATCH 1094/1400] fix: collate peers by public key and only report the one with the latest last-contact time. Don't query rows in `frontier` directly, unless it's for a specific `slot` (fixes #5169) --- stackslib/src/net/db.rs | 383 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 351 insertions(+), 32 deletions(-) diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 07f0bb5d74..ff6b5a9a05 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use std::{fmt, fs}; use clarity::vm::types::{ @@ -45,7 +45,7 @@ use crate::util_lib::db::{ }; use crate::util_lib::strings::UrlString; -pub const PEERDB_VERSION: &'static str = "2"; +pub const PEERDB_VERSION: &'static str = "3"; const NUM_SLOTS: usize = 8; @@ -394,13 +394,20 @@ const PEERDB_SCHEMA_2: &'static [&'static str] = &[ CREATE INDEX IF NOT EXISTS index_stackedb_peers_by_slot ON stackerdb_peers(peer_slot); "#, r#" - ALTER TABLE local_peer ADD COLUMN stacker_dbs TEXT + ALTER TABLE local_peer ADD COLUMN stacker_dbs TEXT; "#, r#" UPDATE db_config SET version = 2; "#, ]; +const PEERDB_SCHEMA_3: &'static [&'static str] = &[ + r#" + ALTER TABLE frontier ADD COLUMN public BOOL NOT NULL DEFAULT 0; + "#, + "UPDATE db_config SET version = 3;", +]; + #[derive(Debug)] pub struct PeerDB { pub conn: Connection, @@ -516,6 +523,18 @@ impl PeerDB { Ok(version) } + /// Tag each address in the peer DB as public if its address is not private. + /// Happens as part of the schema 3 migration + fn update_peerdb_public_addrs(tx: &Transaction) -> Result<(), db_error> { + let all_peers = Self::get_all_peers(tx)?; + for peer in all_peers { + let public = !peer.addr.addrbytes.is_in_private_range(); + debug!("Marking peer {:?} as public? {}", &peer, public); + Self::update_peer(tx, &peer)?; + } + Ok(()) + } + #[cfg_attr(test, mutants::skip)] fn apply_schema_2(tx: &Transaction) -> Result<(), db_error> { test_debug!("Apply schema 2 to peer DB"); @@ -525,6 +544,16 @@ impl PeerDB { Ok(()) } + #[cfg_attr(test, mutants::skip)] + fn apply_schema_3(tx: &Transaction) -> Result<(), db_error> { + test_debug!("Apply schema 3 to peer DB"); + for row_text in PEERDB_SCHEMA_3 { + tx.execute_batch(row_text).map_err(db_error::SqliteError)?; + } + Self::update_peerdb_public_addrs(tx)?; + Ok(()) + } + fn apply_schema_migrations(tx: &Transaction) -> Result { test_debug!("Apply any schema migrations"); let expected_version = PEERDB_VERSION.to_string(); @@ -537,6 +566,8 @@ impl PeerDB { } if version == "1" { PeerDB::apply_schema_2(tx)?; + } else if version == "2" { + PeerDB::apply_schema_3(tx)?; } else if version == expected_version { return Ok(ret.expect("unreachable")); } else { @@ -675,9 +706,12 @@ impl PeerDB { } } } else { - let tx = db.tx_begin()?; - PeerDB::apply_schema_migrations(&tx)?; - tx.commit()?; + let peerdb_version = PeerDB::get_schema_version(&db.conn)?; + if peerdb_version != PEERDB_VERSION { + let tx = db.tx_begin()?; + PeerDB::apply_schema_migrations(&tx)?; + tx.commit()?; + } db.update_local_peer( network_id, @@ -748,7 +782,7 @@ impl PeerDB { } /// Open a peer database in memory (used for testing) - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn connect_memory( network_id: u32, parent_network_id: u32, @@ -903,8 +937,38 @@ impl PeerDB { Ok(ret) } + /// Group a list of peers by public key, and return the one with the highest last-contact time + fn query_peers( + conn: &Connection, + qry: &str, + args: &[&dyn ToSql], + ) -> Result, db_error> { + let peers: Vec = query_rows(conn, qry, args)?; + let mut grouped_by_public_key: HashMap = HashMap::new(); + for peer in peers.into_iter() { + if let Some(cur_peer) = grouped_by_public_key.get_mut(&peer.public_key) { + if cur_peer.last_contact_time < peer.last_contact_time { + *cur_peer = peer; + } + } else { + grouped_by_public_key.insert(peer.public_key.clone(), peer); + } + } + Ok(grouped_by_public_key.into_values().collect()) + } + + /// Query a single peer. + /// If multiple rows are returned, then only the first-found row is reported. + fn query_peer( + conn: &Connection, + qry: &str, + args: &[&dyn ToSql], + ) -> Result, db_error> { + let mut peers = Self::query_peers(conn, qry, args)?; + Ok(peers.pop()) + } + /// Get a peer from the DB. - /// Panics if the peer was inserted twice -- this shouldn't happen. pub fn get_peer( conn: &DBConn, network_id: u32, @@ -913,7 +977,7 @@ impl PeerDB { ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; let args = params![network_id, peer_addr.to_bin(), peer_port,]; - query_row::(conn, qry, args) + Self::query_peer(conn, qry, args) } pub fn has_peer( @@ -930,7 +994,7 @@ impl PeerDB { } /// Get peer by port (used in tests where the IP address doesn't really matter) - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_peer_by_port( conn: &DBConn, network_id: u32, @@ -938,7 +1002,7 @@ impl PeerDB { ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND port = ?2"; let args = params![network_id, peer_port]; - query_row::(conn, &qry, args) + Self::query_peer(conn, qry, args) } /// Get a peer record at a particular slot @@ -949,6 +1013,8 @@ impl PeerDB { ) -> Result, db_error> { let qry = "SELECT * FROM frontier WHERE network_id = ?1 AND slot = ?2"; let args = params![network_id, slot]; + + // N.B. we don't use Self::query_peer() here because `slot` is the primary key query_row::(conn, &qry, args) } @@ -1012,15 +1078,24 @@ impl PeerDB { conn: &DBConn, network_id: u32, ) -> Result, db_error> { + let local_peer = Self::get_local_peer(conn)?; let sql = "SELECT * FROM frontier WHERE allowed < 0 AND network_id = ?1 ORDER BY RANDOM()"; - let allow_rows = query_rows::(conn, sql, &[&network_id])?; - Ok(allow_rows) + let allow_rows: Vec = Self::query_peers(conn, sql, params![&network_id])?; + Ok(allow_rows + .into_iter() + .filter(|neighbor| { + // omit local peer if it ever gets entered by mistake, since we can't talk to + // ourselves. + neighbor.public_key.to_bytes_compressed() + != StacksPublicKey::from_private(&local_peer.private_key).to_bytes_compressed() + }) + .collect()) } /// Get the bootstrap peers pub fn get_bootstrap_peers(conn: &DBConn, network_id: u32) -> Result, db_error> { let sql = "SELECT * FROM frontier WHERE initial = 1 AND network_id = ?1 ORDER BY RANDOM()"; - let allow_rows = query_rows::(conn, sql, &[&network_id])?; + let allow_rows = Self::query_peers(conn, sql, params![&network_id])?; Ok(allow_rows) } @@ -1070,10 +1145,11 @@ impl PeerDB { neighbor.out_degree, 0i64, slot, + !neighbor.addr.addrbytes.is_in_private_range() ]; - tx.execute("INSERT OR REPLACE INTO frontier (peer_version, network_id, addrbytes, port, public_key, expire_block_height, last_contact_time, asn, org, allowed, denied, in_degree, out_degree, initial, slot) \ - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15)", neighbor_args) + tx.execute("INSERT OR REPLACE INTO frontier (peer_version, network_id, addrbytes, port, public_key, expire_block_height, last_contact_time, asn, org, allowed, denied, in_degree, out_degree, initial, slot, public) \ + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16)", neighbor_args) .map_err(db_error::SqliteError)?; if let Some(old_peer) = old_peer_opt { @@ -1111,6 +1187,7 @@ impl PeerDB { } /// Is a peer one of this node's initial neighbors? + /// Only checks IP address. pub fn is_initial_peer( conn: &DBConn, network_id: u32, @@ -1119,7 +1196,7 @@ impl PeerDB { ) -> Result { let res: Option = query_row( conn, - "SELECT initial FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3", + "SELECT initial FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3 ORDER BY last_contact_time DESC LIMIT 1", params![network_id, peer_addr.to_bin(), peer_port], )?; @@ -1129,7 +1206,8 @@ impl PeerDB { } } - /// Set a peer as an initial peer + /// Set a peer as an initial peer. + /// Does so for all rows with the given IP address. pub fn set_initial_peer( tx: &Transaction, network_id: u32, @@ -1258,13 +1336,14 @@ impl PeerDB { neighbor.denied, neighbor.in_degree, neighbor.out_degree, + !neighbor.addr.addrbytes.is_in_private_range(), neighbor.addr.network_id, to_bin(neighbor.addr.addrbytes.as_bytes()), neighbor.addr.port, ]; - tx.execute("UPDATE frontier SET peer_version = ?1, public_key = ?2, expire_block_height = ?3, last_contact_time = ?4, asn = ?5, org = ?6, allowed = ?7, denied = ?8, in_degree = ?9, out_degree = ?10 \ - WHERE network_id = ?11 AND addrbytes = ?12 AND port = ?13", args) + tx.execute("UPDATE frontier SET peer_version = ?1, public_key = ?2, expire_block_height = ?3, last_contact_time = ?4, asn = ?5, org = ?6, allowed = ?7, denied = ?8, in_degree = ?9, out_degree = ?10, public = ?11 \ + WHERE network_id = ?12 AND addrbytes = ?13 AND port = ?14", args) .map_err(db_error::SqliteError)?; if let Some(old_peer) = old_peer_opt { @@ -1586,7 +1665,8 @@ impl PeerDB { Ok(()) } - /// Get random neighbors, optionally always including allowed neighbors + /// Get random neighbors, optionally always including allowed neighbors. + /// Private IPs may be returned, if known. pub fn get_random_neighbors( conn: &DBConn, network_id: u32, @@ -1603,6 +1683,7 @@ impl PeerDB { count, block_height, always_include_allowed, + false, ) } @@ -1615,6 +1696,7 @@ impl PeerDB { count: u32, block_height: u64, always_include_allowed: bool, + public_only: bool, ) -> Result, db_error> { let mut ret = vec![]; @@ -1630,7 +1712,7 @@ impl PeerDB { u64_to_sql(now_secs)?, network_epoch, ]; - let mut allow_rows = query_rows::(conn, &allow_qry, allow_args)?; + let mut allow_rows = Self::query_peers(conn, &allow_qry, allow_args)?; if allow_rows.len() >= (count as usize) { // return a random subset @@ -1646,12 +1728,14 @@ impl PeerDB { } // fill in with non-allowed, randomly-chosen, fresh peers + let use_public = if public_only { "AND public = 1" } else { "" }; + let random_peers_qry = if always_include_allowed { - "SELECT * FROM frontier WHERE network_id = ?1 AND last_contact_time >= ?2 AND ?3 < expire_block_height AND denied < ?4 AND \ - (allowed >= 0 AND allowed <= ?5) AND (peer_version & 0x000000ff) >= ?6 ORDER BY RANDOM() LIMIT ?7" + format!("SELECT * FROM frontier WHERE network_id = ?1 AND last_contact_time >= ?2 AND ?3 < expire_block_height AND denied < ?4 AND \ + (allowed >= 0 AND allowed <= ?5) AND (peer_version & 0x000000ff) >= ?6 {use_public} ORDER BY RANDOM() LIMIT ?7") } else { - "SELECT * FROM frontier WHERE network_id = ?1 AND last_contact_time >= ?2 AND ?3 < expire_block_height AND denied < ?4 AND \ - (allowed < 0 OR (allowed >= 0 AND allowed <= ?5)) AND (peer_version & 0x000000ff) >= ?6 ORDER BY RANDOM() LIMIT ?7" + format!("SELECT * FROM frontier WHERE network_id = ?1 AND last_contact_time >= ?2 AND ?3 < expire_block_height AND denied < ?4 AND \ + (allowed < 0 OR (allowed >= 0 AND allowed <= ?5)) AND (peer_version & 0x000000ff) >= ?6 {use_public} ORDER BY RANDOM() LIMIT ?7") }; let random_peers_args = params![ @@ -1663,8 +1747,7 @@ impl PeerDB { network_epoch, (count - (ret.len() as u32)), ]; - let mut random_peers = - query_rows::(conn, &random_peers_qry, random_peers_args)?; + let mut random_peers = Self::query_peers(conn, &random_peers_qry, random_peers_args)?; ret.append(&mut random_peers); Ok(ret) @@ -1686,6 +1769,7 @@ impl PeerDB { /// Get a randomized set of peers for walking the peer graph. /// -- selects peers at random even if not allowed + /// -- may include private IPs #[cfg_attr(test, mutants::skip)] pub fn get_random_walk_neighbors( conn: &DBConn, @@ -1703,6 +1787,7 @@ impl PeerDB { count, block_height, false, + false, ) } @@ -1767,7 +1852,7 @@ impl PeerDB { pub fn get_all_peers(conn: &DBConn) -> Result, db_error> { let qry = "SELECT * FROM frontier ORDER BY addrbytes ASC, port ASC"; - let rows = query_rows::(conn, &qry, NO_PARAMS)?; + let rows = Self::query_peers(conn, &qry, NO_PARAMS)?; Ok(rows) } @@ -1784,7 +1869,7 @@ impl PeerDB { if max_count == 0 { return Ok(vec![]); } - let qry = "SELECT DISTINCT frontier.* FROM frontier JOIN stackerdb_peers ON stackerdb_peers.peer_slot = frontier.slot WHERE stackerdb_peers.smart_contract_id = ?1 AND frontier.network_id = ?2 AND frontier.last_contact_time >= ?3 ORDER BY RANDOM() LIMIT ?4"; + let qry = "SELECT frontier.* FROM frontier JOIN stackerdb_peers ON stackerdb_peers.peer_slot = frontier.slot WHERE stackerdb_peers.smart_contract_id = ?1 AND frontier.network_id = ?2 AND frontier.last_contact_time >= ?3 ORDER BY RANDOM() LIMIT ?4"; let max_count_u32 = u32::try_from(max_count).unwrap_or(u32::MAX); let args = params![ smart_contract.to_string(), @@ -1792,11 +1877,11 @@ impl PeerDB { u64_to_sql(min_age)?, max_count_u32, ]; - query_rows(conn, qry, args) + Self::query_peers(conn, qry, args) } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { use clarity::vm::types::{StacksAddressExtensions, StandardPrincipalData}; use stacks_common::types::chainstate::StacksAddress; @@ -1806,6 +1891,21 @@ mod test { use super::*; use crate::net::{Neighbor, NeighborKey}; + impl PeerDB { + /// test the `public` flag + pub fn is_public( + conn: &DBConn, + network_id: u32, + peer_addr: &PeerAddress, + peer_port: u16, + ) -> Result { + let qry = "SELECT public FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3"; + let args = params![network_id, peer_addr.to_bin(), peer_port,]; + let public: bool = query_row(conn, qry, args)?.ok_or(db_error::NotFoundError)?; + Ok(public) + } + } + /// Test storage, retrieval, and mutation of LocalPeer, including its stacker DB contract IDs #[test] fn test_local_peer() { @@ -3543,4 +3643,223 @@ mod test { ) .unwrap(); } + + /// Test `public` setting in DB migration + #[test] + fn test_db_schema_3_public_ip_migration() { + let key = Secp256k1PrivateKey::new(); + + let path = "/tmp/test-peerdb-schema-3-public-ip-migration.db".to_string(); + if fs::metadata(&path).is_ok() { + fs::remove_file(&path).unwrap(); + } + let mut db = PeerDB::connect( + &path, + true, + 0x80000000, + 0, + Some(key.clone()), + i64::MAX as u64, + PeerAddress::from_ipv4(127, 0, 0, 1), + 12345, + UrlString::try_from("http://foo.com").unwrap(), + &vec![], + None, + &[], + ) + .unwrap(); + + let private_addrbytes = vec![ + PeerAddress::from_ipv4(127, 0, 0, 1), + PeerAddress::from_ipv4(192, 168, 0, 1), + PeerAddress::from_ipv4(172, 16, 0, 1), + PeerAddress::from_ipv4(10, 0, 0, 1), + PeerAddress([ + 0xfc, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, + 0x0d, 0x0e, + ]), + PeerAddress([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, + ]), + ]; + + let public_addrbytes = vec![ + PeerAddress::from_ipv4(1, 2, 3, 4), + PeerAddress([ + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, + 0xff, 0x00, + ]), + ]; + + let mut neighbor = Neighbor { + addr: NeighborKey { + peer_version: 0x12345678, + network_id: 0x9abcdef0, + addrbytes: PeerAddress::from_ipv4(127, 0, 0, 1), + port: 12345, + }, + public_key: Secp256k1PublicKey::from_hex( + "02fa66b66f8971a8cd4d20ffded09674e030f0f33883f337f34b95ad4935bac0e3", + ) + .unwrap(), + expire_block: 23456, + last_contact_time: 1552509642, + allowed: -1, + denied: -1, + asn: 34567, + org: 45678, + in_degree: 1, + out_degree: 1, + }; + + // force public and see if it gets reverted + let tx = db.tx_begin().unwrap(); + + for private in private_addrbytes.iter() { + neighbor.addr.addrbytes = private.clone(); + neighbor.public_key = Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()); + assert!(PeerDB::try_insert_peer(&tx, &neighbor, &[]).unwrap()); + } + for public in public_addrbytes.iter() { + neighbor.addr.addrbytes = public.clone(); + neighbor.public_key = Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()); + assert!(PeerDB::try_insert_peer(&tx, &neighbor, &[]).unwrap()); + } + tx.execute("UPDATE frontier SET public = 1", params![]) + .unwrap(); + tx.commit().unwrap(); + + // make sure they're all listed as public (even if erroneously) + for private in private_addrbytes.iter() { + assert!(PeerDB::is_public( + &db.conn, + neighbor.addr.network_id, + private, + neighbor.addr.port + ) + .unwrap()); + } + for public in public_addrbytes.iter() { + assert!(PeerDB::is_public( + &db.conn, + neighbor.addr.network_id, + public, + neighbor.addr.port + ) + .unwrap()); + } + + let tx = db.tx_begin().unwrap(); + PeerDB::update_peerdb_public_addrs(&tx).unwrap(); + + // fixed + for private in private_addrbytes.iter() { + assert!( + !PeerDB::is_public(&tx, neighbor.addr.network_id, private, neighbor.addr.port) + .unwrap() + ); + } + for public in public_addrbytes.iter() { + assert!( + PeerDB::is_public(&tx, neighbor.addr.network_id, public, neighbor.addr.port) + .unwrap() + ); + } + + // now do the opposite + tx.execute("UPDATE frontier SET public = 0", params![]) + .unwrap(); + tx.commit().unwrap(); + + let tx = db.tx_begin().unwrap(); + PeerDB::update_peerdb_public_addrs(&tx).unwrap(); + + // fixed + for private in private_addrbytes.iter() { + assert!( + !PeerDB::is_public(&tx, neighbor.addr.network_id, private, neighbor.addr.port) + .unwrap() + ); + } + for public in public_addrbytes.iter() { + assert!( + PeerDB::is_public(&tx, neighbor.addr.network_id, public, neighbor.addr.port) + .unwrap() + ); + } + tx.commit().unwrap(); + } + + /// Verify that multiple peers with the same public key are coalesced by last-contact-time + #[test] + fn test_query_peers() { + let key = Secp256k1PrivateKey::new(); + + let path = "/tmp/test-query-peers.db".to_string(); + if fs::metadata(&path).is_ok() { + fs::remove_file(&path).unwrap(); + } + let mut db = PeerDB::connect( + &path, + true, + 0x80000000, + 0, + Some(key.clone()), + i64::MAX as u64, + PeerAddress::from_ipv4(127, 0, 0, 1), + 12345, + UrlString::try_from("http://foo.com").unwrap(), + &vec![], + None, + &[], + ) + .unwrap(); + + let mut neighbor = Neighbor { + addr: NeighborKey { + peer_version: 0x12345678, + network_id: 0x9abcdef0, + addrbytes: PeerAddress::from_ipv4(127, 0, 0, 1), + port: 12345, + }, + public_key: Secp256k1PublicKey::from_hex( + "02fa66b66f8971a8cd4d20ffded09674e030f0f33883f337f34b95ad4935bac0e3", + ) + .unwrap(), + expire_block: 23456, + last_contact_time: 1552509642, + allowed: -1, + denied: -1, + asn: 34567, + org: 45678, + in_degree: 1, + out_degree: 1, + }; + + let tx = db.tx_begin().unwrap(); + for i in 0..10 { + neighbor.addr.port = (i + 1024) as u16; + neighbor.last_contact_time = (i + 1552509642) as u64; + assert!(PeerDB::try_insert_peer(&tx, &neighbor, &[]).unwrap()); + } + tx.commit().unwrap(); + + // only one peer returned, and it's the one with the highest last-contact time + let mut peers = PeerDB::query_peers( + &db.conn, + "SELECT * FROM frontier WHERE network_id = ?1 AND addrbytes = ?2 AND port = ?3", + params![ + &neighbor.addr.network_id, + &to_bin(neighbor.addr.addrbytes.as_bytes()), + &neighbor.addr.port + ], + ) + .unwrap(); + assert_eq!(peers.len(), 1); + + let peer = peers.pop().unwrap(); + assert_eq!(peer.addr.port, 1033); + assert_eq!(peer.last_contact_time, 1552509651); + } } From 43d1ba9044ceb804e7a1be9efdccb54838dd8fc5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:23:09 -0400 Subject: [PATCH 1095/1400] feat: log all p2p conversations every `log_neighbor_freq` milliseconds --- stackslib/src/net/p2p.rs | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index f0693c10a0..3796a6c5f2 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -348,6 +348,9 @@ pub struct PeerNetwork { pub walk_pingbacks: HashMap, // inbound peers for us to try to ping back and add to our frontier, mapped to (peer_version, network_id, timeout, pubkey) pub walk_result: NeighborWalkResult, // last successful neighbor walk result + /// last time we logged neigbhors + last_neighbor_log: u128, + /// Epoch 2.x inventory state pub inv_state: Option, /// Epoch 3.x inventory state @@ -537,6 +540,8 @@ impl PeerNetwork { walk_pingbacks: HashMap::new(), walk_result: NeighborWalkResult::new(), + last_neighbor_log: 0, + inv_state: None, inv_state_nakamoto: None, pox_id: PoxId::initial(), @@ -5017,6 +5022,33 @@ impl PeerNetwork { false } + /// Log our neighbors. + /// Used for testing and debuggin + fn log_neighbors(&mut self) { + if self.get_connection_opts().log_neighbors_freq == 0 { + return; + } + + let now = get_epoch_time_ms(); + if self.last_neighbor_log + u128::from(self.get_connection_opts().log_neighbors_freq) >= now + { + return; + } + + let convo_strs: Vec<_> = self + .peers + .values() + .map(|convo| format!("{:?}", &convo)) + .collect(); + + debug!( + "{:?}: current neighbors are {:?}", + self.get_local_peer(), + &convo_strs + ); + self.last_neighbor_log = now; + } + /// Top-level main-loop circuit to take. /// -- polls the peer network and http network server sockets to get new sockets and detect ready sockets /// -- carries out network conversations @@ -5130,12 +5162,13 @@ impl PeerNetwork { p2p_poll_state, ); + self.log_neighbors(); debug!("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< End Network Dispatch <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"); Ok(network_result) } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { use std::{thread, time}; From 32b57260649d1f5903322bff758f7727ce84f719 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:23:59 -0400 Subject: [PATCH 1096/1400] fix: when discovering a new neighbor, don't replace its inbound peer address with its outbound address if both addresses are private. Also, log more walk instantiation data --- stackslib/src/net/neighbors/walk.rs | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/stackslib/src/net/neighbors/walk.rs b/stackslib/src/net/neighbors/walk.rs index e1207941e0..d4f1cd089b 100644 --- a/stackslib/src/net/neighbors/walk.rs +++ b/stackslib/src/net/neighbors/walk.rs @@ -359,8 +359,8 @@ impl NeighborWalk { // pick a random search index let mut idx = thread_rng().gen::() % event_ids.len(); - test_debug!( - "{:?}: try inbound neighbors -- sample out of {}. idx = {}", + debug!( + "{:?}: instantiate inbound walk: try inbound neighbors -- sample out of {}. idx = {}", network.get_local_peer(), network.get_num_p2p_convos(), idx @@ -410,6 +410,10 @@ impl NeighborWalk { } // no inbound peers + debug!( + "{:?}: no inbound peers to talk to", + network.get_local_peer() + ); return Err(net_error::NoSuchNeighbor); } @@ -426,13 +430,14 @@ impl NeighborWalk { network: &PeerNetwork, ) -> Result, net_error> { if network.get_walk_pingbacks().len() == 0 { + debug!("{:?}: no walk pingbacks", network.get_local_peer()); return Err(net_error::NoSuchNeighbor); } // random search let idx = thread_rng().gen::() % network.get_walk_pingbacks().len(); - test_debug!( + debug!( "{:?}: try pingback candidates -- sample out of {}. idx = {}", network.get_local_peer(), network.get_walk_pingbacks().len(), @@ -490,7 +495,7 @@ impl NeighborWalk { next_neighbor: Neighbor, next_neighbor_outbound: bool, ) -> NeighborWalkResult { - test_debug!( + debug!( "{:?}: Walk reset to {} neighbor {:?}", local_peer, if self.next_walk_outbound { @@ -686,9 +691,10 @@ impl NeighborWalk { // if the neighbor accidentally gave us a private IP address, then // just use the one we used to contact it. This can happen if the // node is behind a load-balancer, or is doing port-forwarding, - // etc. - if neighbor_from_handshake.addr.addrbytes.is_in_private_range() - || neighbor_from_handshake.addr.addrbytes.is_anynet() + // etc. But do nothing if both cur_neighbor and its reported address are private. + if (neighbor_from_handshake.addr.addrbytes.is_in_private_range() + || neighbor_from_handshake.addr.addrbytes.is_anynet()) + && !self.cur_neighbor.addr.addrbytes.is_in_private_range() { debug!( "{}: outbound neighbor gave private IP address {:?}; assuming it meant {:?}", From 88c9a5023150ddb578054b4ea654cd6061a9ccbb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:24:43 -0400 Subject: [PATCH 1097/1400] fix: don't forward stackerdb chunks that are known to be locally stale --- stackslib/src/net/relay.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 123f78f422..4537ddff6f 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -2374,6 +2374,7 @@ impl Relayer { /// this far at all means that they were novel, and thus potentially novel to our neighbors). pub fn process_uploaded_stackerdb_chunks( &mut self, + rc_consensus_hash: &ConsensusHash, uploaded_chunks: Vec, event_observer: Option<&dyn StackerDBEventDispatcher>, ) { @@ -2381,12 +2382,25 @@ impl Relayer { let mut all_events: HashMap> = HashMap::new(); for chunk in uploaded_chunks.into_iter() { - debug!("Got uploaded StackerDB chunk"; "stackerdb_contract_id" => &format!("{}", &chunk.contract_id), "slot_id" => chunk.chunk_data.slot_id, "slot_version" => chunk.chunk_data.slot_version); if let Some(events) = all_events.get_mut(&chunk.contract_id) { events.push(chunk.chunk_data.clone()); } else { all_events.insert(chunk.contract_id.clone(), vec![chunk.chunk_data.clone()]); } + + // forward if not stale + if chunk.rc_consensus_hash != *rc_consensus_hash { + debug!("Drop stale uploaded StackerDB chunk"; + "stackerdb_contract_id" => &format!("{}", &chunk.contract_id), + "slot_id" => chunk.chunk_data.slot_id, + "slot_version" => chunk.chunk_data.slot_version, + "chunk.rc_consensus_hash" => %chunk.rc_consensus_hash, + "network.rc_consensus_hash" => %rc_consensus_hash); + continue; + } + + debug!("Got uploaded StackerDB chunk"; "stackerdb_contract_id" => &format!("{}", &chunk.contract_id), "slot_id" => chunk.chunk_data.slot_id, "slot_version" => chunk.chunk_data.slot_version); + let msg = StacksMessageType::StackerDBPushChunk(chunk); if let Err(e) = self.p2p.broadcast_message(vec![], msg) { warn!("Failed to broadcast Nakamoto blocks: {:?}", &e); @@ -2918,6 +2932,7 @@ impl Relayer { // push events for HTTP-uploaded stacker DB chunks self.process_uploaded_stackerdb_chunks( + &network_result.rc_consensus_hash, mem::replace(&mut network_result.uploaded_stackerdb_chunks, vec![]), event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), ); From f9b94dc21404aff61f0157315b15927863fb1f52 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:25:26 -0400 Subject: [PATCH 1098/1400] chore: log the neighbor address which sent the chunk --- stackslib/src/net/stackerdb/mod.rs | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index b022746d6a..e971d9ebfc 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -111,7 +111,7 @@ /// state periodically (whereas Gaia stores data for as long as the back-end storage provider's SLA /// indicates). -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; pub mod config; @@ -386,6 +386,8 @@ pub enum StackerDBSyncState { pub struct StackerDBSync { /// what state are we in? state: StackerDBSyncState, + /// What was the rc consensus hash at the start of sync? + pub rc_consensus_hash: Option, /// which contract this is a replica for pub smart_contract_id: QualifiedContractIdentifier, /// number of chunks in this DB @@ -507,6 +509,7 @@ impl PeerNetwork { /// Runs in response to a received StackerDBGetChunksInv or a StackerDBPushChunk pub fn make_StackerDBChunksInv_or_Nack( &self, + naddr: NeighborAddress, chainstate: &mut StacksChainState, contract_id: &QualifiedContractIdentifier, rc_consensus_hash: &ConsensusHash, @@ -537,10 +540,10 @@ impl PeerNetwork { &tip_block_id, &rc_consensus_hash, ) { - debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk since {} != {} (remote is stale)", self.get_local_peer(), &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); + debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk from {} since {} != {} (remote is stale)", self.get_local_peer(), &naddr, &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); return StacksMessageType::Nack(NackData::new(NackErrorCodes::StaleView)); } else { - debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk since {} != {} (local is potentially stale)", self.get_local_peer(), &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); + debug!("{:?}: NACK StackerDBGetChunksInv / StackerDBPushChunk from {} since {} != {} (local is potentially stale)", self.get_local_peer(), &naddr, &self.get_chain_view().rc_consensus_hash, rc_consensus_hash); return StacksMessageType::Nack(NackData::new(NackErrorCodes::FutureView)); } } @@ -655,7 +658,19 @@ impl PeerNetwork { chunk_data: &StackerDBPushChunkData, send_reply: bool, ) -> Result<(bool, bool), net_error> { + let Some(naddr) = self + .get_p2p_convo(event_id) + .map(|convo| convo.to_neighbor_address()) + else { + debug!( + "Drop unsolicited StackerDBPushChunk: event ID {} is not connected", + event_id + ); + return Ok((false, false)); + }; + let mut payload = self.make_StackerDBChunksInv_or_Nack( + naddr, chainstate, &chunk_data.contract_id, &chunk_data.rc_consensus_hash, From 39265dd1ee634b9764bc89760a9da9f516d5e2b8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:26:01 -0400 Subject: [PATCH 1099/1400] fix: check local rc_consensus_hash against rc_consensus_hash of a scheduled message we're about to send, and abort stackerdb sync if they differ (indicates that the p2p network advanced its stackerdb state, and this sync is acting on stale data). Also, log the local peer _and_ contract ID in debug messages --- stackslib/src/net/stackerdb/sync.rs | 221 ++++++++++++++++++++-------- 1 file changed, 158 insertions(+), 63 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index fa94c5be55..467bc608e1 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -50,6 +50,7 @@ impl StackerDBSync { ) -> StackerDBSync { let mut dbsync = StackerDBSync { state: StackerDBSyncState::ConnectBegin, + rc_consensus_hash: None, smart_contract_id: smart_contract, num_slots: config.num_slots() as usize, write_freq: config.write_freq, @@ -90,6 +91,9 @@ impl StackerDBSync { let mut found = HashSet::new(); let mut min_age = get_epoch_time_secs().saturating_sub(network.get_connection_opts().max_neighbor_age); + + let local_naddr = network.get_local_peer().to_neighbor_addr(); + while found.len() < self.max_neighbors { let peers_iter = PeerDB::find_stacker_db_replicas( network.peerdb_conn(), @@ -109,6 +113,10 @@ impl StackerDBSync { if naddr.addrbytes.is_anynet() { return false; } + if naddr.public_key_hash == local_naddr.public_key_hash { + // don't talk to us by another address + return false; + } if !network.get_connection_opts().private_neighbors && naddr.addrbytes.is_in_private_range() { @@ -169,7 +177,10 @@ impl StackerDBSync { network: Option<&PeerNetwork>, config: &StackerDBConfig, ) -> StackerDBSyncResult { - debug!("Reset {} with config {:?}", &self.smart_contract_id, config); + debug!( + "{}: Reset with config {:?}", + &self.smart_contract_id, config + ); let mut chunks = vec![]; let downloaded_chunks = mem::replace(&mut self.downloaded_chunks, HashMap::new()); for (_, mut data) in downloaded_chunks.into_iter() { @@ -220,6 +231,7 @@ impl StackerDBSync { self.num_connections = 0; self.num_attempted_connections = 0; self.rounds += 1; + self.rc_consensus_hash = None; result } @@ -258,7 +270,7 @@ impl StackerDBSync { .get_slot_write_timestamps(&self.smart_contract_id)?; if local_slot_versions.len() != local_write_timestamps.len() { - let msg = format!("Local slot versions ({}) out of sync with DB slot versions ({}) for {}; abandoning sync and trying again", local_slot_versions.len(), local_write_timestamps.len(), &self.smart_contract_id); + let msg = format!("{}: Local slot versions ({}) out of sync with DB slot versions ({}); abandoning sync and trying again", &self.smart_contract_id, local_slot_versions.len(), local_write_timestamps.len()); warn!("{}", &msg); return Err(net_error::Transient(msg)); } @@ -270,10 +282,11 @@ impl StackerDBSync { // who has data we need? for (i, local_version) in local_slot_versions.iter().enumerate() { let write_ts = local_write_timestamps[i]; - if write_ts + self.write_freq > now { + if self.write_freq > 0 && write_ts + self.write_freq > now { debug!( - "{:?}: Chunk {} was written too frequently ({} + {} >= {}) in {}, so will not fetch chunk", + "{:?}: {}: Chunk {} was written too frequently ({} + {} > {}) in {}, so will not fetch chunk", network.get_local_peer(), + &self.smart_contract_id, i, write_ts, self.write_freq, @@ -346,10 +359,10 @@ impl StackerDBSync { schedule.reverse(); debug!( - "{:?}: Will request up to {} chunks for {}. Schedule: {:?}", + "{:?}: {}: Will request up to {} chunks. Schedule: {:?}", network.get_local_peer(), - &schedule.len(), &self.smart_contract_id, + &schedule.len(), &schedule ); Ok(schedule) @@ -415,7 +428,7 @@ impl StackerDBSync { }; debug!( - "{:?}: Can push chunk StackerDBChunk(db={},id={},ver={}) to {}. Replicate? {}", + "{:?}: {}: Can push chunk StackerDBChunk(id={},ver={}) to {}. Replicate? {}", &network.get_local_peer(), &self.smart_contract_id, our_chunk.chunk_data.slot_id, @@ -448,10 +461,10 @@ impl StackerDBSync { schedule.sort_by(|item_1, item_2| item_1.1.len().cmp(&item_2.1.len())); debug!( - "{:?}: Will push up to {} chunks for {}", + "{:?}: {}: Will push up to {} chunks", network.get_local_peer(), - &schedule.len(), &self.smart_contract_id, + &schedule.len(), ); Ok(schedule) } @@ -524,13 +537,13 @@ impl StackerDBSync { if *old_version < new_inv.slot_versions[old_slot_id] { // remote peer indicated that it has a newer version of this chunk. debug!( - "{:?}: peer {:?} has a newer version of slot {} ({} < {}) in {}", + "{:?}: {}: peer {:?} has a newer version of slot {} ({} < {})", _network.get_local_peer(), + &self.smart_contract_id, &naddr, old_slot_id, old_version, new_inv.slot_versions[old_slot_id], - &self.smart_contract_id, ); resync = true; break; @@ -614,11 +627,12 @@ impl StackerDBSync { } for (naddr, chunks_req) in to_send.into_iter() { - debug!("{:?}: send_getchunksinv_to_inbound_neighbors: Send StackerDBGetChunkInv to inbound {:?}", network.get_local_peer(), &naddr); + debug!("{:?}: {}: send_getchunksinv_to_inbound_neighbors: Send StackerDBGetChunkInv at {} to inbound {:?}", network.get_local_peer(), &self.smart_contract_id, &network.get_chain_view().rc_consensus_hash, &naddr); if let Err(_e) = self.comms.neighbor_send(network, &naddr, chunks_req) { info!( - "{:?}: Failed to send StackerDBGetChunkInv to inbound {:?}: {:?}", + "{:?}: {}: Failed to send StackerDBGetChunkInv to inbound {:?}: {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr, &_e ); @@ -639,10 +653,12 @@ impl StackerDBSync { self.replicas = replicas; } debug!( - "{:?}: connect_begin: establish StackerDB sessions to {} neighbors (out of {} p2p peers)", + "{:?}: {}: connect_begin: establish StackerDB sessions to {} neighbors (out of {} p2p peers)", network.get_local_peer(), + &self.smart_contract_id, self.replicas.len(), - network.get_num_p2p_convos() + network.get_num_p2p_convos(); + "replicas" => ?self.replicas ); if self.replicas.len() == 0 { // nothing to do @@ -653,8 +669,9 @@ impl StackerDBSync { for naddr in naddrs.into_iter() { if self.comms.is_neighbor_connecting(network, &naddr) { debug!( - "{:?}: connect_begin: already connecting to StackerDB peer {:?}", + "{:?}: {}: connect_begin: already connecting to StackerDB peer {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr ); self.replicas.insert(naddr); @@ -662,8 +679,9 @@ impl StackerDBSync { } if self.comms.has_neighbor_session(network, &naddr) { debug!( - "{:?}: connect_begin: already connected to StackerDB peer {:?}", + "{:?}: {}: connect_begin: already connected to StackerDB peer {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr ); self.connected_replicas.insert(naddr); @@ -671,16 +689,18 @@ impl StackerDBSync { } debug!( - "{:?}: connect_begin: Send Handshake to StackerDB peer {:?}", + "{:?}: {}: connect_begin: Send Handshake to StackerDB peer {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr ); match self.comms.neighbor_session_begin(network, &naddr) { Ok(true) => { // connected! debug!( - "{:?}: connect_begin: connected to StackerDB peer {:?}", + "{:?}: {}: connect_begin: connected to StackerDB peer {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr ); self.num_attempted_connections += 1; @@ -692,7 +712,13 @@ impl StackerDBSync { self.num_attempted_connections += 1; } Err(_e) => { - debug!("Failed to begin session with {:?}: {:?}", &naddr, &_e); + debug!( + "{:?}: {}: Failed to begin session with {:?}: {:?}", + &network.get_local_peer(), + &self.smart_contract_id, + &naddr, + &_e + ); } } } @@ -710,8 +736,9 @@ impl StackerDBSync { if network.get_chain_view().rc_consensus_hash != db_data.rc_consensus_hash { // stale or inconsistent view. Do not proceed debug!( - "{:?}: remote peer {:?} has stale view ({} != {})", + "{:?}: {}: remote peer {:?} has stale view ({} != {})", network.get_local_peer(), + &self.smart_contract_id, &naddr, &network.get_chain_view().rc_consensus_hash, &db_data.rc_consensus_hash @@ -723,8 +750,9 @@ impl StackerDBSync { } StacksMessageType::Nack(data) => { debug!( - "{:?}: remote peer {:?} NACK'ed our StackerDBHandshake with code {}", + "{:?}: {}: remote peer {:?} NACK'ed our StackerDBHandshake with code {}", &network.get_local_peer(), + &self.smart_contract_id, &naddr, data.error_code ); @@ -737,7 +765,12 @@ impl StackerDBSync { continue; } x => { - info!("Received unexpected message {:?}", &x); + info!( + "{:?}: {}: Received unexpected message {:?}", + &network.get_local_peer(), + &self.smart_contract_id, + &x + ); continue; } }; @@ -749,7 +782,7 @@ impl StackerDBSync { .is_none() { debug!( - "{:?}: remote peer does not replicate {}", + "{:?}: {}: remote peer does not replicate", network.get_local_peer(), &self.smart_contract_id ); @@ -760,8 +793,9 @@ impl StackerDBSync { } debug!( - "{:?}: connect_try_finish: Received StackerDBHandshakeAccept from {:?} for {:?}", + "{:?}: {}: connect_try_finish: Received StackerDBHandshakeAccept from {:?} for {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr, &data ); @@ -778,7 +812,8 @@ impl StackerDBSync { if self.connected_replicas.len() == 0 { // no one to talk to debug!( - "{:?}: connect_try_finish: no valid replicas", + "{:?}: {}: connect_try_finish: no valid replicas", + &self.smart_contract_id, network.get_local_peer() ); return Err(net_error::PeerNotConnected); @@ -796,21 +831,26 @@ impl StackerDBSync { let naddrs = mem::replace(&mut self.connected_replicas, HashSet::new()); let mut already_sent = vec![]; debug!( - "{:?}: getchunksinv_begin: Send StackerDBGetChunksInv to {} replicas", + "{:?}: {}: getchunksinv_begin: Send StackerDBGetChunksInv to {} replicas", network.get_local_peer(), - naddrs.len() + &self.smart_contract_id, + naddrs.len(); + "connected_replicas" => ?naddrs, ); for naddr in naddrs.into_iter() { debug!( - "{:?}: getchunksinv_begin: Send StackerDBGetChunksInv to {:?}", + "{:?}: {}: getchunksinv_begin: Send StackerDBGetChunksInv at {} to {:?}", network.get_local_peer(), - &naddr + &self.smart_contract_id, + &network.get_chain_view().rc_consensus_hash, + &naddr, ); let chunks_req = self.make_getchunkinv(&network.get_chain_view().rc_consensus_hash); if let Err(e) = self.comms.neighbor_send(network, &naddr, chunks_req) { debug!( - "{:?}: failed to send StackerDBGetChunkInv to {:?}: {:?}", + "{:?}: {}: failed to send StackerDBGetChunkInv to {:?}: {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr, &e ); @@ -833,7 +873,7 @@ impl StackerDBSync { let chunk_inv_opt = match message.payload { StacksMessageType::StackerDBChunkInv(data) => { if data.slot_versions.len() != self.num_slots { - info!("{:?}: Received malformed StackerDBChunkInv for {} from {:?}: expected {} chunks, got {}", network.get_local_peer(), &self.smart_contract_id, &naddr, self.num_slots, data.slot_versions.len()); + info!("{:?}: {}: Received malformed StackerDBChunkInv from {:?}: expected {} chunks, got {}", network.get_local_peer(), &self.smart_contract_id, &naddr, self.num_slots, data.slot_versions.len()); None } else { Some(data) @@ -841,10 +881,10 @@ impl StackerDBSync { } StacksMessageType::Nack(data) => { debug!( - "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunksInv us (on {}) with code {}", - &network.get_local_peer(), - &naddr, + "{:?}: {}: remote peer {:?} NACK'ed our StackerDBGetChunksInv with code {}", + network.get_local_peer(), &self.smart_contract_id, + &naddr, data.error_code ); self.connected_replicas.remove(&naddr); @@ -856,14 +896,20 @@ impl StackerDBSync { continue; } x => { - info!("Received unexpected message {:?}", &x); + info!( + "{:?}: {}: Received unexpected message {:?}", + network.get_local_peer(), + &self.smart_contract_id, + &x + ); self.connected_replicas.remove(&naddr); continue; } }; debug!( - "{:?}: getchunksinv_try_finish: Received StackerDBChunkInv from {:?}: {:?}", + "{:?}: {}: getchunksinv_try_finish: Received StackerDBChunkInv from {:?}: {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr, &chunk_inv_opt ); @@ -893,15 +939,22 @@ impl StackerDBSync { pub fn getchunks_begin(&mut self, network: &mut PeerNetwork) -> Result { if self.chunk_fetch_priorities.len() == 0 { // done + debug!( + "{:?}: {}: getchunks_begin: no chunks prioritized", + network.get_local_peer(), + &self.smart_contract_id + ); return Ok(true); } let mut cur_priority = self.next_chunk_fetch_priority % self.chunk_fetch_priorities.len(); debug!( - "{:?}: getchunks_begin: Issue up to {} StackerDBGetChunk requests", + "{:?}: {}: getchunks_begin: Issue up to {} StackerDBGetChunk requests", &network.get_local_peer(), - self.request_capacity + &self.smart_contract_id, + self.request_capacity; + "chunk_fetch_priorities" => ?self.chunk_fetch_priorities, ); let mut requested = 0; @@ -926,11 +979,12 @@ impl StackerDBSync { }; debug!( - "{:?}: getchunks_begin: Send StackerDBGetChunk(db={},id={},ver={}) to {}", + "{:?}: {}: getchunks_begin: Send StackerDBGetChunk(id={},ver={}) at {} to {}", &network.get_local_peer(), &self.smart_contract_id, chunk_request.slot_id, chunk_request.slot_version, + &chunk_request.rc_consensus_hash, &selected_neighbor ); @@ -940,10 +994,10 @@ impl StackerDBSync { StacksMessageType::StackerDBGetChunk(chunk_request.clone()), ) { info!( - "{:?}: Failed to request chunk {} of {} from {:?}: {:?}", + "{:?}: {} Failed to request chunk {} from {:?}: {:?}", network.get_local_peer(), - chunk_request.slot_id, &self.smart_contract_id, + chunk_request.slot_id, &selected_neighbor, &e ); @@ -981,10 +1035,10 @@ impl StackerDBSync { StacksMessageType::StackerDBChunk(data) => data, StacksMessageType::Nack(data) => { debug!( - "{:?}: remote peer {:?} NACK'ed our StackerDBGetChunk (on {}) with code {}", + "{:?}: {}: remote peer {:?} NACK'ed our StackerDBGetChunk with code {}", network.get_local_peer(), - &naddr, &self.smart_contract_id, + &naddr, data.error_code ); if data.error_code == NackErrorCodes::StaleView @@ -998,7 +1052,12 @@ impl StackerDBSync { continue; } x => { - info!("Received unexpected message {:?}", &x); + info!( + "{:?}: {}: Received unexpected message {:?}", + network.get_local_peer(), + &self.smart_contract_id, + &x + ); self.connected_replicas.remove(&naddr); continue; } @@ -1007,8 +1066,11 @@ impl StackerDBSync { // validate if !self.validate_downloaded_chunk(network, config, &data)? { info!( - "Remote neighbor {:?} served an invalid chunk for ID {}", - &naddr, data.slot_id + "{:?}: {}: Remote neighbor {:?} served an invalid chunk for ID {}", + network.get_local_peer(), + &self.smart_contract_id, + &naddr, + data.slot_id ); self.connected_replicas.remove(&naddr); continue; @@ -1016,8 +1078,9 @@ impl StackerDBSync { // update bookkeeping debug!( - "{:?}: getchunks_try_finish: Received StackerDBChunk from {:?}", + "{:?}: {}, getchunks_try_finish: Received StackerDBChunk from {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr ); self.add_downloaded_chunk(naddr, data); @@ -1038,15 +1101,22 @@ impl StackerDBSync { } if self.chunk_push_priorities.len() == 0 { // done + debug!( + "{:?}:{}: pushchunks_begin: no chunks prioritized", + network.get_local_peer(), + &self.smart_contract_id + ); return Ok(true); } let mut cur_priority = self.next_chunk_push_priority % self.chunk_push_priorities.len(); debug!( - "{:?}: pushchunks_begin: Send up to {} StackerDBChunk pushes", + "{:?}: {}: pushchunks_begin: Send up to {} StackerDBChunk pushes", &network.get_local_peer(), - self.chunk_push_priorities.len() + &self.smart_contract_id, + self.chunk_push_priorities.len(); + "chunk_push_priorities" => ?self.chunk_push_priorities ); // fill up our comms with $capacity requests @@ -1063,7 +1133,7 @@ impl StackerDBSync { .map(|neighbor| (0, neighbor)); let Some((idx, selected_neighbor)) = selected_neighbor_opt else { - debug!("{:?}: pushchunks_begin: no available neighbor to send StackerDBChunk(db={},id={},ver={}) to", + debug!("{:?}: {}: pushchunks_begin: no available neighbor to send StackerDBChunk(id={},ver={}) to", &network.get_local_peer(), &self.smart_contract_id, chunk_push.chunk_data.slot_id, @@ -1073,11 +1143,12 @@ impl StackerDBSync { }; debug!( - "{:?}: pushchunks_begin: Send StackerDBChunk(db={},id={},ver={}) to {}", + "{:?}: {}: pushchunks_begin: Send StackerDBChunk(id={},ver={}) at {} to {}", &network.get_local_peer(), &self.smart_contract_id, chunk_push.chunk_data.slot_id, chunk_push.chunk_data.slot_version, + &chunk_push.rc_consensus_hash, &selected_neighbor ); @@ -1089,10 +1160,10 @@ impl StackerDBSync { StacksMessageType::StackerDBPushChunk(chunk_push), ) { info!( - "{:?}: Failed to send chunk {} of {} from {:?}: {:?}", + "{:?}: {}: Failed to send chunk {} from {:?}: {:?}", network.get_local_peer(), - slot_id, &self.smart_contract_id, + slot_id, &selected_neighbor, &e ); @@ -1130,8 +1201,9 @@ impl StackerDBSync { StacksMessageType::StackerDBChunkInv(data) => data, StacksMessageType::Nack(data) => { debug!( - "{:?}: remote peer {:?} NACK'ed our StackerDBChunk with code {}", + "{:?}: {}: remote peer {:?} NACK'ed our StackerDBChunk with code {}", network.get_local_peer(), + &self.smart_contract_id, &naddr, data.error_code ); @@ -1143,21 +1215,27 @@ impl StackerDBSync { continue; } x => { - info!("Received unexpected message {:?}", &x); + info!( + "{:?}: {}: Received unexpected message {:?}", + network.get_local_peer(), + &self.smart_contract_id, + &x + ); continue; } }; // must be well-formed if new_chunk_inv.slot_versions.len() != self.num_slots { - info!("{:?}: Received malformed StackerDBChunkInv from {:?}: expected {} chunks, got {}", network.get_local_peer(), &naddr, self.num_slots, new_chunk_inv.slot_versions.len()); + info!("{:?}: {}: Received malformed StackerDBChunkInv from {:?}: expected {} chunks, got {}", network.get_local_peer(), &self.smart_contract_id, &naddr, self.num_slots, new_chunk_inv.slot_versions.len()); continue; } // update bookkeeping debug!( - "{:?}: pushchunks_try_finish: Received StackerDBChunkInv from {:?}", + "{:?}: {}: pushchunks_try_finish: Received StackerDBChunkInv from {:?}", network.get_local_peer(), + &self.smart_contract_id, &naddr ); @@ -1169,7 +1247,7 @@ impl StackerDBSync { let inflight = self.comms.count_inflight(); debug!( - "{:?}: inflight messages for {}: {:?}", + "{:?}: {}: inflight messages: {:?}", network.get_local_peer(), &self.smart_contract_id, inflight @@ -1220,10 +1298,26 @@ impl StackerDBSync { return Ok(None); } + // make sure we have an up-to-date chain view. + // If not, then abort and immediately retry the sync (since any queued messages we have are + // likely gonna fail) + if let Some(rc_consensus_hash) = self.rc_consensus_hash.as_ref() { + if network.get_chain_view().rc_consensus_hash != *rc_consensus_hash { + debug!("{:?}: {}: Resetting and restarting running StackerDB sync due to chain view change", network.get_local_peer(), &self.smart_contract_id); + let result = self.reset(Some(network), config); + self.state = StackerDBSyncState::ConnectBegin; + self.rc_consensus_hash = Some(network.get_chain_view().rc_consensus_hash.clone()); + self.wakeup(); + return Ok(Some(result)); + } + } else { + self.rc_consensus_hash = Some(network.get_chain_view().rc_consensus_hash.clone()); + } + // throttle to write_freq if self.last_run_ts + config.write_freq.max(1) > get_epoch_time_secs() { debug!( - "{:?}: stacker DB sync for {} is throttled until {}", + "{:?}: {}: stacker DB sync is throttled until {}", network.get_local_peer(), &self.smart_contract_id, self.last_run_ts + config.write_freq @@ -1233,11 +1327,12 @@ impl StackerDBSync { loop { debug!( - "{:?}: stacker DB sync state for {} is {:?}", + "{:?}: {}: stacker DB sync state is {:?}", network.get_local_peer(), &self.smart_contract_id, &self.state ); + let mut blocked = true; match self.state { StackerDBSyncState::ConnectBegin => { @@ -1297,7 +1392,7 @@ impl StackerDBSync { // someone pushed newer chunk data to us, and getting chunks is // enabled, so immediately go request them debug!( - "{:?}: immediately retry StackerDB GetChunks on {} due to PushChunk NACK", + "{:?}: {}: immediately retry StackerDB GetChunks due to PushChunk NACK", network.get_local_peer(), &self.smart_contract_id ); @@ -1319,7 +1414,7 @@ impl StackerDBSync { if stale_inv { debug!( - "{:?}: immediately retry StackerDB sync on {} due to stale inventory", + "{:?}: {}: immediately retry StackerDB sync due to stale inventory", network.get_local_peer(), &self.smart_contract_id ); From b389d5e59755e200239103c74230776f9ef5b87f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:27:21 -0400 Subject: [PATCH 1100/1400] fix/refactor: make it so the small-scale neighbor tests will bind to a kernel-chosen port (avoids clobbering), and move topology tests to integration test CI --- stackslib/src/net/tests/neighbors.rs | 1344 ++------------------------ 1 file changed, 102 insertions(+), 1242 deletions(-) diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs index f1937cb89b..03b1224312 100644 --- a/stackslib/src/net/tests/neighbors.rs +++ b/stackslib/src/net/tests/neighbors.rs @@ -35,19 +35,16 @@ use crate::util_lib::test::*; const TEST_IN_OUT_DEGREES: u64 = 0x1; #[test] -#[ignore] fn test_step_walk_1_neighbor_plain() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(31890); - let peer_2_config = TestPeerConfig::from_port(31892); - - // peer 1 crawls peer 2, but not vice versa - // (so only peer 1 will learn its public IP) - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + let peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -142,7 +139,7 @@ fn test_step_walk_1_neighbor_plain() { PeerAddress::from_socketaddr( &format!("127.0.0.1:1").parse::().unwrap() ), - 31890 + peer_1.config.server_port, ) ); assert!(peer_1.network.public_ip_learned); @@ -156,21 +153,20 @@ fn test_step_walk_1_neighbor_plain() { } #[test] -#[ignore] fn test_step_walk_1_neighbor_plain_no_natpunch() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(31980); - let mut peer_2_config = TestPeerConfig::from_port(31982); + let peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); // simulate peer 2 not knowing how to handle a natpunch request peer_2_config.connection_opts.disable_natpunch = true; - // peer 1 crawls peer 2 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -265,14 +261,10 @@ fn test_step_walk_1_neighbor_plain_no_natpunch() { } #[test] -#[ignore] fn test_step_walk_1_neighbor_denied() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(31994); - let mut peer_2_config = TestPeerConfig::from_port(31996); - - // peer 1 crawls peer 2, but peer 1 has denied peer 2 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.connection_opts.walk_retry_count = 10; peer_2_config.connection_opts.walk_retry_count = 10; @@ -282,6 +274,8 @@ fn test_step_walk_1_neighbor_denied() { let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, but peer 1 has denied peer 2 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); { let mut tx = peer_1.network.peerdb.tx_begin().unwrap(); PeerDB::add_deny_cidr(&mut tx, &PeerAddress::from_ipv4(127, 0, 0, 1), 128).unwrap(); @@ -344,11 +338,10 @@ fn test_step_walk_1_neighbor_denied() { } #[test] -#[ignore] fn test_step_walk_1_neighbor_bad_epoch() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(31998); - let mut peer_2_config = TestPeerConfig::from_port(31990); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.connection_opts.walk_retry_count = 10; peer_2_config.connection_opts.walk_retry_count = 10; @@ -375,14 +368,14 @@ fn test_step_walk_1_neighbor_bad_epoch() { network_epoch: PEER_VERSION_EPOCH_2_05, }]); - // peers know about each other, but peer 2 never talks to peer 1 since it believes that - // it's in a wholly different epoch - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peers know about each other, but peer 2 never talks to peer 1 since it believes that + // it's in a wholly different epoch + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -441,21 +434,20 @@ fn test_step_walk_1_neighbor_bad_epoch() { } #[test] -#[ignore] fn test_step_walk_1_neighbor_heartbeat_ping() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32992); - let mut peer_2_config = TestPeerConfig::from_port(32994); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.connection_opts.heartbeat = 10; peer_2_config.connection_opts.heartbeat = 10; - // peer 1 crawls peer 2 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -547,19 +539,18 @@ fn test_step_walk_1_neighbor_heartbeat_ping() { } #[test] -#[ignore] fn test_step_walk_1_neighbor_bootstrapping() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32100); - let peer_2_config = TestPeerConfig::from_port(32102); - - // peer 1 crawls peer 2, but peer 1 doesn't add peer 2 to its frontier becuase peer 2 is - // too far behind. - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + let peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, but peer 1 doesn't add peer 2 to its frontier becuase peer 2 is + // too far behind. + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + // advance peer 1 for i in 0..MAX_NEIGHBOR_BLOCK_DELAY + 1 { peer_1.add_empty_burnchain_block(); @@ -623,22 +614,21 @@ fn test_step_walk_1_neighbor_bootstrapping() { } #[test] -#[ignore] fn test_step_walk_1_neighbor_behind() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32200); - let mut peer_2_config = TestPeerConfig::from_port(32202); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.connection_opts.disable_natpunch = true; peer_2_config.connection_opts.disable_natpunch = true; - // peer 1 crawls peer 2, and peer 1 adds peer 2 to its frontier even though peer 2 does - // not, because peer 2 is too far ahead - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, and peer 1 adds peer 2 to its frontier even though peer 2 does + // not, because peer 2 is too far ahead + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + // advance peer 2 for i in 0..MAX_NEIGHBOR_BLOCK_DELAY + 1 { peer_2.add_empty_burnchain_block(); @@ -743,14 +733,13 @@ fn test_step_walk_1_neighbor_behind() { } #[test] -#[ignore] fn test_step_walk_10_neighbors_of_neighbor_plain() { with_timeout(600, || { // peer 1 has peer 2 as its neighbor. // peer 2 has 10 other neighbors. // Goal: peer 1 learns about the 10 other neighbors. - let mut peer_1_config = TestPeerConfig::from_port(32300); - let mut peer_2_config = TestPeerConfig::from_port(32302); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.connection_opts.disable_inv_sync = true; peer_1_config.connection_opts.disable_block_download = true; @@ -758,25 +747,25 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { peer_2_config.connection_opts.disable_inv_sync = true; peer_2_config.connection_opts.disable_block_download = true; + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + let mut peer_2_neighbors = vec![]; for i in 0..10 { - let mut n = TestPeerConfig::from_port(2 * i + 4 + 32300); + let mut n = TestPeerConfig::new(function_name!(), 0, 0); // turn off features we don't use n.connection_opts.disable_inv_sync = true; n.connection_opts.disable_block_download = true; - peer_2_config.add_neighbor(&n.to_neighbor()); - let p = TestPeer::new(n); + + peer_2.add_neighbor(&mut p.to_neighbor(), None, false); peer_2_neighbors.push(p); } // peer 1 crawls peer 2 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); // next, make peer 1 discover peer 2's neighbors and peer 2's in/out degree. // Do two full walks @@ -840,6 +829,7 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { Some(p) => { assert_eq!(p.public_key, n.public_key); assert_eq!(p.expire_block, n.expire_block); + test_debug!("confirmed handshake with peer {:?}", &n.addr); num_handshakes += 1; } } @@ -863,6 +853,7 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { test_debug!("no peer 2"); } Some(p2) => { + test_debug!("p2 degrees = {}/{}", p2.in_degree, p2.out_degree); if p2.out_degree >= 11 && p2.in_degree >= 1 { assert_eq!(p2.out_degree, 11); did_connect = true; @@ -888,14 +879,13 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { } #[test] -#[ignore] fn test_step_walk_10_neighbors_of_neighbor_bootstrapping() { with_timeout(600, || { // peer 1 has peer 2 as its neighbor. // peer 2 has 10 other neighbors, 5 of which are too far behind peer 1. // Goal: peer 1 learns about the 5 fresher neighbors. - let mut peer_1_config = TestPeerConfig::from_port(32400); - let mut peer_2_config = TestPeerConfig::from_port(32402); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.connection_opts.disable_inv_sync = true; peer_1_config.connection_opts.disable_block_download = true; @@ -903,25 +893,24 @@ fn test_step_walk_10_neighbors_of_neighbor_bootstrapping() { peer_2_config.connection_opts.disable_inv_sync = true; peer_2_config.connection_opts.disable_block_download = true; + let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + let mut peer_2_neighbors = vec![]; for i in 0..10 { - let mut n = TestPeerConfig::from_port(2 * i + 4 + 32400); + let mut n = TestPeerConfig::new(function_name!(), 0, 0); // turn off features we don't use n.connection_opts.disable_inv_sync = true; n.connection_opts.disable_block_download = true; - peer_2_config.add_neighbor(&n.to_neighbor()); - let p = TestPeer::new(n); + peer_2.add_neighbor(&mut p.to_neighbor(), None, true); peer_2_neighbors.push(p); } // peer 1 crawls peer 2 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); // advance peer 1 and peer 2, and some of peer 2's neighbors for i in 0..MAX_NEIGHBOR_BLOCK_DELAY + 1 { @@ -1069,8 +1058,8 @@ fn test_step_walk_10_neighbors_of_neighbor_bootstrapping() { #[test] fn test_step_walk_2_neighbors_plain() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32500); - let mut peer_2_config = TestPeerConfig::from_port(32502); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.allowed = -1; peer_2_config.allowed = -1; @@ -1079,13 +1068,13 @@ fn test_step_walk_2_neighbors_plain() { peer_1_config.connection_opts.walk_max_duration = 10; peer_2_config.connection_opts.walk_max_duration = 10; - // peer 1 crawls peer 2, and peer 2 crawls peer 1 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, and peer 2 crawls peer 1 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -1201,8 +1190,8 @@ fn test_step_walk_2_neighbors_plain() { #[test] fn test_step_walk_2_neighbors_state_timeout() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32504); - let mut peer_2_config = TestPeerConfig::from_port(32506); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.allowed = -1; peer_2_config.allowed = -1; @@ -1214,13 +1203,13 @@ fn test_step_walk_2_neighbors_state_timeout() { peer_1_config.connection_opts.walk_state_timeout = 1; peer_2_config.connection_opts.walk_state_timeout = 1; - // peer 1 crawls peer 2, and peer 2 crawls peer 1 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, and peer 2 crawls peer 1 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + for _i in 0..10 { let _ = peer_1.step(); let _ = peer_2.step(); @@ -1246,8 +1235,8 @@ fn test_step_walk_2_neighbors_state_timeout() { #[test] fn test_step_walk_2_neighbors_walk_timeout() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32508); - let mut peer_2_config = TestPeerConfig::from_port(32510); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.allowed = -1; peer_2_config.allowed = -1; @@ -1262,13 +1251,13 @@ fn test_step_walk_2_neighbors_walk_timeout() { peer_1_config.connection_opts.walk_reset_interval = 10; peer_2_config.connection_opts.walk_reset_interval = 10; - // peer 1 crawls peer 2, and peer 2 crawls peer 1 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, and peer 2 crawls peer 1 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_step_count = 0; let mut walk_2_step_count = 0; @@ -1317,12 +1306,11 @@ fn test_step_walk_2_neighbors_walk_timeout() { } #[test] -#[ignore] fn test_step_walk_3_neighbors_inbound() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32520); - let mut peer_2_config = TestPeerConfig::from_port(32522); - let mut peer_3_config = TestPeerConfig::from_port(32524); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_3_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.allowed = -1; peer_2_config.allowed = -1; @@ -1340,16 +1328,20 @@ fn test_step_walk_3_neighbors_inbound() { peer_2_config.connection_opts.disable_block_download = true; peer_3_config.connection_opts.disable_block_download = true; - // Peer 2 and peer 3 are public nodes that don't know about each other, but peer 1 lists - // both of them as outbound neighbors. Goal is for peer 2 to learn about peer 3, and vice - // versa, by crawling peer 1 through an inbound neighbor walk. - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_1_config.add_neighbor(&peer_3_config.to_neighbor()); + peer_1_config.connection_opts.log_neighbors_freq = 1; + peer_2_config.connection_opts.log_neighbors_freq = 1; + peer_3_config.connection_opts.log_neighbors_freq = 1; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); let mut peer_3 = TestPeer::new(peer_3_config); + // Peer 2 and peer 3 are public nodes that don't know about each other, but peer 1 lists + // both of them as outbound neighbors. Goal is for peer 2 to learn about peer 3, and vice + // versa, by crawling peer 1 through an inbound neighbor walk. + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_1.add_neighbor(&mut peer_3.to_neighbor(), None, true); + let mut i = 0; let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -1512,11 +1504,10 @@ fn test_step_walk_3_neighbors_inbound() { } #[test] -#[ignore] fn test_step_walk_2_neighbors_rekey() { with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::from_port(32600); - let mut peer_2_config = TestPeerConfig::from_port(32602); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); peer_1_config.allowed = -1; peer_2_config.allowed = -1; @@ -1537,13 +1528,13 @@ fn test_step_walk_2_neighbors_rekey() { peer_1_config.connection_opts.private_key_lifetime = 5; peer_2_config.connection_opts.private_key_lifetime = 5; - // peer 1 crawls peer 2, and peer 2 crawls peer 1 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); + // peer 1 crawls peer 2, and peer 2 crawls peer 1 + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1.to_neighbor(), None, true); + let initial_public_key_1 = peer_1.get_public_key(); let initial_public_key_2 = peer_2.get_public_key(); @@ -1630,30 +1621,19 @@ fn test_step_walk_2_neighbors_rekey() { #[test] fn test_step_walk_2_neighbors_different_networks() { with_timeout(600, || { - // peer 1 and 2 try to handshake but never succeed since they have different network IDs - let mut peer_1_config = TestPeerConfig::from_port(32700); - let mut peer_2_config = TestPeerConfig::from_port(32702); - - // peer 1 crawls peer 2, and peer 2 crawls peer 1 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - - // peer 2 thinks peer 1 has the same network ID that it does - println!("1 ~~~ {}", peer_1_config.network_id); - println!("2 ~~~ {}", peer_2_config.network_id); + let mut peer_1_config = TestPeerConfig::new(function_name!(), 0, 0); + let peer_2_config = TestPeerConfig::new(function_name!(), 0, 0); - peer_1_config.network_id = peer_1_config.network_id + 1; - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - peer_1_config.network_id = peer_1_config.network_id - 1; - - // different network IDs - peer_2_config.network_id = peer_1_config.network_id + 1; - - println!("3 ~~~ {}", peer_1_config.network_id); - println!("4 ~~~ {}", peer_2_config.network_id); + peer_1_config.network_id = peer_2_config.network_id + 1; let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); - println!("5 ~~~"); + + let mut peer_1_neighbor = peer_1.to_neighbor(); + peer_1_neighbor.addr.network_id = peer_2.config.network_id; + + peer_1.add_neighbor(&mut peer_2.to_neighbor(), None, true); + peer_2.add_neighbor(&mut peer_1_neighbor, None, true); let mut walk_1_count = 0; let mut walk_2_count = 0; @@ -1703,1125 +1683,5 @@ fn test_step_walk_2_neighbors_different_networks() { .network .get_neighbor_stats(&peer_1.to_neighbor().addr); assert!(stats_2.is_none()); - - let neighbor_1 = peer_1.to_neighbor(); - let neighbor_2 = peer_2.to_neighbor(); - - // peer 1 was NOT added to the peer DB of peer 2 - assert!(PeerDB::get_peer( - peer_1.network.peerdb.conn(), - neighbor_2.addr.network_id, - &neighbor_2.addr.addrbytes, - neighbor_2.addr.port - ) - .unwrap() - .is_none()); - - // peer 2 was NOT added to the peer DB of peer 1 - assert!(PeerDB::get_peer( - peer_2.network.peerdb.conn(), - neighbor_1.addr.network_id, - &neighbor_1.addr.addrbytes, - neighbor_1.addr.port - ) - .unwrap() - .is_none()); - }) -} - -fn stacker_db_id(i: usize) -> QualifiedContractIdentifier { - QualifiedContractIdentifier::new( - StandardPrincipalData(0x01, [i as u8; 20]), - format!("db-{}", i).as_str().into(), - ) -} - -fn make_stacker_db_ids(i: usize) -> Vec { - let mut dbs = vec![]; - for j in 0..i { - dbs.push(stacker_db_id(j)); - } - dbs -} - -fn setup_peer_config( - i: usize, - port_base: u16, - neighbor_count: usize, - peer_count: usize, -) -> TestPeerConfig { - let mut conf = TestPeerConfig::from_port(port_base + (2 * i as u16)); - conf.connection_opts.num_neighbors = neighbor_count as u64; - conf.connection_opts.soft_num_neighbors = neighbor_count as u64; - - conf.connection_opts.num_clients = 256; - conf.connection_opts.soft_num_clients = 128; - - conf.connection_opts.max_http_clients = 1000; - conf.connection_opts.max_neighbors_of_neighbor = neighbor_count as u64; - - conf.connection_opts.max_clients_per_host = MAX_NEIGHBORS_DATA_LEN as u64; - conf.connection_opts.soft_max_clients_per_host = peer_count as u64; - - conf.connection_opts.max_neighbors_per_host = MAX_NEIGHBORS_DATA_LEN as u64; - conf.connection_opts.soft_max_neighbors_per_host = (neighbor_count / 2) as u64; - conf.connection_opts.soft_max_neighbors_per_org = (neighbor_count / 2) as u64; - - conf.connection_opts.walk_interval = 0; - - conf.connection_opts.disable_inv_sync = true; - conf.connection_opts.disable_block_download = true; - - let j = i as u32; - conf.burnchain.peer_version = PEER_VERSION_TESTNET | (j << 16) | (j << 8) | j; // different non-major versions for each peer - - // even-number peers support stacker DBs. - // odd-number peers do not - if i % 2 == 0 { - conf.services = (ServiceFlags::RELAY as u16) - | (ServiceFlags::RPC as u16) - | (ServiceFlags::STACKERDB as u16); - conf.stacker_dbs = make_stacker_db_ids(i); - } else { - conf.services = (ServiceFlags::RELAY as u16) | (ServiceFlags::RPC as u16); - conf.stacker_dbs = vec![]; - } - - conf -} - -#[test] -#[ignore] -fn test_walk_ring_allow_15() { - with_timeout(600, || { - // all initial peers are allowed - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 32800, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = -1; // always allowed - conf.denied = 0; - - conf.connection_opts.timeout = 100000; - conf.connection_opts.handshake_timeout = 100000; - conf.connection_opts.disable_natpunch = true; // breaks allow checks - - peer_configs.push(conf); - } - - test_walk_ring(&mut peer_configs, NEIGHBOR_COUNT); - }) -} - -#[test] -#[ignore] -fn test_walk_ring_15_plain() { - with_timeout(600, || { - // initial peers are neither white- nor denied - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 32900, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - - peer_configs.push(conf); - } - - test_walk_ring(&mut peer_configs, NEIGHBOR_COUNT); - }) -} - -#[test] -#[ignore] -fn test_walk_ring_15_pingback() { - with_timeout(600, || { - // initial peers are neither white- nor denied - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 32950, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - conf.connection_opts.disable_pingbacks = true; - conf.connection_opts.disable_inbound_walks = false; - - peer_configs.push(conf); - } - - test_walk_ring_pingback(&mut peer_configs, NEIGHBOR_COUNT); - }) -} - -#[test] -#[ignore] -fn test_walk_ring_15_org_biased() { - with_timeout(600, || { - // one outlier peer has a different org than the others. - use std::env; - - // ::33000 is in AS 1 - env::set_var("BLOCKSTACK_NEIGHBOR_TEST_33000", "1"); - - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33000, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - if i == 0 { - conf.asn = 1; - conf.org = 1; - } else { - conf.asn = 0; - conf.org = 0; - } - - peer_configs.push(conf); - } - - // all peers see peer ::33000 as having ASN and Org ID 1 - let peer_0 = peer_configs[0].to_neighbor(); - - let peers = test_walk_ring(&mut peer_configs, NEIGHBOR_COUNT); - - for i in 1..PEER_COUNT { - match PeerDB::get_peer( - peers[i].network.peerdb.conn(), - peer_0.addr.network_id, - &peer_0.addr.addrbytes, - peer_0.addr.port, - ) - .unwrap() - { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} - } - } - - // no peer pruned peer ::33000 - for i in 1..PEER_COUNT { - match peers[i].network.prune_inbound_counts.get(&peer_0.addr) { - None => {} - Some(count) => { - assert_eq!(*count, 0); - } - } - } - }) -} - -fn test_walk_ring_ex( - peer_configs: &mut Vec, - neighbor_count: usize, - test_pingback: bool, -) -> Vec { - // arrange neighbors into a "ring" topology, where - // neighbor N is connected to neighbor (N-1)%NUM_NEIGHBORS and (N+1)%NUM_NEIGHBORS. - // If test_pingback is true, then neighbor N is only connected to (N+1)%NUM_NEIGHBORS - let mut peers = vec![]; - - let PEER_COUNT = peer_configs.len(); - let NEIGHBOR_COUNT = neighbor_count; - - for i in 0..PEER_COUNT { - let n = (i + 1) % PEER_COUNT; - let neighbor = peer_configs[n].to_neighbor(); - peer_configs[i].add_neighbor(&neighbor); - } - - if !test_pingback { - for i in 1..PEER_COUNT + 1 { - let p = i - 1; - let neighbor = peer_configs[p].to_neighbor(); - peer_configs[i % PEER_COUNT].add_neighbor(&neighbor); - } - } - - for i in 0..PEER_COUNT { - let p = TestPeer::new(peer_configs[i].clone()); - peers.push(p); - } - - run_topology_test(&mut peers, NEIGHBOR_COUNT, TEST_IN_OUT_DEGREES); - - // no nacks or handshake-rejects - for i in 0..PEER_COUNT { - for (_, convo) in peers[i].network.peers.iter() { - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::Nack) - .unwrap_or(&0) - == 0 - ); - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::HandshakeReject) - .unwrap_or(&0) - == 0 - ); - } - } - - peers -} - -fn test_walk_ring(peer_configs: &mut Vec, neighbor_count: usize) -> Vec { - test_walk_ring_ex(peer_configs, neighbor_count, false) -} - -fn test_walk_ring_pingback( - peer_configs: &mut Vec, - neighbor_count: usize, -) -> Vec { - test_walk_ring_ex(peer_configs, neighbor_count, true) -} - -#[test] -#[ignore] -fn test_walk_line_allowed_15() { - with_timeout(600, || { - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33100, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = -1; - conf.denied = 0; - - conf.connection_opts.timeout = 100000; - conf.connection_opts.handshake_timeout = 100000; - conf.connection_opts.disable_natpunch = true; // breaks allow checks - - peer_configs.push(conf); - } - - test_walk_line(&mut peer_configs, NEIGHBOR_COUNT, TEST_IN_OUT_DEGREES); - }) -} - -#[test] -#[ignore] -fn test_walk_line_15_plain() { - with_timeout(600, || { - // initial peers are neither white- nor denied - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33200, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - - peer_configs.push(conf); - } - - test_walk_line(&mut peer_configs, NEIGHBOR_COUNT, TEST_IN_OUT_DEGREES); - }) -} - -#[test] -#[ignore] -fn test_walk_line_15_org_biased() { - with_timeout(600, || { - // one outlier peer has a different org than the others. - use std::env; - - // ::33300 is in AS 1 - env::set_var("BLOCKSTACK_NEIGHBOR_TEST_33300", "1"); - - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; // make this a little bigger to speed this test up - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33300, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - if i == 0 { - conf.asn = 1; - conf.org = 1; - } else { - conf.asn = 0; - conf.org = 0; - } - - peer_configs.push(conf); - } - // all peers see peer ::33300 as having ASN and Org ID 1 - let peer_0 = peer_configs[0].to_neighbor(); - - let peers = test_walk_line(&mut peer_configs, NEIGHBOR_COUNT, 0); - - for i in 1..PEER_COUNT { - match PeerDB::get_peer( - peers[i].network.peerdb.conn(), - peer_0.addr.network_id, - &peer_0.addr.addrbytes, - peer_0.addr.port, - ) - .unwrap() - { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} - } - } - - // no peer pruned peer ::33300 - for i in 1..PEER_COUNT { - match peers[i].network.prune_inbound_counts.get(&peer_0.addr) { - None => {} - Some(count) => { - assert_eq!(*count, 0); - } - } - } - }) -} - -#[test] -#[ignore] -fn test_walk_line_15_pingback() { - with_timeout(600, || { - // initial peers are neither white- nor denied - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33350, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - conf.connection_opts.disable_pingbacks = false; - conf.connection_opts.disable_inbound_walks = true; - - peer_configs.push(conf); - } - - test_walk_line_pingback(&mut peer_configs, NEIGHBOR_COUNT, TEST_IN_OUT_DEGREES); - }) -} - -fn test_walk_line( - peer_configs: &mut Vec, - neighbor_count: usize, - tests: u64, -) -> Vec { - test_walk_line_ex(peer_configs, neighbor_count, tests, false) -} - -fn test_walk_line_pingback( - peer_configs: &mut Vec, - neighbor_count: usize, - tests: u64, -) -> Vec { - test_walk_line_ex(peer_configs, neighbor_count, tests, true) -} - -fn test_walk_line_ex( - peer_configs: &mut Vec, - neighbor_count: usize, - tests: u64, - pingback_test: bool, -) -> Vec { - // arrange neighbors into a "line" topology. - // If pingback_test is true, then the topology is unidirectional: - // - // 0 ---> 1 ---> 2 ---> ... ---> NEIGHBOR_COUNT - // - // If pingback_test is false, then the topology is bidirectional - // - // 0 <--> 1 <--> 2 <--> ... <--> NEIGHBOR_COUNT - // - // all initial peers are allowed - let mut peers = vec![]; - - let PEER_COUNT = peer_configs.len(); - let NEIGHBOR_COUNT = neighbor_count; - for i in 0..PEER_COUNT - 1 { - let n = i + 1; - let neighbor = peer_configs[n].to_neighbor(); - peer_configs[i].add_neighbor(&neighbor); - } - - if !pingback_test { - for i in 1..PEER_COUNT { - let p = i - 1; - let neighbor = peer_configs[p].to_neighbor(); - peer_configs[i].add_neighbor(&neighbor); - } - } - - for i in 0..PEER_COUNT { - let p = TestPeer::new(peer_configs[i].clone()); - peers.push(p); - } - - run_topology_test(&mut peers, NEIGHBOR_COUNT, tests); - - // no nacks or handshake-rejects - for i in 0..PEER_COUNT { - for (_, convo) in peers[i].network.peers.iter() { - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::Nack) - .unwrap_or(&0) - == 0 - ); - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::HandshakeReject) - .unwrap_or(&0) - == 0 - ); - } - } - - peers -} - -#[test] -#[ignore] -fn test_walk_star_allowed_15() { - with_timeout(600, || { - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33400, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = -1; // always allowed - conf.denied = 0; - - conf.connection_opts.timeout = 100000; - conf.connection_opts.handshake_timeout = 100000; - conf.connection_opts.disable_natpunch = true; // breaks allow checks - - peer_configs.push(conf); - } - - test_walk_star(&mut peer_configs, NEIGHBOR_COUNT); - }) -} - -#[test] -#[ignore] -fn test_walk_star_15_plain() { - with_timeout(600, || { - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33500, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - - peer_configs.push(conf); - } - - test_walk_star(&mut peer_configs, NEIGHBOR_COUNT); - }) -} - -#[test] -#[ignore] -fn test_walk_star_15_pingback() { - with_timeout(600, || { - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33550, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - conf.connection_opts.disable_pingbacks = false; - conf.connection_opts.disable_inbound_walks = true; - conf.connection_opts.soft_max_neighbors_per_org = PEER_COUNT as u64; - - peer_configs.push(conf); - } - - test_walk_star_pingback(&mut peer_configs, NEIGHBOR_COUNT); - }) -} - -#[test] -#[ignore] -fn test_walk_star_15_org_biased() { - with_timeout(600, || { - // one outlier peer has a different org than the others. - use std::env; - - // ::33600 is in AS 1 - env::set_var("BLOCKSTACK_NEIGHBOR_TEST_33600", "1"); - - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 3; - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33600, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - if i == 0 { - conf.asn = 1; - conf.org = 1; - } else { - conf.asn = 0; - conf.org = 0; - } - - peer_configs.push(conf); - } - // all peers see peer ::33600 as having ASN and Org ID 1 - let peer_0 = peer_configs[0].to_neighbor(); - - let peers = test_walk_star(&mut peer_configs, NEIGHBOR_COUNT); - - for i in 1..PEER_COUNT { - match PeerDB::get_peer( - peers[i].network.peerdb.conn(), - peer_0.addr.network_id, - &peer_0.addr.addrbytes, - peer_0.addr.port, - ) - .unwrap() - { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} - } - } - - // no peer pruned peer ::33600 - for i in 1..PEER_COUNT { - match peers[i].network.prune_inbound_counts.get(&peer_0.addr) { - None => {} - Some(count) => { - assert_eq!(*count, 0); - } - } - } - }) -} - -fn test_walk_star(peer_configs: &mut Vec, neighbor_count: usize) -> Vec { - test_walk_star_ex(peer_configs, neighbor_count, false) -} - -fn test_walk_star_pingback( - peer_configs: &mut Vec, - neighbor_count: usize, -) -> Vec { - test_walk_star_ex(peer_configs, neighbor_count, true) -} - -fn test_walk_star_ex( - peer_configs: &mut Vec, - neighbor_count: usize, - pingback_test: bool, -) -> Vec { - // arrange neighbors into a "star" topology. - // If pingback_test is true, then initial connections are unidirectional -- each neighbor (except - // for 0) only knows about 0. Neighbor 0 knows about no one. - // If pingback_test is false, then initial connections are bidirectional. - - let mut peers = vec![]; - let PEER_COUNT = peer_configs.len(); - let NEIGHBOR_COUNT = neighbor_count; - - for i in 1..PEER_COUNT { - let neighbor = peer_configs[i].to_neighbor(); - let hub = peer_configs[0].to_neighbor(); - if !pingback_test { - peer_configs[0].add_neighbor(&neighbor); - } - - peer_configs[i].add_neighbor(&hub); - } - - for i in 0..PEER_COUNT { - let p = TestPeer::new(peer_configs[i].clone()); - peers.push(p); - } - - run_topology_test(&mut peers, NEIGHBOR_COUNT, 0); - - // no nacks or handshake-rejects - for i in 0..PEER_COUNT { - for (_, convo) in peers[i].network.peers.iter() { - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::Nack) - .unwrap_or(&0) - == 0 - ); - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::HandshakeReject) - .unwrap_or(&0) - == 0 - ); - } - } - - peers -} - -fn test_walk_inbound_line( - peer_configs: &mut Vec, - neighbor_count: usize, -) -> Vec { - // arrange neighbors into a two-tiered "line" topology, where even-numbered neighbors are - // "NAT'ed" but connected to both the predecessor and successor odd neighbors. Odd - // numbered neighbors are not connected to anyone. The first and last even-numbered - // neighbor is only connected to its successor and predecessor, respectively. - // - // 1 3 5 - // ^ ^ ^ ^ ^ ^ - // / \ / \ / \ ... etc ... - // 0 2 4 6 - // - // The goal of this test is that odd-numbered neighbors all learn about each other - - let mut peers = vec![]; - let PEER_COUNT = peer_configs.len(); - let NEIGHBOR_COUNT = neighbor_count; - - for i in 0..PEER_COUNT { - if i % 2 == 0 { - if i > 0 { - let predecessor = peer_configs[i - 1].to_neighbor(); - peer_configs[i].add_neighbor(&predecessor); - } - if i + 1 < PEER_COUNT { - let successor = peer_configs[i + 1].to_neighbor(); - peer_configs[i].add_neighbor(&successor); - } - } - } - - for i in 0..PEER_COUNT { - let p = TestPeer::new(peer_configs[i].clone()); - peers.push(p); - } - - run_topology_test_ex( - &mut peers, - NEIGHBOR_COUNT, - 0, - |peers: &Vec| { - let mut done = true; - for i in 0..PEER_COUNT { - // only check "public" peers - if i % 2 != 0 { - let all_neighbors = - PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); - if (all_neighbors.len() as u64) < ((PEER_COUNT / 2 - 1) as u64) { - let nk = peers[i].config.to_neighbor().addr; - test_debug!( - "waiting for public peer {:?} to fill up its frontier: {}", - &nk, - all_neighbors.len() - ); - done = false; - } - } - } - done - }, - true, - ); - - // no nacks or handshake-rejects - for i in 0..PEER_COUNT { - for (_, convo) in peers[i].network.peers.iter() { - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::Nack) - .unwrap_or(&0) - == 0 - ); - assert!( - *convo - .stats - .msg_rx_counts - .get(&StacksMessageID::HandshakeReject) - .unwrap_or(&0) - == 0 - ); - } - } - - peers -} - -#[test] -#[ignore] -fn test_walk_inbound_line_15() { - with_timeout(600, || { - let mut peer_configs = vec![]; - let PEER_COUNT: usize = 15; - let NEIGHBOR_COUNT: usize = 15; // make this test go faster - - for i in 0..PEER_COUNT { - let mut conf = setup_peer_config(i, 33250, NEIGHBOR_COUNT, PEER_COUNT); - - conf.allowed = 0; - conf.denied = 0; - conf.connection_opts.disable_pingbacks = true; - conf.connection_opts.disable_inbound_walks = false; - conf.connection_opts.walk_inbound_ratio = 2; - // basically, don't timeout (so public nodes can ask non-public inbound nodes about - // neighbors indefinitely) - conf.connection_opts.connect_timeout = 60000; - conf.connection_opts.timeout = 60000; - conf.connection_opts.handshake_timeout = 60000; - conf.connection_opts.soft_max_neighbors_per_org = (NEIGHBOR_COUNT + 1) as u64; - conf.connection_opts.soft_max_neighbors_per_host = (NEIGHBOR_COUNT + 1) as u64; - - peer_configs.push(conf); - } - - test_walk_inbound_line(&mut peer_configs, NEIGHBOR_COUNT); }) } - -fn dump_peers(peers: &Vec) -> () { - test_debug!("\n=== PEER DUMP ==="); - for i in 0..peers.len() { - let mut neighbor_index = vec![]; - let mut outbound_neighbor_index = vec![]; - for j in 0..peers.len() { - let stats_opt = peers[i] - .network - .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - outbound_neighbor_index.push(j); - } - } - None => {} - } - } - - let all_neighbors = PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); - let num_allowed = all_neighbors.iter().fold(0, |mut sum, ref n2| { - sum += if n2.allowed < 0 { 1 } else { 0 }; - sum - }); - test_debug!("Neighbor {} (all={}, outbound={}) (total neighbors = {}, total allowed = {}): outbound={:?} all={:?}", i, neighbor_index.len(), outbound_neighbor_index.len(), all_neighbors.len(), num_allowed, &outbound_neighbor_index, &neighbor_index); - } - test_debug!("\n"); -} - -fn dump_peer_histograms(peers: &Vec) -> () { - let mut outbound_hist: HashMap = HashMap::new(); - let mut inbound_hist: HashMap = HashMap::new(); - let mut all_hist: HashMap = HashMap::new(); - for i in 0..peers.len() { - let mut neighbor_index = vec![]; - let mut inbound_neighbor_index = vec![]; - let mut outbound_neighbor_index = vec![]; - for j in 0..peers.len() { - let stats_opt = peers[i] - .network - .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - outbound_neighbor_index.push(j); - } else { - inbound_neighbor_index.push(j); - } - } - None => {} - } - } - for inbound in inbound_neighbor_index.iter() { - if inbound_hist.contains_key(inbound) { - let c = inbound_hist.get(inbound).unwrap().to_owned(); - inbound_hist.insert(*inbound, c + 1); - } else { - inbound_hist.insert(*inbound, 1); - } - } - for outbound in outbound_neighbor_index.iter() { - if outbound_hist.contains_key(outbound) { - let c = outbound_hist.get(outbound).unwrap().to_owned(); - outbound_hist.insert(*outbound, c + 1); - } else { - outbound_hist.insert(*outbound, 1); - } - } - for n in neighbor_index.iter() { - if all_hist.contains_key(n) { - let c = all_hist.get(n).unwrap().to_owned(); - all_hist.insert(*n, c + 1); - } else { - all_hist.insert(*n, 1); - } - } - } - - test_debug!("=== PEER HISTOGRAM ==="); - for i in 0..peers.len() { - test_debug!( - "Neighbor {}: #in={} #out={} #all={}", - i, - inbound_hist.get(&i).unwrap_or(&0), - outbound_hist.get(&i).unwrap_or(&0), - all_hist.get(&i).unwrap_or(&0) - ); - } - test_debug!("\n"); -} - -fn run_topology_test(peers: &mut Vec, neighbor_count: usize, test_bits: u64) -> () { - run_topology_test_ex(peers, neighbor_count, test_bits, |_| false, false) -} - -fn run_topology_test_ex( - peers: &mut Vec, - neighbor_count: usize, - test_bits: u64, - mut finished_check: F, - use_finished_check: bool, -) -> () -where - F: FnMut(&Vec) -> bool, -{ - let PEER_COUNT = peers.len(); - - let mut initial_allowed: HashMap> = HashMap::new(); - let mut initial_denied: HashMap> = HashMap::new(); - - for i in 0..PEER_COUNT { - // turn off components we don't need - peers[i].config.connection_opts.disable_inv_sync = true; - peers[i].config.connection_opts.disable_block_download = true; - let nk = peers[i].config.to_neighbor().addr.clone(); - for j in 0..peers[i].config.initial_neighbors.len() { - let initial = &peers[i].config.initial_neighbors[j]; - if initial.allowed < 0 { - if !initial_allowed.contains_key(&nk) { - initial_allowed.insert(nk.clone(), vec![]); - } - initial_allowed - .get_mut(&nk) - .unwrap() - .push(initial.addr.clone()); - } - if initial.denied < 0 { - if !initial_denied.contains_key(&nk) { - initial_denied.insert(nk.clone(), vec![]); - } - initial_denied - .get_mut(&nk) - .unwrap() - .push(initial.addr.clone()); - } - } - } - - for i in 0..PEER_COUNT { - peers[i].connect_initial().unwrap(); - } - - // go until each neighbor knows about each other neighbor - let mut finished = false; - let mut count = 0; - while !finished { - finished = true; - let mut peer_counts = 0; - let mut random_order = vec![0usize; PEER_COUNT]; - for i in 0..PEER_COUNT { - random_order[i] = i; - } - let mut rng = thread_rng(); - random_order.shuffle(&mut rng); - - debug!("Random order = {:?}", &random_order); - for i in random_order.into_iter() { - let _ = peers[i].step_with_ibd(false); - let nk = peers[i].config.to_neighbor().addr; - debug!("Step peer {:?}", &nk); - - // allowed peers are still connected - match initial_allowed.get(&nk) { - Some(ref peer_list) => { - for pnk in peer_list.iter() { - if !peers[i].network.events.contains_key(&pnk.clone()) { - error!( - "{:?}: Perma-allowed peer {:?} not connected anymore", - &nk, &pnk - ); - assert!(false); - } - } - } - None => {} - }; - - // denied peers are never connected - match initial_denied.get(&nk) { - Some(ref peer_list) => { - for pnk in peer_list.iter() { - if peers[i].network.events.contains_key(&pnk.clone()) { - error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); - assert!(false); - } - } - } - None => {} - }; - - // all ports are unique in the p2p socket table - let mut ports: HashSet = HashSet::new(); - for k in peers[i].network.events.keys() { - if ports.contains(&k.port) { - error!("duplicate port {} from {:?}", k.port, k); - assert!(false); - } - ports.insert(k.port); - } - - // done? - let now_finished = if use_finished_check { - finished_check(&peers) - } else { - let mut done = true; - let all_neighbors = PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); - peer_counts += all_neighbors.len(); - test_debug!("Peer {} ({}) has {} neighbors", i, &nk, all_neighbors.len()); - - if (all_neighbors.len() as u64) < ((PEER_COUNT - 1) as u64) { - test_debug!( - "waiting for {:?} to fill up its frontier: {} < {}", - &nk, - all_neighbors.len(), - PEER_COUNT - 1 - ); - done = false; - } else { - test_debug!( - "not waiting for {:?} to fill up its frontier: {} >= {}", - &nk, - all_neighbors.len(), - PEER_COUNT - 1 - ); - } - done - }; - - finished = finished && now_finished; - } - - count += 1; - - test_debug!( - "Network convergence rate: {}%", - (100.0 * (peer_counts as f64)) / ((PEER_COUNT * PEER_COUNT) as f64), - ); - - if finished { - break; - } - - test_debug!("Finished walking the network {} times", count); - dump_peers(&peers); - dump_peer_histograms(&peers); - } - - test_debug!("Converged after {} calls to network.run()", count); - dump_peers(&peers); - dump_peer_histograms(&peers); - - // each peer learns each other peer's stacker DBs - for (i, peer) in peers.iter().enumerate() { - if i % 2 != 0 { - continue; - } - let mut expected_dbs = PeerDB::get_local_peer(peer.network.peerdb.conn()) - .unwrap() - .stacker_dbs; - expected_dbs.sort(); - for (j, other_peer) in peers.iter().enumerate() { - if i == j { - continue; - } - - let all_neighbors = PeerDB::get_all_peers(other_peer.network.peerdb.conn()).unwrap(); - - if (all_neighbors.len() as u64) < ((PEER_COUNT - 1) as u64) { - // this is a simulated-NAT'ed node -- it won't learn about other NAT'ed nodes' - // DBs - continue; - } - - // what does the other peer see as this peer's stacker DBs? - let mut other_peer_dbs = other_peer - .network - .peerdb - .get_peer_stacker_dbs(&peer.config.to_neighbor()) - .unwrap(); - other_peer_dbs.sort(); - - if j % 2 == 0 { - test_debug!( - "Compare stacker DBs of {} vs {}", - &peer.config.to_neighbor(), - &other_peer.config.to_neighbor() - ); - assert_eq!(expected_dbs, other_peer_dbs); - } else { - // this peer doesn't support Stacker DBs - assert_eq!(other_peer_dbs, vec![]); - } - } - } -} From 6f37fa6de5da01eccce1a3d7a90d8f77ed89cc9b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:28:05 -0400 Subject: [PATCH 1101/1400] fix: sort_unstable_by() for sorting peers by health, since our comparison function is not a total order (oops) --- stackslib/src/net/prune.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index d7991a67c3..87b16d7bba 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -82,7 +82,7 @@ impl PeerNetwork { }; } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] { test_debug!( "==== ORG NEIGHBOR DISTRIBUTION OF {:?} ===", @@ -199,9 +199,11 @@ impl PeerNetwork { match org_neighbors.get_mut(&org) { None => {} Some(ref mut neighbor_infos) => { - neighbor_infos.sort_by(|&(ref _nk1, ref stats1), &(ref _nk2, ref stats2)| { - PeerNetwork::compare_neighbor_uptime_health(stats1, stats2) - }); + neighbor_infos.sort_unstable_by( + |&(ref _nk1, ref stats1), &(ref _nk2, ref stats2)| { + PeerNetwork::compare_neighbor_uptime_health(stats1, stats2) + }, + ); } } } @@ -374,7 +376,7 @@ impl PeerNetwork { } /// Dump our peer table - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn dump_peer_table(&mut self) -> (Vec, Vec) { let mut inbound: Vec = vec![]; let mut outbound: Vec = vec![]; @@ -445,7 +447,7 @@ impl PeerNetwork { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] { if pruned_by_ip.len() > 0 || pruned_by_org.len() > 0 { let (mut inbound, mut outbound) = self.dump_peer_table(); From 50a967bc8d73637c2ceaabed3e4138d14a5f4c2e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:28:38 -0400 Subject: [PATCH 1102/1400] chore: add pathological reward cycles to downloader tests where the only sortitions in the reward cycle are to confirm the anchor block --- stackslib/src/net/tests/download/nakamoto.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 31f88b50f8..44bbaed7d2 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -2066,6 +2066,19 @@ fn test_make_tenure_downloaders() { fn test_nakamoto_download_run_2_peers() { let observer = TestEventObserver::new(); let bitvecs = vec![ + // a reward cycle with one prepare phase sortition at the start + vec![ + true, true, true, true, true, true, true, false, false, false, + ], + // a reward cycle with one prepare phase sortition at the end, + // and no tenures in the first three reward phase sortitions + vec![ + false, false, false, true, true, false, false, true, true, false, + ], + // full reward cycle, minus the first three tenures + vec![ + false, false, false, true, true, true, true, true, true, true, + ], // full reward cycle vec![true, true, true, true, true, true, true, true, true, true], // alternating reward cycle, but with a full prepare phase From d442f2ce09b26c489fd97e52fd677891521a452e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:29:12 -0400 Subject: [PATCH 1103/1400] build: build tests with `feature = "testing"`, and disable unused warnings for tests --- stackslib/src/net/api/getneighbors.rs | 4 +++- stackslib/src/net/asn.rs | 2 +- stackslib/src/net/atlas/db.rs | 6 +++--- stackslib/src/net/codec.rs | 2 +- stackslib/src/net/dns.rs | 2 +- stackslib/src/net/download/epoch2x.rs | 12 ++++++------ stackslib/src/net/httpcore.rs | 22 +++++++++++----------- stackslib/src/net/inv/epoch2x.rs | 9 +++------ stackslib/src/net/mod.rs | 19 ++++++++++++------- stackslib/src/net/poll.rs | 2 +- stackslib/src/net/stackerdb/db.rs | 2 +- stackslib/src/net/stackerdb/tests/mod.rs | 1 + stackslib/src/net/stackerdb/tests/sync.rs | 1 - stackslib/src/net/tests/download/mod.rs | 1 + stackslib/src/net/tests/inv/mod.rs | 1 + stackslib/src/net/tests/mempool/mod.rs | 1 + stackslib/src/net/tests/mod.rs | 1 + stackslib/src/net/tests/relay/mod.rs | 1 + 18 files changed, 49 insertions(+), 40 deletions(-) diff --git a/stackslib/src/net/api/getneighbors.rs b/stackslib/src/net/api/getneighbors.rs index 6707ed3ba1..9e7d0402da 100644 --- a/stackslib/src/net/api/getneighbors.rs +++ b/stackslib/src/net/api/getneighbors.rs @@ -19,6 +19,7 @@ use std::io::{Read, Write}; use clarity::vm::types::QualifiedContractIdentifier; use regex::{Captures, Regex}; use stacks_common::types::net::{PeerAddress, PeerHost}; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Hash160; use crate::net::db::PeerDB; @@ -145,10 +146,11 @@ impl RPCNeighborsInfo { peerdb_conn, network_id, network_epoch, - max_neighbor_age, + get_epoch_time_secs().saturating_sub(max_neighbor_age), MAX_NEIGHBORS_DATA_LEN, burnchain_view.burn_block_height, false, + true, ) .map_err(NetError::DBError)?; diff --git a/stackslib/src/net/asn.rs b/stackslib/src/net/asn.rs index f38c6c54d4..bb31146c81 100644 --- a/stackslib/src/net/asn.rs +++ b/stackslib/src/net/asn.rs @@ -222,7 +222,7 @@ impl ASEntry4 { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { use std::io; use std::io::BufRead; diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index d6bdbb301e..37ed22a26b 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -374,7 +374,7 @@ impl AtlasDB { } // Open an atlas database in memory (used for testing) - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn connect_memory(atlas_config: AtlasConfig) -> Result { let conn = Connection::open_in_memory().map_err(|e| db_error::SqliteError(e))?; let mut db = AtlasDB { @@ -387,7 +387,7 @@ impl AtlasDB { Ok(db) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] /// Only ever to be used in testing, open and instantiate a V1 atlasdb pub fn connect_memory_db_v1(atlas_config: AtlasConfig) -> Result { let conn = Connection::open_in_memory()?; @@ -432,7 +432,7 @@ impl AtlasDB { Ok(db) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] /// Only ever to be used in testing, connect to db, but using existing sqlconn pub fn connect_with_sqlconn( atlas_config: AtlasConfig, diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index bd8154e414..4cb4099fb4 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -1600,7 +1600,7 @@ impl ProtocolFamily for StacksP2P { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { use stacks_common::bitvec::BitVec; use stacks_common::codec::NEIGHBOR_ADDRESS_ENCODED_SIZE; diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index aedb73bd62..c63d1b4fed 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -355,7 +355,7 @@ impl DNSClient { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { use std::collections::HashMap; use std::error::Error; diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs index c57d9d19bc..5c926c4192 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -56,22 +56,22 @@ use crate::net::{ }; use crate::util_lib::db::{DBConn, Error as db_error}; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const BLOCK_DOWNLOAD_INTERVAL: u64 = 180; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const BLOCK_DOWNLOAD_INTERVAL: u64 = 0; /// If a URL never connects, don't use it again for this many seconds -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const BLOCK_DOWNLOAD_BAN_URL: u64 = 300; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const BLOCK_DOWNLOAD_BAN_URL: u64 = 60; /// If we created a request to download a block or microblock, don't do so again until this many /// seconds have passed. -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const BLOCK_REREQUEST_INTERVAL: u64 = 60; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const BLOCK_REREQUEST_INTERVAL: u64 = 30; /// This module is responsible for downloading blocks and microblocks from other peers, using block diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 804add6f33..fc296b9f2b 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -547,14 +547,14 @@ impl StacksHttpRequest { (self.preamble, self.contents) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn try_serialize(&self) -> Result, NetError> { let mut ret = vec![]; self.send(&mut ret)?; Ok(ret) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_response_handler_index(&self) -> Option { self.response_handler_index } @@ -676,7 +676,7 @@ impl StacksHttpResponse { self.preamble.headers.clear(); } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn try_serialize(&self) -> Result, NetError> { let mut ret = vec![]; self.send(&mut ret)?; @@ -700,7 +700,7 @@ pub enum StacksHttpPreamble { } impl StacksHttpPreamble { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn expect_request(self) -> HttpRequestPreamble { match self { Self::Request(x) => x, @@ -708,7 +708,7 @@ impl StacksHttpPreamble { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn expect_response(self) -> HttpResponsePreamble { match self { Self::Response(x) => x, @@ -1004,7 +1004,7 @@ impl StacksHttp { } /// Force the state machine to expect a response - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn set_response_handler(&mut self, request_verb: &str, request_path: &str) { let handler_index = self .find_response_handler(request_verb, request_path) @@ -1016,7 +1016,7 @@ impl StacksHttp { } /// Try to parse an inbound HTTP request using a given handler, preamble, and body - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn handle_try_parse_request( &self, handler: &mut dyn RPCRequestHandler, @@ -1202,7 +1202,7 @@ impl StacksHttp { Ok((response_preamble, response_contents)) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn num_pending(&self) -> usize { self.reply.as_ref().map(|_| 1).unwrap_or(0) } @@ -1346,10 +1346,10 @@ impl StacksHttp { } /// Given a fully-formed single HTTP response, parse it (used by clients). - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn parse_response( - verb: &str, - request_path: &str, + _verb: &str, + _request_path: &str, response_buf: &[u8], ) -> Result { let mut http = StacksHttp::new( diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index fc5f073b2e..7068db7acc 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -47,9 +47,9 @@ use crate::net::{ use crate::util_lib::db::{DBConn, Error as db_error}; /// This module is responsible for synchronizing block inventories with other peers -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const INV_SYNC_INTERVAL: u64 = 150; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const INV_SYNC_INTERVAL: u64 = 3; pub const INV_REWARD_CYCLES: u64 = 2; @@ -1143,7 +1143,7 @@ impl InvState { self.block_stats.get_mut(nk) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn add_peer(&mut self, nk: NeighborKey, is_bootstrap_peer: bool) -> () { self.block_stats.insert( nk.clone(), @@ -2848,6 +2848,3 @@ impl PeerNetwork { work_state } } - -#[cfg(test)] -mod test {} diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 7f8dea9329..8b36377a25 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -143,7 +143,7 @@ pub mod unsolicited; pub use crate::net::neighbors::{NeighborComms, PeerNetworkComms}; use crate::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBSyncResult, StackerDBs}; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; #[derive(Debug)] @@ -571,7 +571,7 @@ impl From for Error { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] impl PartialEq for Error { /// (make I/O errors comparable for testing purposes) fn eq(&self, other: &Self) -> bool { @@ -1293,9 +1293,9 @@ pub const MAX_BROADCAST_INBOUND_RECEIVERS: usize = 16; pub const BLOCKS_AVAILABLE_MAX_LEN: u32 = 32; // maximum number of PoX reward cycles we can ask about -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const GETPOXINV_MAX_BITLEN: u64 = 4096; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const GETPOXINV_MAX_BITLEN: u64 = 8; // maximum number of Stacks epoch2.x blocks that can be pushed at once (even if the entire message is undersized). @@ -1455,9 +1455,9 @@ pub const MAX_MICROBLOCKS_UNCONFIRMED: usize = 1024; pub const MAX_HEADERS: usize = 2100; // how long a peer will be denied for if it misbehaves -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const DENY_BAN_DURATION: u64 = 30; // seconds -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const DENY_BAN_DURATION: u64 = 86400; // seconds (1 day) pub const DENY_MIN_BAN_DURATION: u64 = 2; @@ -1719,8 +1719,9 @@ pub trait Requestable: std::fmt::Display { fn make_request_type(&self, peer_host: PeerHost) -> StacksHttpRequest; } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { + #![allow(unused)] use std::collections::HashMap; use std::io::{Cursor, ErrorKind, Read, Write}; use std::net::*; @@ -3919,6 +3920,10 @@ pub mod test { self.network.peerdb.conn() } + pub fn peerdb_mut(&mut self) -> &mut PeerDB { + &mut self.network.peerdb + } + pub fn get_burnchain_view(&mut self) -> Result { let sortdb = self.sortdb.take().unwrap(); let view_res = { diff --git a/stackslib/src/net/poll.rs b/stackslib/src/net/poll.rs index bdda12e6d4..ed24bc1168 100644 --- a/stackslib/src/net/poll.rs +++ b/stackslib/src/net/poll.rs @@ -481,7 +481,7 @@ impl NetworkState { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test { use std::collections::HashSet; diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 2b735668ac..c06e495514 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -515,7 +515,7 @@ impl StackerDBs { Self::instantiate(path, readwrite) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn connect_memory() -> StackerDBs { Self::instantiate(":memory:", true).unwrap() } diff --git a/stackslib/src/net/stackerdb/tests/mod.rs b/stackslib/src/net/stackerdb/tests/mod.rs index 0838342100..17c73daa04 100644 --- a/stackslib/src/net/stackerdb/tests/mod.rs +++ b/stackslib/src/net/stackerdb/tests/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] pub mod config; pub mod db; diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index f45e3acb93..b16b10291f 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -51,7 +51,6 @@ const NUM_NEIGHBORS: usize = 8; /// Some testable configurations for stacker DB configs impl StackerDBConfig { - #[cfg(test)] pub fn template() -> StackerDBConfig { StackerDBConfig { chunk_size: CHUNK_SIZE, diff --git a/stackslib/src/net/tests/download/mod.rs b/stackslib/src/net/tests/download/mod.rs index 430b92e414..5b191a1161 100644 --- a/stackslib/src/net/tests/download/mod.rs +++ b/stackslib/src/net/tests/download/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] pub mod epoch2x; pub mod nakamoto; diff --git a/stackslib/src/net/tests/inv/mod.rs b/stackslib/src/net/tests/inv/mod.rs index 04e8e0fd4f..04386e2097 100644 --- a/stackslib/src/net/tests/inv/mod.rs +++ b/stackslib/src/net/tests/inv/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] pub mod epoch2x; pub mod nakamoto; diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 7a44a56788..602f943cb3 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::cell::RefCell; use std::{thread, time}; diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index d8ee197f42..47cec3b0b4 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] pub mod download; pub mod httpcore; diff --git a/stackslib/src/net/tests/relay/mod.rs b/stackslib/src/net/tests/relay/mod.rs index c408e9ee60..d75bae21e8 100644 --- a/stackslib/src/net/tests/relay/mod.rs +++ b/stackslib/src/net/tests/relay/mod.rs @@ -12,6 +12,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] pub mod epoch2x; pub mod nakamoto; From f20061ff0e592db91da899a81bfa8fc2b46b6214 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:30:14 -0400 Subject: [PATCH 1104/1400] chore: add dev-dependencies that will allow test modules to compile for stackslib when it's a dependency (e.g. so stackslib test code can be used in stacks-node tests) --- stackslib/Cargo.toml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index b7967fe249..909e237502 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -59,6 +59,10 @@ siphasher = "0.3.7" wsts = { workspace = true } hashbrown = { workspace = true } rusqlite = { workspace = true } +rstest = { version = "0.17.0", optional = true } +rstest_reuse = { version = "0.5.0", optional = true } +stdext = { version = "0.3.1", optional = true } +stx-genesis = { path = "../stx-genesis", optional = true } [target.'cfg(not(any(target_os = "macos",target_os="windows", target_arch = "arm" )))'.dependencies] tikv-jemallocator = {workspace = true} @@ -95,7 +99,7 @@ features = ["std"] assert-json-diff = "1.0.0" criterion = "0.3.5" stdext = "0.3.1" -stx-genesis = { path = "../stx-genesis"} +stx-genesis = { path = "../stx-genesis" } clarity = { features = ["default", "testing"], path = "../clarity" } stacks-common = { features = ["default", "testing"], path = "../stacks-common" } rstest = "0.17.0" @@ -109,7 +113,7 @@ disable-costs = [] developer-mode = ["clarity/developer-mode"] monitoring_prom = ["prometheus"] slog_json = ["slog-json", "stacks-common/slog_json", "clarity/slog_json", "pox-locking/slog_json"] -testing = [] +testing = ["stdext", "rstest", "rstest_reuse", "stx-genesis"] [target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } From 2ee25e43a6f79473ab1750f105d59b4e275fa27a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:30:45 -0400 Subject: [PATCH 1105/1400] build: use `feature = "testing"` to build stackslib test code, and suppress `unused` warnings in tests --- stackslib/src/burnchains/bitcoin/indexer.rs | 10 ++-- stackslib/src/burnchains/bitcoin/spv.rs | 10 ++-- stackslib/src/burnchains/burnchain.rs | 2 +- stackslib/src/burnchains/mod.rs | 8 +-- stackslib/src/burnchains/tests/db.rs | 2 +- stackslib/src/burnchains/tests/mod.rs | 1 + stackslib/src/chainstate/burn/db/sortdb.rs | 4 +- stackslib/src/chainstate/burn/distribution.rs | 2 +- .../burn/operations/leader_block_commit.rs | 8 +-- .../burn/operations/leader_key_register.rs | 8 +-- .../src/chainstate/burn/operations/mod.rs | 6 +-- .../chainstate/burn/operations/stack_stx.rs | 4 +- .../burn/operations/transfer_stx.rs | 2 +- stackslib/src/chainstate/coordinator/mod.rs | 8 +-- stackslib/src/chainstate/coordinator/tests.rs | 1 + .../chainstate/nakamoto/coordinator/mod.rs | 4 +- .../chainstate/nakamoto/coordinator/tests.rs | 1 + stackslib/src/chainstate/nakamoto/mod.rs | 6 +-- stackslib/src/chainstate/nakamoto/tenure.rs | 2 +- .../src/chainstate/nakamoto/tests/mod.rs | 1 + .../chainstate/stacks/boot/contract_tests.rs | 4 +- stackslib/src/chainstate/stacks/boot/mod.rs | 17 +++--- .../src/chainstate/stacks/boot/pox_2_tests.rs | 1 + .../src/chainstate/stacks/boot/pox_3_tests.rs | 1 + .../src/chainstate/stacks/boot/pox_4_tests.rs | 1 + .../chainstate/stacks/boot/signers_tests.rs | 1 + stackslib/src/chainstate/stacks/db/blocks.rs | 29 +++++----- stackslib/src/chainstate/stacks/db/mod.rs | 5 +- .../src/chainstate/stacks/db/transactions.rs | 1 + stackslib/src/chainstate/stacks/index/file.rs | 6 +-- stackslib/src/chainstate/stacks/index/marf.rs | 4 +- .../src/chainstate/stacks/index/storage.rs | 24 ++++----- .../src/chainstate/stacks/index/trie_sql.rs | 2 +- stackslib/src/chainstate/stacks/miner.rs | 10 ++-- stackslib/src/chainstate/stacks/mod.rs | 5 +- stackslib/src/chainstate/stacks/tests/mod.rs | 1 + stackslib/src/clarity_vm/clarity.rs | 12 ++--- stackslib/src/clarity_vm/database/marf.rs | 4 +- stackslib/src/clarity_vm/mod.rs | 2 +- stackslib/src/clarity_vm/tests/mod.rs | 1 + stackslib/src/core/mempool.rs | 20 +++---- stackslib/src/core/mod.rs | 54 +++++++++---------- stackslib/src/cost_estimates/mod.rs | 2 +- stackslib/src/cost_estimates/tests/common.rs | 1 - stackslib/src/cost_estimates/tests/mod.rs | 1 + stackslib/src/lib.rs | 4 +- stackslib/src/util_lib/bloom.rs | 2 +- stackslib/src/util_lib/boot.rs | 2 +- stackslib/src/util_lib/mod.rs | 2 +- 49 files changed, 162 insertions(+), 147 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 40cabd86d3..7c9083985b 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -167,7 +167,7 @@ impl BitcoinIndexerConfig { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_default(spv_headers_path: String) -> BitcoinIndexerConfig { BitcoinIndexerConfig { peer_host: "127.0.0.1".to_string(), @@ -203,7 +203,7 @@ impl BitcoinIndexerRuntime { } impl BitcoinIndexer { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new( config: BitcoinIndexerConfig, runtime: BitcoinIndexerRuntime, @@ -216,7 +216,7 @@ impl BitcoinIndexer { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new_unit_test(working_dir: &str) -> BitcoinIndexer { let mut working_dir_path = PathBuf::from(working_dir); if fs::metadata(&working_dir_path).is_err() { @@ -861,7 +861,7 @@ impl BitcoinIndexer { Ok(new_tip) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn raw_store_header(&mut self, header: BurnchainBlockHeader) -> Result<(), btc_error> { let mut spv_client = SpvClient::new( &self.config.spv_headers_path, @@ -887,7 +887,7 @@ impl BitcoinIndexer { Ok(()) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn mock_bitcoin_header( parent_block_hash: &BurnchainHeaderHash, timestamp: u32, diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index 82cbb7b7f6..b2b886bdc4 100644 --- a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -182,7 +182,7 @@ impl SpvClient { Ok(client) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new_without_migration( headers_path: &str, start_block: u64, @@ -211,7 +211,7 @@ impl SpvClient { Ok(client) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn disable_check_txcount(&mut self) { self.check_txcount = false; } @@ -220,7 +220,7 @@ impl SpvClient { &self.headers_db } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn conn_mut(&mut self) -> &mut DBConn { &mut self.headers_db } @@ -277,7 +277,7 @@ impl SpvClient { .and_then(|_| Ok(())) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_db_migrate(conn: &mut DBConn) -> Result<(), btc_error> { SpvClient::db_migrate(conn) } @@ -925,7 +925,7 @@ impl SpvClient { Ok(()) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_write_block_headers( &mut self, height: u64, diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index a5ecaa0458..60f663c0de 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -623,7 +623,7 @@ impl Burnchain { ret } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn default_unittest( first_block_height: u64, first_block_hash: &BurnchainHeaderHash, diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 0bc68897cb..2720d48e8c 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -57,7 +57,7 @@ pub mod burnchain; pub mod db; pub mod indexer; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; pub struct Txid(pub [u8; 32]); @@ -351,7 +351,7 @@ impl PoxConstants { _shadow: PhantomData, } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_default() -> PoxConstants { // 20 reward slots; 10 prepare-phase slots PoxConstants::new( @@ -369,7 +369,7 @@ impl PoxConstants { ) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] /// Create a PoX constants used in tests with 5-block cycles, /// 3-block prepare phases, a threshold of 3, rejection fraction of 25%, /// a participation threshold of 5% and no sunset or transition to pox-2 or beyond. @@ -821,7 +821,7 @@ impl From for Error { } impl BurnchainView { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn make_test_data(&mut self) { let oldest_height = if self.burn_stable_block_height < MAX_NEIGHBOR_BLOCK_DELAY { 0 diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index f14243d049..8b69449d74 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -49,7 +49,7 @@ impl BurnchainDB { /// Get back all of the parsed burnchain operations for a given block. /// Used in testing to replay burnchain data. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_burnchain_block_ops( &self, block_hash: &BurnchainHeaderHash, diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 31e29c0b26..f1bc0613af 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] pub mod affirmation; pub mod burnchain; diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 942e6774bd..3964732329 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -5482,7 +5482,7 @@ impl<'a> SortitionHandleTx<'a> { sn.canonical_stacks_tip_height, )?; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] { let (block_consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self).unwrap(); @@ -6566,7 +6566,7 @@ impl ChainstateDB for SortitionDB { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests { use std::sync::mpsc::sync_channel; use std::thread; diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index ed01ae014b..d91f158c27 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -424,7 +424,7 @@ impl BurnSamplePoint { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod tests { use std::marker::PhantomData; diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index cea03d4435..a1e5ee500a 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -90,7 +90,7 @@ pub static OUTPUTS_PER_COMMIT: usize = 2; pub static BURN_BLOCK_MINED_AT_MODULUS: u64 = 5; impl LeaderBlockCommitOp { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn initial( block_header_hash: &BlockHeaderHash, block_height: u64, @@ -131,10 +131,10 @@ impl LeaderBlockCommitOp { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new( block_header_hash: &BlockHeaderHash, - block_height: u64, + _block_height: u64, new_seed: &VRFSeed, parent: &LeaderBlockCommitOp, key_block_ptr: u32, @@ -170,7 +170,7 @@ impl LeaderBlockCommitOp { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn set_burn_height(&mut self, height: u64) { self.block_height = height; let new_burn_parent_modulus = if height > 0 { diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 44402adc0c..87b13d8f50 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -44,7 +44,7 @@ pub struct ParsedData { } impl LeaderKeyRegisterOp { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new(public_key: &VRFPublicKey) -> LeaderKeyRegisterOp { LeaderKeyRegisterOp { public_key: public_key.clone(), @@ -59,10 +59,10 @@ impl LeaderKeyRegisterOp { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new_from_secrets( - num_sigs: u16, - hash_mode: &AddressHashMode, + _num_sigs: u16, + _hash_mode: &AddressHashMode, prover_key: &VRFPrivateKey, ) -> Option { let prover_pubk = VRFPublicKey::from_private(prover_key); diff --git a/stackslib/src/chainstate/burn/operations/mod.rs b/stackslib/src/chainstate/burn/operations/mod.rs index 0843e03b1e..fd0d63ef59 100644 --- a/stackslib/src/chainstate/burn/operations/mod.rs +++ b/stackslib/src/chainstate/burn/operations/mod.rs @@ -47,7 +47,7 @@ pub mod stack_stx; pub mod transfer_stx; pub mod vote_for_aggregate_key; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test; /// This module contains all burn-chain operations @@ -439,7 +439,7 @@ impl BlockstackOperationType { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn set_block_height(&mut self, height: u64) { match self { BlockstackOperationType::LeaderKeyRegister(ref mut data) => data.block_height = height, @@ -456,7 +456,7 @@ impl BlockstackOperationType { }; } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn set_burn_header_hash(&mut self, hash: BurnchainHeaderHash) { match self { BlockstackOperationType::LeaderKeyRegister(ref mut data) => { diff --git a/stackslib/src/chainstate/burn/operations/stack_stx.rs b/stackslib/src/chainstate/burn/operations/stack_stx.rs index c4c54b9737..52e4d6bf3b 100644 --- a/stackslib/src/chainstate/burn/operations/stack_stx.rs +++ b/stackslib/src/chainstate/burn/operations/stack_stx.rs @@ -57,7 +57,7 @@ struct ParsedData { pub static OUTPUTS_PER_COMMIT: usize = 2; impl PreStxOp { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new(sender: &StacksAddress) -> PreStxOp { PreStxOp { output: sender.clone(), @@ -155,7 +155,7 @@ impl PreStxOp { } impl StackStxOp { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new( sender: &StacksAddress, reward_addr: &PoxAddress, diff --git a/stackslib/src/chainstate/burn/operations/transfer_stx.rs b/stackslib/src/chainstate/burn/operations/transfer_stx.rs index 9d1d562d9c..a70075ff7c 100644 --- a/stackslib/src/chainstate/burn/operations/transfer_stx.rs +++ b/stackslib/src/chainstate/burn/operations/transfer_stx.rs @@ -46,7 +46,7 @@ struct ParsedData { } impl TransferStxOp { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new( sender: &StacksAddress, recipient: &StacksAddress, diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 2849b74904..666fd97657 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -82,7 +82,7 @@ use crate::net::atlas::{AtlasConfig, AtlasDB, AttachmentInstance}; use crate::util_lib::db::{DBConn, DBTx, Error as DBError}; pub mod comm; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; /// The 3 different states for the current @@ -110,7 +110,7 @@ impl NewBurnchainBlockStatus { /// Test helper to convert this status into the optional hash of the missing PoX anchor block. /// Because there are unit tests that expect a Some(..) result if PoX cannot proceed, the /// missing Nakamoto anchor block case is converted into a placeholder Some(..) value - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn into_missing_block_hash(self) -> Option { match self { Self::Ready => None, @@ -624,7 +624,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader ChainsCoordinator<'a, T, (), U, (), (), B> { /// Create a coordinator for testing, with some parameters defaulted to None - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_new( burnchain: &Burnchain, chain_id: u32, @@ -644,7 +644,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader } /// Create a coordinator for testing allowing for all configurable params - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_new_full( burnchain: &Burnchain, chain_id: u32, diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 50127af176..73b4349c2b 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::cmp; use std::collections::{BTreeMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index d4ef14ba9a..aa8ac21891 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -58,7 +58,7 @@ use crate::monitoring::increment_stx_blocks_processed_counter; use crate::net::Error as NetError; use crate::util_lib::db::Error as DBError; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; macro_rules! err_or_debug { @@ -506,7 +506,7 @@ pub fn load_nakamoto_reward_set( Err(e) => return Some(Err(e)), Ok(None) => { // no header for this snapshot (possibly invalid) - info!("Failed to find block by consensus hash"; "consensus_hash" => %sn.consensus_hash); + debug!("Failed to find Stacks block by consensus hash"; "consensus_hash" => %sn.consensus_hash); return None } } diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index ddeea51573..5372853a8d 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::collections::{HashMap, HashSet}; use std::sync::Mutex; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index cb515a860c..d25c106d1c 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -122,7 +122,7 @@ pub mod signer_set; pub mod staging_blocks; pub mod tenure; pub mod test_signers; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; pub use self::staging_blocks::{ @@ -270,7 +270,7 @@ lazy_static! { ]; } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod test_stall { pub static TEST_PROCESS_BLOCK_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); @@ -1757,7 +1757,7 @@ impl NakamotoChainState { canonical_sortition_tip: &SortitionId, dispatcher_opt: Option<&'a T>, ) -> Result, ChainstateError> { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] test_stall::stall_block_processing(); let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 81380cc93d..4f09fd1f57 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -484,7 +484,7 @@ impl NakamotoChainState { /// Drop a nakamoto tenure. /// Used for testing - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub(crate) fn delete_nakamoto_tenure( tx: &Connection, ch: &ConsensusHash, diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 722cfa541a..fa02a34a09 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::borrow::BorrowMut; use std::collections::HashMap; diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 04b74ba2e9..650617ab49 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -361,7 +361,7 @@ impl BurnStateDB for TestSimBurnStateDB { panic!("Not implemented in TestSim"); } - fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { + fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { panic!("Not implemented in TestSim"); } @@ -525,7 +525,7 @@ impl BurnStateDB for TestSimBurnStateDB { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] impl HeadersDB for TestSimHeadersDB { fn get_burn_header_hash_for_block( &self, diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 88ecc8887e..77a24b938f 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -626,7 +626,7 @@ impl StacksChainState { /// Determine the minimum amount of STX per reward address required to stack in the _next_ /// reward cycle - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_stacking_minimum( &mut self, sortdb: &SortitionDB, @@ -688,7 +688,7 @@ impl StacksChainState { } /// Determine how many uSTX are stacked in a given reward cycle - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_get_total_ustx_stacked( &mut self, sortdb: &SortitionDB, @@ -1379,19 +1379,20 @@ impl StacksChainState { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod contract_tests; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod pox_2_tests; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod pox_3_tests; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod pox_4_tests; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod signers_tests; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { + #![allow(unused)] use std::collections::{HashMap, HashSet}; use std::fs; diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 7ae25d00f6..2ac7d0e6f5 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 3134b4773a..b34b7eb6c7 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 0968cc4de3..affb4bcf7b 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index bf3b5f312c..ba1a97556e 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use clarity::vm::clarity::ClarityConnection; use clarity::vm::contexts::OwnedEnvironment; diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 47cace8c4b..8df8983301 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -415,7 +415,7 @@ impl FromRow for StagingBlock { } impl StagingMicroblock { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn try_into_microblock(self) -> Result { StacksMicroblock::consensus_deserialize(&mut &self.block_data[..]).map_err(|_e| self) } @@ -660,7 +660,7 @@ impl StacksChainState { } /// Store an empty block to the chunk store, named by its hash. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn store_empty_block( blocks_path: &str, consensus_hash: &ConsensusHash, @@ -760,10 +760,10 @@ impl StacksChainState { } /// Get a list of all microblocks' hashes, and their anchored blocks' hashes - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn list_microblocks( blocks_conn: &DBConn, - blocks_dir: &str, + _blocks_dir: &str, ) -> Result)>, Error> { let mut blocks = StacksChainState::list_blocks(blocks_conn)?; let mut ret = vec![]; @@ -1025,7 +1025,7 @@ impl StacksChainState { .map_err(|e| Error::DBError(db_error::from(e))) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn load_staging_block_data( block_conn: &DBConn, blocks_path: &str, @@ -1493,7 +1493,7 @@ impl StacksChainState { /// Get an anchored block's parent block header. /// Doesn't matter if it's staging or not. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn load_parent_block_header( sort_ic: &SortitionDBConn, blocks_path: &str, @@ -2500,7 +2500,7 @@ impl StacksChainState { Ok(()) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn set_block_orphaned<'a>( tx: &mut DBTx<'a>, blocks_path: &str, @@ -2522,7 +2522,7 @@ impl StacksChainState { // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; let find_orphaned_microblocks_args = params![consensus_hash, anchored_block_hash]; - let orphaned_microblock_hashes = query_row_columns::( + let _orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, find_orphaned_microblocks_args, @@ -2801,7 +2801,7 @@ impl StacksChainState { /// Do we have any microblock available to serve in any capacity, given its parent anchored block's /// index block hash? - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn has_microblocks_indexed( &self, parent_index_block_hash: &StacksBlockId, @@ -2867,7 +2867,7 @@ impl StacksChainState { /// Get the sqlite rowid for a staging microblock, given the hash of the microblock. /// Returns None if no such microblock. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn stream_microblock_get_rowid( blocks_conn: &DBConn, parent_index_block_hash: &StacksBlockId, @@ -2883,7 +2883,7 @@ impl StacksChainState { /// Load up the metadata on a microblock stream (but don't get the data itself) /// DO NOT USE IN PRODUCTION -- doesn't work for microblock forks. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn stream_microblock_get_info( blocks_conn: &DBConn, parent_index_block_hash: &StacksBlockId, @@ -3576,7 +3576,7 @@ impl StacksChainState { /// Given a burnchain snapshot, a Stacks block and a microblock stream, preprocess them all. /// This does not work when forking - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn preprocess_stacks_epoch( &mut self, sort_ic: &SortitionDBConn, @@ -6438,7 +6438,7 @@ impl StacksChainState { /// PoX aware (i.e., unit tests, and old stacks-node loops), /// Elsewhere, block processing is invoked by the ChainsCoordinator, /// which handles tracking the chain tip itself - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn process_blocks_at_tip( &mut self, burnchain_db_conn: &DBConn, @@ -6936,8 +6936,9 @@ impl StacksChainState { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { + #![allow(unused)] use std::fs; use clarity::vm::ast::ASTRules; diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index a942ec7fd1..81e954a090 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -543,7 +543,7 @@ impl<'a, 'b> ClarityTx<'a, 'b> { self.block.seal() } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn commit_block(self) -> () { self.block.commit_block(); } @@ -2713,8 +2713,9 @@ impl StacksChainState { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { + #![allow(unused)] use std::{env, fs}; use clarity::vm::test_util::TEST_BURN_STATE_DB; diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 35ba532667..99e92aac32 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -1571,6 +1571,7 @@ impl StacksChainState { #[cfg(test)] pub mod test { + #![allow(unused)] use clarity::vm::clarity::TransactionConnection; use clarity::vm::contracts::Contract; use clarity::vm::representations::{ClarityName, ContractName}; diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 4123b1310a..53df16b761 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -168,7 +168,7 @@ impl TrieFile { } /// Read a trie blob in its entirety from the blobs file - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn read_trie_blob(&mut self, db: &Connection, block_id: u32) -> Result, Error> { let (offset, length) = trie_sql::get_external_trie_offset_length(db, block_id)?; self.seek(SeekFrom::Start(offset))?; @@ -410,7 +410,7 @@ impl TrieFile { } /// Obtain a TrieHash for a node, given the node's block's hash (used only in testing) - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_node_hash_bytes_by_bhh( &mut self, db: &Connection, @@ -424,7 +424,7 @@ impl TrieFile { } /// Get all (root hash, trie hash) pairs for this TrieFile - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn read_all_block_hashes_and_roots( &mut self, db: &Connection, diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index d5dd77c51f..73d387c07b 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1494,12 +1494,12 @@ impl MARF { } /// Access internal storage - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn borrow_storage_backend(&mut self) -> TrieStorageConnection { self.storage.connection() } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn borrow_storage_transaction(&mut self) -> TrieStorageTransaction { self.storage.transaction().unwrap() } diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 6994c7ad05..9397145fcb 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -364,7 +364,7 @@ impl UncommittedState { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn print_to_stderr(&self) { self.trie_ram_ref().print_to_stderr() } @@ -535,7 +535,7 @@ impl TrieRAM { result } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] #[allow(dead_code)] pub fn stats(&mut self) -> (u64, u64) { let r = self.read_count; @@ -545,7 +545,7 @@ impl TrieRAM { (r, w) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] #[allow(dead_code)] pub fn node_stats(&mut self) -> (u64, u64, u64) { let nr = self.read_node_count; @@ -559,7 +559,7 @@ impl TrieRAM { (nr, br, nw) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] #[allow(dead_code)] pub fn leaf_stats(&mut self) -> (u64, u64) { let lr = self.read_leaf_count; @@ -677,7 +677,7 @@ impl TrieRAM { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_inner_seal( &mut self, storage_tx: &mut TrieStorageTransaction, @@ -1113,14 +1113,14 @@ impl TrieRAM { Ok(self.data.len() as u32) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn print_to_stderr(&self) { for dat in self.data.iter() { eprintln!("{}: {:?}", &dat.1, &dat.0); } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn data(&self) -> &Vec<(TrieNodeType, TrieHash)> { &self.data } @@ -2035,7 +2035,7 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { } /// Read the Trie root node's hash from the block table. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn read_block_root_hash(&mut self, bhh: &T) -> Result { let root_hash_ptr = TriePtr::new( TrieNodeID::Node256 as u8, @@ -2051,7 +2051,7 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn inner_read_persisted_root_to_blocks(&mut self) -> Result, Error> { let ret = match self.blobs.as_mut() { Some(blobs) => { @@ -2065,7 +2065,7 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { } /// Generate a mapping between Trie root hashes and the blocks that contain them - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn read_root_to_block_table(&mut self) -> Result, Error> { let mut ret = self.inner_read_persisted_root_to_blocks()?; let uncommitted_writes = match self.data.uncommitted_writes.take() { @@ -2738,12 +2738,12 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { self.bench.reset(); } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn transient_data(&self) -> &TrieStorageTransientData { &self.data } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn transient_data_mut(&mut self) -> &mut TrieStorageTransientData { &mut self.data } diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index c9d3b40dce..1d54cce0d0 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -422,7 +422,7 @@ pub fn open_trie_blob_readonly<'a>(conn: &'a Connection, block_id: u32) -> Resul Ok(blob) } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub fn read_all_block_hashes_and_roots( conn: &Connection, ) -> Result, Error> { diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 0195385d3b..cb4709d123 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1603,7 +1603,7 @@ impl StacksBlockBuilder { /// Append a transaction if doing so won't exceed the epoch data size. /// Does not check for errors - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn force_mine_tx( &mut self, clarity_tx: &mut ClarityTx, @@ -1626,7 +1626,7 @@ impl StacksBlockBuilder { if !self.anchored_done { // save match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { - Ok((fee, receipt)) => { + Ok((fee, _receipt)) => { self.total_anchored_fees += fee; } Err(e) => { @@ -1637,7 +1637,7 @@ impl StacksBlockBuilder { self.txs.push(tx.clone()); } else { match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { - Ok((fee, receipt)) => { + Ok((fee, _receipt)) => { self.total_streamed_fees += fee; } Err(e) => { @@ -2003,7 +2003,7 @@ impl StacksBlockBuilder { } /// Unconditionally build an anchored block from a list of transactions. /// Used in test cases - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn make_anchored_block_from_txs( builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, @@ -2022,7 +2022,7 @@ impl StacksBlockBuilder { /// Unconditionally build an anchored block from a list of transactions. /// Used in test cases - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn make_anchored_block_and_microblock_from_txs( mut builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 35c82f9b94..ed9cf98e84 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -69,7 +69,7 @@ pub mod index; pub mod miner; pub mod transaction; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; pub use stacks_common::address::{ @@ -1131,8 +1131,9 @@ pub const MAX_EPOCH_SIZE: u32 = 2 * 1024 * 1024; // $MAX_EPOCH_SIZE bytes (so the average microblock size needs to be 4kb if there are 256 of them) pub const MAX_MICROBLOCK_SIZE: u32 = 65536; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { + #![allow(unused)] use clarity::util::get_epoch_time_secs; use clarity::vm::representations::{ClarityName, ContractName}; use clarity::vm::ClarityVersion; diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index cda74cb46d..87601268c0 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] use std::cell::RefCell; use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index c89679f414..eb38daf68e 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -176,7 +176,7 @@ macro_rules! using { } impl<'a, 'b> ClarityBlockConnection<'a, 'b> { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn new_test_conn( datastore: WritableMarfStore<'a>, header_db: &'b dyn HeadersDB, @@ -731,7 +731,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { /// Commits all changes in the current block by /// (1) committing the current MARF tip to storage, /// (2) committing side-storage. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn commit_block(self) -> LimitedCostTracker { debug!("Commit Clarity datastore"); self.datastore.test_commit(); @@ -1591,7 +1591,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { self.datastore } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn set_epoch(&mut self, epoch_id: StacksEpochId) { self.epoch = epoch_id; } @@ -1856,7 +1856,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { } /// Evaluate a raw Clarity snippit - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn clarity_eval_raw(&mut self, code: &str) -> Result { let (result, _, _, _) = self.with_abort_callback( |vm_env| vm_env.eval_raw(code).map_err(Error::from), @@ -1865,7 +1865,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { Ok(result) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn eval_read_only( &mut self, contract: &QualifiedContractIdentifier, @@ -1879,7 +1879,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod tests { use std::fs; diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index fed0e70e95..eaec528c17 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -257,7 +257,7 @@ impl MarfedKV { &mut self.marf } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn sql_conn(&self) -> &Connection { self.marf.sqlite_conn() } @@ -526,7 +526,7 @@ impl<'a> WritableMarfStore<'a> { Ok(()) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn test_commit(self) { let bhh = self.chain_tip.clone(); self.commit_to(&bhh).unwrap(); diff --git a/stackslib/src/clarity_vm/mod.rs b/stackslib/src/clarity_vm/mod.rs index a3e6d23b8c..4e1688da11 100644 --- a/stackslib/src/clarity_vm/mod.rs +++ b/stackslib/src/clarity_vm/mod.rs @@ -6,5 +6,5 @@ pub mod special; /// Stacks blockchain specific Clarity database implementations and wrappers pub mod database; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod tests; diff --git a/stackslib/src/clarity_vm/tests/mod.rs b/stackslib/src/clarity_vm/tests/mod.rs index 5855d61f31..1cc597b3d1 100644 --- a/stackslib/src/clarity_vm/tests/mod.rs +++ b/stackslib/src/clarity_vm/tests/mod.rs @@ -13,6 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![allow(unused)] pub mod analysis_costs; pub mod ast; diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index fe75d62bd2..73d1fc1c94 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1025,7 +1025,7 @@ impl NonceCache { where C: ClarityConnection, { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] assert!(self.cache.len() <= self.max_cache_size); // Check in-memory cache @@ -1111,7 +1111,7 @@ fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result, d query_row(conn, sql, params![addr_str]) } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub fn db_get_all_nonces(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM nonces"; let mut stmt = conn.prepare(&sql).map_err(|e| db_error::SqliteError(e))?; @@ -1162,7 +1162,7 @@ impl CandidateCache { self.next.push_back(tx); } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] assert!(self.cache.len() + self.next.len() <= self.max_cache_size); } @@ -1177,7 +1177,7 @@ impl CandidateCache { self.next.append(&mut self.cache); self.cache = std::mem::take(&mut self.next); - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] { assert!(self.cache.len() <= self.max_cache_size + 1); assert!(self.next.len() <= self.max_cache_size + 1); @@ -1365,7 +1365,7 @@ impl MemPoolDB { .map(String::from) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn open_test( mainnet: bool, chain_id: u32, @@ -1934,7 +1934,7 @@ impl MemPoolDB { } /// Get all transactions across all tips - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_all_txs(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM mempool"; let rows = query_rows::(conn, &sql, NO_PARAMS)?; @@ -1942,7 +1942,7 @@ impl MemPoolDB { } /// Get all transactions at a specific block - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_num_tx_at_block( conn: &DBConn, consensus_hash: &ConsensusHash, @@ -1955,7 +1955,7 @@ impl MemPoolDB { } /// Get a number of transactions after a given timestamp on a given chain tip. - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn get_txs_after( conn: &DBConn, consensus_hash: &ConsensusHash, @@ -2283,7 +2283,7 @@ impl MemPoolDB { Ok(()) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn clear_before_coinbase_height( &mut self, min_coinbase_height: u64, @@ -2666,7 +2666,7 @@ impl MemPoolDB { Ok(()) } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] pub fn dump_txs(&self) { let sql = "SELECT * FROM mempool"; let txs: Vec = query_rows(&self.db, sql, NO_PARAMS).unwrap(); diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index ade8a82589..9a3d67e752 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -30,7 +30,7 @@ use crate::burnchains::{Burnchain, Error as burnchain_error}; use crate::chainstate::burn::ConsensusHash; pub mod mempool; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; use std::cmp::Ordering; @@ -68,9 +68,9 @@ pub const GENESIS_EPOCH: StacksEpochId = StacksEpochId::Epoch20; /// The number of blocks which will share the block bonus /// from burn blocks that occurred without a sortition. /// (See: https://forum.stacks.org/t/pox-consensus-and-stx-future-supply) -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub const INITIAL_MINING_BONUS_WINDOW: u16 = 10; -#[cfg(not(test))] +#[cfg(not(any(test, feature = "testing")))] pub const INITIAL_MINING_BONUS_WINDOW: u16 = 10_000; pub const STACKS_2_0_LAST_BLOCK_TO_PROCESS: u64 = 700_000; @@ -557,29 +557,29 @@ fn test_ord_for_stacks_epoch_id() { ); } pub trait StacksEpochExtension { - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test(stacks_epoch_id: StacksEpochId, epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_05(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_05_only(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_pre_2_05(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_1(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_2(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_3(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_4(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_5(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_3_0(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> Vec; - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec; fn all( epoch_2_0_block_height: u64, @@ -615,7 +615,7 @@ impl StacksEpochExtension for StacksEpoch { } } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_pre_2_05(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -640,7 +640,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_05(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -678,7 +678,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_05_only(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -716,7 +716,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_1(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -767,7 +767,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_2(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -831,7 +831,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_3(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_2_3 first_burn_height = {}", @@ -908,7 +908,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_4(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_2_4 first_burn_height = {}", @@ -998,7 +998,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_5(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_2_5 first_burn_height = {}", @@ -1101,7 +1101,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_3_0(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_3_0 first_burn_height = {}", @@ -1217,7 +1217,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -1268,7 +1268,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -1342,7 +1342,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(test)] + #[cfg(any(test, feature = "testing"))] fn unit_test(stacks_epoch_id: StacksEpochId, first_burnchain_height: u64) -> Vec { match stacks_epoch_id { StacksEpochId::Epoch10 | StacksEpochId::Epoch20 => { diff --git a/stackslib/src/cost_estimates/mod.rs b/stackslib/src/cost_estimates/mod.rs index fc4aa5b1b2..0992aa180a 100644 --- a/stackslib/src/cost_estimates/mod.rs +++ b/stackslib/src/cost_estimates/mod.rs @@ -20,7 +20,7 @@ pub mod fee_scalar; pub mod metrics; pub mod pessimistic; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod tests; use self::metrics::CostMetric; diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index fe6527ff53..9ecfee2774 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -15,7 +15,6 @@ use crate::chainstate::stacks::{ use crate::core::StacksEpochId; /// Make a block receipt from `tx_receipts` with some dummy values filled for test. -#[cfg(test)] pub fn make_block_receipt(tx_receipts: Vec) -> StacksEpochReceipt { StacksEpochReceipt { header: StacksHeaderInfo { diff --git a/stackslib/src/cost_estimates/tests/mod.rs b/stackslib/src/cost_estimates/tests/mod.rs index 792ecb778e..e9292447bf 100644 --- a/stackslib/src/cost_estimates/tests/mod.rs +++ b/stackslib/src/cost_estimates/tests/mod.rs @@ -1,3 +1,4 @@ +#![allow(unused)] use crate::cost_estimates::FeeRateEstimate; pub mod common; diff --git a/stackslib/src/lib.rs b/stackslib/src/lib.rs index 31f97628a6..8a6919412a 100644 --- a/stackslib/src/lib.rs +++ b/stackslib/src/lib.rs @@ -27,11 +27,11 @@ extern crate slog; #[macro_use] extern crate serde_derive; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] #[macro_use] extern crate rstest; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] #[macro_use] extern crate rstest_reuse; diff --git a/stackslib/src/util_lib/bloom.rs b/stackslib/src/util_lib/bloom.rs index d1632f0b14..d37802150f 100644 --- a/stackslib/src/util_lib/bloom.rs +++ b/stackslib/src/util_lib/bloom.rs @@ -592,7 +592,7 @@ impl BloomHash for BloomNodeHasher { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { use std::fs; diff --git a/stackslib/src/util_lib/boot.rs b/stackslib/src/util_lib/boot.rs index 95cfca9c41..2585fe1b75 100644 --- a/stackslib/src/util_lib/boot.rs +++ b/stackslib/src/util_lib/boot.rs @@ -43,7 +43,7 @@ pub fn boot_code_acc(boot_code_address: StacksAddress, boot_code_nonce: u64) -> } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub fn boot_code_test_addr() -> StacksAddress { boot_code_addr(false) } diff --git a/stackslib/src/util_lib/mod.rs b/stackslib/src/util_lib/mod.rs index 83a7ab2a25..44a2772c00 100644 --- a/stackslib/src/util_lib/mod.rs +++ b/stackslib/src/util_lib/mod.rs @@ -5,7 +5,7 @@ pub mod boot; pub mod signed_structured_data; pub mod strings; -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub mod test { use std::sync::mpsc::sync_channel; use std::{panic, process, thread}; From 6ed83d00f568ece3e21c98d18e2b5ddb78675578 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:31:17 -0400 Subject: [PATCH 1106/1400] build: plumb through features --- stx-genesis/Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stx-genesis/Cargo.toml b/stx-genesis/Cargo.toml index 39e97465ce..6914ca14a5 100644 --- a/stx-genesis/Cargo.toml +++ b/stx-genesis/Cargo.toml @@ -15,3 +15,6 @@ path = "src/lib.rs" [build-dependencies] libflate = "1.0.3" sha2 = { version = "0.10" } + +[features] +testing = [] From b721cf6bfea2b34eea2252b822e8f375ce1ff1d0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:31:37 -0400 Subject: [PATCH 1107/1400] build: buidl stackslib and deps with "testing" feature so we can use stackslib test code in integration tests --- testnet/stacks-node/Cargo.toml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 19165db0a8..fb05aa0355 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -43,15 +43,20 @@ warp = "0.3.5" tokio = "1.15" reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls", "rustls-tls"] } clarity = { path = "../../clarity", features = ["default", "testing"]} +rstest = "0.17.0" +rstest_reuse = "0.5.0" stacks-common = { path = "../../stacks-common", features = ["default", "testing"] } stacks = { package = "stackslib", path = "../../stackslib", features = ["default", "testing"] } stacks-signer = { path = "../../stacks-signer", features = ["testing"] } +stx-genesis = { path = "../../stx-genesis", features = ["testing"] } +stdext = "0.3.1" tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = {workspace = true} mutants = "0.0.3" tiny_http = "0.12.0" http-types = "2.12" +rlimit = "0.10.2" [[bin]] name = "stacks-node" From 6324422580a7d427f9bfdb2ee4353c24b30a69fe Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:32:00 -0400 Subject: [PATCH 1108/1400] chore: expose walk_seed_probability and log_neighbors_freq connection opts --- testnet/stacks-node/src/config.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f1c3775056..81159486b4 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -558,6 +558,8 @@ lazy_static! { max_http_clients: 1000, // maximum number of HTTP connections max_neighbors_of_neighbor: 10, // maximum number of neighbors we'll handshake with when doing a neighbor walk (I/O for this can be expensive, so keep small-ish) walk_interval: 60, // how often, in seconds, we do a neighbor walk + walk_seed_probability: 0.1, // 10% of the time when not in IBD, walk to a non-seed node even if we aren't connected to a seed node + log_neighbors_freq: 60_000, // every minute, log all peer connections inv_sync_interval: 45, // how often, in seconds, we refresh block inventories inv_reward_cycles: 3, // how many reward cycles to look back on, for mainnet download_interval: 10, // how often, in seconds, we do a block download scan (should be less than inv_sync_interval) @@ -2424,6 +2426,8 @@ pub struct ConnectionOptionsFile { pub soft_max_clients_per_host: Option, pub max_sockets: Option, pub walk_interval: Option, + pub walk_seed_probability: Option, + pub log_neighbors_freq: Option, pub dns_timeout: Option, pub max_inflight_blocks: Option, pub max_inflight_attachments: Option, @@ -2528,6 +2532,12 @@ impl ConnectionOptionsFile { walk_interval: self .walk_interval .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_interval.clone()), + walk_seed_probability: self + .walk_seed_probability + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_seed_probability), + log_neighbors_freq: self + .log_neighbors_freq + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.log_neighbors_freq), dns_timeout: self .dns_timeout .map(|dns_timeout| dns_timeout as u128) From f7c2c2a0be80c4be5c1a4ca0a22595310db236b3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:32:29 -0400 Subject: [PATCH 1109/1400] chore: move topology neighbor convergence tests to integration test CI --- testnet/stacks-node/src/tests/mod.rs | 1 + .../stacks-node/src/tests/p2p/convergence.rs | 1136 +++++++++++++++++ testnet/stacks-node/src/tests/p2p/mod.rs | 18 + 3 files changed, 1155 insertions(+) create mode 100644 testnet/stacks-node/src/tests/p2p/convergence.rs create mode 100644 testnet/stacks-node/src/tests/p2p/mod.rs diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index a7892b9a2d..0ac8e151a9 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -62,6 +62,7 @@ mod integrations; mod mempool; pub mod nakamoto_integrations; pub mod neon_integrations; +pub mod p2p; mod signer; mod stackerdb; diff --git a/testnet/stacks-node/src/tests/p2p/convergence.rs b/testnet/stacks-node/src/tests/p2p/convergence.rs new file mode 100644 index 0000000000..8c273e43ce --- /dev/null +++ b/testnet/stacks-node/src/tests/p2p/convergence.rs @@ -0,0 +1,1136 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +/// You are going to need `ulimit -n` to be 4096 for these tests. +/// In Linux, the default is 1024. +use std::collections::{HashMap, HashSet}; + +use clarity::vm::types::{QualifiedContractIdentifier, StandardPrincipalData}; +use rand::prelude::*; +use rand::thread_rng; +use rlimit; +use stacks::core::PEER_VERSION_TESTNET; +use stacks::net::db::*; +use stacks::net::test::*; +use stacks::net::*; +use stacks::util_lib::test::*; + +fn setup_rlimit_nofiles() { + info!("Attempt to set nofile rlimit to 4096 (required for these tests to run)"); + assert!(rlimit::Resource::NOFILE.get().is_ok()); + let (slimit, hlimit) = rlimit::getrlimit(rlimit::Resource::NOFILE).unwrap(); + rlimit::setrlimit(rlimit::Resource::NOFILE, 4096.max(slimit), hlimit).unwrap(); + info!("Successfully set nofile rlimit to 4096"); +} + +fn stacker_db_id(i: usize) -> QualifiedContractIdentifier { + QualifiedContractIdentifier::new( + StandardPrincipalData(0x01, [i as u8; 20]), + format!("db-{}", i).as_str().into(), + ) +} + +fn make_stacker_db_ids(i: usize) -> Vec { + let mut dbs = vec![]; + for j in 0..i { + dbs.push(stacker_db_id(j)); + } + dbs +} + +fn setup_peer_config( + i: usize, + port_base: u16, + neighbor_count: usize, + peer_count: usize, +) -> TestPeerConfig { + let mut conf = TestPeerConfig::from_port(port_base + (2 * i as u16)); + conf.connection_opts.num_neighbors = neighbor_count as u64; + conf.connection_opts.soft_num_neighbors = neighbor_count as u64; + + conf.connection_opts.num_clients = 256; + conf.connection_opts.soft_num_clients = 128; + + conf.connection_opts.max_http_clients = 1000; + conf.connection_opts.max_neighbors_of_neighbor = neighbor_count as u64; + + conf.connection_opts.max_clients_per_host = MAX_NEIGHBORS_DATA_LEN as u64; + conf.connection_opts.soft_max_clients_per_host = peer_count as u64; + + conf.connection_opts.max_neighbors_per_host = MAX_NEIGHBORS_DATA_LEN as u64; + conf.connection_opts.soft_max_neighbors_per_host = (neighbor_count / 2) as u64; + conf.connection_opts.soft_max_neighbors_per_org = (neighbor_count / 2) as u64; + + conf.connection_opts.walk_interval = 0; + + conf.connection_opts.disable_inv_sync = true; + conf.connection_opts.disable_block_download = true; + + let j = i as u32; + conf.burnchain.peer_version = PEER_VERSION_TESTNET | (j << 16) | (j << 8) | j; // different non-major versions for each peer + + // even-number peers support stacker DBs. + // odd-number peers do not + if i % 2 == 0 { + conf.services = (ServiceFlags::RELAY as u16) + | (ServiceFlags::RPC as u16) + | (ServiceFlags::STACKERDB as u16); + conf.stacker_dbs = make_stacker_db_ids(i); + } else { + conf.services = (ServiceFlags::RELAY as u16) | (ServiceFlags::RPC as u16); + conf.stacker_dbs = vec![]; + } + + conf +} + +/// Arrange 15 peers into a ring topology, and verify that each peer learns of each other peer over +/// time. Peers are always allowed, so always peered with. +#[test] +#[ignore] +fn test_walk_ring_allow_15() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // all initial peers are allowed + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 32800, neighbor_count, peer_count); + + conf.allowed = -1; // always allowed + conf.denied = 0; + + conf.connection_opts.timeout = 100000; + conf.connection_opts.handshake_timeout = 100000; + conf.connection_opts.disable_natpunch = true; // breaks allow checks + + peer_configs.push(conf); + } + + test_walk_ring(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a ring topology, and verify that each peer learns of each other peer over +/// time. No peer is always-allowed, and all walks are allowed. +#[test] +#[ignore] +fn test_walk_ring_15_plain() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // initial peers are neither white- nor denied + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 32900, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + + peer_configs.push(conf); + } + + test_walk_ring(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a ring topology, and verify that each peer learns of each other peer over +/// time. No inbound walks, but pingback walks are allowed. +#[test] +#[ignore] +fn test_walk_ring_15_pingback() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // initial peers are neither white- nor denied + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 32950, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + conf.connection_opts.disable_pingbacks = false; + conf.connection_opts.disable_inbound_walks = true; + + peer_configs.push(conf); + } + + test_walk_ring_pingback(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a ring topology, and verify that each peer learns of each other peer over +/// time. Puts one peer in a different AS to bias the neighbor walk towards it. +#[test] +#[ignore] +fn test_walk_ring_15_org_biased() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // one outlier peer has a different org than the others. + use std::env; + + // ::33000 is in AS 1 + env::set_var("BLOCKSTACK_NEIGHBOR_TEST_33000", "1"); + + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33000, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + if i == 0 { + conf.asn = 1; + conf.org = 1; + } else { + conf.asn = 0; + conf.org = 0; + } + + peer_configs.push(conf); + } + + // all peers see peer ::33000 as having ASN and Org ID 1 + let peer_0 = peer_configs[0].to_neighbor(); + + let peers = test_walk_ring(&mut peer_configs); + + for i in 1..peer_count { + match PeerDB::get_peer( + peers[i].network.peerdb.conn(), + peer_0.addr.network_id, + &peer_0.addr.addrbytes, + peer_0.addr.port, + ) + .unwrap() + { + Some(p) => { + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); + } + None => {} + } + } + + // no peer pruned peer ::33000 + for i in 1..peer_count { + match peers[i].network.prune_inbound_counts.get(&peer_0.addr) { + None => {} + Some(count) => { + assert_eq!(*count, 0); + } + } + } + }) +} + +fn test_walk_ring_ex(peer_configs: &mut Vec, test_pingback: bool) -> Vec { + // arrange neighbors into a "ring" topology, where + // neighbor N is connected to neighbor (N-1)%NUM_NEIGHBORS and (N+1)%NUM_NEIGHBORS. + // If test_pingback is true, then neighbor N is only connected to (N+1)%NUM_NEIGHBORS + let mut peers = vec![]; + + let peer_count = peer_configs.len(); + + for i in 0..peer_count { + let n = (i + 1) % peer_count; + let neighbor = peer_configs[n].to_neighbor(); + peer_configs[i].add_neighbor(&neighbor); + } + + if !test_pingback { + for i in 1..peer_count + 1 { + let p = i - 1; + let neighbor = peer_configs[p].to_neighbor(); + peer_configs[i % peer_count].add_neighbor(&neighbor); + } + } + + for i in 0..peer_count { + let p = TestPeer::new(peer_configs[i].clone()); + peers.push(p); + } + + run_topology_test(&mut peers); + + // no nacks or handshake-rejects + for i in 0..peer_count { + for (_, convo) in peers[i].network.peers.iter() { + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::Nack) + .unwrap_or(&0) + == 0 + ); + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::HandshakeReject) + .unwrap_or(&0) + == 0 + ); + } + } + + peers +} + +fn test_walk_ring(peer_configs: &mut Vec) -> Vec { + test_walk_ring_ex(peer_configs, false) +} + +fn test_walk_ring_pingback(peer_configs: &mut Vec) -> Vec { + test_walk_ring_ex(peer_configs, true) +} + +/// Arrange 15 peers into a line topology, and verify that each peer learns of each other peer over +/// time. All peers are whitelisted to one another. +#[test] +#[ignore] +fn test_walk_line_allowed_15() { + setup_rlimit_nofiles(); + with_timeout(600, || { + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33100, neighbor_count, peer_count); + + conf.allowed = -1; + conf.denied = 0; + + conf.connection_opts.timeout = 100000; + conf.connection_opts.handshake_timeout = 100000; + conf.connection_opts.disable_natpunch = true; // breaks allow checks + + peer_configs.push(conf); + } + + test_walk_line(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a line topology, and verify that each peer learns of each other peer over +/// time. No peers are whitelisted to one another, and all walk types are allowed. +#[test] +#[ignore] +fn test_walk_line_15_plain() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // initial peers are neither white- nor denied + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33200, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + + peer_configs.push(conf); + } + + test_walk_line(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a line topology, and verify that each peer learns of each other peer over +/// time. One peer is in a different AS. +#[test] +#[ignore] +fn test_walk_line_15_org_biased() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // one outlier peer has a different org than the others. + use std::env; + + // ::33300 is in AS 1 + env::set_var("BLOCKSTACK_NEIGHBOR_TEST_33300", "1"); + + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; // make this a little bigger to speed this test up + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33300, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + if i == 0 { + conf.asn = 1; + conf.org = 1; + } else { + conf.asn = 0; + conf.org = 0; + } + + peer_configs.push(conf); + } + // all peers see peer ::33300 as having ASN and Org ID 1 + let peer_0 = peer_configs[0].to_neighbor(); + + let peers = test_walk_line(&mut peer_configs); + + for i in 1..peer_count { + match PeerDB::get_peer( + peers[i].network.peerdb.conn(), + peer_0.addr.network_id, + &peer_0.addr.addrbytes, + peer_0.addr.port, + ) + .unwrap() + { + Some(p) => { + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); + } + None => {} + } + } + + // no peer pruned peer ::33300 + for i in 1..peer_count { + match peers[i].network.prune_inbound_counts.get(&peer_0.addr) { + None => {} + Some(count) => { + assert_eq!(*count, 0); + } + } + } + }) +} + +/// Arrange 15 peers into a line topology, and verify that each peer learns of each other peer over +/// time. No inbound walks allowed; only pingback walks. +#[test] +#[ignore] +fn test_walk_line_15_pingback() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // initial peers are neither white- nor denied + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33350, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + conf.connection_opts.disable_pingbacks = false; + conf.connection_opts.disable_inbound_walks = true; + + peer_configs.push(conf); + } + + test_walk_line_pingback(&mut peer_configs); + }) +} + +fn test_walk_line(peer_configs: &mut Vec) -> Vec { + test_walk_line_ex(peer_configs, false) +} + +fn test_walk_line_pingback(peer_configs: &mut Vec) -> Vec { + test_walk_line_ex(peer_configs, true) +} + +fn test_walk_line_ex(peer_configs: &mut Vec, pingback_test: bool) -> Vec { + // arrange neighbors into a "line" topology. + // If pingback_test is true, then the topology is unidirectional: + // + // 0 ---> 1 ---> 2 ---> ... ---> peer_count + // + // If pingback_test is false, then the topology is bidirectional + // + // 0 <--> 1 <--> 2 <--> ... <--> peer_count + // + // all initial peers are allowed + let mut peers = vec![]; + + let peer_count = peer_configs.len(); + for i in 0..peer_count - 1 { + let n = i + 1; + let neighbor = peer_configs[n].to_neighbor(); + peer_configs[i].add_neighbor(&neighbor); + } + + if !pingback_test { + for i in 1..peer_count { + let p = i - 1; + let neighbor = peer_configs[p].to_neighbor(); + peer_configs[i].add_neighbor(&neighbor); + } + } + + for i in 0..peer_count { + let p = TestPeer::new(peer_configs[i].clone()); + peers.push(p); + } + + run_topology_test(&mut peers); + + // no nacks or handshake-rejects + for i in 0..peer_count { + for (_, convo) in peers[i].network.peers.iter() { + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::Nack) + .unwrap_or(&0) + == 0 + ); + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::HandshakeReject) + .unwrap_or(&0) + == 0 + ); + } + } + + peers +} + +/// Arrange 15 peers into a star topology, and verify that each peer learns of each other peer over +/// time. All peers whitelist each other. +#[test] +#[ignore] +fn test_walk_star_allowed_15() { + setup_rlimit_nofiles(); + with_timeout(600, || { + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33400, neighbor_count, peer_count); + + conf.allowed = -1; // always allowed + conf.denied = 0; + + conf.connection_opts.timeout = 100000; + conf.connection_opts.handshake_timeout = 100000; + conf.connection_opts.disable_natpunch = true; // breaks allow checks + + peer_configs.push(conf); + } + + test_walk_star(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a star topology, and verify that each peer learns of each other peer over +/// time. No peers whitelist each other, and all walk types are alloweed. +#[test] +#[ignore] +fn test_walk_star_15_plain() { + setup_rlimit_nofiles(); + with_timeout(600, || { + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33500, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + + peer_configs.push(conf); + } + + test_walk_star(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a star topology, and verify that each peer learns of each other peer over +/// time. No peers whitelist each other, and inbound walks (but not pingbacks) are disabled. +#[test] +#[ignore] +fn test_walk_star_15_pingback() { + setup_rlimit_nofiles(); + with_timeout(600, || { + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33550, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + conf.connection_opts.disable_pingbacks = false; + conf.connection_opts.disable_inbound_walks = true; + conf.connection_opts.soft_max_neighbors_per_org = peer_count as u64; + + peer_configs.push(conf); + } + + test_walk_star_pingback(&mut peer_configs); + }) +} + +/// Arrange 15 peers into a star topology, and verify that each peer learns of each other peer over +/// time. One peer is in a separate AS. +#[test] +#[ignore] +fn test_walk_star_15_org_biased() { + setup_rlimit_nofiles(); + with_timeout(600, || { + // one outlier peer has a different org than the others. + use std::env; + + // ::33600 is in AS 1 + env::set_var("BLOCKSTACK_NEIGHBOR_TEST_33600", "1"); + + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 3; + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33600, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + if i == 0 { + conf.asn = 1; + conf.org = 1; + } else { + conf.asn = 0; + conf.org = 0; + } + + peer_configs.push(conf); + } + // all peers see peer ::33600 as having ASN and Org ID 1 + let peer_0 = peer_configs[0].to_neighbor(); + + let peers = test_walk_star(&mut peer_configs); + + for i in 1..peer_count { + match PeerDB::get_peer( + peers[i].network.peerdb.conn(), + peer_0.addr.network_id, + &peer_0.addr.addrbytes, + peer_0.addr.port, + ) + .unwrap() + { + Some(p) => { + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); + } + None => {} + } + } + + // no peer pruned peer ::33600 + for i in 1..peer_count { + match peers[i].network.prune_inbound_counts.get(&peer_0.addr) { + None => {} + Some(count) => { + assert_eq!(*count, 0); + } + } + } + }) +} + +fn test_walk_star(peer_configs: &mut Vec) -> Vec { + test_walk_star_ex(peer_configs, false) +} + +fn test_walk_star_pingback(peer_configs: &mut Vec) -> Vec { + test_walk_star_ex(peer_configs, true) +} + +fn test_walk_star_ex(peer_configs: &mut Vec, pingback_test: bool) -> Vec { + // arrange neighbors into a "star" topology. + // If pingback_test is true, then initial connections are unidirectional -- each neighbor (except + // for 0) only knows about 0. Neighbor 0 knows about no one. + // If pingback_test is false, then initial connections are bidirectional. + + let mut peers = vec![]; + let peer_count = peer_configs.len(); + + for i in 1..peer_count { + let neighbor = peer_configs[i].to_neighbor(); + let hub = peer_configs[0].to_neighbor(); + if !pingback_test { + peer_configs[0].add_neighbor(&neighbor); + } + + peer_configs[i].add_neighbor(&hub); + } + + for i in 0..peer_count { + let p = TestPeer::new(peer_configs[i].clone()); + peers.push(p); + } + + run_topology_test(&mut peers); + + // no nacks or handshake-rejects + for i in 0..peer_count { + for (_, convo) in peers[i].network.peers.iter() { + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::Nack) + .unwrap_or(&0) + == 0 + ); + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::HandshakeReject) + .unwrap_or(&0) + == 0 + ); + } + } + + peers +} + +fn test_walk_inbound_line(peer_configs: &mut Vec) -> Vec { + // arrange neighbors into a two-tiered "line" topology, where even-numbered neighbors are + // "NAT'ed" but connected to both the predecessor and successor odd neighbors. Odd + // numbered neighbors are not connected to anyone. The first and last even-numbered + // neighbor is only connected to its successor and predecessor, respectively. + // + // 1 3 5 + // ^ ^ ^ ^ ^ ^ + // / \ / \ / \ ... etc ... + // 0 2 4 6 + // + // The goal of this test is that odd-numbered neighbors all learn about each other + + let mut peers = vec![]; + let peer_count = peer_configs.len(); + + for i in 0..peer_count { + if i % 2 == 0 { + if i > 0 { + let predecessor = peer_configs[i - 1].to_neighbor(); + peer_configs[i].add_neighbor(&predecessor); + } + if i + 1 < peer_count { + let successor = peer_configs[i + 1].to_neighbor(); + peer_configs[i].add_neighbor(&successor); + } + } + } + + for i in 0..peer_count { + let p = TestPeer::new(peer_configs[i].clone()); + peers.push(p); + } + + run_topology_test_ex( + &mut peers, + |peers: &Vec| { + let mut done = true; + for i in 0..peer_count { + // only check "public" peers + if i % 2 != 0 { + let all_neighbors = + PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); + if (all_neighbors.len() as u64) < ((peer_count / 2 - 1) as u64) { + let nk = peers[i].config.to_neighbor().addr; + test_debug!( + "waiting for public peer {:?} to fill up its frontier: {}", + &nk, + all_neighbors.len() + ); + done = false; + } + } + } + done + }, + true, + ); + + // no nacks or handshake-rejects + for i in 0..peer_count { + for (_, convo) in peers[i].network.peers.iter() { + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::Nack) + .unwrap_or(&0) + == 0 + ); + assert!( + *convo + .stats + .msg_rx_counts + .get(&StacksMessageID::HandshakeReject) + .unwrap_or(&0) + == 0 + ); + } + } + + peers +} + +/// Arrange 15 peers into an alternating line topology, and verify that each peer learns of each +/// other peer over time. Odd peers have no outbound neighbors initially, but share one or two +/// inbound peers. +#[test] +#[ignore] +fn test_walk_inbound_line_15() { + setup_rlimit_nofiles(); + with_timeout(600, || { + let mut peer_configs = vec![]; + let peer_count: usize = 15; + let neighbor_count: usize = 15; // make this test go faster + + for i in 0..peer_count { + let mut conf = setup_peer_config(i, 33250, neighbor_count, peer_count); + + conf.allowed = 0; + conf.denied = 0; + conf.connection_opts.disable_pingbacks = true; + conf.connection_opts.disable_inbound_walks = false; + conf.connection_opts.walk_inbound_ratio = 2; + // basically, don't timeout (so public nodes can ask non-public inbound nodes about + // neighbors indefinitely) + conf.connection_opts.connect_timeout = 60000; + conf.connection_opts.timeout = 60000; + conf.connection_opts.handshake_timeout = 60000; + conf.connection_opts.soft_max_neighbors_per_org = (neighbor_count + 1) as u64; + conf.connection_opts.soft_max_neighbors_per_host = (neighbor_count + 1) as u64; + + peer_configs.push(conf); + } + + test_walk_inbound_line(&mut peer_configs); + }) +} + +fn dump_peers(peers: &Vec) -> () { + test_debug!("\n=== PEER DUMP ==="); + for i in 0..peers.len() { + let mut neighbor_index = vec![]; + let mut outbound_neighbor_index = vec![]; + for j in 0..peers.len() { + let stats_opt = peers[i] + .network + .get_neighbor_stats(&peers[j].to_neighbor().addr); + match stats_opt { + Some(stats) => { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); + } + } + None => {} + } + } + + let all_neighbors = PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); + let num_allowed = all_neighbors.iter().fold(0, |mut sum, ref n2| { + sum += if n2.allowed < 0 { 1 } else { 0 }; + sum + }); + test_debug!("Neighbor {} (all={}, outbound={}) (total neighbors = {}, total allowed = {}): outbound={:?} all={:?}", i, neighbor_index.len(), outbound_neighbor_index.len(), all_neighbors.len(), num_allowed, &outbound_neighbor_index, &neighbor_index); + } + test_debug!("\n"); +} + +fn dump_peer_histograms(peers: &Vec) -> () { + let mut outbound_hist: HashMap = HashMap::new(); + let mut inbound_hist: HashMap = HashMap::new(); + let mut all_hist: HashMap = HashMap::new(); + for i in 0..peers.len() { + let mut neighbor_index = vec![]; + let mut inbound_neighbor_index = vec![]; + let mut outbound_neighbor_index = vec![]; + for j in 0..peers.len() { + let stats_opt = peers[i] + .network + .get_neighbor_stats(&peers[j].to_neighbor().addr); + match stats_opt { + Some(stats) => { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); + } else { + inbound_neighbor_index.push(j); + } + } + None => {} + } + } + for inbound in inbound_neighbor_index.iter() { + if inbound_hist.contains_key(inbound) { + let c = inbound_hist.get(inbound).unwrap().to_owned(); + inbound_hist.insert(*inbound, c + 1); + } else { + inbound_hist.insert(*inbound, 1); + } + } + for outbound in outbound_neighbor_index.iter() { + if outbound_hist.contains_key(outbound) { + let c = outbound_hist.get(outbound).unwrap().to_owned(); + outbound_hist.insert(*outbound, c + 1); + } else { + outbound_hist.insert(*outbound, 1); + } + } + for n in neighbor_index.iter() { + if all_hist.contains_key(n) { + let c = all_hist.get(n).unwrap().to_owned(); + all_hist.insert(*n, c + 1); + } else { + all_hist.insert(*n, 1); + } + } + } + + test_debug!("=== PEER HISTOGRAM ==="); + for i in 0..peers.len() { + test_debug!( + "Neighbor {}: #in={} #out={} #all={}", + i, + inbound_hist.get(&i).unwrap_or(&0), + outbound_hist.get(&i).unwrap_or(&0), + all_hist.get(&i).unwrap_or(&0) + ); + } + test_debug!("\n"); +} + +fn run_topology_test(peers: &mut Vec) -> () { + run_topology_test_ex(peers, |_| false, false) +} + +fn run_topology_test_ex( + peers: &mut Vec, + mut finished_check: F, + use_finished_check: bool, +) -> () +where + F: FnMut(&Vec) -> bool, +{ + let peer_count = peers.len(); + + let mut initial_allowed: HashMap> = HashMap::new(); + let mut initial_denied: HashMap> = HashMap::new(); + + for i in 0..peer_count { + // turn off components we don't need + peers[i].config.connection_opts.disable_inv_sync = true; + peers[i].config.connection_opts.disable_block_download = true; + let nk = peers[i].config.to_neighbor().addr.clone(); + for j in 0..peers[i].config.initial_neighbors.len() { + let initial = &peers[i].config.initial_neighbors[j]; + if initial.allowed < 0 { + if !initial_allowed.contains_key(&nk) { + initial_allowed.insert(nk.clone(), vec![]); + } + initial_allowed + .get_mut(&nk) + .unwrap() + .push(initial.addr.clone()); + } + if initial.denied < 0 { + if !initial_denied.contains_key(&nk) { + initial_denied.insert(nk.clone(), vec![]); + } + initial_denied + .get_mut(&nk) + .unwrap() + .push(initial.addr.clone()); + } + } + } + + for i in 0..peer_count { + peers[i].connect_initial().unwrap(); + } + + // go until each neighbor knows about each other neighbor + let mut finished = false; + let mut count = 0; + while !finished { + finished = true; + let mut peer_counts = 0; + let mut random_order = vec![0usize; peer_count]; + for i in 0..peer_count { + random_order[i] = i; + } + let mut rng = thread_rng(); + random_order.shuffle(&mut rng); + + debug!("Random order = {:?}", &random_order); + for i in random_order.into_iter() { + let _ = peers[i].step_with_ibd(false); + let nk = peers[i].config.to_neighbor().addr; + debug!("Step peer {:?}", &nk); + + // allowed peers are still connected + match initial_allowed.get(&nk) { + Some(ref peer_list) => { + for pnk in peer_list.iter() { + if !peers[i].network.events.contains_key(&pnk.clone()) { + error!( + "{:?}: Perma-allowed peer {:?} not connected anymore", + &nk, &pnk + ); + assert!(false); + } + } + } + None => {} + }; + + // denied peers are never connected + match initial_denied.get(&nk) { + Some(ref peer_list) => { + for pnk in peer_list.iter() { + if peers[i].network.events.contains_key(&pnk.clone()) { + error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); + assert!(false); + } + } + } + None => {} + }; + + // all ports are unique in the p2p socket table + let mut ports: HashSet = HashSet::new(); + for k in peers[i].network.events.keys() { + if ports.contains(&k.port) { + error!("duplicate port {} from {:?}", k.port, k); + assert!(false); + } + ports.insert(k.port); + } + + // done? + let now_finished = if use_finished_check { + finished_check(&peers) + } else { + let mut done = true; + let all_neighbors = PeerDB::get_all_peers(peers[i].network.peerdb.conn()).unwrap(); + peer_counts += all_neighbors.len(); + test_debug!("Peer {} ({}) has {} neighbors", i, &nk, all_neighbors.len()); + + if (all_neighbors.len() as u64) < ((peer_count - 1) as u64) { + test_debug!( + "waiting for {:?} to fill up its frontier: {} < {}", + &nk, + all_neighbors.len(), + peer_count - 1 + ); + done = false; + } else { + test_debug!( + "not waiting for {:?} to fill up its frontier: {} >= {}", + &nk, + all_neighbors.len(), + peer_count - 1 + ); + } + done + }; + + finished = finished && now_finished; + } + + count += 1; + + test_debug!( + "Network convergence rate: {}%", + (100.0 * (peer_counts as f64)) / ((peer_count * peer_count) as f64), + ); + + if finished { + break; + } + + test_debug!("Finished walking the network {} times", count); + dump_peers(&peers); + dump_peer_histograms(&peers); + } + + test_debug!("Converged after {} calls to network.run()", count); + dump_peers(&peers); + dump_peer_histograms(&peers); + + // each peer learns each other peer's stacker DBs + for (i, peer) in peers.iter().enumerate() { + if i % 2 != 0 { + continue; + } + let mut expected_dbs = PeerDB::get_local_peer(peer.network.peerdb.conn()) + .unwrap() + .stacker_dbs; + expected_dbs.sort(); + for (j, other_peer) in peers.iter().enumerate() { + if i == j { + continue; + } + + let all_neighbors = PeerDB::get_all_peers(other_peer.network.peerdb.conn()).unwrap(); + + if (all_neighbors.len() as u64) < ((peer_count - 1) as u64) { + // this is a simulated-NAT'ed node -- it won't learn about other NAT'ed nodes' + // DBs + continue; + } + + // what does the other peer see as this peer's stacker DBs? + let mut other_peer_dbs = other_peer + .network + .peerdb + .get_peer_stacker_dbs(&peer.config.to_neighbor()) + .unwrap(); + other_peer_dbs.sort(); + + if j % 2 == 0 { + test_debug!( + "Compare stacker DBs of {} vs {}", + &peer.config.to_neighbor(), + &other_peer.config.to_neighbor() + ); + assert_eq!(expected_dbs, other_peer_dbs); + } else { + // this peer doesn't support Stacker DBs + assert_eq!(other_peer_dbs, vec![]); + } + } + } +} diff --git a/testnet/stacks-node/src/tests/p2p/mod.rs b/testnet/stacks-node/src/tests/p2p/mod.rs new file mode 100644 index 0000000000..c2a61de8ac --- /dev/null +++ b/testnet/stacks-node/src/tests/p2p/mod.rs @@ -0,0 +1,18 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +/// Integration tests that verify that sets of nodes in various initial topologies will, over time, +/// learn about every other node in the network +pub mod convergence; From ca9c5169f042bc3724ad7414796b00f9285f75c5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 15 Sep 2024 22:32:51 -0400 Subject: [PATCH 1110/1400] chore: activate p2p convergence tests --- .github/workflows/bitcoin-tests.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 6c3aca0e14..360b4f74cb 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -117,6 +117,19 @@ jobs: - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover + - tests::p2p::convergence::test_walk_ring_allow_15 + - tests::p2p::convergence::test_walk_ring_15_plain + - tests::p2p::convergence::test_walk_ring_15_pingback + - tests::p2p::convergence::test_walk_ring_15_org_biased + - tests::p2p::convergence::test_walk_line_allowed_15 + - tests::p2p::convergence::test_walk_line_15_plain + - tests::p2p::convergence::test_walk_line_15_org_biased + - tests::p2p::convergence::test_walk_line_15_pingback + - tests::p2p::convergence::test_walk_star_allowed_15 + - tests::p2p::convergence::test_walk_star_15_plain + - tests::p2p::convergence::test_walk_star_15_pingback + - tests::p2p::convergence::test_walk_star_15_org_biased + - tests::p2p::convergence::test_walk_inbound_line_15 # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower From 57351cf11afac7019bd9b4c680dde29d58df6c90 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 16 Sep 2024 08:34:43 -0500 Subject: [PATCH 1111/1400] fix build error when built without prom --- stacks-signer/src/client/stacks_client.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 130e4d9832..cc780166af 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -598,7 +598,6 @@ impl StacksClient { /// Retrieve the current pox data from the stacks node pub fn get_pox_data(&self) -> Result { debug!("stacks_node_client: Getting pox data..."); - #[cfg(feature = "monitoring_prom")] let timer = crate::monitoring::new_rpc_call_timer(&self.pox_path(), &self.http_origin); let send_request = || { self.stacks_node_client From c2ec5ef35e4206c8e2f2673cc990ddabec982bc2 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 16 Sep 2024 09:49:40 -0700 Subject: [PATCH 1112/1400] feat: revert change to bubble error from announce_new_stacks_block --- stackslib/src/net/relay.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 35627d9dd4..d022148b3a 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -2049,11 +2049,9 @@ impl Relayer { } } if !http_uploaded_blocks.is_empty() { - if let Some(comm) = coord_comms { - if !comm.announce_new_stacks_block() { - return Err(net_error::CoordinatorClosed); - } - }; + coord_comms.inspect(|comm| { + comm.announce_new_stacks_block(); + }); } accepted_nakamoto_blocks_and_relayers.extend(pushed_blocks_and_relayers); From 1002d23f485055125e1cd1e261b8a441fd11f16b Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 16 Sep 2024 09:53:44 -0700 Subject: [PATCH 1113/1400] fix: revert default staging_db schema handling --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index be904395c2..f17dda37a8 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -673,7 +673,7 @@ impl StacksChainState { Ok(x) => x, Err(e) => { debug!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); - return Ok(NAKAMOTO_STAGING_DB_SCHEMA_LATEST); + return Ok(1); } }; From 8cdc6dc01c2e50458e1be31eb977bcf8c47e9c04 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 16 Sep 2024 09:58:29 -0700 Subject: [PATCH 1114/1400] fix: add missing insert_block failure catch after merge --- stacks-signer/src/v0/signer.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 522ef4e249..a5f635cf16 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -783,6 +783,7 @@ impl Signer { } if let Err(e) = self.signer_db.insert_block(&block_info) { warn!("{self}: Failed to update block state: {e:?}",); + panic!("{self} Failed to update block state: {e}"); } } @@ -903,7 +904,7 @@ impl Signer { "Failed to set group threshold signature timestamp for {}: {:?}", block_hash, &e ); - e + panic!("{self} Failed to write block to signerdb: {e}"); }); #[cfg(any(test, feature = "testing"))] { From 0ca9d3971023765c8fc96539ebb320a13a942938 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 16 Sep 2024 10:09:24 -0700 Subject: [PATCH 1115/1400] fix: remove extra unused param from replace_blocks query --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index f17dda37a8..0fcdaffad8 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -600,7 +600,6 @@ impl<'a> NakamotoStagingBlocksTx<'a> { &block.serialize_to_vec(), &signing_weight, &obtain_method.to_string(), - u64_to_sql(get_epoch_time_secs())?, &block.header.consensus_hash, &block.header.block_hash(), ])?; From 565a37ff16e999b3d92cdbf7bd6a3581beb18338 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 16 Sep 2024 11:09:21 -0700 Subject: [PATCH 1116/1400] fix: use lower-cardinality prometheus metrics in signer --- stacks-signer/src/client/stacks_client.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cc780166af..f44d988138 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -450,7 +450,10 @@ impl StacksClient { "last_sortition" => %last_sortition, ); let path = self.tenure_forking_info_path(chosen_parent, last_sortition); - let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); + let timer = crate::monitoring::new_rpc_call_timer( + "/v3/tenures/fork_info/:start/:stop", + &self.http_origin, + ); let send_request = || { self.stacks_node_client .get(&path) @@ -491,7 +494,8 @@ impl StacksClient { pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { debug!("stacks_node_client: Getting sortition with consensus hash {ch}..."); let path = format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex()); - let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); + let timer_label = format!("{}/consensus/:consensus_hash", self.sortition_info_path()); + let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); let send_request = || { self.stacks_node_client.get(&path).send().map_err(|e| { warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); @@ -561,7 +565,7 @@ impl StacksClient { ) -> Result>, ClientError> { debug!("stacks_node_client: Getting reward set signers for reward cycle {reward_cycle}..."); let timer = crate::monitoring::new_rpc_call_timer( - &self.reward_set_path(reward_cycle), + &format!("{}/v3/stacker_set/:reward_cycle", self.http_origin), &self.http_origin, ); let send_request = || { @@ -644,8 +648,8 @@ impl StacksClient { address: &StacksAddress, ) -> Result { debug!("stacks_node_client: Getting account info..."); - let timer = - crate::monitoring::new_rpc_call_timer(&self.accounts_path(address), &self.http_origin); + let timer_label = format!("{}/v2/accounts/:stacks_address", self.http_origin); + let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); let send_request = || { self.stacks_node_client .get(self.accounts_path(address)) @@ -797,7 +801,11 @@ impl StacksClient { let body = json!({"sender": self.stacks_address.to_string(), "arguments": args}).to_string(); let path = self.read_only_path(contract_addr, contract_name, function_name); - let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); + let timer_label = format!( + "{}/v2/contracts/call-read/:principal/{contract_name}/{function_name}", + self.http_origin + ); + let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); let response = self .stacks_node_client .post(path) From 8ad5e95b85a7b4cbb918e63048f5eea70229e8ce Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 16 Sep 2024 13:53:29 -0500 Subject: [PATCH 1117/1400] chore: move p2p::conv tests back to stackslib --- .github/workflows/bitcoin-tests.yml | 13 ----- Cargo.lock | 5 +- stackslib/Cargo.toml | 9 ++-- stackslib/src/burnchains/bitcoin/indexer.rs | 10 ++-- stackslib/src/burnchains/bitcoin/spv.rs | 10 ++-- stackslib/src/burnchains/burnchain.rs | 2 +- stackslib/src/burnchains/mod.rs | 8 +-- stackslib/src/burnchains/tests/db.rs | 2 +- stackslib/src/burnchains/tests/mod.rs | 1 - stackslib/src/chainstate/burn/db/sortdb.rs | 4 +- stackslib/src/chainstate/burn/distribution.rs | 2 +- .../burn/operations/leader_block_commit.rs | 8 +-- .../burn/operations/leader_key_register.rs | 8 +-- .../src/chainstate/burn/operations/mod.rs | 6 +-- .../chainstate/burn/operations/stack_stx.rs | 4 +- .../burn/operations/transfer_stx.rs | 2 +- stackslib/src/chainstate/coordinator/mod.rs | 8 +-- stackslib/src/chainstate/coordinator/tests.rs | 1 - .../chainstate/nakamoto/coordinator/mod.rs | 2 +- .../chainstate/nakamoto/coordinator/tests.rs | 1 - stackslib/src/chainstate/nakamoto/mod.rs | 6 +-- stackslib/src/chainstate/nakamoto/tenure.rs | 2 +- .../src/chainstate/nakamoto/tests/mod.rs | 1 - .../chainstate/stacks/boot/contract_tests.rs | 4 +- stackslib/src/chainstate/stacks/boot/mod.rs | 17 +++--- .../src/chainstate/stacks/boot/pox_2_tests.rs | 1 - .../src/chainstate/stacks/boot/pox_3_tests.rs | 1 - .../src/chainstate/stacks/boot/pox_4_tests.rs | 1 - .../chainstate/stacks/boot/signers_tests.rs | 1 - stackslib/src/chainstate/stacks/db/blocks.rs | 29 +++++----- stackslib/src/chainstate/stacks/db/mod.rs | 5 +- .../src/chainstate/stacks/db/transactions.rs | 1 - stackslib/src/chainstate/stacks/index/file.rs | 6 +-- stackslib/src/chainstate/stacks/index/marf.rs | 4 +- .../src/chainstate/stacks/index/storage.rs | 24 ++++----- .../src/chainstate/stacks/index/trie_sql.rs | 2 +- stackslib/src/chainstate/stacks/miner.rs | 10 ++-- stackslib/src/chainstate/stacks/mod.rs | 5 +- stackslib/src/chainstate/stacks/tests/mod.rs | 1 - stackslib/src/clarity_vm/clarity.rs | 12 ++--- stackslib/src/clarity_vm/database/marf.rs | 4 +- stackslib/src/clarity_vm/mod.rs | 2 +- stackslib/src/clarity_vm/tests/mod.rs | 1 - stackslib/src/core/mempool.rs | 20 +++---- stackslib/src/core/mod.rs | 54 +++++++++---------- stackslib/src/cost_estimates/mod.rs | 2 +- stackslib/src/cost_estimates/tests/common.rs | 1 + stackslib/src/cost_estimates/tests/mod.rs | 1 - stackslib/src/lib.rs | 4 +- stackslib/src/net/asn.rs | 2 +- stackslib/src/net/atlas/db.rs | 6 +-- stackslib/src/net/chat.rs | 2 +- stackslib/src/net/codec.rs | 2 +- stackslib/src/net/connection.rs | 3 +- stackslib/src/net/dns.rs | 2 +- stackslib/src/net/download/epoch2x.rs | 12 ++--- stackslib/src/net/httpcore.rs | 22 ++++---- stackslib/src/net/inv/epoch2x.rs | 9 ++-- stackslib/src/net/mod.rs | 19 +++---- stackslib/src/net/neighbors/mod.rs | 20 +++---- stackslib/src/net/p2p.rs | 2 +- stackslib/src/net/poll.rs | 2 +- stackslib/src/net/prune.rs | 6 +-- stackslib/src/net/stackerdb/db.rs | 2 +- stackslib/src/net/stackerdb/mod.rs | 2 +- stackslib/src/net/stackerdb/tests/mod.rs | 1 - stackslib/src/net/stackerdb/tests/sync.rs | 1 + .../src/net/tests}/convergence.rs | 11 ++-- stackslib/src/net/tests/download/mod.rs | 1 - stackslib/src/net/tests/inv/mod.rs | 1 - stackslib/src/net/tests/mempool/mod.rs | 1 - stackslib/src/net/tests/mod.rs | 2 +- stackslib/src/net/tests/relay/mod.rs | 1 - stackslib/src/util_lib/bloom.rs | 2 +- stackslib/src/util_lib/boot.rs | 2 +- stackslib/src/util_lib/mod.rs | 2 +- stx-genesis/Cargo.toml | 3 -- testnet/stacks-node/Cargo.toml | 5 -- testnet/stacks-node/src/tests/mod.rs | 1 - testnet/stacks-node/src/tests/p2p/mod.rs | 18 ------- 80 files changed, 213 insertions(+), 280 deletions(-) rename {testnet/stacks-node/src/tests/p2p => stackslib/src/net/tests}/convergence.rs (99%) delete mode 100644 testnet/stacks-node/src/tests/p2p/mod.rs diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index e411179a70..a7a483665e 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -121,19 +121,6 @@ jobs: - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - - tests::p2p::convergence::test_walk_ring_allow_15 - - tests::p2p::convergence::test_walk_ring_15_plain - - tests::p2p::convergence::test_walk_ring_15_pingback - - tests::p2p::convergence::test_walk_ring_15_org_biased - - tests::p2p::convergence::test_walk_line_allowed_15 - - tests::p2p::convergence::test_walk_line_15_plain - - tests::p2p::convergence::test_walk_line_15_org_biased - - tests::p2p::convergence::test_walk_line_15_pingback - - tests::p2p::convergence::test_walk_star_allowed_15 - - tests::p2p::convergence::test_walk_star_15_plain - - tests::p2p::convergence::test_walk_star_15_pingback - - tests::p2p::convergence::test_walk_star_15_org_biased - - tests::p2p::convergence::test_walk_inbound_line_15 # Do not run this one until we figure out why it fails in CI # - tests::neon_integrations::bitcoin_reorg_flap # - tests::neon_integrations::bitcoin_reorg_flap_with_follower diff --git a/Cargo.lock b/Cargo.lock index e56e4400b4..b9b45849d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3420,9 +3420,6 @@ dependencies = [ "regex", "reqwest", "ring 0.16.20", - "rlimit", - "rstest 0.17.0", - "rstest_reuse 0.5.0", "rusqlite", "serde", "serde_derive", @@ -3431,7 +3428,6 @@ dependencies = [ "stacks-common", "stacks-signer", "stackslib", - "stdext", "stx-genesis", "tikv-jemallocator", "tiny_http", @@ -3507,6 +3503,7 @@ dependencies = [ "rand_core 0.6.4", "regex", "ripemd", + "rlimit", "rstest 0.17.0", "rstest_reuse 0.5.0", "rusqlite", diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index 909e237502..d04fc3b1af 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -59,10 +59,6 @@ siphasher = "0.3.7" wsts = { workspace = true } hashbrown = { workspace = true } rusqlite = { workspace = true } -rstest = { version = "0.17.0", optional = true } -rstest_reuse = { version = "0.5.0", optional = true } -stdext = { version = "0.3.1", optional = true } -stx-genesis = { path = "../stx-genesis", optional = true } [target.'cfg(not(any(target_os = "macos",target_os="windows", target_arch = "arm" )))'.dependencies] tikv-jemallocator = {workspace = true} @@ -99,12 +95,13 @@ features = ["std"] assert-json-diff = "1.0.0" criterion = "0.3.5" stdext = "0.3.1" -stx-genesis = { path = "../stx-genesis" } +stx-genesis = { path = "../stx-genesis"} clarity = { features = ["default", "testing"], path = "../clarity" } stacks-common = { features = ["default", "testing"], path = "../stacks-common" } rstest = "0.17.0" rstest_reuse = "0.5.0" mutants = "0.0.3" +rlimit = "0.10.2" [features] default = [] @@ -113,7 +110,7 @@ disable-costs = [] developer-mode = ["clarity/developer-mode"] monitoring_prom = ["prometheus"] slog_json = ["slog-json", "stacks-common/slog_json", "clarity/slog_json", "pox-locking/slog_json"] -testing = ["stdext", "rstest", "rstest_reuse", "stx-genesis"] +testing = [] [target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 7c9083985b..40cabd86d3 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -167,7 +167,7 @@ impl BitcoinIndexerConfig { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_default(spv_headers_path: String) -> BitcoinIndexerConfig { BitcoinIndexerConfig { peer_host: "127.0.0.1".to_string(), @@ -203,7 +203,7 @@ impl BitcoinIndexerRuntime { } impl BitcoinIndexer { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new( config: BitcoinIndexerConfig, runtime: BitcoinIndexerRuntime, @@ -216,7 +216,7 @@ impl BitcoinIndexer { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new_unit_test(working_dir: &str) -> BitcoinIndexer { let mut working_dir_path = PathBuf::from(working_dir); if fs::metadata(&working_dir_path).is_err() { @@ -861,7 +861,7 @@ impl BitcoinIndexer { Ok(new_tip) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn raw_store_header(&mut self, header: BurnchainBlockHeader) -> Result<(), btc_error> { let mut spv_client = SpvClient::new( &self.config.spv_headers_path, @@ -887,7 +887,7 @@ impl BitcoinIndexer { Ok(()) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn mock_bitcoin_header( parent_block_hash: &BurnchainHeaderHash, timestamp: u32, diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index b2b886bdc4..82cbb7b7f6 100644 --- a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -182,7 +182,7 @@ impl SpvClient { Ok(client) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new_without_migration( headers_path: &str, start_block: u64, @@ -211,7 +211,7 @@ impl SpvClient { Ok(client) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn disable_check_txcount(&mut self) { self.check_txcount = false; } @@ -220,7 +220,7 @@ impl SpvClient { &self.headers_db } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn conn_mut(&mut self) -> &mut DBConn { &mut self.headers_db } @@ -277,7 +277,7 @@ impl SpvClient { .and_then(|_| Ok(())) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_db_migrate(conn: &mut DBConn) -> Result<(), btc_error> { SpvClient::db_migrate(conn) } @@ -925,7 +925,7 @@ impl SpvClient { Ok(()) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_write_block_headers( &mut self, height: u64, diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 60f663c0de..a5ecaa0458 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -623,7 +623,7 @@ impl Burnchain { ret } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn default_unittest( first_block_height: u64, first_block_hash: &BurnchainHeaderHash, diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 2720d48e8c..0bc68897cb 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -57,7 +57,7 @@ pub mod burnchain; pub mod db; pub mod indexer; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; pub struct Txid(pub [u8; 32]); @@ -351,7 +351,7 @@ impl PoxConstants { _shadow: PhantomData, } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_default() -> PoxConstants { // 20 reward slots; 10 prepare-phase slots PoxConstants::new( @@ -369,7 +369,7 @@ impl PoxConstants { ) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] /// Create a PoX constants used in tests with 5-block cycles, /// 3-block prepare phases, a threshold of 3, rejection fraction of 25%, /// a participation threshold of 5% and no sunset or transition to pox-2 or beyond. @@ -821,7 +821,7 @@ impl From for Error { } impl BurnchainView { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn make_test_data(&mut self) { let oldest_height = if self.burn_stable_block_height < MAX_NEIGHBOR_BLOCK_DELAY { 0 diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index 8b69449d74..f14243d049 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -49,7 +49,7 @@ impl BurnchainDB { /// Get back all of the parsed burnchain operations for a given block. /// Used in testing to replay burnchain data. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_burnchain_block_ops( &self, block_hash: &BurnchainHeaderHash, diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index f1bc0613af..31e29c0b26 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] pub mod affirmation; pub mod burnchain; diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 3964732329..942e6774bd 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -5482,7 +5482,7 @@ impl<'a> SortitionHandleTx<'a> { sn.canonical_stacks_tip_height, )?; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] { let (block_consensus_hash, block_bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self).unwrap(); @@ -6566,7 +6566,7 @@ impl ChainstateDB for SortitionDB { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests { use std::sync::mpsc::sync_channel; use std::thread; diff --git a/stackslib/src/chainstate/burn/distribution.rs b/stackslib/src/chainstate/burn/distribution.rs index d91f158c27..ed01ae014b 100644 --- a/stackslib/src/chainstate/burn/distribution.rs +++ b/stackslib/src/chainstate/burn/distribution.rs @@ -424,7 +424,7 @@ impl BurnSamplePoint { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod tests { use std::marker::PhantomData; diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index a1e5ee500a..cea03d4435 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -90,7 +90,7 @@ pub static OUTPUTS_PER_COMMIT: usize = 2; pub static BURN_BLOCK_MINED_AT_MODULUS: u64 = 5; impl LeaderBlockCommitOp { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn initial( block_header_hash: &BlockHeaderHash, block_height: u64, @@ -131,10 +131,10 @@ impl LeaderBlockCommitOp { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new( block_header_hash: &BlockHeaderHash, - _block_height: u64, + block_height: u64, new_seed: &VRFSeed, parent: &LeaderBlockCommitOp, key_block_ptr: u32, @@ -170,7 +170,7 @@ impl LeaderBlockCommitOp { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn set_burn_height(&mut self, height: u64) { self.block_height = height; let new_burn_parent_modulus = if height > 0 { diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 87b13d8f50..44402adc0c 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -44,7 +44,7 @@ pub struct ParsedData { } impl LeaderKeyRegisterOp { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new(public_key: &VRFPublicKey) -> LeaderKeyRegisterOp { LeaderKeyRegisterOp { public_key: public_key.clone(), @@ -59,10 +59,10 @@ impl LeaderKeyRegisterOp { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new_from_secrets( - _num_sigs: u16, - _hash_mode: &AddressHashMode, + num_sigs: u16, + hash_mode: &AddressHashMode, prover_key: &VRFPrivateKey, ) -> Option { let prover_pubk = VRFPublicKey::from_private(prover_key); diff --git a/stackslib/src/chainstate/burn/operations/mod.rs b/stackslib/src/chainstate/burn/operations/mod.rs index fd0d63ef59..0843e03b1e 100644 --- a/stackslib/src/chainstate/burn/operations/mod.rs +++ b/stackslib/src/chainstate/burn/operations/mod.rs @@ -47,7 +47,7 @@ pub mod stack_stx; pub mod transfer_stx; pub mod vote_for_aggregate_key; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test; /// This module contains all burn-chain operations @@ -439,7 +439,7 @@ impl BlockstackOperationType { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn set_block_height(&mut self, height: u64) { match self { BlockstackOperationType::LeaderKeyRegister(ref mut data) => data.block_height = height, @@ -456,7 +456,7 @@ impl BlockstackOperationType { }; } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn set_burn_header_hash(&mut self, hash: BurnchainHeaderHash) { match self { BlockstackOperationType::LeaderKeyRegister(ref mut data) => { diff --git a/stackslib/src/chainstate/burn/operations/stack_stx.rs b/stackslib/src/chainstate/burn/operations/stack_stx.rs index 52e4d6bf3b..c4c54b9737 100644 --- a/stackslib/src/chainstate/burn/operations/stack_stx.rs +++ b/stackslib/src/chainstate/burn/operations/stack_stx.rs @@ -57,7 +57,7 @@ struct ParsedData { pub static OUTPUTS_PER_COMMIT: usize = 2; impl PreStxOp { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new(sender: &StacksAddress) -> PreStxOp { PreStxOp { output: sender.clone(), @@ -155,7 +155,7 @@ impl PreStxOp { } impl StackStxOp { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new( sender: &StacksAddress, reward_addr: &PoxAddress, diff --git a/stackslib/src/chainstate/burn/operations/transfer_stx.rs b/stackslib/src/chainstate/burn/operations/transfer_stx.rs index a70075ff7c..9d1d562d9c 100644 --- a/stackslib/src/chainstate/burn/operations/transfer_stx.rs +++ b/stackslib/src/chainstate/burn/operations/transfer_stx.rs @@ -46,7 +46,7 @@ struct ParsedData { } impl TransferStxOp { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new( sender: &StacksAddress, recipient: &StacksAddress, diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 666fd97657..2849b74904 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -82,7 +82,7 @@ use crate::net::atlas::{AtlasConfig, AtlasDB, AttachmentInstance}; use crate::util_lib::db::{DBConn, DBTx, Error as DBError}; pub mod comm; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; /// The 3 different states for the current @@ -110,7 +110,7 @@ impl NewBurnchainBlockStatus { /// Test helper to convert this status into the optional hash of the missing PoX anchor block. /// Because there are unit tests that expect a Some(..) result if PoX cannot proceed, the /// missing Nakamoto anchor block case is converted into a placeholder Some(..) value - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn into_missing_block_hash(self) -> Option { match self { Self::Ready => None, @@ -624,7 +624,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader ChainsCoordinator<'a, T, (), U, (), (), B> { /// Create a coordinator for testing, with some parameters defaulted to None - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_new( burnchain: &Burnchain, chain_id: u32, @@ -644,7 +644,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader } /// Create a coordinator for testing allowing for all configurable params - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_new_full( burnchain: &Burnchain, chain_id: u32, diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 73b4349c2b..50127af176 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::cmp; use std::collections::{BTreeMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index aa8ac21891..cb1966d806 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -58,7 +58,7 @@ use crate::monitoring::increment_stx_blocks_processed_counter; use crate::net::Error as NetError; use crate::util_lib::db::Error as DBError; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; macro_rules! err_or_debug { diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 902efc8a83..6a2a484790 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::collections::{HashMap, HashSet}; use std::sync::Mutex; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 8bdc48c933..cc8d6caaa2 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -122,7 +122,7 @@ pub mod signer_set; pub mod staging_blocks; pub mod tenure; pub mod test_signers; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; pub use self::staging_blocks::{ @@ -270,7 +270,7 @@ lazy_static! { ]; } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod fault_injection { static PROCESS_BLOCK_STALL: std::sync::Mutex = std::sync::Mutex::new(false); @@ -1756,7 +1756,7 @@ impl NakamotoChainState { canonical_sortition_tip: &SortitionId, dispatcher_opt: Option<&'a T>, ) -> Result, ChainstateError> { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fault_injection::stall_block_processing(); let nakamoto_blocks_db = stacks_chain_state.nakamoto_blocks_db(); diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 4f09fd1f57..81380cc93d 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -484,7 +484,7 @@ impl NakamotoChainState { /// Drop a nakamoto tenure. /// Used for testing - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub(crate) fn delete_nakamoto_tenure( tx: &Connection, ch: &ConsensusHash, diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index fa02a34a09..722cfa541a 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::borrow::BorrowMut; use std::collections::HashMap; diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 650617ab49..04b74ba2e9 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -361,7 +361,7 @@ impl BurnStateDB for TestSimBurnStateDB { panic!("Not implemented in TestSim"); } - fn get_burn_block_height(&self, _sortition_id: &SortitionId) -> Option { + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { panic!("Not implemented in TestSim"); } @@ -525,7 +525,7 @@ impl BurnStateDB for TestSimBurnStateDB { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] impl HeadersDB for TestSimHeadersDB { fn get_burn_header_hash_for_block( &self, diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 77a24b938f..88ecc8887e 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -626,7 +626,7 @@ impl StacksChainState { /// Determine the minimum amount of STX per reward address required to stack in the _next_ /// reward cycle - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_stacking_minimum( &mut self, sortdb: &SortitionDB, @@ -688,7 +688,7 @@ impl StacksChainState { } /// Determine how many uSTX are stacked in a given reward cycle - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_get_total_ustx_stacked( &mut self, sortdb: &SortitionDB, @@ -1379,20 +1379,19 @@ impl StacksChainState { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod contract_tests; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod pox_2_tests; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod pox_3_tests; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod pox_4_tests; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod signers_tests; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { - #![allow(unused)] use std::collections::{HashMap, HashSet}; use std::fs; diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 2ac7d0e6f5..7ae25d00f6 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index b34b7eb6c7..3134b4773a 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index affb4bcf7b..0968cc4de3 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index ba1a97556e..bf3b5f312c 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use clarity::vm::clarity::ClarityConnection; use clarity::vm::contexts::OwnedEnvironment; diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 8df8983301..47cace8c4b 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -415,7 +415,7 @@ impl FromRow for StagingBlock { } impl StagingMicroblock { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn try_into_microblock(self) -> Result { StacksMicroblock::consensus_deserialize(&mut &self.block_data[..]).map_err(|_e| self) } @@ -660,7 +660,7 @@ impl StacksChainState { } /// Store an empty block to the chunk store, named by its hash. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn store_empty_block( blocks_path: &str, consensus_hash: &ConsensusHash, @@ -760,10 +760,10 @@ impl StacksChainState { } /// Get a list of all microblocks' hashes, and their anchored blocks' hashes - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn list_microblocks( blocks_conn: &DBConn, - _blocks_dir: &str, + blocks_dir: &str, ) -> Result)>, Error> { let mut blocks = StacksChainState::list_blocks(blocks_conn)?; let mut ret = vec![]; @@ -1025,7 +1025,7 @@ impl StacksChainState { .map_err(|e| Error::DBError(db_error::from(e))) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn load_staging_block_data( block_conn: &DBConn, blocks_path: &str, @@ -1493,7 +1493,7 @@ impl StacksChainState { /// Get an anchored block's parent block header. /// Doesn't matter if it's staging or not. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn load_parent_block_header( sort_ic: &SortitionDBConn, blocks_path: &str, @@ -2500,7 +2500,7 @@ impl StacksChainState { Ok(()) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn set_block_orphaned<'a>( tx: &mut DBTx<'a>, blocks_path: &str, @@ -2522,7 +2522,7 @@ impl StacksChainState { // find all orphaned microblocks, and delete the block data let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2"; let find_orphaned_microblocks_args = params![consensus_hash, anchored_block_hash]; - let _orphaned_microblock_hashes = query_row_columns::( + let orphaned_microblock_hashes = query_row_columns::( tx, find_orphaned_microblocks_sql, find_orphaned_microblocks_args, @@ -2801,7 +2801,7 @@ impl StacksChainState { /// Do we have any microblock available to serve in any capacity, given its parent anchored block's /// index block hash? - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn has_microblocks_indexed( &self, parent_index_block_hash: &StacksBlockId, @@ -2867,7 +2867,7 @@ impl StacksChainState { /// Get the sqlite rowid for a staging microblock, given the hash of the microblock. /// Returns None if no such microblock. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn stream_microblock_get_rowid( blocks_conn: &DBConn, parent_index_block_hash: &StacksBlockId, @@ -2883,7 +2883,7 @@ impl StacksChainState { /// Load up the metadata on a microblock stream (but don't get the data itself) /// DO NOT USE IN PRODUCTION -- doesn't work for microblock forks. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn stream_microblock_get_info( blocks_conn: &DBConn, parent_index_block_hash: &StacksBlockId, @@ -3576,7 +3576,7 @@ impl StacksChainState { /// Given a burnchain snapshot, a Stacks block and a microblock stream, preprocess them all. /// This does not work when forking - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn preprocess_stacks_epoch( &mut self, sort_ic: &SortitionDBConn, @@ -6438,7 +6438,7 @@ impl StacksChainState { /// PoX aware (i.e., unit tests, and old stacks-node loops), /// Elsewhere, block processing is invoked by the ChainsCoordinator, /// which handles tracking the chain tip itself - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn process_blocks_at_tip( &mut self, burnchain_db_conn: &DBConn, @@ -6936,9 +6936,8 @@ impl StacksChainState { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { - #![allow(unused)] use std::fs; use clarity::vm::ast::ASTRules; diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 81e954a090..a942ec7fd1 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -543,7 +543,7 @@ impl<'a, 'b> ClarityTx<'a, 'b> { self.block.seal() } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn commit_block(self) -> () { self.block.commit_block(); } @@ -2713,9 +2713,8 @@ impl StacksChainState { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { - #![allow(unused)] use std::{env, fs}; use clarity::vm::test_util::TEST_BURN_STATE_DB; diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 99e92aac32..35ba532667 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -1571,7 +1571,6 @@ impl StacksChainState { #[cfg(test)] pub mod test { - #![allow(unused)] use clarity::vm::clarity::TransactionConnection; use clarity::vm::contracts::Contract; use clarity::vm::representations::{ClarityName, ContractName}; diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 53df16b761..4123b1310a 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -168,7 +168,7 @@ impl TrieFile { } /// Read a trie blob in its entirety from the blobs file - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn read_trie_blob(&mut self, db: &Connection, block_id: u32) -> Result, Error> { let (offset, length) = trie_sql::get_external_trie_offset_length(db, block_id)?; self.seek(SeekFrom::Start(offset))?; @@ -410,7 +410,7 @@ impl TrieFile { } /// Obtain a TrieHash for a node, given the node's block's hash (used only in testing) - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_node_hash_bytes_by_bhh( &mut self, db: &Connection, @@ -424,7 +424,7 @@ impl TrieFile { } /// Get all (root hash, trie hash) pairs for this TrieFile - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn read_all_block_hashes_and_roots( &mut self, db: &Connection, diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 73d387c07b..d5dd77c51f 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1494,12 +1494,12 @@ impl MARF { } /// Access internal storage - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn borrow_storage_backend(&mut self) -> TrieStorageConnection { self.storage.connection() } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn borrow_storage_transaction(&mut self) -> TrieStorageTransaction { self.storage.transaction().unwrap() } diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 9397145fcb..6994c7ad05 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -364,7 +364,7 @@ impl UncommittedState { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn print_to_stderr(&self) { self.trie_ram_ref().print_to_stderr() } @@ -535,7 +535,7 @@ impl TrieRAM { result } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] #[allow(dead_code)] pub fn stats(&mut self) -> (u64, u64) { let r = self.read_count; @@ -545,7 +545,7 @@ impl TrieRAM { (r, w) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] #[allow(dead_code)] pub fn node_stats(&mut self) -> (u64, u64, u64) { let nr = self.read_node_count; @@ -559,7 +559,7 @@ impl TrieRAM { (nr, br, nw) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] #[allow(dead_code)] pub fn leaf_stats(&mut self) -> (u64, u64) { let lr = self.read_leaf_count; @@ -677,7 +677,7 @@ impl TrieRAM { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_inner_seal( &mut self, storage_tx: &mut TrieStorageTransaction, @@ -1113,14 +1113,14 @@ impl TrieRAM { Ok(self.data.len() as u32) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn print_to_stderr(&self) { for dat in self.data.iter() { eprintln!("{}: {:?}", &dat.1, &dat.0); } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn data(&self) -> &Vec<(TrieNodeType, TrieHash)> { &self.data } @@ -2035,7 +2035,7 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { } /// Read the Trie root node's hash from the block table. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn read_block_root_hash(&mut self, bhh: &T) -> Result { let root_hash_ptr = TriePtr::new( TrieNodeID::Node256 as u8, @@ -2051,7 +2051,7 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn inner_read_persisted_root_to_blocks(&mut self) -> Result, Error> { let ret = match self.blobs.as_mut() { Some(blobs) => { @@ -2065,7 +2065,7 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { } /// Generate a mapping between Trie root hashes and the blocks that contain them - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn read_root_to_block_table(&mut self) -> Result, Error> { let mut ret = self.inner_read_persisted_root_to_blocks()?; let uncommitted_writes = match self.data.uncommitted_writes.take() { @@ -2738,12 +2738,12 @@ impl<'a, T: MarfTrieId> TrieStorageConnection<'a, T> { self.bench.reset(); } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn transient_data(&self) -> &TrieStorageTransientData { &self.data } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn transient_data_mut(&mut self) -> &mut TrieStorageTransientData { &mut self.data } diff --git a/stackslib/src/chainstate/stacks/index/trie_sql.rs b/stackslib/src/chainstate/stacks/index/trie_sql.rs index 1d54cce0d0..c9d3b40dce 100644 --- a/stackslib/src/chainstate/stacks/index/trie_sql.rs +++ b/stackslib/src/chainstate/stacks/index/trie_sql.rs @@ -422,7 +422,7 @@ pub fn open_trie_blob_readonly<'a>(conn: &'a Connection, block_id: u32) -> Resul Ok(blob) } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub fn read_all_block_hashes_and_roots( conn: &Connection, ) -> Result, Error> { diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index cb4709d123..0195385d3b 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1603,7 +1603,7 @@ impl StacksBlockBuilder { /// Append a transaction if doing so won't exceed the epoch data size. /// Does not check for errors - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn force_mine_tx( &mut self, clarity_tx: &mut ClarityTx, @@ -1626,7 +1626,7 @@ impl StacksBlockBuilder { if !self.anchored_done { // save match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { - Ok((fee, _receipt)) => { + Ok((fee, receipt)) => { self.total_anchored_fees += fee; } Err(e) => { @@ -1637,7 +1637,7 @@ impl StacksBlockBuilder { self.txs.push(tx.clone()); } else { match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { - Ok((fee, _receipt)) => { + Ok((fee, receipt)) => { self.total_streamed_fees += fee; } Err(e) => { @@ -2003,7 +2003,7 @@ impl StacksBlockBuilder { } /// Unconditionally build an anchored block from a list of transactions. /// Used in test cases - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn make_anchored_block_from_txs( builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, @@ -2022,7 +2022,7 @@ impl StacksBlockBuilder { /// Unconditionally build an anchored block from a list of transactions. /// Used in test cases - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn make_anchored_block_and_microblock_from_txs( mut builder: StacksBlockBuilder, chainstate_handle: &StacksChainState, diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index ed9cf98e84..35c82f9b94 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -69,7 +69,7 @@ pub mod index; pub mod miner; pub mod transaction; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; pub use stacks_common::address::{ @@ -1131,9 +1131,8 @@ pub const MAX_EPOCH_SIZE: u32 = 2 * 1024 * 1024; // $MAX_EPOCH_SIZE bytes (so the average microblock size needs to be 4kb if there are 256 of them) pub const MAX_MICROBLOCK_SIZE: u32 = 65536; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { - #![allow(unused)] use clarity::util::get_epoch_time_secs; use clarity::vm::representations::{ClarityName, ContractName}; use clarity::vm::ClarityVersion; diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 87601268c0..cda74cb46d 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::cell::RefCell; use std::collections::{HashMap, HashSet, VecDeque}; diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index eb38daf68e..c89679f414 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -176,7 +176,7 @@ macro_rules! using { } impl<'a, 'b> ClarityBlockConnection<'a, 'b> { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn new_test_conn( datastore: WritableMarfStore<'a>, header_db: &'b dyn HeadersDB, @@ -731,7 +731,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { /// Commits all changes in the current block by /// (1) committing the current MARF tip to storage, /// (2) committing side-storage. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn commit_block(self) -> LimitedCostTracker { debug!("Commit Clarity datastore"); self.datastore.test_commit(); @@ -1591,7 +1591,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { self.datastore } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn set_epoch(&mut self, epoch_id: StacksEpochId) { self.epoch = epoch_id; } @@ -1856,7 +1856,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { } /// Evaluate a raw Clarity snippit - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn clarity_eval_raw(&mut self, code: &str) -> Result { let (result, _, _, _) = self.with_abort_callback( |vm_env| vm_env.eval_raw(code).map_err(Error::from), @@ -1865,7 +1865,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { Ok(result) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn eval_read_only( &mut self, contract: &QualifiedContractIdentifier, @@ -1879,7 +1879,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod tests { use std::fs; diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index eaec528c17..fed0e70e95 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -257,7 +257,7 @@ impl MarfedKV { &mut self.marf } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn sql_conn(&self) -> &Connection { self.marf.sqlite_conn() } @@ -526,7 +526,7 @@ impl<'a> WritableMarfStore<'a> { Ok(()) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn test_commit(self) { let bhh = self.chain_tip.clone(); self.commit_to(&bhh).unwrap(); diff --git a/stackslib/src/clarity_vm/mod.rs b/stackslib/src/clarity_vm/mod.rs index 4e1688da11..a3e6d23b8c 100644 --- a/stackslib/src/clarity_vm/mod.rs +++ b/stackslib/src/clarity_vm/mod.rs @@ -6,5 +6,5 @@ pub mod special; /// Stacks blockchain specific Clarity database implementations and wrappers pub mod database; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod tests; diff --git a/stackslib/src/clarity_vm/tests/mod.rs b/stackslib/src/clarity_vm/tests/mod.rs index 1cc597b3d1..5855d61f31 100644 --- a/stackslib/src/clarity_vm/tests/mod.rs +++ b/stackslib/src/clarity_vm/tests/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] pub mod analysis_costs; pub mod ast; diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 73d1fc1c94..fe75d62bd2 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1025,7 +1025,7 @@ impl NonceCache { where C: ClarityConnection, { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] assert!(self.cache.len() <= self.max_cache_size); // Check in-memory cache @@ -1111,7 +1111,7 @@ fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result, d query_row(conn, sql, params![addr_str]) } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub fn db_get_all_nonces(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM nonces"; let mut stmt = conn.prepare(&sql).map_err(|e| db_error::SqliteError(e))?; @@ -1162,7 +1162,7 @@ impl CandidateCache { self.next.push_back(tx); } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] assert!(self.cache.len() + self.next.len() <= self.max_cache_size); } @@ -1177,7 +1177,7 @@ impl CandidateCache { self.next.append(&mut self.cache); self.cache = std::mem::take(&mut self.next); - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] { assert!(self.cache.len() <= self.max_cache_size + 1); assert!(self.next.len() <= self.max_cache_size + 1); @@ -1365,7 +1365,7 @@ impl MemPoolDB { .map(String::from) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn open_test( mainnet: bool, chain_id: u32, @@ -1934,7 +1934,7 @@ impl MemPoolDB { } /// Get all transactions across all tips - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_all_txs(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM mempool"; let rows = query_rows::(conn, &sql, NO_PARAMS)?; @@ -1942,7 +1942,7 @@ impl MemPoolDB { } /// Get all transactions at a specific block - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_num_tx_at_block( conn: &DBConn, consensus_hash: &ConsensusHash, @@ -1955,7 +1955,7 @@ impl MemPoolDB { } /// Get a number of transactions after a given timestamp on a given chain tip. - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_txs_after( conn: &DBConn, consensus_hash: &ConsensusHash, @@ -2283,7 +2283,7 @@ impl MemPoolDB { Ok(()) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn clear_before_coinbase_height( &mut self, min_coinbase_height: u64, @@ -2666,7 +2666,7 @@ impl MemPoolDB { Ok(()) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn dump_txs(&self) { let sql = "SELECT * FROM mempool"; let txs: Vec = query_rows(&self.db, sql, NO_PARAMS).unwrap(); diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 9a3d67e752..ade8a82589 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -30,7 +30,7 @@ use crate::burnchains::{Burnchain, Error as burnchain_error}; use crate::chainstate::burn::ConsensusHash; pub mod mempool; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; use std::cmp::Ordering; @@ -68,9 +68,9 @@ pub const GENESIS_EPOCH: StacksEpochId = StacksEpochId::Epoch20; /// The number of blocks which will share the block bonus /// from burn blocks that occurred without a sortition. /// (See: https://forum.stacks.org/t/pox-consensus-and-stx-future-supply) -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const INITIAL_MINING_BONUS_WINDOW: u16 = 10; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const INITIAL_MINING_BONUS_WINDOW: u16 = 10_000; pub const STACKS_2_0_LAST_BLOCK_TO_PROCESS: u64 = 700_000; @@ -557,29 +557,29 @@ fn test_ord_for_stacks_epoch_id() { ); } pub trait StacksEpochExtension { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test(stacks_epoch_id: StacksEpochId, epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_05(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_05_only(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_pre_2_05(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_1(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_2(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_3(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_4(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_5(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_3_0(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> Vec; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec; fn all( epoch_2_0_block_height: u64, @@ -615,7 +615,7 @@ impl StacksEpochExtension for StacksEpoch { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_pre_2_05(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -640,7 +640,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_05(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -678,7 +678,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_05_only(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -716,7 +716,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_1(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -767,7 +767,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_2(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -831,7 +831,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_3(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_2_3 first_burn_height = {}", @@ -908,7 +908,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_4(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_2_4 first_burn_height = {}", @@ -998,7 +998,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_5(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_2_5 first_burn_height = {}", @@ -1101,7 +1101,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_3_0(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test_3_0 first_burn_height = {}", @@ -1217,7 +1217,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -1268,7 +1268,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec { info!( "StacksEpoch unit_test first_burn_height = {}", @@ -1342,7 +1342,7 @@ impl StacksEpochExtension for StacksEpoch { ] } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn unit_test(stacks_epoch_id: StacksEpochId, first_burnchain_height: u64) -> Vec { match stacks_epoch_id { StacksEpochId::Epoch10 | StacksEpochId::Epoch20 => { diff --git a/stackslib/src/cost_estimates/mod.rs b/stackslib/src/cost_estimates/mod.rs index 0992aa180a..fc4aa5b1b2 100644 --- a/stackslib/src/cost_estimates/mod.rs +++ b/stackslib/src/cost_estimates/mod.rs @@ -20,7 +20,7 @@ pub mod fee_scalar; pub mod metrics; pub mod pessimistic; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; use self::metrics::CostMetric; diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index 9ecfee2774..fe6527ff53 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -15,6 +15,7 @@ use crate::chainstate::stacks::{ use crate::core::StacksEpochId; /// Make a block receipt from `tx_receipts` with some dummy values filled for test. +#[cfg(test)] pub fn make_block_receipt(tx_receipts: Vec) -> StacksEpochReceipt { StacksEpochReceipt { header: StacksHeaderInfo { diff --git a/stackslib/src/cost_estimates/tests/mod.rs b/stackslib/src/cost_estimates/tests/mod.rs index e9292447bf..792ecb778e 100644 --- a/stackslib/src/cost_estimates/tests/mod.rs +++ b/stackslib/src/cost_estimates/tests/mod.rs @@ -1,4 +1,3 @@ -#![allow(unused)] use crate::cost_estimates::FeeRateEstimate; pub mod common; diff --git a/stackslib/src/lib.rs b/stackslib/src/lib.rs index 8a6919412a..31f97628a6 100644 --- a/stackslib/src/lib.rs +++ b/stackslib/src/lib.rs @@ -27,11 +27,11 @@ extern crate slog; #[macro_use] extern crate serde_derive; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] #[macro_use] extern crate rstest; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] #[macro_use] extern crate rstest_reuse; diff --git a/stackslib/src/net/asn.rs b/stackslib/src/net/asn.rs index bb31146c81..f38c6c54d4 100644 --- a/stackslib/src/net/asn.rs +++ b/stackslib/src/net/asn.rs @@ -222,7 +222,7 @@ impl ASEntry4 { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test { use std::io; use std::io::BufRead; diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index 37ed22a26b..d6bdbb301e 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -374,7 +374,7 @@ impl AtlasDB { } // Open an atlas database in memory (used for testing) - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn connect_memory(atlas_config: AtlasConfig) -> Result { let conn = Connection::open_in_memory().map_err(|e| db_error::SqliteError(e))?; let mut db = AtlasDB { @@ -387,7 +387,7 @@ impl AtlasDB { Ok(db) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] /// Only ever to be used in testing, open and instantiate a V1 atlasdb pub fn connect_memory_db_v1(atlas_config: AtlasConfig) -> Result { let conn = Connection::open_in_memory()?; @@ -432,7 +432,7 @@ impl AtlasDB { Ok(db) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] /// Only ever to be used in testing, connect to db, but using existing sqlconn pub fn connect_with_sqlconn( atlas_config: AtlasConfig, diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 5949db0bbf..99b07a6055 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -3058,7 +3058,7 @@ impl ConversationP2P { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test { #![allow(unused)] use std::fs; diff --git a/stackslib/src/net/codec.rs b/stackslib/src/net/codec.rs index 4cb4099fb4..bd8154e414 100644 --- a/stackslib/src/net/codec.rs +++ b/stackslib/src/net/codec.rs @@ -1600,7 +1600,7 @@ impl ProtocolFamily for StacksP2P { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { use stacks_common::bitvec::BitVec; use stacks_common::codec::NEIGHBOR_ADDRESS_ENCODED_SIZE; diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index db50c46333..c360d7a548 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -1504,9 +1504,8 @@ pub type ReplyHandleP2P = NetworkReplyHandle; pub type ConnectionHttp = NetworkConnection; pub type ReplyHandleHttp = NetworkReplyHandle; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test { - #![allow(unused)] use std::io::prelude::*; use std::io::{Read, Write}; use std::sync::{Arc, Mutex}; diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index c63d1b4fed..aedb73bd62 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -355,7 +355,7 @@ impl DNSClient { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test { use std::collections::HashMap; use std::error::Error; diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs index 5c926c4192..c57d9d19bc 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -56,22 +56,22 @@ use crate::net::{ }; use crate::util_lib::db::{DBConn, Error as db_error}; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const BLOCK_DOWNLOAD_INTERVAL: u64 = 180; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const BLOCK_DOWNLOAD_INTERVAL: u64 = 0; /// If a URL never connects, don't use it again for this many seconds -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const BLOCK_DOWNLOAD_BAN_URL: u64 = 300; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const BLOCK_DOWNLOAD_BAN_URL: u64 = 60; /// If we created a request to download a block or microblock, don't do so again until this many /// seconds have passed. -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const BLOCK_REREQUEST_INTERVAL: u64 = 60; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const BLOCK_REREQUEST_INTERVAL: u64 = 30; /// This module is responsible for downloading blocks and microblocks from other peers, using block diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index fc296b9f2b..804add6f33 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -547,14 +547,14 @@ impl StacksHttpRequest { (self.preamble, self.contents) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn try_serialize(&self) -> Result, NetError> { let mut ret = vec![]; self.send(&mut ret)?; Ok(ret) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn get_response_handler_index(&self) -> Option { self.response_handler_index } @@ -676,7 +676,7 @@ impl StacksHttpResponse { self.preamble.headers.clear(); } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn try_serialize(&self) -> Result, NetError> { let mut ret = vec![]; self.send(&mut ret)?; @@ -700,7 +700,7 @@ pub enum StacksHttpPreamble { } impl StacksHttpPreamble { - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn expect_request(self) -> HttpRequestPreamble { match self { Self::Request(x) => x, @@ -708,7 +708,7 @@ impl StacksHttpPreamble { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn expect_response(self) -> HttpResponsePreamble { match self { Self::Response(x) => x, @@ -1004,7 +1004,7 @@ impl StacksHttp { } /// Force the state machine to expect a response - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn set_response_handler(&mut self, request_verb: &str, request_path: &str) { let handler_index = self .find_response_handler(request_verb, request_path) @@ -1016,7 +1016,7 @@ impl StacksHttp { } /// Try to parse an inbound HTTP request using a given handler, preamble, and body - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn handle_try_parse_request( &self, handler: &mut dyn RPCRequestHandler, @@ -1202,7 +1202,7 @@ impl StacksHttp { Ok((response_preamble, response_contents)) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn num_pending(&self) -> usize { self.reply.as_ref().map(|_| 1).unwrap_or(0) } @@ -1346,10 +1346,10 @@ impl StacksHttp { } /// Given a fully-formed single HTTP response, parse it (used by clients). - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn parse_response( - _verb: &str, - _request_path: &str, + verb: &str, + request_path: &str, response_buf: &[u8], ) -> Result { let mut http = StacksHttp::new( diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 7068db7acc..fc5f073b2e 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -47,9 +47,9 @@ use crate::net::{ use crate::util_lib::db::{DBConn, Error as db_error}; /// This module is responsible for synchronizing block inventories with other peers -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const INV_SYNC_INTERVAL: u64 = 150; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const INV_SYNC_INTERVAL: u64 = 3; pub const INV_REWARD_CYCLES: u64 = 2; @@ -1143,7 +1143,7 @@ impl InvState { self.block_stats.get_mut(nk) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn add_peer(&mut self, nk: NeighborKey, is_bootstrap_peer: bool) -> () { self.block_stats.insert( nk.clone(), @@ -2848,3 +2848,6 @@ impl PeerNetwork { work_state } } + +#[cfg(test)] +mod test {} diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 8b36377a25..7f8dea9329 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -143,7 +143,7 @@ pub mod unsolicited; pub use crate::net::neighbors::{NeighborComms, PeerNetworkComms}; use crate::net::stackerdb::{StackerDBConfig, StackerDBSync, StackerDBSyncResult, StackerDBs}; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; #[derive(Debug)] @@ -571,7 +571,7 @@ impl From for Error { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] impl PartialEq for Error { /// (make I/O errors comparable for testing purposes) fn eq(&self, other: &Self) -> bool { @@ -1293,9 +1293,9 @@ pub const MAX_BROADCAST_INBOUND_RECEIVERS: usize = 16; pub const BLOCKS_AVAILABLE_MAX_LEN: u32 = 32; // maximum number of PoX reward cycles we can ask about -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const GETPOXINV_MAX_BITLEN: u64 = 4096; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const GETPOXINV_MAX_BITLEN: u64 = 8; // maximum number of Stacks epoch2.x blocks that can be pushed at once (even if the entire message is undersized). @@ -1455,9 +1455,9 @@ pub const MAX_MICROBLOCKS_UNCONFIRMED: usize = 1024; pub const MAX_HEADERS: usize = 2100; // how long a peer will be denied for if it misbehaves -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const DENY_BAN_DURATION: u64 = 30; // seconds -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const DENY_BAN_DURATION: u64 = 86400; // seconds (1 day) pub const DENY_MIN_BAN_DURATION: u64 = 2; @@ -1719,9 +1719,8 @@ pub trait Requestable: std::fmt::Display { fn make_request_type(&self, peer_host: PeerHost) -> StacksHttpRequest; } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { - #![allow(unused)] use std::collections::HashMap; use std::io::{Cursor, ErrorKind, Read, Write}; use std::net::*; @@ -3920,10 +3919,6 @@ pub mod test { self.network.peerdb.conn() } - pub fn peerdb_mut(&mut self) -> &mut PeerDB { - &mut self.network.peerdb - } - pub fn get_burnchain_view(&mut self) -> Result { let sortdb = self.sortdb.take().unwrap(); let view_res = { diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index 28355d0e1a..efe368efa1 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -79,31 +79,31 @@ pub const WALK_STATE_TIMEOUT: u64 = 60; /// Total number of seconds for which a particular walk can exist. It will be reset if it exceeds /// this age. -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const WALK_RESET_INTERVAL: u64 = 60; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const WALK_RESET_INTERVAL: u64 = 600; /// How often the node will consider pruning neighbors from its neighbor set. The node will prune /// neighbors from over-represented hosts and IP ranges in order to maintain connections to a /// diverse set of neighbors. -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const PRUNE_FREQUENCY: u64 = 0; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const PRUNE_FREQUENCY: u64 = 43200; /// Not all neighbors discovered will have an up-to-date chain tip. This value is the highest /// discrepancy between the local burnchain block height and the remote node's burnchain block /// height for which the neighbor will be considered as a worthwhile peer to remember. -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const MAX_NEIGHBOR_BLOCK_DELAY: u64 = 25; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const MAX_NEIGHBOR_BLOCK_DELAY: u64 = 288; /// How often to kick off neighbor walks. -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const NEIGHBOR_WALK_INTERVAL: u64 = 0; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const NEIGHBOR_WALK_INTERVAL: u64 = 120; // seconds /// Probability that we begin an always-allowed peer walk if we're either in IBD or if we're not @@ -367,7 +367,7 @@ impl PeerNetwork { return true; } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn print_walk_diagnostics(&mut self) { let (mut inbound, mut outbound) = self.dump_peer_table(); @@ -397,7 +397,7 @@ impl PeerNetwork { debug!("{:?}: Walk finished ===================", &self.local_peer); } - #[cfg(not(any(test, feature = "testing")))] + #[cfg(not(test))] fn print_walk_diagnostics(&self) {} /// Update the state of our peer graph walk. diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 3796a6c5f2..45183cdf1b 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5168,7 +5168,7 @@ impl PeerNetwork { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test { use std::{thread, time}; diff --git a/stackslib/src/net/poll.rs b/stackslib/src/net/poll.rs index ed24bc1168..bdda12e6d4 100644 --- a/stackslib/src/net/poll.rs +++ b/stackslib/src/net/poll.rs @@ -481,7 +481,7 @@ impl NetworkState { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] mod test { use std::collections::HashSet; diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index 87b16d7bba..c33b7fea76 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -82,7 +82,7 @@ impl PeerNetwork { }; } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] { test_debug!( "==== ORG NEIGHBOR DISTRIBUTION OF {:?} ===", @@ -376,7 +376,7 @@ impl PeerNetwork { } /// Dump our peer table - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn dump_peer_table(&mut self) -> (Vec, Vec) { let mut inbound: Vec = vec![]; let mut outbound: Vec = vec![]; @@ -447,7 +447,7 @@ impl PeerNetwork { } } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] { if pruned_by_ip.len() > 0 || pruned_by_org.len() > 0 { let (mut inbound, mut outbound) = self.dump_peer_table(); diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index c06e495514..2b735668ac 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -515,7 +515,7 @@ impl StackerDBs { Self::instantiate(path, readwrite) } - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] pub fn connect_memory() -> StackerDBs { Self::instantiate(":memory:", true).unwrap() } diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index e971d9ebfc..40fbc7711a 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -111,7 +111,7 @@ /// state periodically (whereas Gaia stores data for as long as the back-end storage provider's SLA /// indicates). -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod tests; pub mod config; diff --git a/stackslib/src/net/stackerdb/tests/mod.rs b/stackslib/src/net/stackerdb/tests/mod.rs index 17c73daa04..0838342100 100644 --- a/stackslib/src/net/stackerdb/tests/mod.rs +++ b/stackslib/src/net/stackerdb/tests/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] pub mod config; pub mod db; diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index b16b10291f..f45e3acb93 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -51,6 +51,7 @@ const NUM_NEIGHBORS: usize = 8; /// Some testable configurations for stacker DB configs impl StackerDBConfig { + #[cfg(test)] pub fn template() -> StackerDBConfig { StackerDBConfig { chunk_size: CHUNK_SIZE, diff --git a/testnet/stacks-node/src/tests/p2p/convergence.rs b/stackslib/src/net/tests/convergence.rs similarity index 99% rename from testnet/stacks-node/src/tests/p2p/convergence.rs rename to stackslib/src/net/tests/convergence.rs index 8c273e43ce..8494f4ea46 100644 --- a/testnet/stacks-node/src/tests/p2p/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -22,11 +22,12 @@ use clarity::vm::types::{QualifiedContractIdentifier, StandardPrincipalData}; use rand::prelude::*; use rand::thread_rng; use rlimit; -use stacks::core::PEER_VERSION_TESTNET; -use stacks::net::db::*; -use stacks::net::test::*; -use stacks::net::*; -use stacks::util_lib::test::*; + +use crate::core::PEER_VERSION_TESTNET; +use crate::net::db::*; +use crate::net::test::*; +use crate::net::*; +use crate::util_lib::test::*; fn setup_rlimit_nofiles() { info!("Attempt to set nofile rlimit to 4096 (required for these tests to run)"); diff --git a/stackslib/src/net/tests/download/mod.rs b/stackslib/src/net/tests/download/mod.rs index 5b191a1161..430b92e414 100644 --- a/stackslib/src/net/tests/download/mod.rs +++ b/stackslib/src/net/tests/download/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] pub mod epoch2x; pub mod nakamoto; diff --git a/stackslib/src/net/tests/inv/mod.rs b/stackslib/src/net/tests/inv/mod.rs index 04386e2097..04e8e0fd4f 100644 --- a/stackslib/src/net/tests/inv/mod.rs +++ b/stackslib/src/net/tests/inv/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] pub mod epoch2x; pub mod nakamoto; diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 602f943cb3..7a44a56788 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] use std::cell::RefCell; use std::{thread, time}; diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index ebb7158b1c..a74cb0fd2c 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -13,8 +13,8 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] +pub mod convergence; pub mod download; pub mod httpcore; pub mod inv; diff --git a/stackslib/src/net/tests/relay/mod.rs b/stackslib/src/net/tests/relay/mod.rs index d75bae21e8..c408e9ee60 100644 --- a/stackslib/src/net/tests/relay/mod.rs +++ b/stackslib/src/net/tests/relay/mod.rs @@ -12,7 +12,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(unused)] pub mod epoch2x; pub mod nakamoto; diff --git a/stackslib/src/util_lib/bloom.rs b/stackslib/src/util_lib/bloom.rs index d37802150f..d1632f0b14 100644 --- a/stackslib/src/util_lib/bloom.rs +++ b/stackslib/src/util_lib/bloom.rs @@ -592,7 +592,7 @@ impl BloomHash for BloomNodeHasher { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { use std::fs; diff --git a/stackslib/src/util_lib/boot.rs b/stackslib/src/util_lib/boot.rs index 2585fe1b75..95cfca9c41 100644 --- a/stackslib/src/util_lib/boot.rs +++ b/stackslib/src/util_lib/boot.rs @@ -43,7 +43,7 @@ pub fn boot_code_acc(boot_code_address: StacksAddress, boot_code_nonce: u64) -> } } -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub fn boot_code_test_addr() -> StacksAddress { boot_code_addr(false) } diff --git a/stackslib/src/util_lib/mod.rs b/stackslib/src/util_lib/mod.rs index 44a2772c00..83a7ab2a25 100644 --- a/stackslib/src/util_lib/mod.rs +++ b/stackslib/src/util_lib/mod.rs @@ -5,7 +5,7 @@ pub mod boot; pub mod signed_structured_data; pub mod strings; -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub mod test { use std::sync::mpsc::sync_channel; use std::{panic, process, thread}; diff --git a/stx-genesis/Cargo.toml b/stx-genesis/Cargo.toml index 6914ca14a5..39e97465ce 100644 --- a/stx-genesis/Cargo.toml +++ b/stx-genesis/Cargo.toml @@ -15,6 +15,3 @@ path = "src/lib.rs" [build-dependencies] libflate = "1.0.3" sha2 = { version = "0.10" } - -[features] -testing = [] diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index fb05aa0355..19165db0a8 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -43,20 +43,15 @@ warp = "0.3.5" tokio = "1.15" reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls", "rustls-tls"] } clarity = { path = "../../clarity", features = ["default", "testing"]} -rstest = "0.17.0" -rstest_reuse = "0.5.0" stacks-common = { path = "../../stacks-common", features = ["default", "testing"] } stacks = { package = "stackslib", path = "../../stackslib", features = ["default", "testing"] } stacks-signer = { path = "../../stacks-signer", features = ["testing"] } -stx-genesis = { path = "../../stx-genesis", features = ["testing"] } -stdext = "0.3.1" tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = {workspace = true} mutants = "0.0.3" tiny_http = "0.12.0" http-types = "2.12" -rlimit = "0.10.2" [[bin]] name = "stacks-node" diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 0ac8e151a9..a7892b9a2d 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -62,7 +62,6 @@ mod integrations; mod mempool; pub mod nakamoto_integrations; pub mod neon_integrations; -pub mod p2p; mod signer; mod stackerdb; diff --git a/testnet/stacks-node/src/tests/p2p/mod.rs b/testnet/stacks-node/src/tests/p2p/mod.rs deleted file mode 100644 index c2a61de8ac..0000000000 --- a/testnet/stacks-node/src/tests/p2p/mod.rs +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -/// Integration tests that verify that sets of nodes in various initial topologies will, over time, -/// learn about every other node in the network -pub mod convergence; From e5b9a731e18d070b3142bcabcb4d1cc9f338549d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 16 Sep 2024 14:26:39 -0500 Subject: [PATCH 1118/1400] chore: last cleanup from p2p::conv test movement --- stackslib/src/net/chat.rs | 3 +-- stackslib/src/net/neighbors/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 99b07a6055..6cdf0b7e49 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -510,7 +510,7 @@ impl Neighbor { } }; - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] { // setting BLOCKSTACK_NEIGHBOR_TEST_${PORTNUMBER} will let us select an organization // for this peer @@ -3060,7 +3060,6 @@ impl ConversationP2P { #[cfg(test)] mod test { - #![allow(unused)] use std::fs; use std::io::prelude::*; use std::io::{Read, Write}; diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index efe368efa1..450dc04463 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -42,9 +42,9 @@ pub use db::{NeighborReplacements, NeighborWalkDB, PeerDBNeighborWalk}; pub use walk::{NeighborPingback, NeighborWalk, NeighborWalkResult}; /// How often we can contact other neighbors, at a minimim -#[cfg(any(test, feature = "testing"))] +#[cfg(test)] pub const NEIGHBOR_MINIMUM_CONTACT_INTERVAL: u64 = 0; -#[cfg(not(any(test, feature = "testing")))] +#[cfg(not(test))] pub const NEIGHBOR_MINIMUM_CONTACT_INTERVAL: u64 = 600; /// Default number of seconds to wait for a reply from a neighbor From 23482ebd1a5d4399d92aff5c529dd117ae75a25e Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 16 Sep 2024 12:40:40 -0700 Subject: [PATCH 1119/1400] Add new workflow for p2p convergence tests --- .github/workflows/ci.yml | 23 +++++++ .github/workflows/p2p-tests.yml | 87 ++++++++++++++++++++++++++ .github/workflows/standalone-tests.yml | 18 ++++++ 3 files changed, 128 insertions(+) create mode 100644 .github/workflows/p2p-tests.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d1ae652266..1c59f23e8d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -186,6 +186,29 @@ jobs: - check-release uses: ./.github/workflows/bitcoin-tests.yml + + p2p-tests: + if: | + needs.check-release.outputs.is_release == 'true' || ( + github.event_name == 'workflow_dispatch' || + github.event_name == 'pull_request' || + github.event_name == 'merge_group' || + ( + contains(' + refs/heads/master + refs/heads/develop + refs/heads/next + ', github.event.pull_request.head.ref) && + github.event_name == 'push' + ) + ) + name: P2P Tests + needs: + - rustfmt + - create-cache + - check-release + uses: ./.github/workflows/p2p-tests.yml + ## Test to run on a tagged release ## ## Runs when: diff --git a/.github/workflows/p2p-tests.yml b/.github/workflows/p2p-tests.yml new file mode 100644 index 0000000000..a8346e2948 --- /dev/null +++ b/.github/workflows/p2p-tests.yml @@ -0,0 +1,87 @@ +## Github workflow to run p2p tests + +name: Tests::P2P + +on: + workflow_call: + +## env vars are transferred to composite action steps +env: + BITCOIND_TEST: 0 + RUST_BACKTRACE: full + SEGMENT_DOWNLOAD_TIMEOUT_MINS: 15 + TEST_TIMEOUT: 30 + +concurrency: + group: stackslib-tests-${{ github.head_ref || github.ref || github.run_id}} + ## Only cancel in progress if this is for a PR + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +jobs: + # p2p integration tests with code coverage + integration-tests: + name: Integration Tests + runs-on: ubuntu-latest + strategy: + ## Continue with the test matrix even if we've had a failure + fail-fast: false + ## Run a maximum of 32 concurrent tests from the test matrix + max-parallel: 32 + matrix: + test-name: + - net::tests::convergence::test_walk_ring_allow_15 + - net::tests::convergence::test_walk_ring_15_plain + - net::tests::convergence::test_walk_ring_15_pingback + - net::tests::convergence::test_walk_ring_15_org_biased + - net::tests::convergence::test_walk_line_allowed_15 + - net::tests::convergence::test_walk_line_15_plain + - net::tests::convergence::test_walk_line_15_org_biased + - net::tests::convergence::test_walk_line_15_pingback + - net::tests::convergence::test_walk_star_allowed_15 + - net::tests::convergence::test_walk_star_15_plain + - net::tests::convergence::test_walk_star_15_pingback + - net::tests::convergence::test_walk_star_15_org_biased + - net::tests::convergence::test_walk_inbound_line_15 + steps: + ## Setup test environment + - name: Setup Test Environment + id: setup_tests + uses: stacks-network/actions/stacks-core/testenv@main + with: + btc-version: "25.0" + + ## Increase open file descriptors limit + - name: Increase Open File Descriptors + run: | + sudo prlimit --nofile=4096:4096 + + ## Run test matrix using restored cache of archive file + ## - Test will timeout after env.TEST_TIMEOUT minutes + - name: Run Tests + id: run_tests + timeout-minutes: ${{ fromJSON(env.TEST_TIMEOUT) }} + uses: stacks-network/actions/stacks-core/run-tests@main + with: + test-name: ${{ matrix.test-name }} + threads: 1 + + ## Create and upload code coverage file + - name: Code Coverage + id: codecov + uses: stacks-network/actions/codecov@main + with: + test-name: ${{ matrix.test-name }} + + check-tests: + name: Check Tests + runs-on: ubuntu-latest + if: always() + needs: + - integration-tests + steps: + - name: Check Tests Status + id: check_tests_status + uses: stacks-network/actions/check-jobs-status@main + with: + jobs: ${{ toJson(needs) }} + summary_print: "true" diff --git a/.github/workflows/standalone-tests.yml b/.github/workflows/standalone-tests.yml index e0fe2d345b..8a56acc3ec 100644 --- a/.github/workflows/standalone-tests.yml +++ b/.github/workflows/standalone-tests.yml @@ -21,6 +21,7 @@ on: - Atlas Tests - Bitcoin Tests - Epoch Tests + - P2P Tests - Slow Tests - Stacks-Core Tests - SBTC Tests @@ -69,6 +70,23 @@ jobs: - create-cache uses: ./.github/workflows/bitcoin-tests.yml + ## Runs when: + ## either or of the following: + ## - workflow is 'Release Tests' + ## - workflow is 'CI Tests' + ## - workflow is 'P2P Tests' + p2p-tests: + if: | + ( + inputs.workflow == 'Release Tests' || + inputs.workflow == 'CI Tests' || + inputs.workflow == 'P2P Tests' + ) + name: P2P Tests + needs: + - create-cache + uses: ./.github/workflows/p2p-tests.yml + ##################################################### ## Runs when: ## either or of the following: From 2d3dc50f670cdc312546e435cf0fd8bda178c8a7 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 16 Sep 2024 15:44:14 -0400 Subject: [PATCH 1120/1400] fix: correct logic handling submit_operation errors --- testnet/stacks-node/src/neon_node.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index b7e2843ece..4ba1e69111 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2753,19 +2753,21 @@ impl BlockMinerThread { } = self.config.get_node_config(false); let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); - self.failed_to_submit_last_attempt = match res { - Ok(_) => false, - Err(BurnchainControllerError::IdenticalOperation) => { - info!("Relayer: Block-commit already submitted"); - true - } + match res { + Ok(_) => self.failed_to_submit_last_attempt = false, Err(_) if mock_mining => { debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); - true + self.failed_to_submit_last_attempt = true; + } + Err(BurnchainControllerError::IdenticalOperation) => { + info!("Relayer: Block-commit already submitted"); + self.failed_to_submit_last_attempt = true; + return None; } Err(e) => { warn!("Relayer: Failed to submit Bitcoin transaction: {:?}", e); - true + self.failed_to_submit_last_attempt = true; + return None; } }; From edc73180f786baa2f4fc87e240193c0ffbcdb7d0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:18:53 -0400 Subject: [PATCH 1121/1400] feat: report peer age in /v2/neighbors --- stackslib/src/net/api/getneighbors.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/net/api/getneighbors.rs b/stackslib/src/net/api/getneighbors.rs index 9e7d0402da..06f01e6e85 100644 --- a/stackslib/src/net/api/getneighbors.rs +++ b/stackslib/src/net/api/getneighbors.rs @@ -54,6 +54,8 @@ pub struct RPCNeighbor { #[serde(skip_serializing_if = "Option::is_none")] #[serde(with = "serde_opt_vec_qci")] pub stackerdbs: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub age: Option, } /// Serialize and deserialize `Option>` @@ -95,6 +97,7 @@ impl RPCNeighbor { pkh: Hash160, auth: bool, stackerdbs: Vec, + age: Option, ) -> RPCNeighbor { RPCNeighbor { network_id: nk.network_id, @@ -104,6 +107,7 @@ impl RPCNeighbor { public_key_hash: pkh, authenticated: auth, stackerdbs: Some(stackerdbs), + age, } } } @@ -138,6 +142,7 @@ impl RPCNeighborsInfo { Hash160::from_node_public_key(&n.public_key), true, stackerdb_contract_ids, + None, ) }) .collect(); @@ -164,6 +169,7 @@ impl RPCNeighborsInfo { Hash160::from_node_public_key(&n.public_key), true, stackerdb_contract_ids, + None, ) }) .collect(); @@ -185,6 +191,7 @@ impl RPCNeighborsInfo { naddr.public_key_hash, convo.is_authenticated(), convo.get_stackerdb_contract_ids().to_vec(), + Some(convo.age()), )); } else { inbound.push(RPCNeighbor::from_neighbor_key_and_pubkh( @@ -192,6 +199,7 @@ impl RPCNeighborsInfo { naddr.public_key_hash, convo.is_authenticated(), convo.get_stackerdb_contract_ids().to_vec(), + Some(convo.age()), )); } } From 95301f32b6c5fa7d5010edc0316c9a5ccb823b1b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:19:06 -0400 Subject: [PATCH 1122/1400] feat: compute peer age for p2p convo --- stackslib/src/net/chat.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 6cdf0b7e49..ba0b70b1a5 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -609,6 +609,10 @@ impl ConversationP2P { } } + pub fn age(&self) -> u64 { + get_epoch_time_secs().saturating_sub(self.instantiated) + } + pub fn set_public_key(&mut self, pubkey_opt: Option) -> () { self.connection.set_public_key(pubkey_opt); } From 6af50f51f471c17fe97dca898a7778cebb4dce2b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:19:35 -0400 Subject: [PATCH 1123/1400] fix: pin connections to peers we're inv-syncing with, so they don't get pruned --- stackslib/src/net/inv/epoch2x.rs | 52 +++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index fc5f073b2e..bbdd8f68ae 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -973,6 +973,9 @@ pub struct InvState { /// What's the last reward cycle we _started_ the inv scan at? pub block_sortition_start: u64, + + /// event IDs of connections we established, so they don't get pruned + pinned: HashSet, } impl InvState { @@ -994,11 +997,13 @@ impl InvState { num_inv_syncs: 0, block_sortition_start: 0, + pinned: HashSet::new(), } } fn reset_sync_peers( &mut self, + network: &PeerNetwork, peers: HashSet, bootstrap_peers: &HashSet, max_neighbors: usize, @@ -1042,6 +1047,24 @@ impl InvState { added, &peers ); + + // if we're still connected to these peers, then keep them pinned + self.pinned.clear(); + for peer in peers.iter() { + if let Some(event_id) = network.get_event_id(&peer) { + self.pinned.insert(event_id); + } + } + } + + /// Pin a connection + pub fn pin_connection(&mut self, event_id: usize) { + self.pinned.insert(event_id); + } + + /// Get the set of connections this state machine is using + pub fn get_pinned_connections(&self) -> &HashSet { + &self.pinned } pub fn get_peer_status(&self, nk: &NeighborKey) -> NodeStatus { @@ -1801,6 +1824,7 @@ impl PeerNetwork { /// Start requesting the next batch of PoX inventories fn inv_getpoxinv_begin( &mut self, + pins: &mut HashSet, sortdb: &SortitionDB, nk: &NeighborKey, stats: &mut NeighborBlockStats, @@ -1821,6 +1845,8 @@ impl PeerNetwork { }; let payload = StacksMessageType::GetPoxInv(getpoxinv); + let event_id_opt = self.get_event_id(&nk); + let message = self.sign_for_neighbor(nk, payload)?; let request = self .send_neighbor_message(nk, message, request_timeout) @@ -1830,6 +1856,10 @@ impl PeerNetwork { })?; stats.getpoxinv_begin(request, target_pox_reward_cycle); + if let Some(event_id) = event_id_opt { + pins.insert(event_id); + } + Ok(()) } @@ -1988,6 +2018,7 @@ impl PeerNetwork { /// Start requesting the next batch of block inventories fn inv_getblocksinv_begin( &mut self, + pins: &mut HashSet, sortdb: &SortitionDB, nk: &NeighborKey, stats: &mut NeighborBlockStats, @@ -2008,6 +2039,7 @@ impl PeerNetwork { let num_blocks_expected = getblocksinv.num_blocks; let payload = StacksMessageType::GetBlocksInv(getblocksinv); + let event_id_opt = self.get_event_id(nk); let message = self.sign_for_neighbor(nk, payload)?; let request = self .send_neighbor_message(nk, message, request_timeout) @@ -2017,6 +2049,9 @@ impl PeerNetwork { })?; stats.getblocksinv_begin(request, target_block_reward_cycle, num_blocks_expected); + if let Some(event_id) = event_id_opt { + pins.insert(event_id); + } Ok(()) } @@ -2114,6 +2149,7 @@ impl PeerNetwork { /// Run a single state-machine to completion fn inv_sync_run( &mut self, + pins: &mut HashSet, sortdb: &SortitionDB, nk: &NeighborKey, stats: &mut NeighborBlockStats, @@ -2130,13 +2166,13 @@ impl PeerNetwork { debug!("Inv sync state is {:?}", &stats.state); let again = match stats.state { InvWorkState::GetPoxInvBegin => self - .inv_getpoxinv_begin(sortdb, nk, stats, request_timeout) + .inv_getpoxinv_begin(pins, sortdb, nk, stats, request_timeout) .and_then(|_| Ok(true))?, InvWorkState::GetPoxInvFinish => { self.inv_getpoxinv_try_finish(sortdb, nk, stats, ibd)? } InvWorkState::GetBlocksInvBegin => self - .inv_getblocksinv_begin(sortdb, nk, stats, request_timeout) + .inv_getblocksinv_begin(pins, sortdb, nk, stats, request_timeout) .and_then(|_| Ok(true))?, InvWorkState::GetBlocksInvFinish => { self.inv_getblocksinv_try_finish(nk, stats, ibd)? @@ -2231,9 +2267,10 @@ impl PeerNetwork { ) -> (bool, bool, Vec, Vec) { PeerNetwork::with_inv_state(self, |network, inv_state| { debug!( - "{:?}: Inventory state has {} block stats tracked", + "{:?}: Inventory state has {} block stats tracked on connections {:?}", &network.local_peer, - inv_state.block_stats.len() + inv_state.block_stats.len(), + inv_state.pinned, ); let mut all_done = true; @@ -2261,6 +2298,7 @@ impl PeerNetwork { return (true, true, vec![], vec![]); } + let mut new_pins = HashSet::new(); for (nk, stats) in inv_state.block_stats.iter_mut() { debug!( "{:?}: inv state-machine for {:?} is in state {:?}, at PoX {},target={}; blocks {},target={}; status {:?}, done={}", @@ -2275,7 +2313,7 @@ impl PeerNetwork { stats.done ); if !stats.done { - match network.inv_sync_run(sortdb, nk, stats, inv_state.request_timeout, ibd) { + match network.inv_sync_run(&mut new_pins, sortdb, nk, stats, inv_state.request_timeout, ibd) { Ok(d) => d, Err(net_error::StaleView) => { // stop work on this state machine -- it needs to be restarted. @@ -2341,6 +2379,9 @@ impl PeerNetwork { } } } + let _ = new_pins + .into_iter() + .map(|event_id| inv_state.pin_connection(event_id)); if all_done { let mut new_sync_peers = network.get_outbound_sync_peers(); @@ -2450,6 +2491,7 @@ impl PeerNetwork { } inv_state.reset_sync_peers( + network, good_sync_peers_set, &bootstrap_peers, network.connection_opts.num_neighbors as usize, From 25e84f23e5d0d9783dc781d01c21dd414495dd3f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:19:59 -0400 Subject: [PATCH 1124/1400] fix: report pinned connections so the pruner won't disconnect nakamoto inv sync peers --- stackslib/src/net/inv/nakamoto.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index f24ad1a87c..d5d4931e34 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::StacksBlockId; @@ -557,6 +557,10 @@ impl NakamotoInvStateMachine { self.comms.reset(); } + pub fn get_pinned_connections(&self) -> &HashSet { + self.comms.get_pinned_connections() + } + /// Remove state for a particular neighbor pub fn del_peer(&mut self, peer: &NeighborAddress) { self.inventories.remove(peer); From c514062a53feab5fab1bd26ae12440c048232d0e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:20:53 -0400 Subject: [PATCH 1125/1400] fix: don't unpin a connection once it connects --- stackslib/src/net/neighbors/comms.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index 8fdf38d87b..f3e160ff57 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -50,6 +50,8 @@ pub trait NeighborComms { fn get_connecting(&self, network: &PeerNetwork, nk: &NK) -> Option; /// Remove a neighbor from connecting state fn remove_connecting(&mut self, network: &PeerNetwork, nk: &NK); + /// Remove a neighbor from connecting state due to an error + fn remove_connecting_error(&mut self, network: &PeerNetwork, nk: &NK); /// Mark a neighbor as dead (inactive, unreachable, etc.) fn add_dead(&mut self, network: &PeerNetwork, nk: &NK); /// Mark a neighbor as broken (in protocol violation) @@ -150,7 +152,7 @@ pub trait NeighborComms { // is the peer network still working? if !network.is_connecting(event_id) { debug!("{:?}: Failed to connect to {:?} (event {} no longer connecting; assumed timed out)", network.get_local_peer(), event_id, &nk); - self.remove_connecting(network, &nk); + self.remove_connecting_error(network, &nk); return Err(net_error::PeerNotConnected); } @@ -518,7 +520,13 @@ impl NeighborComms for PeerNetworkComms { .map(|event_ref| *event_ref) } + /// Remove a connecting neighbor because it conected fn remove_connecting(&mut self, network: &PeerNetwork, nk: &NK) { + self.connecting.remove(&nk.to_neighbor_key(network)); + } + + /// Remove a connecting neighbor due to an error. The connection will be unpinned. + fn remove_connecting_error(&mut self, network: &PeerNetwork, nk: &NK) { let event_id_opt = self.connecting.remove(&nk.to_neighbor_key(network)); if let Some(event_id) = event_id_opt { self.unpin_connection(event_id); From a76ffa45bcf261d8e9f7b8b1647d661aaf4fec64 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:21:07 -0400 Subject: [PATCH 1126/1400] fix: don't prune connections the inv state machines and stackerdb state machine are using --- stackslib/src/net/p2p.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 45183cdf1b..20144f0d72 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -2399,7 +2399,7 @@ impl PeerNetwork { } /// Prune inbound and outbound connections if we can - fn prune_connections(&mut self) -> () { + pub(crate) fn prune_connections(&mut self) -> () { if cfg!(test) && self.connection_opts.disable_network_prune { return; } @@ -2443,6 +2443,22 @@ impl PeerNetwork { } } + // if we're in the middle of epoch2 inv sync, then don't prune any connections it + // established + if let Some(inv_state) = self.inv_state.as_ref() { + if inv_state.get_pinned_connections().contains(event_id) { + safe.insert(*event_id); + } + } + + // if we're in the middle of nakamoto inv sync, then don't prune any connections it + // established + if let Some(nakamoto_inv) = self.inv_state_nakamoto.as_ref() { + if nakamoto_inv.get_pinned_connections().contains(event_id) { + safe.insert(*event_id); + } + } + // if we're running stacker DBs, then don't prune any outbound connections it // established if let Some(stacker_db_syncs) = self.stacker_db_syncs.as_ref() { @@ -2454,6 +2470,7 @@ impl PeerNetwork { } } + debug!("Pinned connections: {:?}", &safe); self.prune_frontier(&safe); } From fdee274c40a94d8a65f292297eb5f950eea10914 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:21:32 -0400 Subject: [PATCH 1127/1400] chore: keep stackerdb replicas pinned across restarts, and only unpin on irrecoverable error --- stackslib/src/net/stackerdb/sync.rs | 50 ++++++++++++++++++++++++----- 1 file changed, 42 insertions(+), 8 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 467bc608e1..08e6e978ea 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -27,6 +27,7 @@ use stacks_common::util::hash::Hash160; use crate::net::chat::ConversationP2P; use crate::net::connection::ReplyHandleP2P; use crate::net::db::PeerDB; +use crate::net::neighbors::comms::ToNeighborKey; use crate::net::neighbors::NeighborComms; use crate::net::p2p::PeerNetwork; use crate::net::stackerdb::{ @@ -216,8 +217,22 @@ impl StackerDBSync { self.expected_versions.clear(); self.downloaded_chunks.clear(); - // reset comms, but keep all replicas pinned + // reset comms, but keep all connected replicas pinned self.comms.reset(); + if let Some(network) = network { + for naddr in self.replicas.iter() { + if let Some(event_id) = network.get_event_id(&naddr.to_neighbor_key(network)) { + self.comms.pin_connection(event_id); + debug!( + "{:?}: {}: reuse connection for replica {:?} on event {}", + network.get_local_peer(), + &self.smart_contract_id, + &naddr, + event_id + ); + } + } + } // reload from config self.num_slots = config.num_slots() as usize; @@ -240,6 +255,15 @@ impl StackerDBSync { self.comms.get_pinned_connections() } + /// Unpin and remove a connected replica by naddr + pub fn unpin_connected_replica(&mut self, network: &PeerNetwork, naddr: &NeighborAddress) { + let nk = naddr.to_neighbor_key(network); + if let Some(event_id) = network.get_event_id(&nk) { + self.comms.unpin_connection(event_id); + } + self.connected_replicas.remove(&naddr); + } + /// Make a chunk inv request pub fn make_getchunkinv(&self, rc_consensus_hash: &ConsensusHash) -> StacksMessageType { StacksMessageType::StackerDBGetChunkInv(StackerDBGetChunkInvData { @@ -743,6 +767,7 @@ impl StackerDBSync { &network.get_chain_view().rc_consensus_hash, &db_data.rc_consensus_hash ); + // don't unpin, since it's usually transient self.connected_replicas.remove(&naddr); continue; } @@ -756,11 +781,13 @@ impl StackerDBSync { &naddr, data.error_code ); - self.connected_replicas.remove(&naddr); if data.error_code == NackErrorCodes::StaleView || data.error_code == NackErrorCodes::FutureView { + self.connected_replicas.remove(&naddr); self.stale_neighbors.insert(naddr); + } else { + self.unpin_connected_replica(network, &naddr); } continue; } @@ -788,7 +815,7 @@ impl StackerDBSync { ); // disconnect - self.connected_replicas.remove(&naddr); + self.unpin_connected_replica(network, &naddr); continue; } @@ -887,11 +914,13 @@ impl StackerDBSync { &naddr, data.error_code ); - self.connected_replicas.remove(&naddr); if data.error_code == NackErrorCodes::StaleView || data.error_code == NackErrorCodes::FutureView { + self.connected_replicas.remove(&naddr); self.stale_neighbors.insert(naddr); + } else { + self.unpin_connected_replica(network, &naddr); } continue; } @@ -902,7 +931,7 @@ impl StackerDBSync { &self.smart_contract_id, &x ); - self.connected_replicas.remove(&naddr); + self.unpin_connected_replica(network, &naddr); continue; } }; @@ -958,6 +987,7 @@ impl StackerDBSync { ); let mut requested = 0; + let mut unpin = HashSet::new(); // fill up our comms with $capacity requests for _i in 0..self.request_capacity { @@ -1001,7 +1031,7 @@ impl StackerDBSync { &selected_neighbor, &e ); - self.connected_replicas.remove(&selected_neighbor); + unpin.insert(selected_neighbor.clone()); continue; } @@ -1013,6 +1043,10 @@ impl StackerDBSync { // next-prioritized chunk cur_priority = (cur_priority + 1) % self.chunk_fetch_priorities.len(); } + let _ = unpin + .into_iter() + .map(|naddr| self.unpin_connected_replica(network, &naddr)); + if requested == 0 && self.comms.count_inflight() == 0 { return Err(net_error::PeerNotConnected); } @@ -1058,7 +1092,7 @@ impl StackerDBSync { &self.smart_contract_id, &x ); - self.connected_replicas.remove(&naddr); + self.unpin_connected_replica(network, &naddr); continue; } }; @@ -1072,7 +1106,7 @@ impl StackerDBSync { &naddr, data.slot_id ); - self.connected_replicas.remove(&naddr); + self.unpin_connected_replica(network, &naddr); continue; } From 7f34262c6c81414efdaf02603f120a68a29a83f3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:21:54 -0400 Subject: [PATCH 1128/1400] chore: enhance stackerdb test to force the network pruner to run, so as to verify that connection pinning prevents decoherence --- stackslib/src/net/stackerdb/tests/sync.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index f45e3acb93..565a97f422 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -1070,6 +1070,19 @@ fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, peer_config.connection_opts.disable_stackerdb_get_chunks = true; } + // run up against pruner limits + peer_config.connection_opts.disable_network_prune = false; + peer_config.connection_opts.num_neighbors = 5; + peer_config.connection_opts.num_clients = 5; + peer_config.connection_opts.soft_num_neighbors = 5; + peer_config.connection_opts.soft_num_clients = 5; + peer_config.connection_opts.max_neighbors_per_host = 5; + peer_config.connection_opts.max_clients_per_host = 5; + peer_config.connection_opts.soft_max_neighbors_per_host = 5; + peer_config.connection_opts.soft_max_neighbors_per_org = 5; + peer_config.connection_opts.soft_max_clients_per_host = 5; + peer_config.connection_opts.max_neighbors_of_neighbor = 5; + // short-lived walks... peer_config.connection_opts.walk_max_duration = 10; let idx = add_stackerdb(&mut peer_config, Some(StackerDBConfig::template())); @@ -1129,6 +1142,9 @@ fn inner_test_stackerdb_10_replicas_10_neighbors_line_10_chunks(push_only: bool, peers[i].network.stacker_db_configs = peer_db_configs[i].clone(); let res = peers[i].step_with_ibd(false); + // force this to run + peers[i].network.prune_connections(); + if let Ok(res) = res { check_sync_results(&res); let rc_ch = peers[i].network.get_chain_view().rc_consensus_hash.clone(); From dbf7bf5312028e747039d2fa329135fd127ae709 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 16 Sep 2024 17:22:23 -0400 Subject: [PATCH 1129/1400] chore: fix test --- stackslib/src/net/tests/httpcore.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs index d9c62eedf6..4bcf52605c 100644 --- a/stackslib/src/net/tests/httpcore.rs +++ b/stackslib/src/net/tests/httpcore.rs @@ -418,6 +418,7 @@ fn test_http_response_type_codec() { .unwrap(), authenticated: true, stackerdbs: Some(vec![]), + age: None, }, RPCNeighbor { network_id: 3, @@ -433,6 +434,7 @@ fn test_http_response_type_codec() { .unwrap(), authenticated: false, stackerdbs: Some(vec![]), + age: None, }, ], inbound: vec![], From dc454e171799eb902723659b0959552435bafa20 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 16 Sep 2024 18:29:55 -0700 Subject: [PATCH 1130/1400] fix: use `:principal` in metric name --- stacks-signer/src/client/stacks_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index f44d988138..2727205f64 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -648,7 +648,7 @@ impl StacksClient { address: &StacksAddress, ) -> Result { debug!("stacks_node_client: Getting account info..."); - let timer_label = format!("{}/v2/accounts/:stacks_address", self.http_origin); + let timer_label = format!("{}/v2/accounts/:principal", self.http_origin); let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); let send_request = || { self.stacks_node_client From 02fe4cb2416b66dc403c45032877ff76adbf5d0d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 16 Sep 2024 19:19:44 -0700 Subject: [PATCH 1131/1400] Check that stackerdb is set before configuring the signer Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/mod.rs | 5 ++++ stacks-signer/src/client/stacks_client.rs | 36 +++++++++++++++++++---- stacks-signer/src/runloop.rs | 26 ++++++++++++++-- 3 files changed, 58 insertions(+), 9 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 5ce8706274..d93e03f1ba 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -603,4 +603,9 @@ pub(crate) mod tests { serde_json::to_string(header_types).expect("Failed to serialize tenure tip info"); format!("HTTP/1.1 200 OK\n\n{response_json}") } + + pub fn build_get_last_set_cycle_response(cycle: u64) -> String { + let clarity_value = ClarityValue::UInt(cycle as u128); + build_read_only_response(&clarity_value) + } } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index cc780166af..58c88c1cc4 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -19,7 +19,7 @@ use std::net::SocketAddr; use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ - NakamotoSignerEntry, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, + NakamotoSignerEntry, SIGNERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::chainstate::stacks::{ @@ -162,6 +162,20 @@ impl StacksClient { Ok(sortition_info) } + /// Get the last set reward cycle stored within the stackerdb contract + pub fn get_last_set_cycle(&self) -> Result { + let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); + let function_name_str = "stackerdb-get-last-set-cycle"; + let function_name = ClarityName::from(function_name_str); + let value = self.read_only_contract_call( + &signer_stackerdb_contract_id.issuer.clone().into(), + &signer_stackerdb_contract_id.name, + &function_name, + &[], + )?; + Ok(value.expect_u128()?) + } + /// Retrieve the signer slots stored within the stackerdb contract pub fn get_stackerdb_signer_slots( &self, @@ -962,11 +976,11 @@ mod tests { use super::*; use crate::client::tests::{ build_account_nonce_response, build_get_approved_aggregate_key_response, - build_get_last_round_response, build_get_medium_estimated_fee_ustx_response, - build_get_peer_info_response, build_get_pox_data_response, build_get_round_info_response, - build_get_tenure_tip_response, build_get_vote_for_aggregate_key_response, - build_get_weight_threshold_response, build_read_only_response, write_response, - MockServerClient, + build_get_last_round_response, build_get_last_set_cycle_response, + build_get_medium_estimated_fee_ustx_response, build_get_peer_info_response, + build_get_pox_data_response, build_get_round_info_response, build_get_tenure_tip_response, + build_get_vote_for_aggregate_key_response, build_get_weight_threshold_response, + build_read_only_response, write_response, MockServerClient, }; #[test] @@ -1623,4 +1637,14 @@ mod tests { write_response(mock.server, response.as_bytes()); assert_eq!(h.join().unwrap().unwrap(), header); } + + #[test] + fn get_last_set_cycle_should_succeed() { + let mock = MockServerClient::new(); + let reward_cycle = thread_rng().next_u64(); + let response = build_get_last_set_cycle_response(reward_cycle); + let h = spawn(move || mock.client.get_last_set_cycle()); + write_response(mock.server, response.as_bytes()); + assert_eq!(h.join().unwrap().unwrap(), reward_cycle as u128); + } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 5b05393bd7..7f16210ebd 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -34,6 +34,17 @@ use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID, S use crate::config::{GlobalConfig, SignerConfig}; use crate::Signer as SignerTrait; +#[derive(thiserror::Error, Debug)] +/// Configuration error type +pub enum ConfigurationError { + /// Error occurred while fetching data from the stacks node + #[error("{0}")] + ClientError(#[from] ClientError), + /// The stackerdb signer config is not yet updated + #[error("The stackerdb config is not yet updated")] + StackerDBNotUpdated, +} + /// The internal signer state info #[derive(PartialEq, Clone, Debug)] pub struct StateInfo { @@ -274,14 +285,23 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo fn get_signer_config( &mut self, reward_cycle: u64, - ) -> Result, ClientError> { + ) -> Result, ConfigurationError> { + // We can only register for a reward cycle if its stackerdb has been updated + let last_calculated_reward_cycle = + self.stacks_client.get_last_set_cycle().inspect_err(|e| { + warn!("Error while fetching last calculated reward cycle: {e:?}"); + })?; + if last_calculated_reward_cycle < reward_cycle as u128 { + return Err(ConfigurationError::StackerDBNotUpdated); + } + // We can only register for a reward cycle if a reward set exists. let signer_entries = match self.get_parsed_reward_set(reward_cycle) { Ok(Some(x)) => x, Ok(None) => return Ok(None), Err(e) => { warn!("Error while fetching reward set {reward_cycle}: {e:?}"); - return Err(e); + return Err(e.into()); } }; let signer_slot_ids = match self.get_parsed_signer_slots(&self.stacks_client, reward_cycle) @@ -289,7 +309,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo Ok(x) => x, Err(e) => { warn!("Error while fetching stackerdb slots {reward_cycle}: {e:?}"); - return Err(e); + return Err(e.into()); } }; let current_addr = self.stacks_client.get_signer_address(); From 78b715cf7eb52e77bbcd41d1fc4a720d45df6af9 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 16 Sep 2024 19:29:30 -0700 Subject: [PATCH 1132/1400] Ensure the last set cycle is set by putting it on an exponential backoff to prevent waiting needlessly for a burn block Signed-off-by: Jacinta Ferrant --- stacks-signer/src/runloop.rs | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 7f16210ebd..970b04d025 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -286,15 +286,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo &mut self, reward_cycle: u64, ) -> Result, ConfigurationError> { - // We can only register for a reward cycle if its stackerdb has been updated - let last_calculated_reward_cycle = - self.stacks_client.get_last_set_cycle().inspect_err(|e| { - warn!("Error while fetching last calculated reward cycle: {e:?}"); - })?; - if last_calculated_reward_cycle < reward_cycle as u128 { - return Err(ConfigurationError::StackerDBNotUpdated); - } - // We can only register for a reward cycle if a reward set exists. let signer_entries = match self.get_parsed_reward_set(reward_cycle) { Ok(Some(x)) => x, @@ -304,6 +295,25 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo return Err(e.into()); } }; + + // Ensure that the stackerdb has been updated for the reward cycle before proceeding + retry_with_exponential_backoff(|| { + let last_calculated_reward_cycle = self + .stacks_client + .get_last_set_cycle() + .map_err(|e| backoff::Error::transient(e.into()))?; + if last_calculated_reward_cycle < reward_cycle as u128 { + warn!( + "Stackerdb has not been updated for reward cycle {reward_cycle}. Last calculated reward cycle is {last_calculated_reward_cycle}." + ); + Err(backoff::Error::transient( + ConfigurationError::StackerDBNotUpdated, + )) + } else { + Ok(()) + } + })?; + let signer_slot_ids = match self.get_parsed_signer_slots(&self.stacks_client, reward_cycle) { Ok(x) => x, From 9b09f0b3776d65ab25fc9cd21ee4c1c15f465c2f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 17 Sep 2024 10:29:15 -0500 Subject: [PATCH 1133/1400] call correct .signers function, error fast rather than retry --- stacks-signer/src/client/mod.rs | 2 +- stacks-signer/src/client/stacks_client.rs | 4 +-- stacks-signer/src/runloop.rs | 39 +++++++++++------------ 3 files changed, 21 insertions(+), 24 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index d93e03f1ba..ccf7a993f5 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -605,7 +605,7 @@ pub(crate) mod tests { } pub fn build_get_last_set_cycle_response(cycle: u64) -> String { - let clarity_value = ClarityValue::UInt(cycle as u128); + let clarity_value = ClarityValue::okay(ClarityValue::UInt(cycle as u128)).unwrap(); build_read_only_response(&clarity_value) } } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 58c88c1cc4..c10ceba779 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -165,7 +165,7 @@ impl StacksClient { /// Get the last set reward cycle stored within the stackerdb contract pub fn get_last_set_cycle(&self) -> Result { let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); - let function_name_str = "stackerdb-get-last-set-cycle"; + let function_name_str = "get-last-set-cycle"; let function_name = ClarityName::from(function_name_str); let value = self.read_only_contract_call( &signer_stackerdb_contract_id.issuer.clone().into(), @@ -173,7 +173,7 @@ impl StacksClient { &function_name, &[], )?; - Ok(value.expect_u128()?) + Ok(value.expect_result_ok()?.expect_u128()?) } /// Retrieve the signer slots stored within the stackerdb contract diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 970b04d025..1988be4785 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -297,31 +297,28 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo }; // Ensure that the stackerdb has been updated for the reward cycle before proceeding - retry_with_exponential_backoff(|| { - let last_calculated_reward_cycle = self - .stacks_client - .get_last_set_cycle() - .map_err(|e| backoff::Error::transient(e.into()))?; - if last_calculated_reward_cycle < reward_cycle as u128 { + let last_calculated_reward_cycle = + self.stacks_client.get_last_set_cycle().map_err(|e| { warn!( - "Stackerdb has not been updated for reward cycle {reward_cycle}. Last calculated reward cycle is {last_calculated_reward_cycle}." + "Failed to fetch last calculated stackerdb cycle from stacks-node"; + "reward_cycle" => reward_cycle, + "err" => ?e ); - Err(backoff::Error::transient( - ConfigurationError::StackerDBNotUpdated, - )) - } else { - Ok(()) - } - })?; + ConfigurationError::StackerDBNotUpdated + })?; + if last_calculated_reward_cycle < reward_cycle as u128 { + warn!( + "Stackerdb has not been updated for reward cycle {reward_cycle}. Last calculated reward cycle is {last_calculated_reward_cycle}." + ); + return Err(ConfigurationError::StackerDBNotUpdated); + } - let signer_slot_ids = match self.get_parsed_signer_slots(&self.stacks_client, reward_cycle) - { - Ok(x) => x, - Err(e) => { + let signer_slot_ids = self + .get_parsed_signer_slots(&self.stacks_client, reward_cycle) + .map_err(|e| { warn!("Error while fetching stackerdb slots {reward_cycle}: {e:?}"); - return Err(e.into()); - } - }; + e + })?; let current_addr = self.stacks_client.get_signer_address(); let Some(signer_slot_id) = signer_slot_ids.get(current_addr) else { From 0fb886db46a97929ee3793cf094f56d1d809efc0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 17 Sep 2024 10:38:45 -0500 Subject: [PATCH 1134/1400] test: update reloads_signer_set_in --- testnet/stacks-node/src/tests/signer/v0.rs | 28 +++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b3a78fcf6f..04f848670a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -831,9 +831,7 @@ fn reloads_signer_set_in() { ); info!("Waiting for signer set calculation."); - let mut reward_set_calculated = false; let short_timeout = Duration::from_secs(30); - let now = std::time::Instant::now(); // Make sure the signer set is calculated before continuing or signers may not // recognize that they are registered signers in the subsequent burn block event let reward_cycle = signer_test.get_current_reward_cycle() + 1; @@ -841,21 +839,23 @@ fn reloads_signer_set_in() { .running_nodes .btc_regtest_controller .build_next_block(1); - while !reward_set_calculated { - let reward_set = signer_test + wait_for(short_timeout.as_secs(), || { + let reward_set = match signer_test .stacks_client .get_reward_set_signers(reward_cycle) - .expect("Failed to check if reward set is calculated"); - reward_set_calculated = reward_set.is_some(); - if reward_set_calculated { - info!("Signer set: {:?}", reward_set.unwrap()); + { + Ok(x) => x, + Err(e) => { + warn!("Failed to check if reward set is calculated yet: {e:?}. Will try again"); + return Ok(false); + } + }; + if let Some(ref set) = reward_set { + info!("Signer set: {:?}", set); } - std::thread::sleep(Duration::from_secs(1)); - assert!( - now.elapsed() < short_timeout, - "Timed out waiting for reward set calculation" - ); - } + Ok(reward_set.is_some()) + }) + .expect("Timed out waiting for reward set to be calculated"); info!("Signer set calculated"); // Manually consume one more block to ensure signers refresh their state From bba65534ecff2e1965bcfac7ee73d708bc44eae0 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 17 Sep 2024 11:56:07 -0400 Subject: [PATCH 1135/1400] fix: limit number of UTXOs retrieved with `listunspent` This prevents the response from being too large and exceeding the 16MB limit that we support. --- .../stacks-node/src/burnchains/bitcoin_regtest_controller.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index e342b452fc..9399ff1eae 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2599,6 +2599,9 @@ impl BitcoinRPCRequest { let min_conf = 0i64; let max_conf = 9999999i64; let minimum_amount = ParsedUTXO::sat_to_serialized_btc(minimum_sum_amount); + // Specify the maximum number of UTXOs to get from listunspent, to + // ensure the response is not too large. + let maximum_count = 1024; let payload = BitcoinRPCRequest { method: "listunspent".to_string(), @@ -2607,7 +2610,7 @@ impl BitcoinRPCRequest { max_conf.into(), addresses.into(), include_unsafe.into(), - json!({ "minimumAmount": minimum_amount }), + json!({ "minimumAmount": minimum_amount, "maximumCount": maximum_count }), ], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), From e8e7de132ead7384422a3b34c09d0d5af3e9d172 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 17 Sep 2024 12:44:03 -0500 Subject: [PATCH 1136/1400] Update stacks-signer/src/client/stacks_client.rs Co-authored-by: Brice Dobry --- stacks-signer/src/client/stacks_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 2dc9f4688a..d96bea94c0 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -581,7 +581,7 @@ impl StacksClient { backoff::Error::permanent(e.into()) })?; if error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { - return Err(backoff::Error::permanent(ClientError::NoSortitionOnChain)); + Err(backoff::Error::permanent(ClientError::NoSortitionOnChain)) } else { warn!("Got error response ({status}): {}", error_data.err_msg); Err(backoff::Error::permanent(ClientError::RequestFailure( From 8c368eceda90ce71a9bb1681443928c7add5ee9f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 17 Sep 2024 15:57:05 -0400 Subject: [PATCH 1137/1400] test: add test validating new config option `max_unspent_utxos` --- .../burnchains/bitcoin_regtest_controller.rs | 9 ++-- testnet/stacks-node/src/config.rs | 13 +++++ .../src/tests/nakamoto_integrations.rs | 2 - .../src/tests/neon_integrations.rs | 53 ++++++++++++++++++- testnet/stacks-node/src/tests/signer/v0.rs | 1 - 5 files changed, 70 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 9399ff1eae..b42007da42 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2314,6 +2314,10 @@ impl UTXOSet { pub fn total_available(&self) -> u64 { self.utxos.iter().map(|o| o.amount).sum() } + + pub fn num_utxos(&self) -> usize { + self.utxos.len() + } } #[derive(Debug, Clone)] @@ -2599,9 +2603,6 @@ impl BitcoinRPCRequest { let min_conf = 0i64; let max_conf = 9999999i64; let minimum_amount = ParsedUTXO::sat_to_serialized_btc(minimum_sum_amount); - // Specify the maximum number of UTXOs to get from listunspent, to - // ensure the response is not too large. - let maximum_count = 1024; let payload = BitcoinRPCRequest { method: "listunspent".to_string(), @@ -2610,7 +2611,7 @@ impl BitcoinRPCRequest { max_conf.into(), addresses.into(), include_unsafe.into(), - json!({ "minimumAmount": minimum_amount, "maximumCount": maximum_count }), + json!({ "minimumAmount": minimum_amount, "maximumCount": config.burnchain.max_unspent_utxos }), ], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 3ff7e8bdb6..3852bf4224 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1446,6 +1446,10 @@ pub struct BurnchainConfig { /// fault injection to simulate a slow burnchain peer. /// Delay burnchain block downloads by the given number of millseconds pub fault_injection_burnchain_block_delay: u64, + /// The maximum number of unspent UTXOs to request from the bitcoin node. + /// This value is passed as the `maximumCount` query option to the + /// `listunspent` RPC call. + pub max_unspent_utxos: Option, } impl BurnchainConfig { @@ -1486,6 +1490,7 @@ impl BurnchainConfig { ast_precheck_size_height: None, affirmation_overrides: HashMap::new(), fault_injection_burnchain_block_delay: 0, + max_unspent_utxos: Some(1024), } } pub fn get_rpc_url(&self, wallet: Option) -> String { @@ -1582,6 +1587,7 @@ pub struct BurnchainConfigFile { pub ast_precheck_size_height: Option, pub affirmation_overrides: Option>, pub fault_injection_burnchain_block_delay: Option, + pub max_unspent_utxos: Option, } impl BurnchainConfigFile { @@ -1797,6 +1803,13 @@ impl BurnchainConfigFile { fault_injection_burnchain_block_delay: self .fault_injection_burnchain_block_delay .unwrap_or(default_burnchain_config.fault_injection_burnchain_block_delay), + max_unspent_utxos: self + .max_unspent_utxos + .map(|val| { + assert!(val <= 1024, "Value for max_unspent_utxos should be <= 1024"); + val + }) + .or(default_burnchain_config.max_unspent_utxos), }; if let BitcoinNetworkType::Mainnet = config.get_bitcoin_network().1 { diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 7379f1f16a..32924ab7b7 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2274,8 +2274,6 @@ fn correct_burn_outs() { let mut last_block_time = None; for block in new_blocks_with_reward_set.iter() { - let cycle_number = block["cycle_number"].as_u64().unwrap(); - let reward_set = block["reward_set"].as_object().unwrap(); if let Some(block_time) = block["block_time"].as_u64() { if let Some(last) = last_block_time { assert!(block_time > last, "Block times should be increasing"); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 6b02a3fac8..5494f41302 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -62,6 +62,7 @@ use stacks::net::atlas::{ AtlasConfig, AtlasDB, GetAttachmentResponse, GetAttachmentsInvResponse, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, }; +use stacks::types::PublicKey; use stacks::util_lib::boot::{boot_code_addr, boot_code_id}; use stacks::util_lib::db::{query_row_columns, query_rows, u64_to_sql}; use stacks::util_lib::signed_structured_data::pox4::{ @@ -82,7 +83,7 @@ use super::{ make_microblock, make_stacks_transfer, make_stacks_transfer_mblock_only, to_addr, ADDR_4, SK_1, SK_2, SK_3, }; -use crate::burnchains::bitcoin_regtest_controller::{self, BitcoinRPCRequest, UTXO}; +use crate::burnchains::bitcoin_regtest_controller::{self, addr2str, BitcoinRPCRequest, UTXO}; use crate::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; @@ -12794,3 +12795,53 @@ fn mock_miner_replay() { miner_channel.stop_chains_coordinator(); follower_channel.stop_chains_coordinator(); } + +#[test] +#[ignore] +/// Verify that the config option, `burnchain.max_unspent_utxos`, is respected. +fn listunspent_max_utxos() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = neon_integration_test_conf(); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + conf.node.prometheus_bind = Some(prom_bind.clone()); + + conf.burnchain.max_rbf = 1000000; + conf.burnchain.max_unspent_utxos = Some(10); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let keychain = Keychain::default(conf.node.seed.clone()); + let mut op_signer = keychain.generate_op_signer(); + + let (_, network_id) = conf.burnchain.get_bitcoin_network(); + let hash160 = Hash160::from_data(&op_signer.get_public_key().to_bytes()); + let address = BitcoinAddress::from_bytes_legacy( + network_id, + LegacyBitcoinAddressType::PublicKeyHash, + &hash160.0, + ) + .expect("Public key incorrect"); + + let filter_addresses = vec![addr2str(&address)]; + + let res = BitcoinRPCRequest::list_unspent(&conf, filter_addresses, false, 1, &None, 0); + let utxos = res.expect("Failed to get utxos"); + assert_eq!(utxos.num_utxos(), 10); + + btcd_controller + .stop_bitcoind() + .expect("Failed to stop bitcoind"); +} diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b3a78fcf6f..4ec7c2f98c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4796,7 +4796,6 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { // Induce block N+2 to get mined let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - sender_nonce += 1; let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+2"); From a66e3b606e4579924f771414338ede9aea39f74a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 17 Sep 2024 16:05:24 -0400 Subject: [PATCH 1138/1400] test: add new test to bitcoin-tests.yml --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a7a483665e..e618eedebe 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -74,6 +74,7 @@ jobs: - tests::neon_integrations::min_txs - tests::neon_integrations::vote_for_aggregate_key_burn_op_test - tests::neon_integrations::mock_miner_replay + - tests::neon_integrations::listunspent_max_utxos - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration From 2b3b0d01e6f5c9a2fb65a1d33214f7dba7a6a004 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 17 Sep 2024 15:28:36 -0500 Subject: [PATCH 1139/1400] chore: fix two flaky tests Fixes: * signer::v0::locally_rejected_blocks_overriden_by_global_acceptance * signer::v0::reorg_locally_accepted_blocks_across_tenures_succeeds These tests used to depend on the `blocks_mined` counter and then immediately check a `v2/info` assertion -- this is a race condition: the stacks-node may not have processed the mined block yet. This caused test flake in CI (but usually not in local runs where machines are fast enough to never experience this condition). --- testnet/stacks-node/src/tests/signer/v0.rs | 98 ++++++++++------------ 1 file changed, 45 insertions(+), 53 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 04f848670a..13bc664d2a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4128,25 +4128,24 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout.as_secs(), || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); sender_nonce += 1; let info_after = signer_test @@ -4196,13 +4195,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout.as_secs(), || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); loop { let stackerdb_events = test_observer::get_stackerdb_chunks(); let block_rejections = stackerdb_events @@ -4274,13 +4274,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N+2"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout.as_secs(), || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); let blocks_after = mined_blocks.load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before + 1); @@ -4348,31 +4349,31 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout.as_secs(), || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); sender_nonce += 1; let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); + assert_eq!( info_before.stacks_tip_height + 1, info_after.stacks_tip_height @@ -4400,13 +4401,12 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+1"); - let start_time = Instant::now(); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - loop { + wait_for(short_timeout.as_secs(), || { let ignored_signers = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) @@ -4423,15 +4423,9 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { } }) .collect::>(); - if ignored_signers.len() + ignoring_signers.len() == num_signers { - break; - } - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block proposal acceptance", - ); - sleep_ms(1000); - } + Ok(ignored_signers.len() + ignoring_signers.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance"); let blocks_after = mined_blocks.load(Ordering::SeqCst); let info_after = signer_test .stacks_client @@ -4464,25 +4458,23 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .lock() .unwrap() .replace(Vec::new()); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout.as_secs(), || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); let info_after = signer_test .stacks_client From fdbf170ff977a3e35673e778a78a25a90edd6d0b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 17 Sep 2024 16:51:35 -0400 Subject: [PATCH 1140/1400] test: don't stop bitcoind at end of test This causes a problem in CI. --- testnet/stacks-node/src/tests/neon_integrations.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 5494f41302..967947cc56 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12840,8 +12840,4 @@ fn listunspent_max_utxos() { let res = BitcoinRPCRequest::list_unspent(&conf, filter_addresses, false, 1, &None, 0); let utxos = res.expect("Failed to get utxos"); assert_eq!(utxos.num_utxos(), 10); - - btcd_controller - .stop_bitcoind() - .expect("Failed to stop bitcoind"); } From c44954d96f65df54497d081f7b8b993d998fab55 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 17 Sep 2024 16:34:59 -0500 Subject: [PATCH 1141/1400] chore: signer tests should wait for networking to come back up after 3.0 boundary --- testnet/stacks-node/src/tests/signer/v0.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 13bc664d2a..a47422431b 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -264,6 +264,9 @@ impl SignerTest { info!("Signers initialized"); self.run_until_epoch_3_boundary(); + std::thread::sleep(Duration::from_secs(1)); + wait_for(60, || Ok(get_chain_info_opt(&self.running_nodes.conf).is_some())) + .expect("Timed out waiting for network to restart after 3.0 boundary reached"); // Wait until we see the first block of epoch 3.0. // Note, we don't use `nakamoto_blocks_mined` counter, because there From 6388ed703eff73a81164c2cbf0f4c332e1025993 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 17 Sep 2024 16:37:40 -0500 Subject: [PATCH 1142/1400] chore: cargo fmt --- testnet/stacks-node/src/tests/signer/v0.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a47422431b..3ac091b0e2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -265,8 +265,10 @@ impl SignerTest { self.run_until_epoch_3_boundary(); std::thread::sleep(Duration::from_secs(1)); - wait_for(60, || Ok(get_chain_info_opt(&self.running_nodes.conf).is_some())) - .expect("Timed out waiting for network to restart after 3.0 boundary reached"); + wait_for(60, || { + Ok(get_chain_info_opt(&self.running_nodes.conf).is_some()) + }) + .expect("Timed out waiting for network to restart after 3.0 boundary reached"); // Wait until we see the first block of epoch 3.0. // Note, we don't use `nakamoto_blocks_mined` counter, because there From 5b330cfa044b94704d5f47d0568d40260c22c9e9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 17 Sep 2024 22:00:22 -0400 Subject: [PATCH 1143/1400] fix: run nakamoto inv, downloader, and natpunch state machines once per PeerNetwork::run() --- stackslib/src/net/p2p.rs | 117 ++++++++++++--------------------------- 1 file changed, 35 insertions(+), 82 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 45183cdf1b..7b36dc3c33 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -3560,8 +3560,8 @@ impl PeerNetwork { let prune = if cur_epoch.epoch_id >= StacksEpochId::Epoch30 { debug!("{:?}: run Nakamoto work loop", self.get_local_peer()); - // in Nakamoto epoch, so do Nakamoto things - let prune = self.do_network_work_nakamoto( + // in Nakamoto epoch, so we can always prune + self.do_network_work_nakamoto( burnchain_height, sortdb, chainstate, @@ -3593,9 +3593,10 @@ impl PeerNetwork { "{:?}: ran Epoch 2.x work loop in Nakamoto epoch", self.get_local_peer() ); - prune || epoch2_prune + epoch2_prune } else { - prune + // we can always prune in Nakamoto, since all state machines pin their connections + true } } else { // in epoch 2.x, so do epoch 2.x things @@ -3623,89 +3624,41 @@ impl PeerNetwork { chainstate: &mut StacksChainState, ibd: bool, network_result: &mut NetworkResult, - ) -> bool { - // do some Actual Work(tm) - let mut do_prune = false; - let mut did_cycle = false; - - while !did_cycle { - // always do an inv sync - let learned = self.do_network_inv_sync_nakamoto(sortdb, ibd); - debug!( - "{:?}: network work state is {:?}", - self.get_local_peer(), - &self.nakamoto_work_state; - "learned_new_blocks?" => learned - ); - - // always do block download - let new_blocks = self - .do_network_block_sync_nakamoto(burnchain_height, sortdb, chainstate, ibd) - .map_err(|e| { - warn!( - "{:?}: Failed to perform Nakamoto block sync: {:?}", - &self.get_local_peer(), - &e - ); - e - }) - .unwrap_or(HashMap::new()); - - network_result.consume_nakamoto_blocks(new_blocks); - - let cur_state = self.nakamoto_work_state; - match self.nakamoto_work_state { - PeerNetworkWorkState::GetPublicIP => { - if cfg!(test) && self.connection_opts.disable_natpunch { - self.nakamoto_work_state = PeerNetworkWorkState::BlockDownload; - } else { - // (re)determine our public IP address - let done = self.do_get_public_ip(); - if done { - self.nakamoto_work_state = PeerNetworkWorkState::BlockDownload; - } - } - } - PeerNetworkWorkState::BlockInvSync => { - // this state is useless in Nakamoto since we're always doing inv-syncs - self.nakamoto_work_state = PeerNetworkWorkState::BlockDownload; - } - PeerNetworkWorkState::BlockDownload => { - // this state is useless in Nakamoto since we're always doing download-syncs - self.nakamoto_work_state = PeerNetworkWorkState::AntiEntropy; - } - PeerNetworkWorkState::AntiEntropy => { - debug!( - "{:?}: Block anti-entropy for Nakamoto is not yet implemented", - self.get_local_peer() - ); - self.nakamoto_work_state = PeerNetworkWorkState::Prune; - } - PeerNetworkWorkState::Prune => { - // did one pass - did_cycle = true; - do_prune = true; + ) { + // always do an inv sync + let learned = self.do_network_inv_sync_nakamoto(sortdb, ibd); + debug!( + "{:?}: network work state is {:?}", + self.get_local_peer(), + &self.nakamoto_work_state; + "learned_new_blocks?" => learned + ); - // restart - self.nakamoto_work_state = PeerNetworkWorkState::GetPublicIP; - } - } + // always do block download + let new_blocks = self + .do_network_block_sync_nakamoto(burnchain_height, sortdb, chainstate, ibd) + .map_err(|e| { + warn!( + "{:?}: Failed to perform Nakamoto block sync: {:?}", + &self.get_local_peer(), + &e + ); + e + }) + .unwrap_or(HashMap::new()); - if self.nakamoto_work_state == cur_state { - // only break early if we can't make progress - break; - } - } + network_result.consume_nakamoto_blocks(new_blocks); - if did_cycle { - self.num_state_machine_passes += 1; - debug!( - "{:?}: Finished full p2p state-machine pass for Nakamoto ({})", - &self.local_peer, self.num_state_machine_passes - ); + // make sure our public IP is fresh (this self-throttles if we recently learned it). + if !self.connection_opts.disable_natpunch { + self.do_get_public_ip(); } - do_prune + self.num_state_machine_passes += 1; + debug!( + "{:?}: Finished full p2p state-machine pass for Nakamoto ({})", + &self.local_peer, self.num_state_machine_passes + ); } /// Do the actual work in the state machine. From 3359f1e590f882ce73775514b82ea1242dd317e1 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 17 Sep 2024 22:02:01 -0400 Subject: [PATCH 1144/1400] test: try stopping bitcoind within Rust, rather than spawning the CLI --- .github/workflows/bitcoin-tests.yml | 6 +-- .../burnchains/bitcoin_regtest_controller.rs | 2 +- .../stacks-node/src/tests/bitcoin_regtest.rs | 35 ++++++++-------- .../src/tests/neon_integrations.rs | 41 +++++++++++++++++++ 4 files changed, 64 insertions(+), 20 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a7a483665e..bab888e2e1 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -74,6 +74,9 @@ jobs: - tests::neon_integrations::min_txs - tests::neon_integrations::vote_for_aggregate_key_burn_op_test - tests::neon_integrations::mock_miner_replay + - tests::neon_integrations::bitcoin_reorg_flap + - tests::neon_integrations::bitcoin_reorg_flap_with_follower + - tests::neon_integrations::start_stop_bitcoind - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration @@ -121,9 +124,6 @@ jobs: - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - # Do not run this one until we figure out why it fails in CI - # - tests::neon_integrations::bitcoin_reorg_flap - # - tests::neon_integrations::bitcoin_reorg_flap_with_follower # TODO: enable these once v1 signer is supported by a new nakamoto epoch # - tests::signer::v1::dkg # - tests::signer::v1::sign_request_rejected diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index e342b452fc..6af31c83a8 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2815,7 +2815,7 @@ impl BitcoinRPCRequest { Ok(()) } - fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { + pub fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { let request = BitcoinRPCRequest::build_rpc_request(&config, &payload); let timeout = Duration::from_secs(u64::from(config.burnchain.timeout)); diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 6619152f9f..621f92aa47 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -11,6 +11,7 @@ use stacks::core::StacksEpochId; use stacks_common::util::hash::hex_bytes; use super::PUBLISH_CONTRACT; +use crate::burnchains::bitcoin_regtest_controller::BitcoinRPCRequest; use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::to_addr; @@ -19,12 +20,14 @@ use crate::Config; #[derive(Debug)] pub enum BitcoinCoreError { SpawnFailed(String), + StopFailed(String), } impl std::fmt::Display for BitcoinCoreError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::SpawnFailed(msg) => write!(f, "bitcoind spawn failed: {msg}"), + Self::StopFailed(msg) => write!(f, "bitcoind stop failed: {msg}"), } } } @@ -109,25 +112,25 @@ impl BitcoinCoreController { pub fn stop_bitcoind(&mut self) -> Result<(), BitcoinCoreError> { if let Some(_) = self.bitcoind_process.take() { - let mut command = Command::new("bitcoin-cli"); - command.stdout(Stdio::piped()).arg("-rpcconnect=127.0.0.1"); - - self.add_rpc_cli_args(&mut command); - - command.arg("stop"); - - let mut process = match command.spawn() { - Ok(child) => child, - Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{e:?}"))), + let payload = BitcoinRPCRequest { + method: "stop".to_string(), + params: vec![], + id: "stacks".to_string(), + jsonrpc: "2.0".to_string(), }; - let mut out_reader = BufReader::new(process.stdout.take().unwrap()); - let mut line = String::new(); - while let Ok(bytes_read) = out_reader.read_line(&mut line) { - if bytes_read == 0 { - break; + let res = BitcoinRPCRequest::send(&self.config, payload) + .map_err(|e| BitcoinCoreError::StopFailed(format!("{e:?}")))?; + + if let Some(err) = res.get("error") { + if !err.is_null() { + return Err(BitcoinCoreError::StopFailed(format!("{err}"))); } - eprintln!("{line}"); + } else { + return Err(BitcoinCoreError::StopFailed(format!( + "Invalid response: {:?}", + res + ))); } } Ok(()) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 6b02a3fac8..84181fdc63 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12794,3 +12794,44 @@ fn mock_miner_replay() { miner_channel.stop_chains_coordinator(); follower_channel.stop_chains_coordinator(); } + +#[test] +#[ignore] +/// Test out stopping bitcoind and restarting it +fn start_stop_bitcoind() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = neon_integration_test_conf(); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + conf.node.prometheus_bind = Some(prom_bind.clone()); + + conf.burnchain.max_rbf = 1000000; + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + btcd_controller + .stop_bitcoind() + .expect("Failed to stop bitcoind"); + + thread::sleep(Duration::from_secs(5)); + + btcd_controller + .start_bitcoind() + .expect("Failed to start bitcoind"); + + btcd_controller + .stop_bitcoind() + .expect("Failed to stop bitcoind"); +} From dec98a9d0cf37cd6110a3eb9fb95f3cf22ec7bda Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 17 Sep 2024 23:26:05 -0400 Subject: [PATCH 1145/1400] fix: Remove NakamotoDownloadStateMachine::load_tenure_start_blocks(), and all code paths that depended on it --- .../nakamoto/download_state_machine.rs | 65 +--------- .../download/nakamoto/tenure_downloader.rs | 111 +----------------- .../nakamoto/tenure_downloader_set.rs | 106 +---------------- stackslib/src/net/tests/download/nakamoto.rs | 42 +------ 4 files changed, 11 insertions(+), 313 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 8cef43a9aa..a2f4fe5dc5 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -115,8 +115,6 @@ pub struct NakamotoDownloadStateMachine { unconfirmed_tenure_downloads: HashMap, /// Ongoing confirmed tenure downloads for when we know the start and end block hashes. tenure_downloads: NakamotoTenureDownloaderSet, - /// resolved tenure-start blocks - tenure_start_blocks: HashMap, /// comms to remote neighbors pub(super) neighbor_rpc: NeighborRPC, /// Nakamoto chain tip @@ -140,7 +138,6 @@ impl NakamotoDownloadStateMachine { unconfirmed_tenure_download_schedule: VecDeque::new(), tenure_downloads: NakamotoTenureDownloaderSet::new(), unconfirmed_tenure_downloads: HashMap::new(), - tenure_start_blocks: HashMap::new(), neighbor_rpc: NeighborRPC::new(), nakamoto_tip, last_unconfirmed_download_run_ms: 0, @@ -367,48 +364,6 @@ impl NakamotoDownloadStateMachine { ) } - /// Find all stored (but not necessarily processed) tenure-start blocks for a list - /// of wanted tenures that this node has locally. NOTE: these tenure-start blocks - /// do not correspond to the tenure; they correspond to the _parent_ tenure (since a - /// `WantedTenure` captures the tenure-start block hash of the parent tenure; the same data - /// captured by a sortition). - /// - /// This method is static to ease testing. - /// - /// Returns Ok(()) on success and fills in newly-discovered blocks into `tenure_start_blocks`. - /// Returns Err(..) on DB error. - pub(crate) fn load_tenure_start_blocks( - wanted_tenures: &[WantedTenure], - chainstate: &mut StacksChainState, - tenure_start_blocks: &mut HashMap, - ) -> Result<(), NetError> { - for wt in wanted_tenures { - let candidate_tenure_start_blocks = chainstate - .nakamoto_blocks_db() - .get_nakamoto_tenure_start_blocks(&wt.tenure_id_consensus_hash)?; - - for candidate_tenure_start_block in candidate_tenure_start_blocks.into_iter() { - tenure_start_blocks.insert( - candidate_tenure_start_block.block_id(), - candidate_tenure_start_block, - ); - } - } - Ok(()) - } - - /// Update our local tenure start block data - fn update_tenure_start_blocks( - &mut self, - chainstate: &mut StacksChainState, - ) -> Result<(), NetError> { - Self::load_tenure_start_blocks( - &self.wanted_tenures, - chainstate, - &mut self.tenure_start_blocks, - ) - } - /// Update `self.wanted_tenures` with newly-discovered sortition data. fn extend_wanted_tenures( &mut self, @@ -670,7 +625,6 @@ impl NakamotoDownloadStateMachine { &mut self, network: &PeerNetwork, sortdb: &SortitionDB, - chainstate: &mut StacksChainState, ) -> Result<(), NetError> { let sort_tip = &network.burnchain_tip; @@ -688,7 +642,6 @@ impl NakamotoDownloadStateMachine { // not at a reward cycle boundary, so just extend self.wanted_tenures debug!("Extend wanted tenures since no sort_rc change and we have tenure data"); self.extend_wanted_tenures(network, sortdb)?; - self.update_tenure_start_blocks(chainstate)?; return Ok(()); } @@ -728,7 +681,6 @@ impl NakamotoDownloadStateMachine { self.wanted_tenures = new_wanted_tenures; self.reward_cycle = sort_rc; - self.update_tenure_start_blocks(chainstate)?; Ok(()) } @@ -1485,21 +1437,6 @@ impl NakamotoDownloadStateMachine { // run all downloaders let new_blocks = self.tenure_downloads.run(network, &mut self.neighbor_rpc); - // give blocked downloaders their tenure-end blocks from other downloaders that have - // obtained their tenure-start blocks - let new_tenure_starts = self.tenure_downloads.find_new_tenure_start_blocks(); - self.tenure_start_blocks - .extend(new_tenure_starts.into_iter()); - - let dead = self - .tenure_downloads - .handle_tenure_end_blocks(&self.tenure_start_blocks); - - // bookkeeping - for naddr in dead.into_iter() { - self.neighbor_rpc.add_dead(network, &naddr); - } - new_blocks } @@ -1729,7 +1666,7 @@ impl NakamotoDownloadStateMachine { ) -> Result>, NetError> { self.nakamoto_tip = network.stacks_tip.block_id(); debug!("Downloader: Nakamoto tip is {:?}", &self.nakamoto_tip); - self.update_wanted_tenures(&network, sortdb, chainstate)?; + self.update_wanted_tenures(&network, sortdb)?; self.update_processed_tenures(chainstate)?; let new_blocks = self.run_downloads(burnchain_height, network, sortdb, chainstate, ibd); self.last_sort_tip = Some(network.burnchain_tip.clone()); diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 95d97f67d5..92e032fa38 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -68,31 +68,7 @@ use crate::util_lib::db::{DBConn, Error as DBError}; pub enum NakamotoTenureDownloadState { /// Getting the tenure-start block (the given StacksBlockId is it's block ID). GetTenureStartBlock(StacksBlockId), - /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not - /// always) handled by the execution of another NakamotoTenureDownloader. The only - /// exceptions are as follows: - /// - /// * if this tenure contains the anchor block, and it's the last tenure in the - /// reward cycle. In this case, the end-block must be directly fetched, since there will be no - /// follow-on NakamotTenureDownloader in the same reward cycle who can provide this. - /// - /// * if this tenure is the highest complete tenure, and we just learned the start-block of the - /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block - /// already known. This step will be skipped because the end-block is already present in the - /// state machine. - /// - /// * if the deadline (second parameter) is exceeded, the state machine transitions to - /// GetTenureEndBlock. - /// - /// The two fields here are: - /// * the block ID of the last block in the tenure (which happens to be the block ID of the - /// start block of the next tenure) - /// * the deadline by which this state machine needs to have obtained the tenure end-block - /// before transitioning to `GetTenureEndBlock`. - WaitForTenureEndBlock(StacksBlockId, Instant), - /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks - /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in - /// which we cannot quickly get the tenure-end block. + /// Getting the tenure-end block. /// /// The field here is the block ID of the tenure end block. GetTenureEndBlock(StacksBlockId), @@ -163,8 +139,7 @@ pub struct NakamotoTenureDownloader { pub tenure_start_block: Option, /// Pre-stored tenure end block. /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once - /// the start-block for the current tenure is downloaded. This is that start-block, which is - /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. + /// the start-block for the current tenure is downloaded. pub tenure_end_block: Option, /// Tenure blocks pub tenure_blocks: Option>, @@ -205,16 +180,6 @@ impl NakamotoTenureDownloader { self } - /// Is this downloader waiting for the tenure-end block data from some other downloader? Per - /// the struct documentation, this is case 2(a). - pub fn is_waiting(&self) -> bool { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { - return true; - } else { - return false; - } - } - /// Validate and accept a given tenure-start block. If accepted, then advance the state. /// Returns Ok(()) if the start-block is valid. /// Returns Err(..) if it is not valid. @@ -266,66 +231,15 @@ impl NakamotoTenureDownloader { tenure_end_block.block_id(), &self.tenure_id_consensus_hash ); - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - tenure_end_block.block_id(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); self.try_accept_tenure_end_block(&tenure_end_block)?; } else { - // need to get tenure_end_block. By default, assume that another - // NakamotoTenureDownloader will provide this block, and allow the - // NakamotoTenureDownloaderSet instance that manages a collection of these - // state-machines make the call to require this one to fetch the block directly. - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - self.tenure_end_block_id.clone(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); + // need to get tenure_end_block. + self.state = + NakamotoTenureDownloadState::GetTenureEndBlock(self.tenure_end_block_id.clone()); } Ok(()) } - /// Transition this state-machine from waiting for its tenure-end block from another - /// state-machine to directly fetching it. This only needs to happen if the tenure this state - /// machine is downloading contains the PoX anchor block, and it's also the last confirmed - /// tenurein this reward cycle. - /// - /// This function is called by `NakamotoTenureDownloadSet`, which instantiates, schedules, and - /// runs a set of these machines based on the peers' inventory vectors. But because we don't - /// know if this is the PoX anchor block tenure (or even the last tenure) until we have - /// inventory vectors for this tenure's reward cycle, this state-transition must be driven - /// after this machine's instantiation. - pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state - else { - return Err(NetError::InvalidState); - }; - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - Ok(()) - } - - /// Transition to fetching the tenure-end block directly if waiting has taken too long. - pub fn transition_to_fetch_end_block_on_timeout(&mut self) { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = - self.state - { - if wait_deadline < Instant::now() { - debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", - &self.naddr, &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - } - } - } - /// Validate and accept a tenure-end block. If accepted, then advance the state. /// Once accepted, this function extracts the tenure-change transaction and block header from /// this block (it does not need the entire block). @@ -338,8 +252,7 @@ impl NakamotoTenureDownloader { ) -> Result<(), NetError> { if !matches!( &self.state, - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) - | NakamotoTenureDownloadState::GetTenureEndBlock(_) + NakamotoTenureDownloadState::GetTenureEndBlock(_) ) { warn!("Invalid state for this method"; "state" => %self.state); @@ -577,14 +490,6 @@ impl NakamotoTenureDownloader { debug!("Request tenure-start block {}", &start_block_id); StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) } - NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { - // we're waiting for some other downloader's block-fetch to complete - debug!( - "Waiting for tenure-end block {} until {:?}", - &_block_id, _deadline - ); - return Ok(None); - } NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { debug!("Request tenure-end block {}", &end_block_id); StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) @@ -665,10 +570,6 @@ impl NakamotoTenureDownloader { self.try_accept_tenure_start_block(block)?; Ok(None) } - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { - debug!("Invalid state -- Got download response for WaitForTenureBlock"); - Err(NetError::InvalidState) - } NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { debug!("Got download response to tenure-end block {}", &_block_id); let block = response.decode_nakamoto_block().map_err(|e| { diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 28a40e7eb5..74ff83460d 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -172,9 +172,6 @@ impl NakamotoTenureDownloaderSet { if downloader.idle { continue; } - if downloader.is_waiting() { - continue; - } if downloader.is_done() { continue; } @@ -233,9 +230,6 @@ impl NakamotoTenureDownloaderSet { if !downloader.idle { continue; } - if downloader.is_waiting() { - continue; - } if downloader.naddr != naddr { continue; } @@ -264,7 +258,7 @@ impl NakamotoTenureDownloaderSet { idled.push(naddr.clone()); continue; }; - if downloader.idle || downloader.is_waiting() { + if downloader.idle { debug!( "Remove idled peer {} for tenure download {}", &naddr, &downloader.tenure_id_consensus_hash @@ -306,43 +300,6 @@ impl NakamotoTenureDownloaderSet { ret } - /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their - /// tenure-end blocks. - /// Return a list of peers driving downloaders with failing `tenure_start_blocks` - pub(crate) fn handle_tenure_end_blocks( - &mut self, - tenure_start_blocks: &HashMap, - ) -> Vec { - debug!( - "handle tenure-end blocks: {:?}", - &tenure_start_blocks.keys().collect::>() - ); - let mut dead = vec![]; - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = - &downloader.state - else { - continue; - }; - let Some(end_block) = tenure_start_blocks.get(end_block_id) else { - continue; - }; - if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { - warn!( - "Failed to accept tenure end-block {} for tenure {}: {:?}", - &end_block.block_id(), - &downloader.tenure_id_consensus_hash, - &e - ); - dead.push(downloader.naddr.clone()); - } - } - dead - } - /// Does there exist a downloader (possibly unscheduled) for the given tenure? pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { for downloader_opt in self.downloaders.iter() { @@ -351,11 +308,8 @@ impl NakamotoTenureDownloaderSet { }; if &downloader.tenure_id_consensus_hash == tenure_id { debug!( - "Have downloader for tenure {} already (idle={}, waiting={}, state={})", - tenure_id, - downloader.idle, - downloader.is_waiting(), - &downloader.state + "Have downloader for tenure {} already (idle={}, state={})", + tenure_id, downloader.idle, &downloader.state ); return true; } @@ -363,59 +317,6 @@ impl NakamotoTenureDownloaderSet { false } - /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor - /// block, we need to go and directly fetch its end block instead of waiting for another - /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method - /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. - pub(crate) fn try_transition_fetch_tenure_end_blocks( - &mut self, - tenure_block_ids: &HashMap, - ) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - downloader.transition_to_fetch_end_block_on_timeout(); - } - - // find tenures in which we need to fetch the tenure-end block directly. - let mut last_available_tenures: HashSet = HashSet::new(); - for (_, all_available) in tenure_block_ids.iter() { - for (_, available) in all_available.iter() { - if available.fetch_end_block { - last_available_tenures.insert(available.end_block_id.clone()); - } - } - } - - // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to - // fetching - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - if !downloader.idle { - continue; - } - if !downloader.is_waiting() { - continue; - } - if !last_available_tenures.contains(&downloader.tenure_end_block_id) { - continue; - } - debug!( - "Transition downloader for {} from waiting to fetching", - &downloader.tenure_id_consensus_hash - ); - if let Err(e) = downloader.transition_to_fetch_end_block() { - warn!( - "Downloader for {} failed to transition to fetch end block: {:?}", - &downloader.tenure_id_consensus_hash, &e - ); - } - } - } - /// Create a given number of downloads from a schedule and availability set. /// Removes items from the schedule, and neighbors from the availability set. /// A neighbor will be issued at most one request. @@ -438,7 +339,6 @@ impl NakamotoTenureDownloaderSet { self.clear_finished_downloaders(); self.clear_available_peers(); - self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); while self.inflight() < count { let Some(ch) = schedule.front() else { break; diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 44bbaed7d2..a6307b324b 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -255,7 +255,7 @@ fn test_nakamoto_tenure_downloader() { .try_accept_tenure_start_block(blocks.first().unwrap().clone()) .is_ok()); - let NakamotoTenureDownloadState::WaitForTenureEndBlock(block_id, _) = td.state else { + let NakamotoTenureDownloadState::GetTenureEndBlock(block_id) = td.state else { panic!("wrong state"); }; assert_eq!(block_id, next_tenure_start_block.header.block_id()); @@ -1456,46 +1456,6 @@ fn test_make_tenure_downloaders() { } } - // test load_tenure_start_blocks - { - let sortdb = peer.sortdb(); - let ih = peer.sortdb().index_handle(&tip.sortition_id); - let wanted_tenures = NakamotoDownloadStateMachine::load_wanted_tenures( - &ih, - nakamoto_start, - tip.block_height + 1, - ) - .unwrap(); - - // the first block loaded won't have data, since the blocks are loaded by consensus hash - // but the resulting map is keyed by block ID (and we don't have the first block ID) - let wanted_tenures_with_blocks = wanted_tenures[1..].to_vec(); - - let nakamoto_tip = peer.network.stacks_tip.block_id(); - let chainstate = peer.chainstate(); - let mut tenure_start_blocks = HashMap::new(); - NakamotoDownloadStateMachine::load_tenure_start_blocks( - &wanted_tenures, - chainstate, - &mut tenure_start_blocks, - ) - .unwrap(); - - // remove malleablized blocks - tenure_start_blocks.retain(|_, block| block.header.version == 0); - - assert_eq!(tenure_start_blocks.len(), wanted_tenures.len()); - - for wt in wanted_tenures_with_blocks { - if tenure_start_blocks.get(&wt.winning_block_id).is_none() { - warn!("No tenure start block for wanted tenure {:?}", &wt); - } - - let block = tenure_start_blocks.get(&wt.winning_block_id).unwrap(); - assert!(block.is_wellformed_tenure_start_block().unwrap()); - } - } - // test find_available_tenures { // test for reward cycle From a3d4d42dad79437f32439698a0538055f6b6f16d Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 18 Sep 2024 17:03:24 +0300 Subject: [PATCH 1146/1400] add integration test to run on CI --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a7a483665e..04b4a51f70 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -77,6 +77,7 @@ jobs: - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration + - tests::nakamoto_integrations::simple_neon_integration_with_flash_blocks_on_epoch_3 - tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb From 7c073faaa2051a903347a9e7cc642a2d60d9db07 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 18 Sep 2024 11:12:14 -0700 Subject: [PATCH 1147/1400] Cannot assume stacks transaction will get mined AFTER the burn block is mined Signed-off-by: Jacinta Ferrant --- .../src/tests/nakamoto_integrations.rs | 44 +++---------------- 1 file changed, 7 insertions(+), 37 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 32924ab7b7..7a71725f6c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5185,51 +5185,21 @@ fn clarity_burn_state() { vec![&Value::UInt(burn_block_height)], ); result.expect_result_ok().expect("Read-only call failed"); - - // Submit a tx for the next block (the next block will be a new tenure, so the burn block height will increment) - let call_tx = tests::make_contract_call( - &sender_sk, - sender_nonce, - tx_fee, - &sender_addr, - contract_name, - "bar", - &[Value::UInt(burn_block_height + 1)], - ); - sender_nonce += 1; - submit_tx(&http_origin, &call_tx); } let commits_before = commits_submitted.load(Ordering::SeqCst); - next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) - .unwrap(); + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); let info = get_chain_info(&naka_conf); burn_block_height = info.burn_block_height as u128; info!("Expecting burn block height to be {}", burn_block_height); - // Assert that the contract call was successful - test_observer::get_mined_nakamoto_blocks() - .last() - .unwrap() - .tx_events - .iter() - .for_each(|event| match event { - TransactionEvent::Success(TransactionSuccessEvent { result, fee, .. }) => { - // Ignore coinbase and tenure transactions - if *fee == 0 { - return; - } - - info!("Contract call result: {}", result); - result.clone().expect_result_ok().expect("Ok result"); - } - _ => { - info!("Unsuccessful event: {:?}", event); - panic!("Expected a successful transaction"); - } - }); - // mine the interim blocks for interim_block_ix in 0..inter_blocks_per_tenure { info!("Mining interim block {interim_block_ix}"); From d9c002c70f56a825ba318c2e4731914d360ca6e0 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 18 Sep 2024 12:42:53 -0700 Subject: [PATCH 1148/1400] CRC: ensure that the tenure change transaction and contract call get mined in the same stacks block Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/miner.rs | 11 ++++ .../src/tests/nakamoto_integrations.rs | 63 ++++++++++++++++--- 2 files changed, 67 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index da1c75c708..1a5f4aa3c2 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -60,6 +60,8 @@ use crate::neon_node; use crate::run_loop::nakamoto::Globals; use crate::run_loop::RegisteredKey; +#[cfg(test)] +pub static TEST_MINE_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); #[cfg(test)] pub static TEST_BROADCAST_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); #[cfg(test)] @@ -291,6 +293,15 @@ impl BlockMinerThread { let mut attempts = 0; // now, actually run this tenure loop { + #[cfg(test)] + if *TEST_MINE_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Mining is stalled due to testing directive"); + while *TEST_MINE_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + warn!("Mining is no longer stalled due to testing directive. Continuing..."); + } let new_block = loop { // If we're mock mining, we may not have processed the block that the // actual tenure winner committed to yet. So, before attempting to diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 7a71725f6c..6fa0dafc88 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -94,7 +94,9 @@ use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; -use crate::nakamoto_node::miner::{TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL}; +use crate::nakamoto_node::miner::{ + TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, +}; use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; @@ -5185,21 +5187,68 @@ fn clarity_burn_state() { vec![&Value::UInt(burn_block_height)], ); result.expect_result_ok().expect("Read-only call failed"); + + // Pause mining to prevent the stacks block from being mined before the tenure change is processed + TEST_MINE_STALL.lock().unwrap().replace(true); + // Submit a tx for the next block (the next block will be a new tenure, so the burn block height will increment) + let call_tx = tests::make_contract_call( + &sender_sk, + sender_nonce, + tx_fee, + &sender_addr, + contract_name, + "bar", + &[Value::UInt(burn_block_height + 1)], + ); + sender_nonce += 1; + submit_tx(&http_origin, &call_tx); } let commits_before = commits_submitted.load(Ordering::SeqCst); - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + next_block_and(&mut btc_regtest_controller, 60, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); + TEST_MINE_STALL.lock().unwrap().replace(false); + wait_for(20, || { + Ok(coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed() + > blocks_processed_before) + }) .unwrap(); let info = get_chain_info(&naka_conf); burn_block_height = info.burn_block_height as u128; info!("Expecting burn block height to be {}", burn_block_height); + // Assert that the contract call was successful + test_observer::get_mined_nakamoto_blocks() + .last() + .unwrap() + .tx_events + .iter() + .for_each(|event| match event { + TransactionEvent::Success(TransactionSuccessEvent { result, fee, .. }) => { + // Ignore coinbase and tenure transactions + if *fee == 0 { + return; + } + + info!("Contract call result: {}", result); + result.clone().expect_result_ok().expect("Ok result"); + } + _ => { + info!("Unsuccessful event: {:?}", event); + panic!("Expected a successful transaction"); + } + }); + // mine the interim blocks for interim_block_ix in 0..inter_blocks_per_tenure { info!("Mining interim block {interim_block_ix}"); From 27410626b0b75d60030e7c550cef149456f13685 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Wed, 18 Sep 2024 22:55:15 +0300 Subject: [PATCH 1149/1400] update flashblocks integration test for CI --- .../src/tests/nakamoto_integrations.rs | 35 ++++++++++++++----- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 54d82df92c..85bbf9120d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1676,7 +1676,6 @@ fn simple_neon_integration() { } #[test] -#[ignore] /// This test spins up a nakamoto-neon node. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, /// having flash blocks when epoch updates and expects everything to work normally, @@ -1900,15 +1899,35 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); // Check that we have the expected burn blocks - // We expect to have blocks 220-230 and 234 onwards, with a gap for the flash blocks + // We expect to have around the blocks 220-230 and 234 onwards, with a gap of 3 blocks for the flash blocks let bhh = u64::from(tip.burn_header_height); - test_observer::contains_burn_block_range(220..=230).unwrap(); - test_observer::contains_burn_block_range(234..=bhh).unwrap(); - // Verify that we're missing the expected flash blocks - assert!( - test_observer::contains_burn_block_range(231..=233).is_err(), - "Expected to be missing burn blocks 231-233 due to flash blocks" + // Find the gap in burn blocks + let mut gap_start = 0; + let mut gap_end = 0; + for i in 220..=bhh { + if test_observer::contains_burn_block_range(i..=i).is_err() { + if gap_start == 0 { + gap_start = i; + } + gap_end = i; + } else if gap_start != 0 { + break; + } + } + + // Verify that there's a gap of exactly 3 blocks + assert_eq!( + gap_end - gap_start + 1, + 3, + "Expected a gap of exactly 3 burn blocks due to flash blocks, found gap from {} to {}", + gap_start, + gap_end ); + + // Verify blocks before and after the gap + test_observer::contains_burn_block_range(220..=(gap_start - 1)).unwrap(); + test_observer::contains_burn_block_range((gap_end + 1)..=bhh).unwrap(); + info!("Verified burn block ranges, including expected gap for flash blocks"); coord_channel From 33ff38a2d25182ff107bd3bd333c354c60e73c4b Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 19 Sep 2024 00:01:11 +0300 Subject: [PATCH 1150/1400] add ignore to flashblocks test header --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 85bbf9120d..9b89e04856 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1676,6 +1676,7 @@ fn simple_neon_integration() { } #[test] +#[ignore] /// This test spins up a nakamoto-neon node. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, /// having flash blocks when epoch updates and expects everything to work normally, From 08a62a25329e7ab2855d4b28dbcce5a5da95251e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 18 Sep 2024 16:32:18 -0700 Subject: [PATCH 1151/1400] CRC: add unit tests and tighten regex restriction and update change log with full path Signed-off-by: Jacinta Ferrant --- CHANGELOG.md | 2 +- stackslib/src/net/api/getsigner.rs | 19 ++++- stackslib/src/net/api/tests/getsigner.rs | 96 ++++++++++++++++++++++++ stackslib/src/net/api/tests/mod.rs | 1 + 4 files changed, 113 insertions(+), 5 deletions(-) create mode 100644 stackslib/src/net/api/tests/getsigner.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 05d3ada08d..f5c84db9a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-stacks-block-info?` added - `get-tenure-info?` added - `get-block-info?` removed -- Added `/v3/signer/` endpoint +- Added `/v3/signer/{signer_pubkey}/{reward_cycle}` endpoint ## [2.5.0.0.7] diff --git a/stackslib/src/net/api/getsigner.rs b/stackslib/src/net/api/getsigner.rs index 1231e195c1..90bcc796bf 100644 --- a/stackslib/src/net/api/getsigner.rs +++ b/stackslib/src/net/api/getsigner.rs @@ -45,8 +45,17 @@ use crate::util_lib::db::Error as DBError; #[derive(Clone, Default)] pub struct GetSignerRequestHandler { - signer_pubkey: Option, - reward_cycle: Option, + pub signer_pubkey: Option, + pub reward_cycle: Option, +} + +impl GetSignerRequestHandler { + pub fn new() -> Self { + Self { + signer_pubkey: None, + reward_cycle: None, + } + } } #[derive(Debug, Serialize, Deserialize)] @@ -61,8 +70,10 @@ impl HttpRequest for GetSignerRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/signer/(?P[0-9a-f]{66})/(?P[0-9]{1,10})$"#) - .unwrap() + Regex::new( + r#"^/v3/signer/(?P0[23][0-9a-f]{64})/(?P[0-9]{1,10})$"#, + ) + .unwrap() } fn metrics_identifier(&self) -> &str { diff --git a/stackslib/src/net/api/tests/getsigner.rs b/stackslib/src/net/api/tests/getsigner.rs new file mode 100644 index 0000000000..92e30057d7 --- /dev/null +++ b/stackslib/src/net/api/tests/getsigner.rs @@ -0,0 +1,96 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::BTreeMap; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::types::chainstate::{StacksBlockId, StacksPrivateKey, StacksPublicKey}; +use rand::{thread_rng, RngCore}; +use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash}; +use stacks_common::types::net::PeerHost; + +use crate::net::api::getsigner::{self, GetSignerRequestHandler}; +use crate::net::api::tests::{test_rpc, TestRPC}; +use crate::net::connection::ConnectionOptions; +use crate::net::http::{Error as HttpError, HttpRequestPreamble, HttpVersion}; +use crate::net::httpcore::{ + RPCRequestHandler, StacksHttp, StacksHttpPreamble, StacksHttpRequest, TipRequest, +}; +use crate::net::test::TestEventObserver; +use crate::net::{Error as NetError, ProtocolFamily}; + +fn make_preamble(query: &str) -> HttpRequestPreamble { + HttpRequestPreamble { + version: HttpVersion::Http11, + verb: "GET".into(), + path_and_query_str: format!("/v3/signer{query}"), + host: PeerHost::DNS("localhost".into(), 0), + content_type: None, + content_length: Some(0), + keep_alive: false, + headers: BTreeMap::new(), + } +} + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + let private_key = StacksPrivateKey::new(); + let signer_pubkey = StacksPublicKey::from_private(&private_key); + let signer_pubkey_hex = signer_pubkey.to_hex(); + let cycle_num = thread_rng().next_u32() as u64; + + let mut handler = getsigner::GetSignerRequestHandler::new(); + let mut bad_content_length_preamble = + make_preamble(&format!("/{signer_pubkey_hex}/{cycle_num}")); + bad_content_length_preamble.content_length = Some(1); + let tests = vec![ + ( + make_preamble(&format!("/{signer_pubkey_hex}/{cycle_num}")), + Ok((Some(signer_pubkey), Some(cycle_num))), + ), + ( + make_preamble(&format!("/foo/{cycle_num}")), + Err(NetError::NotFoundError), + ), + ( + make_preamble(&format!("/{signer_pubkey_hex}/bar")), + Err(NetError::NotFoundError), + ), + ( + bad_content_length_preamble, + Err( + HttpError::DecodeError("Invalid Http request: expected 0-length body".into()) + .into(), + ), + ), + ]; + + for (inp, expected_result) in tests.into_iter() { + handler.restart(); + let parsed_request = http.handle_try_parse_request(&mut handler, &inp, &[]); + match expected_result { + Ok((key, cycle)) => { + assert!(parsed_request.is_ok()); + assert_eq!(handler.signer_pubkey, key); + assert_eq!(handler.reward_cycle, cycle); + } + Err(e) => { + assert_eq!(e, parsed_request.unwrap_err()); + } + } + } +} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index ded0360555..d19854bf02 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -73,6 +73,7 @@ mod getmicroblocks_indexed; mod getmicroblocks_unconfirmed; mod getneighbors; mod getpoxinfo; +mod getsigner; mod getsortition; mod getstackerdbchunk; mod getstackerdbmetadata; From 84c836902722466e6d3cd98ad9644466ddc121b5 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 18 Sep 2024 16:50:15 -0700 Subject: [PATCH 1152/1400] fix: apply needed sortdb migrations before 8 --- stackslib/src/chainstate/burn/db/sortdb.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index f446e98a66..53dc2d0547 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2883,7 +2883,7 @@ impl SortitionDB { sql_pragma(self.conn(), "journal_mode", &"WAL")?; sql_pragma(self.conn(), "foreign_keys", &true)?; - let db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; + let mut db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; // create first (sentinel) snapshot debug!("Make first snapshot"); @@ -2909,13 +2909,6 @@ impl SortitionDB { SortitionDB::apply_schema_6(&db_tx, epochs_ref)?; SortitionDB::apply_schema_7(&db_tx, epochs_ref)?; SortitionDB::apply_schema_8_tables(&db_tx, epochs_ref)?; - // `apply_schema_8_migration` creates new transactions, so - // commit this first. - db_tx.commit()?; - // NOTE: we don't need to provide a migrator here because we're not migrating - self.apply_schema_8_migration(None)?; - let mut db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; - SortitionDB::apply_schema_9(&db_tx, epochs_ref)?; db_tx.instantiate_index()?; @@ -2933,6 +2926,14 @@ impl SortitionDB { db_tx.commit()?; + // NOTE: we don't need to provide a migrator here because we're not migrating + self.apply_schema_8_migration(None)?; + + let db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; + SortitionDB::apply_schema_9(&db_tx, epochs_ref)?; + + db_tx.commit()?; + self.add_indexes()?; debug!("Instantiated SortDB"); From 4d0e3330e3eb501c591beb4eb1f32c72a46b8ca2 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 18 Sep 2024 17:08:38 -0700 Subject: [PATCH 1153/1400] fix: move signer skip broadcast injection logic to own function --- stacks-signer/src/v0/signer.rs | 43 ++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index a5f635cf16..654a00dc66 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -944,24 +944,8 @@ impl Signer { block.header.signer_signature = signatures; #[cfg(any(test, feature = "testing"))] - { - if *TEST_SKIP_BLOCK_BROADCAST.lock().unwrap() == Some(true) { - warn!( - "{self}: Skipping block broadcast due to testing directive"; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - "consensus_hash" => %block.header.consensus_hash - ); - - if let Err(e) = self.signer_db.set_block_broadcasted( - self.reward_cycle, - &block_hash, - get_epoch_time_secs(), - ) { - warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); - } - return; - } + if self.test_skip_block_broadcast(&block) { + return; } debug!( "{self}: Broadcasting Stacks block {} to node", @@ -986,6 +970,29 @@ impl Signer { } } + #[cfg(any(test, feature = "testing"))] + fn test_skip_block_broadcast(&self, block: &NakamotoBlock) -> bool { + if *TEST_SKIP_BLOCK_BROADCAST.lock().unwrap() == Some(true) { + let block_hash = block.header.signer_signature_hash(); + warn!( + "{self}: Skipping block broadcast due to testing directive"; + "block_id" => %block.block_id(), + "height" => block.header.chain_length, + "consensus_hash" => %block.header.consensus_hash + ); + + if let Err(e) = self.signer_db.set_block_broadcasted( + self.reward_cycle, + &block_hash, + get_epoch_time_secs(), + ) { + warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); + } + return true; + } + false + } + /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); From 3296f9865ea8dae40c3a7e1418afd75533d903d7 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 18 Sep 2024 17:11:02 -0700 Subject: [PATCH 1154/1400] fix: check if db_version exists to determine staging_blocks schema --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 0fcdaffad8..c0b364eea8 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -35,7 +35,7 @@ use crate::chainstate::stacks::{Error as ChainstateError, StacksBlock, StacksBlo use crate::stacks_common::codec::StacksMessageCodec; use crate::util_lib::db::{ query_int, query_row, query_row_columns, query_row_panic, query_rows, sqlite_open, - tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, + table_exists, tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, }; /// The means by which a block is obtained. @@ -666,13 +666,17 @@ impl StacksChainState { pub fn get_nakamoto_staging_blocks_db_version( conn: &Connection, ) -> Result { + let db_version_exists = table_exists(&conn, "db_version")?; + if !db_version_exists { + return Ok(1); + } let qry = "SELECT version FROM db_version ORDER BY version DESC LIMIT 1"; let args = NO_PARAMS; let version: Option = match query_row(&conn, qry, args) { Ok(x) => x, Err(e) => { debug!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); - return Ok(1); + return Err(ChainstateError::DBError(DBError::Corruption)); } }; @@ -684,7 +688,7 @@ impl StacksChainState { } None => { debug!("No version present in Nakamoto staging blocks DB; defaulting to 1"); - Ok(1) + Err(ChainstateError::DBError(DBError::Corruption)) } } } From 56ae16867c4d7a581e7f4e4c0f7bced1df9f56e0 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Thu, 19 Sep 2024 14:31:05 +0300 Subject: [PATCH 1155/1400] check that epoch3 start burn block is in the missing blocks --- .../src/tests/nakamoto_integrations.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 9b89e04856..b50f9a459e 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1902,6 +1902,12 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { // Check that we have the expected burn blocks // We expect to have around the blocks 220-230 and 234 onwards, with a gap of 3 blocks for the flash blocks let bhh = u64::from(tip.burn_header_height); + + // Get the Epoch 3.0 activation height (in terms of Bitcoin block height) + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_start_height = epoch_3.start_height; + // Find the gap in burn blocks let mut gap_start = 0; let mut gap_end = 0; @@ -1925,11 +1931,21 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { gap_end ); + // Verify that the gap includes the Epoch 3.0 activation height + assert!( + gap_start <= epoch_3_start_height && epoch_3_start_height <= gap_end, + "Expected the gap ({}..={}) to include the Epoch 3.0 activation height ({})", + gap_start, + gap_end, + epoch_3_start_height + ); + // Verify blocks before and after the gap test_observer::contains_burn_block_range(220..=(gap_start - 1)).unwrap(); test_observer::contains_burn_block_range((gap_end + 1)..=bhh).unwrap(); info!("Verified burn block ranges, including expected gap for flash blocks"); + info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {}", epoch_3_start_height); coord_channel .lock() From 95b01c17a26e620a1af4077368d9d486d98a24dd Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 19 Sep 2024 11:38:17 -0500 Subject: [PATCH 1156/1400] fix: ongoing commit logic + better error messages * correct the ongoing commit logic (and RBF handling) in bitcoin tx submissions * better error messages from send_http_request --- stackslib/src/net/httpcore.rs | 13 +++++++------ .../src/burnchains/bitcoin_regtest_controller.rs | 7 +++++-- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 804add6f33..3b4bf8c9b9 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1953,15 +1953,16 @@ pub fn send_http_request( // Step 5: decode the HTTP message and return it if it's not an error. let response_data = match response { StacksHttpMessage::Response(response_data) => response_data, - StacksHttpMessage::Error(path, response) => { + StacksHttpMessage::Error(_path, response) => { + let verb = &request.preamble().verb; + let path = &request.preamble().path_and_query_str; + let resp_status_code = response.preamble().status_code; + let resp_body = response.body(); return Err(io::Error::new( io::ErrorKind::Other, format!( - "Request did not succeed ({} != 200). Path: '{}'", - response.preamble().status_code, - &path - ) - .as_str(), + "HTTP '{verb} {path}' did not succeed ({resp_status_code} != 200). Response body = {resp_body:?}" + ), )); } _ => { diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 3338e3cf5f..568e9559c3 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1662,7 +1662,10 @@ impl BitcoinRegtestController { ) } else { // Case 2) ii): Attempt to RBF - info!("Attempt to replace by fee an outdated leader block commit"); + info!( + "Attempt to replace by fee an outdated leader block commit"; + "ongoing_txids" => ?ongoing_op.txids + ); self.send_block_commit_operation( epoch_id, payload, @@ -1674,7 +1677,7 @@ impl BitcoinRegtestController { ) }; - if res.is_ok() { + if res.is_err() { self.ongoing_block_commit = Some(ongoing_op); } From 56141129f09d858582a454c81a0d5fe6b9439eb5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 19 Sep 2024 11:11:48 -0700 Subject: [PATCH 1157/1400] Do not count received valid signatures towards threshold weight when ignore flag set Signed-off-by: Jacinta Ferrant --- .../src/nakamoto_node/sign_coordinator.rs | 12 ++-- testnet/stacks-node/src/tests/signer/v0.rs | 60 ++++++++++--------- 2 files changed, 37 insertions(+), 35 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 29a64cfb27..1ac2618a53 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -886,11 +886,6 @@ impl SignCoordinator { ); continue; } - if !gathered_signatures.contains_key(&slot_id) { - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); - } if Self::fault_injection_ignore_signatures() { warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; @@ -906,6 +901,12 @@ impl SignCoordinator { continue; } + if !gathered_signatures.contains_key(&slot_id) { + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } + info!("SignCoordinator: Signature Added to block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), @@ -986,7 +987,6 @@ impl SignCoordinator { } }; } - // After gathering all signatures, return them if we've hit the threshold if total_weight_signed >= self.weight_threshold { info!("SignCoordinator: Received enough signatures. Continuing."; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f14d962404..c123217ce0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2227,26 +2227,39 @@ fn signers_broadcast_signed_blocks() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); - sleep_ms(10_000); - + let info_before = get_chain_info(&signer_test.running_nodes.conf); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); signer_test.mine_nakamoto_block(Duration::from_secs(30)); - sleep_ms(10_000); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + wait_for(30, || { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + let info = get_chain_info(&signer_test.running_nodes.conf); + debug!( + "blocks_mined: {},{}, stacks_tip_height: {},{}", + blocks_mined, blocks_before, info.stacks_tip_height, info_before.stacks_tip_height + ); + Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for first nakamoto block to be mined"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - let signer_pushed_before = signer_test .running_nodes .nakamoto_blocks_signer_pushed .load(Ordering::SeqCst); - let info_before = get_chain_info(&signer_test.running_nodes.conf); - // submit a tx so that the miner will mine a block + // submit a tx so that the miner will mine a blockn let sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); @@ -2254,26 +2267,16 @@ fn signers_broadcast_signed_blocks() { debug!("Transaction sent; waiting for block-mining"); - let start = Instant::now(); - let duration = 60; - loop { - let blocks_mined = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); + wait_for(30, || { let signer_pushed = signer_test .running_nodes .nakamoto_blocks_signer_pushed .load(Ordering::SeqCst); - + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); - if blocks_mined > blocks_before - && signer_pushed > signer_pushed_before - && info.stacks_tip_height > info_before.stacks_tip_height - { - break; - } - debug!( "blocks_mined: {},{}, signers_pushed: {},{}, stacks_tip_height: {},{}", blocks_mined, @@ -2283,12 +2286,11 @@ fn signers_broadcast_signed_blocks() { info.stacks_tip_height, info_before.stacks_tip_height ); - - std::thread::sleep(Duration::from_millis(100)); - if start.elapsed() >= Duration::from_secs(duration) { - panic!("Timed out"); - } - } + Ok(blocks_mined > blocks_before + && info.stacks_tip_height > info_before.stacks_tip_height + && signer_pushed > signer_pushed_before) + }) + .expect("Timed out waiting for second nakamoto block to be mined"); signer_test.shutdown(); } @@ -4754,7 +4756,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Allowing miner to accept block responses again. "); TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); - info!("Allowing singers to broadcast block N+1 to the miner"); + info!("Allowing signers to broadcast block N+1 to the miner"); TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); // Assert the N+1' block was rejected From a531b50ce8c1f9160aa07b9dbbafc3b8caf672aa Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 19 Sep 2024 11:49:25 -0700 Subject: [PATCH 1158/1400] CRC: fix mainnet flag Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index cb1d4f8a6d..a042600977 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -608,7 +608,7 @@ impl Signer { // authenticate the signature -- it must be signed by one of the stacking set let is_valid_sig = self.signer_addresses.iter().any(|addr| { - let stacker_address = StacksAddress::p2pkh(true, &public_key); + let stacker_address = StacksAddress::p2pkh(self.mainnet, &public_key); // it only matters that the address hash bytes match stacker_address.bytes == addr.bytes From 065df11f9e8d2928a0ec1ec1163d172b33461312 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 19 Sep 2024 12:48:40 -0700 Subject: [PATCH 1159/1400] Print a warning if failed to parse the stackers response Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/stacks_client.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 6e3bab341e..c4ea485406 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -590,9 +590,10 @@ impl StacksClient { .map_err(|e| backoff::Error::transient(e.into()))?; let status = response.status(); if status.is_success() { - return response - .json() - .map_err(|e| backoff::Error::permanent(e.into())); + return response.json().map_err(|e| { + warn!("Failed to parse the GetStackers response: {e}"); + backoff::Error::permanent(e.into()) + }); } let error_data = response.json::().map_err(|e| { warn!("Failed to parse the GetStackers error response: {e}"); From b2acfd72bde0378a94b24d36124f20426ddceaff Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 19 Sep 2024 13:26:17 -0700 Subject: [PATCH 1160/1400] Do not assume every signers signature makes it before miner quits waiting for unnecessary signatures Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 156 +++++++++++++-------- 1 file changed, 95 insertions(+), 61 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c123217ce0..8d8ff07ac0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4130,31 +4130,34 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let long_timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(30); + let long_timeout = 60; + let short_timeout = 30; signer_test.boot_to_epoch_3(); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block + + // submit a tx so that the miner will mine a stacks block N let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block to be mined and processed"); + .expect("Timed out waiting for N to be mined and processed"); - sender_nonce += 1; let info_after = signer_test .stacks_client .get_peer_info() @@ -4173,13 +4176,13 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .len(); assert_eq!(nmb_signatures, num_signers); - // Ensure that the block was accepted globally so the stacks tip has not advanced to N + // Ensure that the block was accepted globally so the stacks tip has advanced to N let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); - // Make less than 30% of the signers reject the block to ensure it is marked globally accepted + // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted let rejecting_signers: Vec<_> = signer_test .signer_stacks_private_keys .iter() @@ -4191,18 +4194,20 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .unwrap() .replace(rejecting_signers.clone()); test_observer::clear(); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} in to mine block N+1"); - let start_time = Instant::now(); + + // submit a tx so that the miner will mine a stacks block N+1 let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - wait_for(short_timeout.as_secs(), || { + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N+1"); + + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4210,7 +4215,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) }) .expect("Timed out waiting for block to be mined and processed"); - loop { + wait_for(long_timeout, || { let stackerdb_events = test_observer::get_stackerdb_chunks(); let block_rejections = stackerdb_events .into_iter() @@ -4235,14 +4240,10 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { } }) .collect::>(); - if block_rejections.len() == rejecting_signers.len() { - break; - } - assert!( - start_time.elapsed() < long_timeout, - "FAIL: Test timed out while waiting for block proposal rejections", - ); - } + Ok(block_rejections.len() == rejecting_signers.len()) + }) + .expect("Timed out waiting for block proposal rejections"); + // Assert the block was mined let info_after = signer_test .stacks_client @@ -4263,13 +4264,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .len(); assert_eq!(nmb_signatures, num_signers - rejecting_signers.len()); - // Ensure that the block was accepted globally so the stacks tip has advanced to N+1 + // Ensure that the block was still accepted globally so the stacks tip has advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1 = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); assert_ne!(block_n_1, block_n); info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); + // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let blocks_before = mined_blocks.load(Ordering::SeqCst); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -4277,11 +4279,12 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .unwrap() .replace(Vec::new()); + // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N+2"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4297,20 +4300,35 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height, ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_2 = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); assert_ne!(block_n_2, block_n_1); + + // Make sure that ALL signers accepted the block proposal + wait_for(short_timeout, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { + if hash == block_n_2.signer_signature_hash { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); } #[test] @@ -4351,7 +4369,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(30); + let short_timeout = 30; signer_test.boot_to_epoch_3(); info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); @@ -4360,13 +4378,15 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); + // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4375,17 +4395,15 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }) .expect("Timed out waiting for block to be mined and processed"); - sender_nonce += 1; + // Ensure that the block was accepted globally so the stacks tip has advanced to N let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - assert_eq!( info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); @@ -4404,16 +4422,19 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .replace(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); + + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to attempt to mine block N+1"); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let ignored_signers = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) @@ -4433,6 +4454,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { Ok(ignored_signers.len() + ignoring_signers.len() == num_signers) }) .expect("FAIL: Timed out waiting for block proposal acceptance"); + let blocks_after = mined_blocks.load(Ordering::SeqCst); let info_after = signer_test .stacks_client @@ -4440,13 +4462,14 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .expect("Failed to get peer info"); assert_eq!(blocks_after, blocks_before); assert_eq!(info_after, info_before); - // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1 + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1 = nakamoto_blocks.last().unwrap(); assert_ne!(block_n_1, block_n); assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); info!("------------------------- Starting Tenure B -------------------------"); + // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and( @@ -4458,23 +4481,19 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }, ) .unwrap(); + info!( "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" ); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + TEST_IGNORE_ALL_BLOCK_PROPOSALS + .lock() + .unwrap() + .replace(Vec::new()); + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4491,15 +4510,6 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); @@ -4509,6 +4519,30 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { block_n_1_prime.block_hash ); assert_ne!(block_n_1_prime, block_n); + + // Make sure that ALL signers accepted the block proposal even though they signed a conflicting one in prior tenure + wait_for(short_timeout, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { + if hash == block_n_1_prime.signer_signature_hash { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); } #[test] From 2cdd31b6bc831b4bb7fc888d18a842b8cf1e15a4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 19 Sep 2024 16:58:37 -0400 Subject: [PATCH 1161/1400] test: move the 2.5 and 3.0 activation heights earlier for this test This allows us to avoid hitting block 240, which is when the stackers get unstacked and the chain stalls, making `partial_tenure_fork` less flaky --- testnet/stacks-node/src/tests/signer/mod.rs | 19 ++++++++++++++++--- testnet/stacks-node/src/tests/signer/v0.rs | 11 +++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 0b38a79234..9532166492 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -748,9 +748,22 @@ fn setup_stx_btc_node ()>( info!("Make new BitcoinRegtestController"); let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - info!("Bootstraping..."); - // Should be 201 for other tests? - btc_regtest_controller.bootstrap_chain_to_pks(195, btc_miner_pubkeys); + let epoch_2_5_start = usize::try_from( + naka_conf + .burnchain + .epochs + .as_ref() + .unwrap() + .iter() + .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch25) + .unwrap() + .start_height, + ) + .expect("Failed to get epoch 2.5 start height"); + let bootstrap_block = epoch_2_5_start - 6; + + info!("Bootstraping to block {bootstrap_block}..."); + btc_regtest_controller.bootstrap_chain_to_pks(bootstrap_block, btc_miner_pubkeys); info!("Chain bootstrapped..."); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 8d8ff07ac0..27f48b6917 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3610,6 +3610,17 @@ fn partial_tenure_fork() { config.node.local_peer_seed = btc_miner_1_seed.clone(); config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + // Move epoch 2.5 and 3.0 earlier, so we have more time for the + // test before re-stacking is required. + if let Some(epochs) = config.burnchain.epochs.as_mut() { + epochs[6].end_height = 121; + epochs[7].start_height = 121; + epochs[7].end_height = 151; + epochs[8].start_height = 151; + } else { + panic!("Expected epochs to be set"); + } }, Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), None, From fd4c2379986a6abbd8cb0aa920a512a613619df9 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 19 Sep 2024 16:12:38 -0500 Subject: [PATCH 1162/1400] feat: add a consolidated endpoint for current and prior sortitions --- stacks-signer/src/chainstate.rs | 35 +-- stacks-signer/src/client/stacks_client.rs | 50 ++++ stackslib/src/net/api/getsortition.rs | 229 ++++++++++++------ .../src/tests/nakamoto_integrations.rs | 2 +- 4 files changed, 207 insertions(+), 109 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4012fd48a0..4bbb9741a5 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -25,7 +25,7 @@ use stacks_common::types::chainstate::{ConsensusHash, StacksPublicKey}; use stacks_common::util::hash::Hash160; use stacks_common::{info, warn}; -use crate::client::{ClientError, StacksClient}; +use crate::client::{ClientError, CurrentAndLastSortition, StacksClient}; use crate::config::SignerConfig; use crate::signerdb::{BlockState, SignerDb}; @@ -138,8 +138,6 @@ pub struct SortitionsView { pub last_sortition: Option, /// the current successful sortition (this corresponds to the "current" miner slot) pub cur_sortition: SortitionState, - /// the hash at which the sortitions view was fetched - pub latest_consensus_hash: ConsensusHash, /// configuration settings for evaluating proposals pub config: ProposalEvalConfig, } @@ -608,42 +606,21 @@ impl SortitionsView { config: ProposalEvalConfig, client: &StacksClient, ) -> Result { - let latest_state = client.get_latest_sortition()?; - let latest_ch = latest_state.consensus_hash; - - // figure out what cur_sortition will be set to. - // if the latest sortition wasn't successful, query the last one that was. - let latest_success = if latest_state.was_sortition { - latest_state - } else { - info!("Latest state wasn't a sortition: {latest_state:?}"); - let last_sortition_ch = latest_state - .last_sortition_ch - .as_ref() - .ok_or_else(|| ClientError::NoSortitionOnChain)?; - client.get_sortition(last_sortition_ch)? - }; - - // now, figure out what `last_sortition` will be set to. - let last_sortition = latest_success - .last_sortition_ch - .as_ref() - .map(|ch| client.get_sortition(ch)) - .transpose()?; + let CurrentAndLastSortition { + current_sortition, + last_sortition, + } = client.get_current_and_last_sortition()?; - let cur_sortition = SortitionState::try_from(latest_success)?; + let cur_sortition = SortitionState::try_from(current_sortition)?; let last_sortition = last_sortition .map(SortitionState::try_from) .transpose() .ok() .flatten(); - let latest_consensus_hash = latest_ch; - Ok(Self { cur_sortition, last_sortition, - latest_consensus_hash, config, }) } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 6e3bab341e..e59438db9f 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -88,6 +88,15 @@ struct GetStackersErrorResp { err_msg: String, } +/// Result from fetching current and last sortition: +/// two sortition infos +pub struct CurrentAndLastSortition { + /// the latest winning sortition in the current burnchain fork + pub current_sortition: SortitionInfo, + /// the last winning sortition prior to `current_sortition`, if there was one + pub last_sortition: Option, +} + impl From<&GlobalConfig> for StacksClient { fn from(config: &GlobalConfig) -> Self { Self { @@ -484,6 +493,47 @@ impl StacksClient { Ok(tenures) } + /// Get the current winning sortition and the last winning sortition + pub fn get_current_and_last_sortition(&self) -> Result { + debug!("stacks_node_client: Getting current and prior sortition..."); + let path = format!("{}/latest_and_last", self.sortition_info_path()); + let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); + let send_request = || { + self.stacks_node_client.get(&path).send().map_err(|e| { + warn!("Signer failed to request latest sortition"; "err" => ?e); + e + }) + }; + let response = send_request()?; + timer.stop_and_record(); + if !response.status().is_success() { + return Err(ClientError::RequestFailure(response.status())); + } + let mut info_list: VecDeque = response.json()?; + let Some(current_sortition) = info_list.pop_front() else { + return Err(ClientError::UnexpectedResponseFormat( + "Empty SortitionInfo returned".into(), + )); + }; + if !current_sortition.was_sortition { + return Err(ClientError::UnexpectedResponseFormat( + "'Current' SortitionInfo returned which was not a winning sortition".into(), + )); + } + let last_sortition = if current_sortition.last_sortition_ch.is_some() { + let Some(last_sortition) = info_list.pop_back() else { + return Err(ClientError::UnexpectedResponseFormat("'Current' SortitionInfo has `last_sortition_ch` field, but corresponding data not returned".into())); + }; + Some(last_sortition) + } else { + None + }; + Ok(CurrentAndLastSortition { + current_sortition, + last_sortition, + }) + } + /// Get the sortition information for the latest sortition pub fn get_latest_sortition(&self) -> Result { debug!("stacks_node_client: Getting latest sortition..."); diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 5e0557ca26..7b594530c2 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -28,6 +28,7 @@ use stacks_common::util::HexError; use {serde, serde_json}; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as ChainError; @@ -51,10 +52,13 @@ pub enum QuerySpecifier { BurnchainHeaderHash(BurnchainHeaderHash), BlockHeight(u64), Latest, + /// Fetch the latest sortition *which was a winning sortition* and that sortition's + /// last sortition, returning two SortitionInfo structs. + LatestAndLast, } pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortitions"; -static PATH_REGEX: &str = "^/v3/sortitions(/(?P[a-z_]{1,15})/(?P[0-9a-f]{1,64}))?$"; +static PATH_REGEX: &str = "^/v3/sortitions(/(?P[a-z_]{1,15})(/(?P[0-9a-f]{1,64}))?)?$"; /// Struct for sortition information returned via the GetSortition API call #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] @@ -112,6 +116,7 @@ impl TryFrom<(&str, &str)> for QuerySpecifier { value.1 }; match value.0 { + "latest_and_last" => Ok(Self::LatestAndLast), "consensus" => Ok(Self::ConsensusHash( ConsensusHash::from_hex(hex_str).map_err(|e| Error::DecodeError(e.to_string()))?, )), @@ -141,6 +146,74 @@ impl GetSortitionHandler { query: QuerySpecifier::Latest, } } + + fn get_sortition_info( + sortition_sn: BlockSnapshot, + sortdb: &SortitionDB, + ) -> Result { + let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = + if !sortition_sn.sortition { + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let last_sortition = + handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; + (None, None, None, Some(last_sortition.consensus_hash)) + } else { + let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? + .ok_or_else(|| { + error!( + "Failed to load block commit from Sortition DB for snapshot with a winning block txid"; + "sortition_id" => %sortition_sn.sortition_id, + "txid" => %sortition_sn.winning_block_txid, + ); + ChainError::NoSuchBlockError + })?; + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let stacks_parent_sn = handle + .get_block_snapshot_by_height(block_commit.parent_block_ptr.into())? + .ok_or_else(|| { + warn!( + "Failed to load the snapshot of the winning block commits parent"; + "sortition_id" => %sortition_sn.sortition_id, + "txid" => %sortition_sn.winning_block_txid, + ); + ChainError::NoSuchBlockError + })?; + + // try to figure out what the last snapshot in this fork was with a successful + // sortition. + // optimization heuristic: short-circuit the load if its just `stacks_parent_sn` + let last_sortition_ch = if stacks_parent_sn.sortition { + stacks_parent_sn.consensus_hash.clone() + } else { + // we actually need to perform the marf lookup + let last_sortition = handle.get_last_snapshot_with_sortition( + sortition_sn.block_height.saturating_sub(1), + )?; + last_sortition.consensus_hash + }; + + ( + sortition_sn.miner_pk_hash.clone(), + Some(stacks_parent_sn.consensus_hash), + Some(block_commit.block_header_hash), + Some(last_sortition_ch), + ) + }; + + Ok(SortitionInfo { + burn_block_hash: sortition_sn.burn_header_hash, + burn_block_height: sortition_sn.block_height, + burn_header_timestamp: sortition_sn.burn_header_timestamp, + sortition_id: sortition_sn.sortition_id, + parent_sortition_id: sortition_sn.parent_sortition_id, + consensus_hash: sortition_sn.consensus_hash, + was_sortition: sortition_sn.sortition, + miner_pk_hash160, + stacks_parent_ch, + last_sortition_ch, + committed_block_hash, + }) + } } /// Decode the HTTP request impl HttpRequest for GetSortitionHandler { @@ -169,9 +242,15 @@ impl HttpRequest for GetSortitionHandler { let req_contents = HttpRequestContents::new().query_string(query); self.query = QuerySpecifier::Latest; - if let (Some(key), Some(value)) = (captures.name("key"), captures.name("value")) { - self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; - } + match (captures.name("key"), captures.name("value")) { + (Some(key), None) => { + self.query = QuerySpecifier::try_from((key.as_str(), ""))?; + } + (Some(key), Some(value)) => { + self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; + } + _ => {} + }; Ok(req_contents) } @@ -194,81 +273,37 @@ impl RPCRequestHandler for GetSortitionHandler { _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let result = - node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { - let query_result = match self.query { - QuerySpecifier::Latest => { + let result = node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { + let query_result = match self.query { + QuerySpecifier::Latest => Ok(Some(network.burnchain_tip.clone())), + QuerySpecifier::ConsensusHash(ref consensus_hash) => { + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) + } + QuerySpecifier::BurnchainHeaderHash(ref burn_hash) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot(burn_hash) + } + QuerySpecifier::BlockHeight(burn_height) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot_by_height(burn_height) + } + QuerySpecifier::LatestAndLast => { + if network.burnchain_tip.sortition { + // optimization: if the burn chain tip had a sortition, just return that Ok(Some(network.burnchain_tip.clone())) - }, - QuerySpecifier::ConsensusHash(ref consensus_hash) => { - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) - }, - QuerySpecifier::BurnchainHeaderHash(ref burn_hash) => { - let handle = sortdb.index_handle_at_tip(); - handle.get_block_snapshot(burn_hash) - }, - QuerySpecifier::BlockHeight(burn_height) => { - let handle = sortdb.index_handle_at_tip(); - handle.get_block_snapshot_by_height(burn_height) - }, - }; - let sortition_sn = query_result? - .ok_or_else(|| ChainError::NoSuchBlockError)?; - - let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = if !sortition_sn.sortition { - let handle = sortdb.index_handle(&sortition_sn.sortition_id); - let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; - (None, None, None, Some(last_sortition.consensus_hash)) - } else { - let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? - .ok_or_else(|| { - error!( - "Failed to load block commit from Sortition DB for snapshot with a winning block txid"; - "sortition_id" => %sortition_sn.sortition_id, - "txid" => %sortition_sn.winning_block_txid, - ); - ChainError::NoSuchBlockError - })?; - let handle = sortdb.index_handle(&sortition_sn.sortition_id); - let stacks_parent_sn = handle.get_block_snapshot_by_height(block_commit.parent_block_ptr.into())? - .ok_or_else(|| { - warn!( - "Failed to load the snapshot of the winning block commits parent"; - "sortition_id" => %sortition_sn.sortition_id, - "txid" => %sortition_sn.winning_block_txid, - ); - ChainError::NoSuchBlockError - })?; - - // try to figure out what the last snapshot in this fork was with a successful - // sortition. - // optimization heuristic: short-circuit the load if its just `stacks_parent_sn` - let last_sortition_ch = if sortition_sn.num_sortitions == stacks_parent_sn.num_sortitions + 1 { - stacks_parent_sn.consensus_hash.clone() } else { - // we actually need to perform the marf lookup - let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height.saturating_sub(1))?; - last_sortition.consensus_hash - }; - - (sortition_sn.miner_pk_hash.clone(), Some(stacks_parent_sn.consensus_hash), Some(block_commit.block_header_hash), - Some(last_sortition_ch)) - }; - - Ok(SortitionInfo { - burn_block_hash: sortition_sn.burn_header_hash, - burn_block_height: sortition_sn.block_height, - burn_header_timestamp: sortition_sn.burn_header_timestamp, - sortition_id: sortition_sn.sortition_id, - parent_sortition_id: sortition_sn.parent_sortition_id, - consensus_hash: sortition_sn.consensus_hash, - was_sortition: sortition_sn.sortition, - miner_pk_hash160, - stacks_parent_ch, - last_sortition_ch, - committed_block_hash, - }) - }); + // we actually need to perform a marf lookup to find that last snapshot + // with a sortition + let handle = sortdb.index_handle_at_tip(); + let last_sortition = handle + .get_last_snapshot_with_sortition(network.burnchain_tip.block_height)?; + Ok(Some(last_sortition)) + } + } + }; + let sortition_sn = query_result?.ok_or_else(|| ChainError::NoSuchBlockError)?; + Self::get_sortition_info(sortition_sn, sortdb) + }); let block = match result { Ok(block) => block, @@ -290,8 +325,44 @@ impl RPCRequestHandler for GetSortitionHandler { } }; + let last_sortition_ch = block.last_sortition_ch.clone(); + let mut info_list = vec![block]; + if self.query == QuerySpecifier::LatestAndLast { + // if latest **and** last are requested, lookup the sortition info for last_sortition_ch + if let Some(last_sortition_ch) = last_sortition_ch { + let result = node.with_node_state(|_, sortdb, _, _, _| { + let last_sortition_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &last_sortition_ch, + )? + .ok_or_else(|| ChainError::NoSuchBlockError)?; + Self::get_sortition_info(last_sortition_sn, sortdb) + }); + let last_block = match result { + Ok(block) => block, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("Could not find snapshot for the `last_sortition_ch`({last_sortition_ch})\n")), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load snapshot for `last_sortition_ch`({last_sortition_ch}): {:?}\n", &e); + warn!("{msg}"); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + info_list.push(last_block); + } + } + let preamble = HttpResponsePreamble::ok_json(&preamble); - let result = HttpResponseContents::try_from_json(&block)?; + let result = HttpResponseContents::try_from_json(&info_list)?; Ok((preamble, result)) } } @@ -302,7 +373,7 @@ impl HttpResponse for GetSortitionHandler { preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let sortition_info: SortitionInfo = parse_json(preamble, body)?; + let sortition_info: Vec = parse_json(preamble, body)?; Ok(HttpResponsePayload::try_from_json(sortition_info)?) } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 32924ab7b7..4057852c41 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5506,7 +5506,7 @@ fn signer_chainstate() { let time_start = Instant::now(); let proposal = loop { let proposal = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); - if proposal.0.header.consensus_hash == sortitions_view.latest_consensus_hash { + if proposal.0.header.consensus_hash == sortitions_view.cur_sortition.consensus_hash { break proposal; } if time_start.elapsed() > Duration::from_secs(20) { From 77c1036bf96494a95198d5c7381dc74c8e5887b7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 19 Sep 2024 20:45:14 -0500 Subject: [PATCH 1163/1400] chore: remove dead code, fix unit test build --- stacks-signer/src/client/stacks_client.rs | 43 +---------------------- stacks-signer/src/tests/chainstate.rs | 1 - 2 files changed, 1 insertion(+), 43 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index e59438db9f..09c0040aea 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -1,4 +1,3 @@ -use std::collections::VecDeque; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -14,6 +13,7 @@ use std::collections::VecDeque; // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::VecDeque; use std::net::SocketAddr; use blockstack_lib::burnchains::Txid; @@ -534,47 +534,6 @@ impl StacksClient { }) } - /// Get the sortition information for the latest sortition - pub fn get_latest_sortition(&self) -> Result { - debug!("stacks_node_client: Getting latest sortition..."); - let path = self.sortition_info_path(); - let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); - let send_request = || { - self.stacks_node_client.get(&path).send().map_err(|e| { - warn!("Signer failed to request latest sortition"; "err" => ?e); - e - }) - }; - let response = send_request()?; - timer.stop_and_record(); - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - let sortition_info = response.json()?; - Ok(sortition_info) - } - - /// Get the sortition information for a given sortition - pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { - debug!("stacks_node_client: Getting sortition with consensus hash {ch}..."); - let path = format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex()); - let timer_label = format!("{}/consensus/:consensus_hash", self.sortition_info_path()); - let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); - let send_request = || { - self.stacks_node_client.get(&path).send().map_err(|e| { - warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); - e - }) - }; - let response = send_request()?; - timer.stop_and_record(); - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - let sortition_info = response.json()?; - Ok(sortition_info) - } - /// Get the current peer info data from the stacks node pub fn get_peer_info(&self) -> Result { debug!("stacks_node_client: Getting peer info..."); diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index a13ab24a59..53f60e9cfe 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -82,7 +82,6 @@ fn setup_test_environment( }); let view = SortitionsView { - latest_consensus_hash: cur_sortition.consensus_hash, cur_sortition, last_sortition, config: ProposalEvalConfig { From b396798c489361516d96bc432e390a415842fe88 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 19 Sep 2024 21:03:20 -0500 Subject: [PATCH 1164/1400] add /v3/sortitions to openapi.yaml --- .../api/core-node/get_sortitions.example.json | 15 +++++++ ...t_sortitions_latest_and_prior.example.json | 28 +++++++++++++ docs/rpc/openapi.yaml | 40 +++++++++++++++++++ 3 files changed, 83 insertions(+) create mode 100644 docs/rpc/api/core-node/get_sortitions.example.json create mode 100644 docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json diff --git a/docs/rpc/api/core-node/get_sortitions.example.json b/docs/rpc/api/core-node/get_sortitions.example.json new file mode 100644 index 0000000000..a56fd887b1 --- /dev/null +++ b/docs/rpc/api/core-node/get_sortitions.example.json @@ -0,0 +1,15 @@ +[ + { + "burn_block_hash": "0x046f54cd1924a5d80fc3b8186d0334b7521acae90f9e136e2bee680c720d0e83", + "burn_block_height": 231, + "burn_header_timestamp": 1726797570, + "sortition_id": "0x8a5116b7b4306dc4f6db290d1adfff9e1347f3e921bb793fc4c33e2ff05056e2", + "parent_sortition_id": "0xdaf479110cf859e58c56b6ae941f8a14e7c7992c57027183dfbda4a4b820897c", + "consensus_hash": "0x8d2c51db737597a93191f49bcdc9c7bb44b90892", + "was_sortition": true, + "miner_pk_hash160": "0x6bc51b33e9f3626944eb879147e18111581f8f9b", + "stacks_parent_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "last_sortition_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "committed_block_hash": "0xeea47d6d639c565027110e192e308fb11656183d5c077bcd718d830652800183" + } +] diff --git a/docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json b/docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json new file mode 100644 index 0000000000..db970637ed --- /dev/null +++ b/docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json @@ -0,0 +1,28 @@ +[ + { + "burn_block_hash": "0x046f54cd1924a5d80fc3b8186d0334b7521acae90f9e136e2bee680c720d0e83", + "burn_block_height": 231, + "burn_header_timestamp": 1726797570, + "sortition_id": "0x8a5116b7b4306dc4f6db290d1adfff9e1347f3e921bb793fc4c33e2ff05056e2", + "parent_sortition_id": "0xdaf479110cf859e58c56b6ae941f8a14e7c7992c57027183dfbda4a4b820897c", + "consensus_hash": "0x8d2c51db737597a93191f49bcdc9c7bb44b90892", + "was_sortition": true, + "miner_pk_hash160": "0x6bc51b33e9f3626944eb879147e18111581f8f9b", + "stacks_parent_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "last_sortition_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "committed_block_hash": "0xeea47d6d639c565027110e192e308fb11656183d5c077bcd718d830652800183" + }, + { + "burn_block_hash": "0x496ff02cb63a4850d0bdee5fab69284b6eb0392b4538e1c462f82362c5becfa4", + "burn_block_height": 230, + "burn_header_timestamp": 1726797570, + "sortition_id": "0xdaf479110cf859e58c56b6ae941f8a14e7c7992c57027183dfbda4a4b820897c", + "parent_sortition_id": "0xf9058692055cbd879d7f71e566e44b905a887b2b182407ed596b5d6499ceae2a", + "consensus_hash": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "was_sortition": true, + "miner_pk_hash160": "0x6bc51b33e9f3626944eb879147e18111581f8f9b", + "stacks_parent_ch": "0xf7d1bd7d9d5c5a5c368402b6ef9510bd014d70f7", + "last_sortition_ch": "0xf7d1bd7d9d5c5a5c368402b6ef9510bd014d70f7", + "committed_block_hash": "0x36ee5f7f7271de1c1d4cd830e36320b51e01605547621267ae6e9b4e9b10f95e" + } +] diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 3d4249329e..e01a0956d1 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -675,3 +675,43 @@ paths: schema: type: string + /v3/sortitions/{lookup_kind}/{lookup}: + get: + summary: Fetch information about evaluated burnchain blocks (i.e., sortitions). + tags: + - Blocks + operationId: get_sortitions + description: + Fetch sortition information about a burnchain block. If the `lookup_kind` and `lookup` parameters are empty, it will return information about the latest burn block. + responses: + "200": + description: Information for the given reward cycle + content: + application/json: + example: + $ref: ./api/core-node/get_sortitions.example.json + "200": + description: Sortition information about the latest burn block with a winning miner, and the previous such burn block. + content: + application/json: + example: + $ref: ./api/core-node/get_sortitions_latest_and_prior.example.json + parameters: + - name: lookup_kind + in: path + description: |- + The style of lookup that should be performed. If not given, the most recent burn block processed will be returned. + Otherwise, the `lookup_kind` should be one of the following strings: + * `consensus` - find the burn block using the consensus hash supplied in the `lookup` field. + * `burn_height` - find the burn block using the burn block height supplied in the `lookup` field. + * `burn` - find the burn block using the burn block hash supplied in the `lookup` field. + * `latest_and_last` - return information about the latest burn block with a winning miner *and* the previous such burn block + required: false + schema: + type: string + - name: lookup + in: path + description: The value to use for the lookup if `lookup_kind` is `consensus`, `burn_height`, or `burn` + required: false + schema: + type: string From e857672a8924e76a540b96f0dc6722847274e6b0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 19 Sep 2024 21:29:30 -0500 Subject: [PATCH 1165/1400] docs: correct multi-example openapi.yaml --- docs/rpc/openapi.yaml | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index e01a0956d1..5547d3bcb6 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -685,17 +685,18 @@ paths: Fetch sortition information about a burnchain block. If the `lookup_kind` and `lookup` parameters are empty, it will return information about the latest burn block. responses: "200": - description: Information for the given reward cycle + description: Information for the burn block or in the case of `latest_and_last`, multiple burn blocks content: application/json: - example: - $ref: ./api/core-node/get_sortitions.example.json - "200": - description: Sortition information about the latest burn block with a winning miner, and the previous such burn block. - content: - application/json: - example: - $ref: ./api/core-node/get_sortitions_latest_and_prior.example.json + examples: + Latest: + description: A single element list is returned when just one sortition is requested + value: + $ref: ./api/core-node/get_sortitions.example.json + LatestAndLast: + description: Sortition information about the latest burn block with a winning miner, and the previous such burn block. + value: + $ref: ./api/core-node/get_sortitions_latest_and_prior.example.json parameters: - name: lookup_kind in: path From 7ef8809ad0e54166a5a87527e29ecbd15c50acb1 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 19 Sep 2024 22:32:36 -0400 Subject: [PATCH 1166/1400] test: reduce flakiness in `partial_tenure_fork` integration test --- testnet/stacks-node/src/tests/signer/v0.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 27f48b6917..034daa9e2d 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3567,7 +3567,7 @@ fn partial_tenure_fork() { } let num_signers = 5; - let max_nakamoto_tenures = 20; + let max_nakamoto_tenures = 30; let inter_blocks_per_tenure = 5; // setup sender + recipient for a test stx transfer @@ -3611,13 +3611,18 @@ fn partial_tenure_fork() { config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + // Increase the reward cycle length to avoid missing a prepare phase + // while we are intentionally forking. + config.burnchain.pox_reward_length = Some(40); + config.burnchain.pox_prepare_length = Some(10); + // Move epoch 2.5 and 3.0 earlier, so we have more time for the // test before re-stacking is required. if let Some(epochs) = config.burnchain.epochs.as_mut() { - epochs[6].end_height = 121; - epochs[7].start_height = 121; - epochs[7].end_height = 151; - epochs[8].start_height = 151; + epochs[6].end_height = 131; + epochs[7].start_height = 131; + epochs[7].end_height = 166; + epochs[8].start_height = 166; } else { panic!("Expected epochs to be set"); } @@ -3694,8 +3699,8 @@ fn partial_tenure_fork() { let mut min_miner_2_tenures = u64::MAX; let mut ignore_block = 0; - while !(miner_1_tenures >= min_miner_1_tenures && miner_2_tenures >= min_miner_2_tenures) { - if btc_blocks_mined > max_nakamoto_tenures { + while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { + if btc_blocks_mined >= max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } @@ -3851,7 +3856,7 @@ fn partial_tenure_fork() { Err(e) => { if e.to_string().contains("TooMuchChaining") { info!("TooMuchChaining error, skipping block"); - continue; + break; } else { panic!("Failed to submit tx: {}", e); } From cb1a47cef615fc480f1eb7abdafc4a16a4c6a9a6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 20 Sep 2024 10:50:04 -0500 Subject: [PATCH 1167/1400] feat: optimize mempool iteration by skipping repeated invocation after mempool exhausted --- stackslib/src/chainstate/stacks/miner.rs | 21 +++++++++++++----- .../stacks/tests/block_construction.rs | 1 + stackslib/src/core/mempool.rs | 22 +++++++++++++------ 3 files changed, 32 insertions(+), 12 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 0195385d3b..78d6a47781 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2228,10 +2228,10 @@ impl StacksBlockBuilder { debug!("Block transaction selection begins (parent height = {tip_height})"); let result = { - let mut intermediate_result: Result<_, Error> = Ok(0); + let mut loop_result = Ok(()); while block_limit_hit != BlockLimitFunction::LIMIT_REACHED { let mut num_considered = 0; - intermediate_result = mempool.iterate_candidates( + let intermediate_result = mempool.iterate_candidates( epoch_tx, &mut tx_events, mempool_settings.clone(), @@ -2390,8 +2390,19 @@ impl StacksBlockBuilder { let _ = mempool.drop_and_blacklist_txs(&to_drop_and_blacklist); } - if intermediate_result.is_err() { - break; + match intermediate_result { + Err(e) => { + loop_result = Err(e); + break; + } + Ok((_txs_considered, stop_reason)) => { + match stop_reason { + MempoolIterationStopReason::NoMoreCandidates => break, + MempoolIterationStopReason::DeadlineReached => break, + // if the iterator function exited, let the loop tick: it checks the block limits + MempoolIterationStopReason::IteratorExited => {} + } + } } if num_considered == 0 { @@ -2399,7 +2410,7 @@ impl StacksBlockBuilder { } } debug!("Block transaction selection finished (parent height {}): {} transactions selected ({} considered)", &tip_height, num_txs, considered.len()); - intermediate_result + loop_result }; mempool.drop_txs(&invalidated_txs)?; diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 3699710535..352679c209 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5072,6 +5072,7 @@ fn paramaterized_mempool_walk_test( }, ) .unwrap() + .0 == 0 { break; diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index fe75d62bd2..0dff4796dc 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -144,6 +144,14 @@ pub enum MemPoolSyncData { TxTags([u8; 32], Vec), } +pub enum MempoolIterationStopReason { + NoMoreCandidates, + DeadlineReached, + /// If the iteration function supplied to mempool iteration exited + /// (i.e., the transaction evaluator returned an early exit command) + IteratorExited, +} + impl StacksMessageCodec for MemPoolSyncData { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { match *self { @@ -1592,7 +1600,7 @@ impl MemPoolDB { output_events: &mut Vec, settings: MemPoolWalkSettings, mut todo: F, - ) -> Result + ) -> Result<(u64, MempoolIterationStopReason), E> where C: ClarityConnection, F: FnMut( @@ -1643,11 +1651,11 @@ impl MemPoolDB { .query(NO_PARAMS) .map_err(|err| Error::SqliteError(err))?; - loop { + let stop_reason = loop { if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 { debug!("Mempool iteration deadline exceeded"; "deadline_ms" => settings.max_walk_time_ms); - break; + break MempoolIterationStopReason::DeadlineReached; } let start_with_no_estimate = @@ -1687,7 +1695,7 @@ impl MemPoolDB { ), None => { debug!("No more transactions to consider in mempool"); - break; + break MempoolIterationStopReason::NoMoreCandidates; } } } @@ -1875,7 +1883,7 @@ impl MemPoolDB { } None => { debug!("Mempool iteration early exit from iterator"); - break; + break MempoolIterationStopReason::IteratorExited; } } @@ -1885,7 +1893,7 @@ impl MemPoolDB { candidate_cache.len() ); candidate_cache.reset(); - } + }; // drop these rusqlite statements and queries, since their existence as immutable borrows on the // connection prevents us from beginning a transaction below (which requires a mutable @@ -1908,7 +1916,7 @@ impl MemPoolDB { "considered_txs" => u128::from(total_considered), "elapsed_ms" => start_time.elapsed().as_millis() ); - Ok(total_considered) + Ok((total_considered, stop_reason)) } pub fn conn(&self) -> &DBConn { From 908c40b3e3f009c3cc95038f86fa75eace8931bc Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 20 Sep 2024 10:19:27 -0700 Subject: [PATCH 1168/1400] fix: update log levels with signerDB write errors --- stacks-signer/src/v0/signer.rs | 2 +- stacks-signer/src/v1/signer.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 654a00dc66..fa34cc4b42 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -782,7 +782,7 @@ impl Signer { warn!("{self}: Failed to mark block as globally rejected: {e:?}",); } if let Err(e) = self.signer_db.insert_block(&block_info) { - warn!("{self}: Failed to update block state: {e:?}",); + error!("{self}: Failed to update block state: {e:?}",); panic!("{self} Failed to update block state: {e}"); } } diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs index 08ccde5a92..aa8fcfb0d2 100644 --- a/stacks-signer/src/v1/signer.rs +++ b/stacks-signer/src/v1/signer.rs @@ -239,12 +239,13 @@ impl SignerTrait for Signer { self.signer_db .insert_burn_block(burn_header_hash, *burn_height, received_time) { - warn!( + error!( "Failed to write burn block event to signerdb"; "err" => ?e, "burn_header_hash" => %burn_header_hash, "burn_height" => burn_height ); + panic!("Failed to write burn block event to signerdb"); } } } From 85e41f7d602a5bb0c054f4b448410894cdfeb70b Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 20 Sep 2024 10:19:50 -0700 Subject: [PATCH 1169/1400] Fix: update error messages when looking up staging_blocks version --- stackslib/src/chainstate/nakamoto/staging_blocks.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index c0b364eea8..91aad5a325 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -675,7 +675,7 @@ impl StacksChainState { let version: Option = match query_row(&conn, qry, args) { Ok(x) => x, Err(e) => { - debug!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); + error!("Failed to get Nakamoto staging blocks DB version: {:?}", &e); return Err(ChainstateError::DBError(DBError::Corruption)); } }; @@ -687,7 +687,7 @@ impl StacksChainState { Ok(ver) } None => { - debug!("No version present in Nakamoto staging blocks DB; defaulting to 1"); + error!("No version present in Nakamoto staging blocks `db_version` table"); Err(ChainstateError::DBError(DBError::Corruption)) } } From 89fdacb4d06065b81de898b70c4f7da813fa2dda Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 20 Sep 2024 10:23:54 -0700 Subject: [PATCH 1170/1400] fix: include pox_treatment in miner_signature_hash --- stackslib/src/chainstate/nakamoto/mod.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 756212ee54..24f92ad02b 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -734,6 +734,7 @@ impl NakamotoBlockHeader { write_next(fd, &self.tx_merkle_root)?; write_next(fd, &self.state_index_root)?; write_next(fd, &self.timestamp)?; + write_next(fd, &self.pox_treatment)?; Ok(Sha512Trunc256Sum::from_hasher(hasher)) } @@ -1876,7 +1877,7 @@ impl NakamotoChainState { "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? @@ -1888,7 +1889,7 @@ impl NakamotoChainState { "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; if connected_sort_id != parent_burn_view_sn.sortition_id { warn!( From 218bd0b40afd4417b889695619007b464f505e67 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 19 Sep 2024 11:11:48 -0700 Subject: [PATCH 1171/1400] Do not count received valid signatures towards threshold weight when ignore flag set Signed-off-by: Jacinta Ferrant --- .../src/nakamoto_node/sign_coordinator.rs | 12 ++-- testnet/stacks-node/src/tests/signer/v0.rs | 60 ++++++++++--------- 2 files changed, 37 insertions(+), 35 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 29a64cfb27..1ac2618a53 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -886,11 +886,6 @@ impl SignCoordinator { ); continue; } - if !gathered_signatures.contains_key(&slot_id) { - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); - } if Self::fault_injection_ignore_signatures() { warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; @@ -906,6 +901,12 @@ impl SignCoordinator { continue; } + if !gathered_signatures.contains_key(&slot_id) { + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } + info!("SignCoordinator: Signature Added to block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), @@ -986,7 +987,6 @@ impl SignCoordinator { } }; } - // After gathering all signatures, return them if we've hit the threshold if total_weight_signed >= self.weight_threshold { info!("SignCoordinator: Received enough signatures. Continuing."; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f14d962404..c123217ce0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2227,26 +2227,39 @@ fn signers_broadcast_signed_blocks() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); - sleep_ms(10_000); - + let info_before = get_chain_info(&signer_test.running_nodes.conf); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); signer_test.mine_nakamoto_block(Duration::from_secs(30)); - sleep_ms(10_000); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + wait_for(30, || { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + let info = get_chain_info(&signer_test.running_nodes.conf); + debug!( + "blocks_mined: {},{}, stacks_tip_height: {},{}", + blocks_mined, blocks_before, info.stacks_tip_height, info_before.stacks_tip_height + ); + Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for first nakamoto block to be mined"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - let signer_pushed_before = signer_test .running_nodes .nakamoto_blocks_signer_pushed .load(Ordering::SeqCst); - let info_before = get_chain_info(&signer_test.running_nodes.conf); - // submit a tx so that the miner will mine a block + // submit a tx so that the miner will mine a blockn let sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); @@ -2254,26 +2267,16 @@ fn signers_broadcast_signed_blocks() { debug!("Transaction sent; waiting for block-mining"); - let start = Instant::now(); - let duration = 60; - loop { - let blocks_mined = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); + wait_for(30, || { let signer_pushed = signer_test .running_nodes .nakamoto_blocks_signer_pushed .load(Ordering::SeqCst); - + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); - if blocks_mined > blocks_before - && signer_pushed > signer_pushed_before - && info.stacks_tip_height > info_before.stacks_tip_height - { - break; - } - debug!( "blocks_mined: {},{}, signers_pushed: {},{}, stacks_tip_height: {},{}", blocks_mined, @@ -2283,12 +2286,11 @@ fn signers_broadcast_signed_blocks() { info.stacks_tip_height, info_before.stacks_tip_height ); - - std::thread::sleep(Duration::from_millis(100)); - if start.elapsed() >= Duration::from_secs(duration) { - panic!("Timed out"); - } - } + Ok(blocks_mined > blocks_before + && info.stacks_tip_height > info_before.stacks_tip_height + && signer_pushed > signer_pushed_before) + }) + .expect("Timed out waiting for second nakamoto block to be mined"); signer_test.shutdown(); } @@ -4754,7 +4756,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Allowing miner to accept block responses again. "); TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); - info!("Allowing singers to broadcast block N+1 to the miner"); + info!("Allowing signers to broadcast block N+1 to the miner"); TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); // Assert the N+1' block was rejected From 351f9e6251503f0544a8fee4f06f46eeb06e1cb4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 19 Sep 2024 13:26:17 -0700 Subject: [PATCH 1172/1400] Do not assume every signers signature makes it before miner quits waiting for unnecessary signatures Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 156 +++++++++++++-------- 1 file changed, 95 insertions(+), 61 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c123217ce0..8d8ff07ac0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4130,31 +4130,34 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let long_timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(30); + let long_timeout = 60; + let short_timeout = 30; signer_test.boot_to_epoch_3(); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block + + // submit a tx so that the miner will mine a stacks block N let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block to be mined and processed"); + .expect("Timed out waiting for N to be mined and processed"); - sender_nonce += 1; let info_after = signer_test .stacks_client .get_peer_info() @@ -4173,13 +4176,13 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .len(); assert_eq!(nmb_signatures, num_signers); - // Ensure that the block was accepted globally so the stacks tip has not advanced to N + // Ensure that the block was accepted globally so the stacks tip has advanced to N let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); - // Make less than 30% of the signers reject the block to ensure it is marked globally accepted + // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted let rejecting_signers: Vec<_> = signer_test .signer_stacks_private_keys .iter() @@ -4191,18 +4194,20 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .unwrap() .replace(rejecting_signers.clone()); test_observer::clear(); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} in to mine block N+1"); - let start_time = Instant::now(); + + // submit a tx so that the miner will mine a stacks block N+1 let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - wait_for(short_timeout.as_secs(), || { + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N+1"); + + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4210,7 +4215,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) }) .expect("Timed out waiting for block to be mined and processed"); - loop { + wait_for(long_timeout, || { let stackerdb_events = test_observer::get_stackerdb_chunks(); let block_rejections = stackerdb_events .into_iter() @@ -4235,14 +4240,10 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { } }) .collect::>(); - if block_rejections.len() == rejecting_signers.len() { - break; - } - assert!( - start_time.elapsed() < long_timeout, - "FAIL: Test timed out while waiting for block proposal rejections", - ); - } + Ok(block_rejections.len() == rejecting_signers.len()) + }) + .expect("Timed out waiting for block proposal rejections"); + // Assert the block was mined let info_after = signer_test .stacks_client @@ -4263,13 +4264,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .len(); assert_eq!(nmb_signatures, num_signers - rejecting_signers.len()); - // Ensure that the block was accepted globally so the stacks tip has advanced to N+1 + // Ensure that the block was still accepted globally so the stacks tip has advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1 = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); assert_ne!(block_n_1, block_n); info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); + // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let blocks_before = mined_blocks.load(Ordering::SeqCst); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -4277,11 +4279,12 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .unwrap() .replace(Vec::new()); + // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N+2"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4297,20 +4300,35 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height, ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_2 = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); assert_ne!(block_n_2, block_n_1); + + // Make sure that ALL signers accepted the block proposal + wait_for(short_timeout, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { + if hash == block_n_2.signer_signature_hash { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); } #[test] @@ -4351,7 +4369,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(30); + let short_timeout = 30; signer_test.boot_to_epoch_3(); info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); @@ -4360,13 +4378,15 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); + // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4375,17 +4395,15 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }) .expect("Timed out waiting for block to be mined and processed"); - sender_nonce += 1; + // Ensure that the block was accepted globally so the stacks tip has advanced to N let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - assert_eq!( info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); @@ -4404,16 +4422,19 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .replace(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); + + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to attempt to mine block N+1"); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let ignored_signers = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) @@ -4433,6 +4454,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { Ok(ignored_signers.len() + ignoring_signers.len() == num_signers) }) .expect("FAIL: Timed out waiting for block proposal acceptance"); + let blocks_after = mined_blocks.load(Ordering::SeqCst); let info_after = signer_test .stacks_client @@ -4440,13 +4462,14 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .expect("Failed to get peer info"); assert_eq!(blocks_after, blocks_before); assert_eq!(info_after, info_before); - // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1 + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1 = nakamoto_blocks.last().unwrap(); assert_ne!(block_n_1, block_n); assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); info!("------------------------- Starting Tenure B -------------------------"); + // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and( @@ -4458,23 +4481,19 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }, ) .unwrap(); + info!( "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" ); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + TEST_IGNORE_ALL_BLOCK_PROPOSALS + .lock() + .unwrap() + .replace(Vec::new()); + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4491,15 +4510,6 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); @@ -4509,6 +4519,30 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { block_n_1_prime.block_hash ); assert_ne!(block_n_1_prime, block_n); + + // Make sure that ALL signers accepted the block proposal even though they signed a conflicting one in prior tenure + wait_for(short_timeout, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { + if hash == block_n_1_prime.signer_signature_hash { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); } #[test] From 804f6f3a719cf0c74a34e704d1118c060c7ec254 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 19 Sep 2024 16:58:37 -0400 Subject: [PATCH 1173/1400] test: move the 2.5 and 3.0 activation heights earlier for this test This allows us to avoid hitting block 240, which is when the stackers get unstacked and the chain stalls, making `partial_tenure_fork` less flaky --- testnet/stacks-node/src/tests/signer/mod.rs | 19 ++++++++++++++++--- testnet/stacks-node/src/tests/signer/v0.rs | 11 +++++++++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 0b38a79234..9532166492 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -748,9 +748,22 @@ fn setup_stx_btc_node ()>( info!("Make new BitcoinRegtestController"); let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - info!("Bootstraping..."); - // Should be 201 for other tests? - btc_regtest_controller.bootstrap_chain_to_pks(195, btc_miner_pubkeys); + let epoch_2_5_start = usize::try_from( + naka_conf + .burnchain + .epochs + .as_ref() + .unwrap() + .iter() + .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch25) + .unwrap() + .start_height, + ) + .expect("Failed to get epoch 2.5 start height"); + let bootstrap_block = epoch_2_5_start - 6; + + info!("Bootstraping to block {bootstrap_block}..."); + btc_regtest_controller.bootstrap_chain_to_pks(bootstrap_block, btc_miner_pubkeys); info!("Chain bootstrapped..."); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 8d8ff07ac0..27f48b6917 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3610,6 +3610,17 @@ fn partial_tenure_fork() { config.node.local_peer_seed = btc_miner_1_seed.clone(); config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + // Move epoch 2.5 and 3.0 earlier, so we have more time for the + // test before re-stacking is required. + if let Some(epochs) = config.burnchain.epochs.as_mut() { + epochs[6].end_height = 121; + epochs[7].start_height = 121; + epochs[7].end_height = 151; + epochs[8].start_height = 151; + } else { + panic!("Expected epochs to be set"); + } }, Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), None, From 6ed7d4edd9bdc5c60522e4bfcc512d1a280c1734 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 19 Sep 2024 22:32:36 -0400 Subject: [PATCH 1174/1400] test: reduce flakiness in `partial_tenure_fork` integration test --- testnet/stacks-node/src/tests/signer/v0.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 27f48b6917..034daa9e2d 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3567,7 +3567,7 @@ fn partial_tenure_fork() { } let num_signers = 5; - let max_nakamoto_tenures = 20; + let max_nakamoto_tenures = 30; let inter_blocks_per_tenure = 5; // setup sender + recipient for a test stx transfer @@ -3611,13 +3611,18 @@ fn partial_tenure_fork() { config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + // Increase the reward cycle length to avoid missing a prepare phase + // while we are intentionally forking. + config.burnchain.pox_reward_length = Some(40); + config.burnchain.pox_prepare_length = Some(10); + // Move epoch 2.5 and 3.0 earlier, so we have more time for the // test before re-stacking is required. if let Some(epochs) = config.burnchain.epochs.as_mut() { - epochs[6].end_height = 121; - epochs[7].start_height = 121; - epochs[7].end_height = 151; - epochs[8].start_height = 151; + epochs[6].end_height = 131; + epochs[7].start_height = 131; + epochs[7].end_height = 166; + epochs[8].start_height = 166; } else { panic!("Expected epochs to be set"); } @@ -3694,8 +3699,8 @@ fn partial_tenure_fork() { let mut min_miner_2_tenures = u64::MAX; let mut ignore_block = 0; - while !(miner_1_tenures >= min_miner_1_tenures && miner_2_tenures >= min_miner_2_tenures) { - if btc_blocks_mined > max_nakamoto_tenures { + while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { + if btc_blocks_mined >= max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } @@ -3851,7 +3856,7 @@ fn partial_tenure_fork() { Err(e) => { if e.to_string().contains("TooMuchChaining") { info!("TooMuchChaining error, skipping block"); - continue; + break; } else { panic!("Failed to submit tx: {}", e); } From 054033c0b6aba27b5caaa4ab849b44e2dce2ad64 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 11:11:32 -0700 Subject: [PATCH 1175/1400] Empty sortition needed a longer wait time before ignoring a block and should wait for a block more definitively Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 48 +++++++--------------- 1 file changed, 15 insertions(+), 33 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f14d962404..2c5c8484ca 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -231,27 +231,20 @@ impl SignerTest { Some(self.num_stacking_cycles), ); info!("Waiting for signer set calculation."); - let mut reward_set_calculated = false; - let short_timeout = Duration::from_secs(60); - let now = std::time::Instant::now(); // Make sure the signer set is calculated before continuing or signers may not // recognize that they are registered signers in the subsequent burn block event let reward_cycle = self.get_current_reward_cycle() + 1; - while !reward_set_calculated { - let reward_set = self + wait_for(30, || { + Ok(self .stacks_client .get_reward_set_signers(reward_cycle) - .expect("Failed to check if reward set is calculated"); - reward_set_calculated = reward_set.is_some(); - if reward_set_calculated { - debug!("Signer set: {:?}", reward_set.unwrap()); - } - std::thread::sleep(Duration::from_secs(1)); - assert!( - now.elapsed() < short_timeout, - "Timed out waiting for reward set calculation" - ); - } + .expect("Failed to check if reward set is calculated") + .map(|reward_set| { + debug!("Signer set: {:?}", reward_set); + }) + .is_some()) + }) + .expect("Timed out waiting for reward set calculation"); info!("Signer set calculated"); // Manually consume one more block to ensure signers refresh their state @@ -264,8 +257,7 @@ impl SignerTest { info!("Signers initialized"); self.run_until_epoch_3_boundary(); - std::thread::sleep(Duration::from_secs(1)); - wait_for(60, || { + wait_for(30, || { Ok(get_chain_info_opt(&self.running_nodes.conf).is_some()) }) .expect("Timed out waiting for network to restart after 3.0 boundary reached"); @@ -275,11 +267,11 @@ impl SignerTest { // could be other miners mining blocks. let height_before = get_chain_info(&self.running_nodes.conf).stacks_tip_height; info!("Waiting for first Nakamoto block: {}", height_before + 1); - next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let height = get_chain_info(&self.running_nodes.conf).stacks_tip_height; - Ok(height > height_before) + self.mine_nakamoto_block(Duration::from_secs(30)); + wait_for(30, || { + Ok(get_chain_info(&self.running_nodes.conf).stacks_tip_height > height_before) }) - .unwrap(); + .expect("Timed out waiting for first Nakamoto block after 3.0 boundary"); info!("Ready to mine Nakamoto blocks!"); } @@ -553,19 +545,9 @@ fn miner_gather_signatures() { let num_signers = 5; let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(30); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_mined_before = mined_blocks.load(Ordering::SeqCst); signer_test.boot_to_epoch_3(); - // give the system a chance to reach the Nakamoto start tip - // mine a Nakamoto block - wait_for(30, || { - let blocks_mined = mined_blocks.load(Ordering::SeqCst); - Ok(blocks_mined > blocks_mined_before) - }) - .unwrap(); - info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); @@ -2317,7 +2299,7 @@ fn empty_sortition() { let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let block_proposal_timeout = Duration::from_secs(5); + let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], From 72a56cfa60d2522e9dc3e8da78100f8b2101a605 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 11:11:32 -0700 Subject: [PATCH 1176/1400] Empty sortition needed a longer wait time before ignoring a block and should wait for a block more definitively Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 48 +++++++--------------- 1 file changed, 15 insertions(+), 33 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 034daa9e2d..12302e5e6b 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -231,27 +231,20 @@ impl SignerTest { Some(self.num_stacking_cycles), ); info!("Waiting for signer set calculation."); - let mut reward_set_calculated = false; - let short_timeout = Duration::from_secs(60); - let now = std::time::Instant::now(); // Make sure the signer set is calculated before continuing or signers may not // recognize that they are registered signers in the subsequent burn block event let reward_cycle = self.get_current_reward_cycle() + 1; - while !reward_set_calculated { - let reward_set = self + wait_for(30, || { + Ok(self .stacks_client .get_reward_set_signers(reward_cycle) - .expect("Failed to check if reward set is calculated"); - reward_set_calculated = reward_set.is_some(); - if reward_set_calculated { - debug!("Signer set: {:?}", reward_set.unwrap()); - } - std::thread::sleep(Duration::from_secs(1)); - assert!( - now.elapsed() < short_timeout, - "Timed out waiting for reward set calculation" - ); - } + .expect("Failed to check if reward set is calculated") + .map(|reward_set| { + debug!("Signer set: {:?}", reward_set); + }) + .is_some()) + }) + .expect("Timed out waiting for reward set calculation"); info!("Signer set calculated"); // Manually consume one more block to ensure signers refresh their state @@ -264,8 +257,7 @@ impl SignerTest { info!("Signers initialized"); self.run_until_epoch_3_boundary(); - std::thread::sleep(Duration::from_secs(1)); - wait_for(60, || { + wait_for(30, || { Ok(get_chain_info_opt(&self.running_nodes.conf).is_some()) }) .expect("Timed out waiting for network to restart after 3.0 boundary reached"); @@ -275,11 +267,11 @@ impl SignerTest { // could be other miners mining blocks. let height_before = get_chain_info(&self.running_nodes.conf).stacks_tip_height; info!("Waiting for first Nakamoto block: {}", height_before + 1); - next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let height = get_chain_info(&self.running_nodes.conf).stacks_tip_height; - Ok(height > height_before) + self.mine_nakamoto_block(Duration::from_secs(30)); + wait_for(30, || { + Ok(get_chain_info(&self.running_nodes.conf).stacks_tip_height > height_before) }) - .unwrap(); + .expect("Timed out waiting for first Nakamoto block after 3.0 boundary"); info!("Ready to mine Nakamoto blocks!"); } @@ -553,19 +545,9 @@ fn miner_gather_signatures() { let num_signers = 5; let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let timeout = Duration::from_secs(30); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_mined_before = mined_blocks.load(Ordering::SeqCst); signer_test.boot_to_epoch_3(); - // give the system a chance to reach the Nakamoto start tip - // mine a Nakamoto block - wait_for(30, || { - let blocks_mined = mined_blocks.load(Ordering::SeqCst); - Ok(blocks_mined > blocks_mined_before) - }) - .unwrap(); - info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); @@ -2319,7 +2301,7 @@ fn empty_sortition() { let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let block_proposal_timeout = Duration::from_secs(5); + let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr.clone(), send_amt + send_fee)], From 63fa7fa2b1fce294897bcd20ef2be5d28a6881eb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 11:42:41 -0700 Subject: [PATCH 1177/1400] Fix locally_rejected_blocks_overriden_by_global_acceptance Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 33 ++++++++++++++++------ 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 12302e5e6b..4238f1c615 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4164,21 +4164,36 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + // Make sure that ALL signers accepted the block proposal + wait_for(short_timeout, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { + if hash == block_n.signer_signature_hash { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted let rejecting_signers: Vec<_> = signer_test From 0f8c4b83c31133f565329a4037d05f1bedcea663 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 12:36:04 -0700 Subject: [PATCH 1178/1400] Cleanup locally and globally rejected tests Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/mod.rs | 81 +++++- testnet/stacks-node/src/tests/signer/v0.rs | 300 +++++++------------- 2 files changed, 188 insertions(+), 193 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 9532166492..a0c8041401 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -37,6 +37,7 @@ use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; use clarity::vm::types::PrincipalData; +use libsigner::v0::messages::{BlockResponse, RejectCode, SignerMessage}; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -46,7 +47,9 @@ use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; -use stacks::types::chainstate::StacksAddress; +use stacks::types::chainstate::{StacksAddress, StacksPublicKey}; +use stacks::types::PublicKey; +use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; @@ -678,6 +681,82 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Result<(), String> { + // Make sure that ALL signers accepted the block proposal + wait_for(timeout_secs, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(( + hash, + signature, + ))) => { + if hash == *signer_signature_hash + && expected_signers.iter().any(|pk| { + pk.verify(hash.bits(), &signature) + .expect("Failed to verify signature") + }) + { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == expected_signers.len()) + }) + } + + pub fn wait_for_block_rejections( + &self, + timeout_secs: u64, + expected_signers: &[StacksPublicKey], + reject_code: Option, + ) -> Result<(), String> { + wait_for(timeout_secs, || { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + let block_rejections = stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { + let rejected_pubkey = rejection + .recover_public_key() + .expect("Failed to recover public key from rejection"); + if expected_signers.contains(&rejected_pubkey) { + if let Some(reject_code) = reject_code.as_ref() { + if reject_code != &rejection.reason_code { + return None; + } + } + Some(rejection) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(block_rejections.len() == expected_signers.len()) + }) + } } fn setup_stx_btc_node ()>( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4238f1c615..83b1dd3df7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3956,29 +3956,36 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let long_timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(20); signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let blocks_before = mined_blocks.load(Ordering::SeqCst); - let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for stacks block N to be mined"); sender_nonce += 1; let info_after = signer_test.stacks_client.get_peer_info().unwrap(); assert_eq!( @@ -3988,15 +3995,13 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + signer_test + .wait_for_block_acceptance(30, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); // Make half of the signers reject the block proposal by the miner to ensure its marked globally rejected - let rejecting_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .take(num_signers / 2) - .collect(); + let rejecting_signers: Vec<_> = all_signers.iter().cloned().take(num_signers / 2).collect(); TEST_REJECT_ALL_BLOCK_PROPOSAL .lock() .unwrap() @@ -4006,42 +4011,12 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} to mine block N+1"); - let start_time = Instant::now(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - loop { - let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections = stackerdb_events - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { - let rejected_pubkey = rejection - .recover_public_key() - .expect("Failed to recover public key from rejection"); - if rejecting_signers.contains(&rejected_pubkey) - && rejection.reason_code == RejectCode::TestingDirective - { - Some(rejection) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - if block_rejections.len() == rejecting_signers.len() { - break; - } - assert!( - start_time.elapsed() < long_timeout, - "FAIL: Test timed out while waiting for block proposal rejections", - ); - } + signer_test + .wait_for_block_rejections(60, &rejecting_signers, Some(RejectCode::TestingDirective)) + .expect("Timed out waiting for block rejection of N+1"); assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); let info_after = signer_test.stacks_client.get_peer_info().unwrap(); @@ -4057,13 +4032,17 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .lock() .unwrap() .replace(Vec::new()); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height + && test_observer::get_mined_nakamoto_blocks().last().unwrap() != block_n_1) + }) + .expect("Timed out waiting for stacks block N+1' to be mined"); let blocks_after = mined_blocks.load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before + 1); @@ -4073,14 +4052,6 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { info_before.stacks_tip_height + 1 ); // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' - let start_time = Instant::now(); - while test_observer::get_mined_nakamoto_blocks().last().unwrap() == block_n_1 { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1_prime = nakamoto_blocks.last().unwrap(); assert_eq!( @@ -4088,6 +4059,10 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { block_n_1_prime.block_hash ); assert_ne!(block_n_1_prime, block_n_1); + // Verify that all signers accepted the new block proposal + signer_test + .wait_for_block_acceptance(30, &block_n_1_prime.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N+1'"); } #[test] @@ -4122,13 +4097,20 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { let send_amt = 100; let send_fee = 180; let nmb_txs = 3; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let long_timeout = 60; let short_timeout = 30; signer_test.boot_to_epoch_3(); @@ -4148,11 +4130,12 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info!("Submitted tx {tx} in to mine block N"); wait_for(short_timeout, || { - let info_after = signer_test + Ok(signer_test .stacks_client .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + .expect("Failed to get peer info") + .stacks_tip_height + > info_before.stacks_tip_height) }) .expect("Timed out waiting for N to be mined and processed"); @@ -4171,35 +4154,15 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); // Make sure that ALL signers accepted the block proposal - wait_for(short_timeout, || { - let signatures = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { - if hash == block_n.signer_signature_hash { - Some(signature) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - Ok(signatures.len() == num_signers) - }) - .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); + signer_test + .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted - let rejecting_signers: Vec<_> = signer_test - .signer_stacks_private_keys + let rejecting_signers: Vec<_> = all_signers .iter() - .map(StacksPublicKey::from_private) + .cloned() .take(num_signers * 3 / 10) .collect(); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -4220,42 +4183,24 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { sender_nonce += 1; info!("Submitted tx {tx} in to mine block N+1"); - wait_for(short_timeout, || { - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for block to be mined and processed"); - wait_for(long_timeout, || { - let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections = stackerdb_events - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { - let rejected_pubkey = rejection - .recover_public_key() - .expect("Failed to recover public key from rejection"); - if rejecting_signers.contains(&rejected_pubkey) - && rejection.reason_code == RejectCode::TestingDirective - { - Some(rejection) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - Ok(block_rejections.len() == rejecting_signers.len()) + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block proposal rejections"); + .expect("Timed out waiting for stacks block N+1 to be mined"); + + signer_test + .wait_for_block_rejections( + short_timeout, + &rejecting_signers, + Some(RejectCode::TestingDirective), + ) + .expect("Timed out waiting for block rejection of N+1"); // Assert the block was mined let info_after = signer_test @@ -4267,15 +4212,6 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers - rejecting_signers.len()); // Ensure that the block was still accepted globally so the stacks tip has advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); @@ -4283,6 +4219,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); assert_ne!(block_n_1, block_n); + signer_test + .wait_for_block_acceptance( + short_timeout, + &block_n_1.signer_signature_hash, + &all_signers[num_signers * 3 / 10 + 1..], + ) + .expect("Timed out waiting for block acceptance of N+1"); + info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); @@ -4297,14 +4241,16 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N+2"); - wait_for(short_timeout, || { - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block to be mined and processed"); + .expect("Timed out waiting for stacks block N+2 to be mined"); let blocks_after = mined_blocks.load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before + 1); @@ -4320,28 +4266,13 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { assert_ne!(block_n_2, block_n_1); // Make sure that ALL signers accepted the block proposal - wait_for(short_timeout, || { - let signatures = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { - if hash == block_n_2.signer_signature_hash { - Some(signature) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - Ok(signatures.len() == num_signers) - }) - .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); + signer_test + .wait_for_block_acceptance( + short_timeout, + &block_n_2.signer_signature_hash, + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+2"); } #[test] @@ -4381,6 +4312,11 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let short_timeout = 30; signer_test.boot_to_epoch_3(); @@ -4423,10 +4359,9 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); // Make more than >70% of the signers ignore the block proposal to ensure it it is not globally accepted/rejected - let ignoring_signers: Vec<_> = signer_test - .signer_stacks_private_keys + let ignoring_signers: Vec<_> = all_signers .iter() - .map(StacksPublicKey::from_private) + .cloned() .take(num_signers * 7 / 10) .collect(); TEST_IGNORE_ALL_BLOCK_PROPOSALS @@ -4534,28 +4469,9 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { assert_ne!(block_n_1_prime, block_n); // Make sure that ALL signers accepted the block proposal even though they signed a conflicting one in prior tenure - wait_for(short_timeout, || { - let signatures = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { - if hash == block_n_1_prime.signer_signature_hash { - Some(signature) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - Ok(signatures.len() == num_signers) - }) - .expect("FAIL: Timed out waiting for block proposal acceptance by ALL signers"); + signer_test + .wait_for_block_acceptance(30, &block_n_1_prime.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N+1'"); } #[test] From 5f54144c9ce4e5a926ba497355fdc150ee4f3b53 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 13:55:15 -0700 Subject: [PATCH 1179/1400] Do not assume every signer rejects for testing directive reasons as they may hit the threshold rejection first Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/mod.rs | 6 ----- testnet/stacks-node/src/tests/signer/v0.rs | 26 +++++++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a0c8041401..a48677e0f6 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -724,7 +724,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest, ) -> Result<(), String> { wait_for(timeout_secs, || { let stackerdb_events = test_observer::get_stackerdb_chunks(); @@ -740,11 +739,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], @@ -3976,7 +3977,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); - wait_for(30, || { + wait_for(short_timeout_secs, || { Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before && signer_test .stacks_client @@ -3996,7 +3997,11 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); signer_test - .wait_for_block_acceptance(30, &block_n.signer_signature_hash, &all_signers) + .wait_for_block_acceptance( + short_timeout_secs, + &block_n.signer_signature_hash, + &all_signers, + ) .expect("Timed out waiting for block acceptance of N"); info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); @@ -4014,8 +4019,9 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); + // We cannot gaurantee that ALL signers will reject due to the testing directive as we may hit majority first..So ensure that we only assert that up to the threshold number rejected signer_test - .wait_for_block_rejections(60, &rejecting_signers, Some(RejectCode::TestingDirective)) + .wait_for_block_rejections(short_timeout_secs, &rejecting_signers) .expect("Timed out waiting for block rejection of N+1"); assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); @@ -4032,7 +4038,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .lock() .unwrap() .replace(Vec::new()); - wait_for(30, || { + wait_for(short_timeout_secs, || { Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before && signer_test .stacks_client @@ -4061,7 +4067,11 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { assert_ne!(block_n_1_prime, block_n_1); // Verify that all signers accepted the new block proposal signer_test - .wait_for_block_acceptance(30, &block_n_1_prime.signer_signature_hash, &all_signers) + .wait_for_block_acceptance( + short_timeout_secs, + &block_n_1_prime.signer_signature_hash, + &all_signers, + ) .expect("Timed out waiting for block acceptance of N+1'"); } @@ -4195,11 +4205,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .expect("Timed out waiting for stacks block N+1 to be mined"); signer_test - .wait_for_block_rejections( - short_timeout, - &rejecting_signers, - Some(RejectCode::TestingDirective), - ) + .wait_for_block_rejections(short_timeout, &rejecting_signers) .expect("Timed out waiting for block rejection of N+1"); // Assert the block was mined From 550cd52156dd62919b8503cb9a19ed25004630ea Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 20 Sep 2024 17:26:15 -0400 Subject: [PATCH 1180/1400] test: resolve issues with `bitcoin_reorg_flap_with_follower` --- .../src/tests/neon_integrations.rs | 31 +++++++++++-------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 8043c2032e..ae661c5f11 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12398,6 +12398,10 @@ fn bitcoin_reorg_flap() { channel.stop_chains_coordinator(); } +/// Advance the bitcoin chain and wait for the miner and any followers to +/// process the next block. +/// NOTE: This only works if the followers are mock-mining, or else the counter +/// will not be updated. fn next_block_and_wait_all( btc_controller: &mut BitcoinRegtestController, miner_blocks_processed: &Arc, @@ -12447,7 +12451,7 @@ fn bitcoin_reorg_flap_with_follower() { } let (conf, _miner_account) = neon_integration_test_conf(); - let timeout = None; + let timeout = Some(Duration::from_secs(60)); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -12461,10 +12465,12 @@ fn bitcoin_reorg_flap_with_follower() { eprintln!("Chain bootstrapped..."); let mut miner_run_loop = neon::RunLoop::new(conf.clone()); + let run_loop_stopper = miner_run_loop.get_termination_switch(); let miner_blocks_processed = miner_run_loop.get_blocks_processed_arc(); let miner_channel = miner_run_loop.get_coordinator_channel().unwrap(); let mut follower_conf = conf.clone(); + follower_conf.node.mock_mining = true; follower_conf.events_observers.clear(); follower_conf.node.working_dir = format!("{}-follower", &conf.node.working_dir); follower_conf.node.seed = vec![0x01; 32]; @@ -12483,7 +12489,7 @@ fn bitcoin_reorg_flap_with_follower() { follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); - thread::spawn(move || miner_run_loop.start(None, 0)); + let run_loop_thread = thread::spawn(move || miner_run_loop.start(None, 0)); wait_for_runloop(&miner_blocks_processed); // figure out the started node's port @@ -12499,23 +12505,20 @@ fn bitcoin_reorg_flap_with_follower() { ); let mut follower_run_loop = neon::RunLoop::new(follower_conf.clone()); + let follower_run_loop_stopper = follower_run_loop.get_termination_switch(); let follower_blocks_processed = follower_run_loop.get_blocks_processed_arc(); let follower_channel = follower_run_loop.get_coordinator_channel().unwrap(); - thread::spawn(move || follower_run_loop.start(None, 0)); + let follower_thread = thread::spawn(move || follower_run_loop.start(None, 0)); wait_for_runloop(&follower_blocks_processed); eprintln!("Follower bootup complete!"); // first block wakes up the run loop - next_block_and_wait_all( - &mut btc_regtest_controller, - &miner_blocks_processed, - &[], - timeout, - ); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &miner_blocks_processed, 60); - // first block will hold our VRF registration + // next block will hold our VRF registration + // Note that the follower will not see its block processed counter bumped here next_block_and_wait_all( &mut btc_regtest_controller, &miner_blocks_processed, @@ -12609,9 +12612,11 @@ fn bitcoin_reorg_flap_with_follower() { assert_eq!(miner_channel.get_sortitions_processed(), 225); assert_eq!(follower_channel.get_sortitions_processed(), 225); - btcd_controller.stop_bitcoind().unwrap(); - miner_channel.stop_chains_coordinator(); - follower_channel.stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + follower_run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + follower_thread.join().unwrap(); } /// Tests the following: From dc28e8b620ac7a79144c9310e301e4006ca3c084 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:05:56 -0400 Subject: [PATCH 1181/1400] feat: mine a single block tenure off of a designated chain tip --- .../chainstate/nakamoto/coordinator/tests.rs | 62 +++++++++++++++++-- 1 file changed, 57 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 6a2a484790..e56e55754c 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -538,6 +538,36 @@ impl<'a> TestPeer<'a> { miner_setup: F, after_block: G, ) -> NakamotoBlock + where + F: FnMut(&mut NakamotoBlockBuilder), + G: FnMut(&mut NakamotoBlock) -> bool, + { + let nakamoto_tip = { + let chainstate = &mut self.stacks_node.as_mut().unwrap().chainstate; + let sort_db = self.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + self.mine_single_block_tenure_at_tip( + &nakamoto_tip.index_block_hash(), + sender_key, + tenure_change_tx, + coinbase_tx, + miner_setup, + after_block, + ) + } + + pub fn mine_single_block_tenure_at_tip( + &mut self, + nakamoto_tip: &StacksBlockId, + sender_key: &StacksPrivateKey, + tenure_change_tx: &StacksTransaction, + coinbase_tx: &StacksTransaction, + miner_setup: F, + after_block: G, + ) -> NakamotoBlock where F: FnMut(&mut NakamotoBlockBuilder), G: FnMut(&mut NakamotoBlock) -> bool, @@ -547,6 +577,8 @@ impl<'a> TestPeer<'a> { let recipient_addr = StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + let sender_acct = self.get_account(nakamoto_tip, &sender_addr.to_account_principal()); + // do a stx transfer in each block to a given recipient let mut blocks_and_sizes = self.make_nakamoto_tenure_and( tenure_change_tx.clone(), @@ -555,12 +587,11 @@ impl<'a> TestPeer<'a> { miner_setup, |_miner, chainstate, sortdb, blocks_so_far| { if blocks_so_far.len() < 1 { - let account = get_account(chainstate, sortdb, &sender_addr); let stx_transfer = make_token_transfer( chainstate, sortdb, &sender_key, - account.nonce, + sender_acct.nonce, 100, 1, &recipient_addr, @@ -607,12 +638,33 @@ impl<'a> TestPeer<'a> { tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + let nakamoto_tip = + if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + nakamoto_parent_tenure.last().as_ref().unwrap().block_id() + } else { + let tip = { + let chainstate = &mut self.stacks_node.as_mut().unwrap().chainstate; + let sort_db = self.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + tip.index_block_hash() + }; + + let miner_addr = self.miner.origin_address().unwrap(); + let miner_acct = self.get_account(&nakamoto_tip, &miner_addr.to_account_principal()); + let tenure_change_tx = self .miner - .make_nakamoto_tenure_change(tenure_change.clone()); - let coinbase_tx = self.miner.make_nakamoto_coinbase(None, vrf_proof); + .make_nakamoto_tenure_change_with_nonce(tenure_change.clone(), miner_acct.nonce); + + let coinbase_tx = + self.miner + .make_nakamoto_coinbase_with_nonce(None, vrf_proof, miner_acct.nonce + 1); - let block = self.mine_single_block_tenure( + let block = self.mine_single_block_tenure_at_tip( + &nakamoto_tip, sender_key, &tenure_change_tx, &coinbase_tx, From 639ab057ebeb4843332d90da12698440c75bee1f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:06:27 -0400 Subject: [PATCH 1182/1400] feat: get the parent ID of a nakamoto block --- stackslib/src/chainstate/nakamoto/mod.rs | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 756212ee54..9e30e4bcb3 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -110,8 +110,8 @@ use crate::net::Error as net_error; use crate::util_lib::boot; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::{ - query_int, query_row, query_row_panic, query_rows, sqlite_open, tx_begin_immediate, u64_to_sql, - DBConn, Error as DBError, FromRow, + query_int, query_row, query_row_columns, query_row_panic, query_rows, sqlite_open, + tx_begin_immediate, u64_to_sql, DBConn, Error as DBError, FromRow, }; use crate::{chainstate, monitoring}; @@ -2479,6 +2479,26 @@ impl NakamotoChainState { Ok(None) } + /// Load the parent block ID of a Nakamoto block + pub fn get_nakamoto_parent_block_id( + chainstate_conn: &Connection, + index_block_hash: &StacksBlockId, + ) -> Result, ChainstateError> { + let sql = "SELECT parent_block_id FROM nakamoto_block_headers WHERE index_block_hash = ?1"; + let mut result = query_row_columns( + chainstate_conn, + sql, + &[&index_block_hash], + "parent_block_id", + )?; + if result.len() > 1 { + // even though `(consensus_hash,block_hash)` is the primary key, these are hashed to + // produce `index_block_hash`. So, `index_block_hash` is also unique w.h.p. + unreachable!("FATAL: multiple instances of index_block_hash"); + } + Ok(result.pop()) + } + /// Load a Nakamoto header pub fn get_block_header_nakamoto( chainstate_conn: &Connection, From 47171ce51fa0574351d5105fd68558fe5a059415 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:06:44 -0400 Subject: [PATCH 1183/1400] feat: allow the TestPeer user to specify a nakamoto tenure to build off of when mining --- .../src/chainstate/nakamoto/tests/node.rs | 167 ++++++++++-------- 1 file changed, 97 insertions(+), 70 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index aa00430f89..d23d608ec7 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -214,6 +214,15 @@ impl TestMiner { &mut self, recipient: Option, vrf_proof: VRFProof, + ) -> StacksTransaction { + self.make_nakamoto_coinbase_with_nonce(recipient, vrf_proof, self.nonce) + } + + pub fn make_nakamoto_coinbase_with_nonce( + &mut self, + recipient: Option, + vrf_proof: VRFProof, + nonce: u64, ) -> StacksTransaction { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, @@ -226,7 +235,7 @@ impl TestMiner { ); tx_coinbase.chain_id = 0x80000000; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; - tx_coinbase.auth.set_origin_nonce(self.nonce); + tx_coinbase.auth.set_origin_nonce(nonce); let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); self.sign_as_origin(&mut tx_signer); @@ -237,6 +246,14 @@ impl TestMiner { pub fn make_nakamoto_tenure_change( &mut self, tenure_change: TenureChangePayload, + ) -> StacksTransaction { + self.make_nakamoto_tenure_change_with_nonce(tenure_change, self.nonce) + } + + pub fn make_nakamoto_tenure_change_with_nonce( + &mut self, + tenure_change: TenureChangePayload, + nonce: u64, ) -> StacksTransaction { let mut tx_tenure_change = StacksTransaction::new( TransactionVersion::Testnet, @@ -245,7 +262,7 @@ impl TestMiner { ); tx_tenure_change.chain_id = 0x80000000; tx_tenure_change.anchor_mode = TransactionAnchorMode::OnChainOnly; - tx_tenure_change.auth.set_origin_nonce(self.nonce); + tx_tenure_change.auth.set_origin_nonce(nonce); let mut tx_signer = StacksTransactionSigner::new(&tx_tenure_change); self.sign_as_origin(&mut tx_signer); @@ -504,38 +521,50 @@ impl TestStacksNode { }; // the tenure-change contains a pointer to the end of the last tenure, which is currently - // the canonical tip - let (previous_tenure_end, previous_tenure_consensus_hash, previous_tenure_blocks) = { - let hdr = NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &sortdb) - .unwrap() - .unwrap(); - if hdr.anchored_header.as_stacks_nakamoto().is_some() { - // building atop nakamoto - let tenure_len = NakamotoChainState::get_nakamoto_tenure_length( - self.chainstate.db(), - &hdr.index_block_hash(), - ) - .unwrap(); - debug!( - "Tenure length of Nakamoto tenure {} is {}; tipped at {}", - &hdr.consensus_hash, - tenure_len, - &hdr.index_block_hash() - ); - (hdr.index_block_hash(), hdr.consensus_hash, tenure_len) - } else { - // building atop epoch2 - debug!( - "Tenure length of epoch2 tenure {} is {}; tipped at {}", - &parent_block_snapshot.consensus_hash, 1, &last_tenure_id - ); + // the canonical tip unless overridden + let (previous_tenure_end, previous_tenure_consensus_hash, previous_tenure_blocks) = + if let Some(nakamoto_parent_tenure) = parent_nakamoto_tenure.as_ref() { + let start_block = nakamoto_parent_tenure.first().clone().unwrap(); + let end_block = nakamoto_parent_tenure.last().clone().unwrap(); + let tenure_len = + end_block.header.chain_length + 1 - start_block.header.chain_length; ( - last_tenure_id, - parent_block_snapshot.consensus_hash.clone(), - 1, + end_block.block_id(), + end_block.header.consensus_hash, + tenure_len as u32, ) - } - }; + } else { + let hdr = + NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + if hdr.anchored_header.as_stacks_nakamoto().is_some() { + // building atop nakamoto + let tenure_len = NakamotoChainState::get_nakamoto_tenure_length( + self.chainstate.db(), + &hdr.index_block_hash(), + ) + .unwrap(); + debug!( + "Tenure length of Nakamoto tenure {} is {}; tipped at {}", + &hdr.consensus_hash, + tenure_len, + &hdr.index_block_hash() + ); + (hdr.index_block_hash(), hdr.consensus_hash, tenure_len) + } else { + // building atop epoch2 + debug!( + "Tenure length of epoch2 tenure {} is {}; tipped at {}", + &parent_block_snapshot.consensus_hash, 1, &last_tenure_id + ); + ( + last_tenure_id, + parent_block_snapshot.consensus_hash.clone(), + 1, + ) + } + }; let tenure_change_payload = TenureChangePayload { tenure_consensus_hash: ConsensusHash([0x00; 20]), // will be overwritten @@ -576,7 +605,7 @@ impl TestStacksNode { /// * the block /// * its size /// * its execution cost - /// * a list of malleablized blocks with the same sighash + /// * a list of malleablized blocks with the same contents, if desired pub fn make_nakamoto_tenure_blocks<'a, S, F, G>( chainstate: &mut StacksChainState, sortdb: &mut SortitionDB, @@ -597,6 +626,8 @@ impl TestStacksNode { mut miner_setup: S, mut block_builder: F, mut after_block: G, + malleablize: bool, + mined_canonical: bool, ) -> Vec<(NakamotoBlock, u64, ExecutionCost, Vec)> where S: FnMut(&mut NakamotoBlockBuilder), @@ -829,8 +860,9 @@ impl TestStacksNode { coord.handle_new_nakamoto_stacks_block().unwrap(); processed_blocks.push(block_to_store.clone()); - if block_to_store.block_id() == block_id { - // confirm that the chain tip advanced + if block_to_store.block_id() == block_id && mined_canonical { + // confirm that the chain tip advanced -- we intended to mine on the + // canonical tip let stacks_chain_tip = NakamotoChainState::get_canonical_block_header( chainstate.db(), &sortdb, @@ -858,6 +890,11 @@ impl TestStacksNode { } } + if !malleablize { + debug!("Will not produce malleablized blocks"); + break; + } + let num_sigs = block_to_store.header.signer_signature.len(); // force this block to have a different sighash, in addition to different @@ -977,7 +1014,6 @@ impl<'a> TestPeer<'a> { StacksBlockId, Option, Option>, - Option, ) { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); if let Some(parent_blocks) = stacks_node.get_last_nakamoto_tenure(miner) { @@ -987,43 +1023,22 @@ impl<'a> TestPeer<'a> { let first_parent = parent_blocks.first().unwrap(); debug!("First parent is {:?}", first_parent); - let first_parent_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &first_parent.header.consensus_hash, - ) - .unwrap() - .unwrap(); - - assert!(first_parent_sn.sortition); - - let parent_sortition_id = SortitionDB::get_block_commit_parent_sortition_id( - sortdb.conn(), - &first_parent_sn.winning_block_txid, - &first_parent_sn.sortition_id, - ) - .unwrap() - .unwrap(); - let parent_sortition = - SortitionDB::get_block_snapshot(sortdb.conn(), &parent_sortition_id) - .unwrap() - .unwrap(); - - debug!( - "First parent Nakamoto block sortition: {:?}", - &parent_sortition + // sanity check -- this parent must correspond to a sortition + assert!( + SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &first_parent.header.consensus_hash, + ) + .unwrap() + .unwrap() + .sortition ); - let parent_sortition_opt = Some(parent_sortition); let last_tenure_id = StacksBlockId::new( &first_parent.header.consensus_hash, &first_parent.header.block_hash(), ); - ( - last_tenure_id, - None, - Some(parent_blocks), - parent_sortition_opt, - ) + (last_tenure_id, None, Some(parent_blocks)) } else { // parent may be an epoch 2.x block let (parent_opt, parent_sortition_opt) = if let Some(parent_block) = @@ -1059,7 +1074,7 @@ impl<'a> TestPeer<'a> { // must be a genesis block (testing only!) StacksBlockId(BOOT_BLOCK_HASH.0.clone()) }; - (last_tenure_id, parent_opt, None, parent_sortition_opt) + (last_tenure_id, parent_opt, None) } } @@ -1080,8 +1095,16 @@ impl<'a> TestPeer<'a> { let mut burn_block = TestBurnchainBlock::new(&tip, 0); let mut stacks_node = self.stacks_node.take().unwrap(); - let (last_tenure_id, parent_block_opt, parent_tenure_opt, parent_sortition_opt) = - Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb); + let (last_tenure_id, parent_block_opt, parent_tenure_opt) = + if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + ( + nakamoto_parent_tenure.first().as_ref().unwrap().block_id(), + None, + Some(nakamoto_parent_tenure.clone()), + ) + } else { + Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb) + }; // find the VRF leader key register tx to use. // it's the one pointed to by the parent tenure @@ -1345,6 +1368,8 @@ impl<'a> TestPeer<'a> { miner_setup, block_builder, after_block, + self.mine_malleablized_blocks, + self.nakamoto_parent_tenure_opt.is_none(), ); let just_blocks = blocks @@ -1435,6 +1460,8 @@ impl<'a> TestPeer<'a> { |_| {}, block_builder, |_| true, + self.mine_malleablized_blocks, + self.nakamoto_parent_tenure_opt.is_none(), ); let just_blocks = blocks From 2d21bffb83c8d1ddf07daf03d11cd54ba0fa171d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:07:05 -0400 Subject: [PATCH 1184/1400] chore: expect() --> unwrap_or_else() --- .../src/chainstate/stacks/db/accounts.rs | 52 ++++++++++++++++--- 1 file changed, 45 insertions(+), 7 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 58ffdaeb60..105d3ed516 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -267,14 +267,26 @@ impl StacksChainState { }) }) .map_err(Error::ClarityError) - .unwrap() + .unwrap_or_else(|e| { + error!( + "FATAL: Failed to query account for {:?}: {:?}", + principal, &e + ); + panic!(); + }) } pub fn get_nonce(clarity_tx: &mut T, principal: &PrincipalData) -> u64 { clarity_tx .with_clarity_db_readonly(|ref mut db| db.get_account_nonce(principal)) .map_err(|x| Error::ClarityError(x.into())) - .unwrap() + .unwrap_or_else(|e| { + error!( + "FATAL: Failed to query account nonce for {:?}: {:?}", + principal, &e + ); + panic!(); + }) } pub fn get_account_ft( @@ -337,7 +349,13 @@ impl StacksChainState { snapshot.save()?; Ok(()) }) - .expect("FATAL: failed to debit account") + .unwrap_or_else(|e| { + error!( + "FATAL: failed to debit account {:?} for {} uSTX: {:?}", + principal, amount, &e + ); + panic!(); + }) } /// Called each time a transaction sends STX to this principal. @@ -358,7 +376,13 @@ impl StacksChainState { info!("{} credited: {} uSTX", principal, new_balance); Ok(()) }) - .expect("FATAL: failed to credit account") + .unwrap_or_else(|e| { + error!( + "FATAL: failed to credit account {:?} for {} uSTX: {:?}", + principal, amount, &e + ); + panic!(); + }) } /// Called during the genesis / boot sequence. @@ -374,7 +398,13 @@ impl StacksChainState { snapshot.save()?; Ok(()) }) - .expect("FATAL: failed to credit account") + .unwrap_or_else(|e| { + error!( + "FATAL: failed to credit genesis account {:?} for {} uSTX: {:?}", + principal, amount, &e + ); + panic!(); + }) } /// Increment an account's nonce @@ -385,11 +415,19 @@ impl StacksChainState { ) { clarity_tx .with_clarity_db(|ref mut db| { - let next_nonce = cur_nonce.checked_add(1).expect("OUT OF NONCES"); + let next_nonce = cur_nonce + .checked_add(1) + .unwrap_or_else(|| panic!("OUT OF NONCES")); db.set_account_nonce(&principal, next_nonce)?; Ok(()) }) - .expect("FATAL: failed to set account nonce") + .unwrap_or_else(|e| { + error!( + "FATAL: failed to update account nonce for account {:?} from {}: {:?}", + principal, cur_nonce, &e + ); + panic!(); + }) } /// Schedule a miner payment in the future. From cde0547c57f5620ec41029e3195a0d8e699115fb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:07:35 -0400 Subject: [PATCH 1185/1400] feat: address #5044 by tracking loaded tenure data by stacks tip, and making it so that a descendant of a stacks tip can inherit its cached data --- stackslib/src/net/inv/nakamoto.rs | 157 +++++++++++++++++++++++++----- 1 file changed, 132 insertions(+), 25 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index f24ad1a87c..9be77949b1 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -35,12 +35,13 @@ use crate::net::{ }; use crate::util_lib::db::Error as DBError; +const TIP_ANCESTOR_SEARCH_DEPTH: u64 = 10; + /// Cached data for a sortition in the sortition DB. /// Caching this allows us to avoid calls to `SortitionDB::get_block_snapshot_consensus()`. #[derive(Clone, Debug, PartialEq)] pub(crate) struct InvSortitionInfo { parent_consensus_hash: ConsensusHash, - block_height: u64, } impl InvSortitionInfo { @@ -57,7 +58,6 @@ impl InvSortitionInfo { Ok(Self { parent_consensus_hash: parent_sn.consensus_hash, - block_height: sn.block_height, }) } } @@ -105,8 +105,14 @@ impl InvTenureInfo { /// in sync. By caching (immutable) tenure data in this struct, we can enusre that this happens /// all the time except for during node bootup. pub struct InvGenerator { - processed_tenures: HashMap>, + /// Map stacks tips to a table of (tenure ID, optional tenure info) + processed_tenures: HashMap>>, + /// Map consensus hashes to sortition data about them sortitions: HashMap, + /// how far back to search for ancestor Stacks blocks when processing a new tip + tip_ancestor_search_depth: u64, + /// count cache misses for `processed_tenures` + cache_misses: u128, } impl InvGenerator { @@ -114,24 +120,134 @@ impl InvGenerator { Self { processed_tenures: HashMap::new(), sortitions: HashMap::new(), + tip_ancestor_search_depth: TIP_ANCESTOR_SEARCH_DEPTH, + cache_misses: 0, + } + } + + pub fn with_tip_ancestor_search_depth(mut self, depth: u64) -> Self { + self.tip_ancestor_search_depth = depth; + self + } + + #[cfg(test)] + pub(crate) fn cache_misses(&self) -> u128 { + self.cache_misses + } + + /// Find the highest ancestor of `tip_block_id` that has an entry in `processed_tenures`. + /// Search up to `self.tip_ancestor_search_depth` ancestors back. + /// + /// The intuition here is that `tip_block_id` is the highest block known to the node, and it + /// can advance when new blocks are processed. We associate a set of cached processed tenures with + /// each tip, but if the tip advances, we simply move the cached processed tenures "up to" the + /// new tip instead of reloading them from disk each time. + /// + /// However, searching for an ancestor tip incurs a sqlite DB read, so we want to bound the + /// search depth. In practice, the bound on this depth would be derived from how often the + /// chain tip changes relative to how often we serve up inventory data. The depth should be + /// the maximum expected number of blocks to be processed in-between handling `GetNakamotoInv` + /// messages. + /// + /// If found, then return the ancestor block ID represented in `self.processed_tenures`. + /// If not, then reutrn None. + pub(crate) fn find_ancestor_processed_tenures( + &self, + chainstate: &StacksChainState, + tip_block_id: &StacksBlockId, + ) -> Result, NetError> { + let mut cursor = tip_block_id.clone(); + for _ in 0..self.tip_ancestor_search_depth { + let parent_id_opt = + NakamotoChainState::get_nakamoto_parent_block_id(chainstate.db(), &cursor)?; + let Some(parent_id) = parent_id_opt else { + return Ok(None); + }; + if self.processed_tenures.contains_key(&parent_id) { + return Ok(Some(parent_id)); + } + cursor = parent_id; } + Ok(None) } - /// Get a processed tenure. If it's not cached, then load it. - /// Returns Some(..) if there existed a tenure-change tx for this given consensus hash - fn get_processed_tenure( + /// Get a processed tenure. If it's not cached, then load it from disk. + /// + /// Loading it is expensive, so once loaded, store it with the cached processed tenure map + /// associated with `tip_block_id`. + /// + /// If there is no such map, then see if a recent ancestor of `tip_block_id` is represented. If + /// so, then remove that map and associate it with `tip_block_id`. This way, as the blockchain + /// advances, cached tenure information for the same Stacks fork stays associated with that + /// fork's chain tip (assuming this code gets run sufficiently often relative to the + /// advancement of the `tip_block_id` tip value). + /// + /// Returns Ok(Some(..)) if there existed a tenure-change tx for this given consensus hash + /// Returns Ok(None) if not + /// Returns Err(..) on DB error + pub(crate) fn get_processed_tenure( &mut self, chainstate: &StacksChainState, tip_block_id: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result, NetError> { - // TODO: MARF-aware cache - // not cached so go load it - let loaded_info_opt = - InvTenureInfo::load(chainstate, tip_block_id, &tenure_id_consensus_hash)?; - self.processed_tenures - .insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); - Ok(loaded_info_opt) + if self.processed_tenures.get(tip_block_id).is_none() { + // this tip has no known table. + // does it have an ancestor with a table? If so, then move its ancestor's table to this + // tip. Otherwise, make a new table. + if let Some(ancestor_tip_id) = + self.find_ancestor_processed_tenures(chainstate, tip_block_id)? + { + let ancestor_tenures = self + .processed_tenures + .remove(&ancestor_tip_id) + .unwrap_or_else(|| { + panic!("FATAL: did not have ancestor tip reported by search"); + }); + + self.processed_tenures + .insert(tip_block_id.clone(), ancestor_tenures); + } else { + self.processed_tenures + .insert(tip_block_id.clone(), HashMap::new()); + } + } + + let Some(tenure_infos) = self.processed_tenures.get_mut(tip_block_id) else { + unreachable!("FATAL: inserted table for chain tip, but didn't get it back"); + }; + + // this tip has a known table + if let Some(loaded_tenure_info) = tenure_infos.get_mut(tenure_id_consensus_hash) { + // we've loaded this tenure info before for this tip + return Ok(loaded_tenure_info.clone()); + } else { + // we have not loaded the tenure info for this tip, so go get it + let loaded_info_opt = + InvTenureInfo::load(chainstate, tip_block_id, &tenure_id_consensus_hash)?; + tenure_infos.insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); + + self.cache_misses = self.cache_misses.saturating_add(1); + return Ok(loaded_info_opt); + } + } + + /// Get sortition info, loading it from our cache if needed + pub(crate) fn get_sortition_info( + &mut self, + sortdb: &SortitionDB, + cur_consensus_hash: &ConsensusHash, + ) -> Result<&InvSortitionInfo, NetError> { + if !self.sortitions.contains_key(cur_consensus_hash) { + let loaded_info = InvSortitionInfo::load(sortdb, cur_consensus_hash)?; + self.sortitions + .insert(cur_consensus_hash.clone(), loaded_info); + }; + + Ok(self + .sortitions + .get(cur_consensus_hash) + .expect("infallible: just inserted this data")) } /// Generate an block inventory bit vector for a reward cycle. @@ -210,19 +326,10 @@ impl InvGenerator { // done scanning this reward cycle break; } - let cur_sortition_info = if let Some(info) = self.sortitions.get(&cur_consensus_hash) { - info - } else { - let loaded_info = InvSortitionInfo::load(sortdb, &cur_consensus_hash)?; - self.sortitions - .insert(cur_consensus_hash.clone(), loaded_info); - self.sortitions - .get(&cur_consensus_hash) - .expect("infallible: just inserted this data") - }; - let parent_sortition_consensus_hash = cur_sortition_info.parent_consensus_hash.clone(); + let cur_sortition_info = self.get_sortition_info(sortdb, &cur_consensus_hash)?; + let parent_sortition_consensus_hash = cur_sortition_info.parent_consensus_hash; - debug!("Get sortition and tenure info for height {}. cur_consensus_hash = {}, cur_tenure_info = {:?}, cur_sortition_info = {:?}", cur_height, &cur_consensus_hash, &cur_tenure_opt, cur_sortition_info); + debug!("Get sortition and tenure info for height {}. cur_consensus_hash = {}, cur_tenure_info = {:?}, parent_sortition_consensus_hash = {}", cur_height, &cur_consensus_hash, &cur_tenure_opt, &parent_sortition_consensus_hash); if let Some(cur_tenure_info) = cur_tenure_opt.as_ref() { // a tenure was active when this sortition happened... From 927a099e226a3bf70e7720dbc73ecff24888953f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:08:11 -0400 Subject: [PATCH 1186/1400] feat: API for choosing which Nakamoto tenure to mine on --- stackslib/src/net/mod.rs | 47 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 6ad3c5501c..df3560f3b1 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2275,6 +2275,10 @@ pub mod test { >, /// list of malleablized blocks produced when mining. pub malleablized_blocks: Vec, + pub mine_malleablized_blocks: bool, + /// tenure-start block of tenure to mine on. + /// gets consumed on the call to begin_nakamoto_tenure + pub nakamoto_parent_tenure_opt: Option>, } impl<'a> TestPeer<'a> { @@ -2689,6 +2693,8 @@ pub mod test { coord: coord, indexer: Some(indexer), malleablized_blocks: vec![], + mine_malleablized_blocks: true, + nakamoto_parent_tenure_opt: None, } } @@ -3509,6 +3515,10 @@ pub mod test { self.sortdb.as_mut().unwrap() } + pub fn sortdb_ref(&mut self) -> &SortitionDB { + self.sortdb.as_ref().unwrap() + } + pub fn with_db_state(&mut self, f: F) -> Result where F: FnOnce( @@ -4198,6 +4208,43 @@ pub mod test { } } } + + /// Set the nakamoto tenure to mine on + pub fn mine_nakamoto_on(&mut self, parent_tenure: Vec) { + self.nakamoto_parent_tenure_opt = Some(parent_tenure); + } + + /// Clear the tenure to mine on. This causes the miner to build on the canonical tip + pub fn mine_nakamoto_on_canonical_tip(&mut self) { + self.nakamoto_parent_tenure_opt = None; + } + + /// Get an account off of a tip + pub fn get_account( + &mut self, + tip: &StacksBlockId, + account: &PrincipalData, + ) -> StacksAccount { + let sortdb = self.sortdb.take().expect("FATAL: sortdb not restored"); + let mut node = self + .stacks_node + .take() + .expect("FATAL: chainstate not restored"); + + let acct = node + .chainstate + .maybe_read_only_clarity_tx( + &sortdb.index_handle_at_block(&node.chainstate, tip).unwrap(), + tip, + |clarity_tx| StacksChainState::get_account(clarity_tx, account), + ) + .unwrap() + .unwrap(); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + acct + } } pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { From 64a272bfebf646d1cf6d1c3690c5ba725a8bf602 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 20 Sep 2024 18:08:22 -0400 Subject: [PATCH 1187/1400] feat: unit tests for inv generator with caching behavior --- stackslib/src/net/tests/inv/nakamoto.rs | 598 +++++++++++++++++++++++- 1 file changed, 595 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index fd9f1dcc1f..2f60027207 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -46,7 +46,7 @@ use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::StacksEpochExtension; use crate::net::inv::nakamoto::{InvGenerator, NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::comms::NeighborComms; -use crate::net::test::{TestEventObserver, TestPeer}; +use crate::net::test::{to_addr, TestEventObserver, TestPeer}; use crate::net::tests::{NakamotoBootPlan, NakamotoBootStep, NakamotoBootTenure}; use crate::net::{ Error as NetError, GetNakamotoInvData, HandshakeData, NakamotoInvData, NeighborAddress, @@ -806,7 +806,7 @@ fn test_nakamoto_inv_sync_state_machine() { vec![ true, false, false, false, false, false, false, true, true, true, ], - // atlernating rc + // alternating rc vec![ false, true, false, true, false, true, false, true, true, true, ], @@ -938,7 +938,7 @@ fn test_nakamoto_inv_sync_across_epoch_change() { vec![ true, false, false, false, false, false, false, true, true, true, ], - // atlernating rc + // alternating rc vec![ false, true, false, true, false, true, false, true, true, true, ], @@ -1077,3 +1077,595 @@ fn test_nakamoto_inv_sync_across_epoch_change() { ); } } + +#[test] +fn test_nakamoto_make_tenure_inv_in_forks() { + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full rc + vec![true, true, true, true, true, true, true, true, true, true], + ]; + + let (mut peer, _) = make_nakamoto_peers_from_invs_and_balances( + function_name!(), + &observer, + 10, + 3, + bitvecs.clone(), + 0, + initial_balances, + ); + peer.refresh_burnchain_view(); + peer.mine_malleablized_blocks = false; + + let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); + + // + // ---------------------- basic operations ---------------------- + // + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + + let first_burn_block_height = sortdb.first_block_height; + + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(first_burn_block_height, sort_tip.block_height) + .unwrap(); + + let naka_tip = peer.network.stacks_tip.block_id(); + let first_naka_tip = naka_tip.clone(); + let first_sort_tip = sort_tip.clone(); + + // find the first block in this tenure + let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) + .unwrap() + .unwrap(); + let naka_tenure_start_header = NakamotoChainState::get_nakamoto_tenure_start_block_header( + &mut chainstate.index_conn(), + &naka_tip, + &naka_tip_header.consensus_hash, + ) + .unwrap() + .unwrap(); + let (naka_tenure_start_block, _) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&naka_tenure_start_header.index_block_hash()) + .unwrap() + .unwrap(); + + assert_eq!(invgen.cache_misses(), 0); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!(bits, vec![true, true]); + assert_eq!(invgen.cache_misses(), 3); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!(bits, vec![true, true]); + assert_eq!(invgen.cache_misses(), 3); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + vec![false, false, true, true, true, true, true, true, true, true] + ); + assert_eq!(invgen.cache_misses(), 13); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + vec![false, false, true, true, true, true, true, true, true, true] + ); + assert_eq!(invgen.cache_misses(), 13); + + // + // ---------------------- the inv generator can keep up with new blocks ---------------------- + // + + let mut expected_bits = vec![true, true]; + let mut expected_cache_misses = 13; + let mut naka_tip_block = None; + + for i in 0..3 { + let (naka_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: produced {}: {:?}", + &naka_block.block_id(), + &naka_block + ); + + peer.refresh_burnchain_view(); + let naka_tip = peer.network.stacks_tip.block_id(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(first_burn_block_height, sort_tip.block_height) + .unwrap(); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + // only one additional cache miss + expected_bits.push(true); + expected_cache_misses += 1; + + assert_eq!(bits, expected_bits); + assert_eq!(invgen.cache_misses(), expected_cache_misses); + + naka_tip_block = Some(naka_block); + } + + let naka_tip_block = naka_tip_block.unwrap(); + + peer.refresh_burnchain_view(); + let naka_tip = peer.network.stacks_tip.block_id(); + + // + // ---------------------- the inv generator can track multiple forks at once ---------------------- + // + + peer.mine_nakamoto_on(vec![naka_tenure_start_block.clone()]); + let (fork_naka_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: produced fork {}: {:?}", + &fork_naka_block.block_id(), + &fork_naka_block + ); + + peer.refresh_burnchain_view(); + let new_naka_tip = peer.network.stacks_tip.block_id(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(first_burn_block_height, sort_tip.block_height) + .unwrap(); + + // this will not have reorged + assert_eq!(naka_tip, new_naka_tip); + + // load inv off of the canonical tip. + // It should show a missed sortition. + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!( + "test: Bits in fork on {} at rc {}: {:?}", + &naka_tip, tip_rc, &bits + ); + debug!( + "test: invgen.cache_misses() in fork = {}", + invgen.cache_misses() + ); + + assert_eq!(bits, [true, true, true, true, true, false]); + assert_eq!(invgen.cache_misses(), 17); + + // load inv off of the non-canonical tip. + // it should show the last 3 canonical tenures as missing, and this forked block as present + let bits = invgen + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &fork_naka_block.block_id(), + tip_rc, + ) + .unwrap(); + debug!( + "test: Bits in fork on {} at rc {}: {:?}", + &fork_naka_block.block_id(), + tip_rc, + &bits + ); + debug!( + "test: invgen.cache_misses() in fork = {}", + invgen.cache_misses() + ); + + assert_eq!(bits, [true, true, false, false, false, true]); + assert_eq!(invgen.cache_misses(), 21); + + // add more to the fork + peer.mine_nakamoto_on(vec![fork_naka_block.clone()]); + + let (fork_naka_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: produced fork {}: {:?}", + &fork_naka_block.block_id(), + &fork_naka_block + ); + + peer.refresh_burnchain_view(); + let new_naka_tip = peer.network.stacks_tip.block_id(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(first_burn_block_height, sort_tip.block_height) + .unwrap(); + + // this will not have reorged (yet) + assert_eq!(naka_tip, new_naka_tip); + + // load inv off of the canonical tip. + // It should show two missed sortitions, for each fork. + // only one additional cache miss + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!( + "test: Bits in fork on {} at rc {}: {:?}", + &naka_tip, tip_rc, &bits + ); + debug!( + "test: invgen.cache_misses() in fork = {}", + invgen.cache_misses() + ); + + assert_eq!(bits, [true, true, true, true, true, false, false]); + assert_eq!(invgen.cache_misses(), 22); + + // load inv off of the non-canonical tip again. + // it should show the last 3 last canonical tenures as missing, and this forked block as + // present. Only one additional cache miss should manifest. + let bits = invgen + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &fork_naka_block.block_id(), + tip_rc, + ) + .unwrap(); + debug!( + "test: Bits in fork on {} at rc {}: {:?}", + &fork_naka_block.block_id(), + tip_rc, + &bits + ); + debug!( + "test: invgen.cache_misses() in fork = {}", + invgen.cache_misses() + ); + + // only one more cache miss + assert_eq!(bits, [true, true, false, false, false, true, true]); + assert_eq!(invgen.cache_misses(), 23); + + // load inv off of the canonical tip again. + // It should show two missed sortitions. + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!( + "test: Bits in fork on {} at rc {}: {:?}", + &naka_tip, tip_rc, &bits + ); + debug!( + "test: invgen.cache_misses() in fork = {}", + invgen.cache_misses() + ); + + // no new cache misses + assert_eq!(bits, [true, true, true, true, true, false, false]); + assert_eq!(invgen.cache_misses(), 23); + + // + // ---------------------- the inv generator will search only a maximum depth before giving up ---------------------- + // + + // advance the canonical chain by 3 more blocks, so the delta between `first_naka_tip` and + // `naka_tip` is now 6 blocks + peer.mine_nakamoto_on(vec![naka_tip_block.clone()]); + for i in 0..3 { + let (naka_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: produced {}: {:?}", + &naka_block.block_id(), + &naka_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![naka_block.clone()]); + } + let naka_tip = peer.network.stacks_tip.block_id(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // new inv generator with a search depth of 3 + let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(3); + + // load an old tip on the canonical chain + let bits = invgen + .make_tenure_bitvector( + &first_sort_tip, + &sortdb, + &chainstate, + &first_naka_tip, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, [true, true]); + assert_eq!(invgen.cache_misses(), 3); + + // load a descendant that is 6 blocks higher + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + assert_eq!( + bits, + [true, true, true, true, true, false, false, true, true, true] + ); + + // all 10 tenures were loaded, because we had to search more than 5 blocks back + assert_eq!(invgen.cache_misses(), 12); + + // new inv generator with a search depth of 10 + let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(10); + + // load an old tip on the canonical chain + let bits = invgen + .make_tenure_bitvector( + &first_sort_tip, + &sortdb, + &chainstate, + &first_naka_tip, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, [true, true]); + assert_eq!(invgen.cache_misses(), 3); + + // load a descendant that is 6 blocks higher + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + assert_eq!( + bits, + [true, true, true, true, true, false, false, true, true, true] + ); + + // reused old canonical tip information + assert_eq!(invgen.cache_misses(), 9); +} + +#[test] +fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full rc + vec![true, true, true, true, true, true, true, true, true, true], + // sparce rc + vec![ + true, false, false, false, false, false, false, true, true, true, + ], + // alternating rc + vec![ + false, true, false, true, false, true, false, true, true, true, + ], + // sparse rc + vec![ + false, false, false, false, false, false, true, true, true, true, + ], + // full rc + vec![true, true, true, true, true, true, true, true, true, true], + ]; + + let (mut peer, _) = make_nakamoto_peers_from_invs_and_balances( + function_name!(), + &observer, + 10, + 3, + bitvecs.clone(), + 0, + initial_balances, + ); + peer.refresh_burnchain_view(); + peer.mine_malleablized_blocks = false; + + let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + + let first_burn_block_height = sortdb.first_block_height; + + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_rc = sortdb + .pox_constants + .block_height_to_reward_cycle(first_burn_block_height, sort_tip.block_height) + .unwrap(); + + let naka_tip = peer.network.stacks_tip.block_id(); + let first_naka_tip = naka_tip.clone(); + let first_sort_tip = sort_tip.clone(); + + // find the first block in this tenure + let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) + .unwrap() + .unwrap(); + let naka_tenure_start_header = NakamotoChainState::get_nakamoto_tenure_start_block_header( + &mut chainstate.index_conn(), + &naka_tip, + &naka_tip_header.consensus_hash, + ) + .unwrap() + .unwrap(); + let (naka_tenure_start_block, _) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&naka_tenure_start_header.index_block_hash()) + .unwrap() + .unwrap(); + + assert_eq!(invgen.cache_misses(), 0); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!(bits, [true, true]); + assert_eq!(invgen.cache_misses(), 3); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 1, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, true, true, true, true, true, true, true, true] + ); + assert_eq!(invgen.cache_misses(), 13); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 2) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 2, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, false, false, false, false, false, false, true, true] + ); + assert_eq!(invgen.cache_misses(), 17); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 3) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 3, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, false, true, false, true, false, true, false, true] + ); + assert_eq!(invgen.cache_misses(), 23); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 4) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 4, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, true, false, false, false, false, false, false, true] + ); + assert_eq!(invgen.cache_misses(), 27); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 5) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 5, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [false, false, true, true, true, true, true, true, true, true] + ); + assert_eq!(invgen.cache_misses(), 37); + + // load them all again. cache misses should remain the same. + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!(bits, [true, true]); + assert_eq!(invgen.cache_misses(), 37); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 1, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, true, true, true, true, true, true, true, true] + ); + assert_eq!(invgen.cache_misses(), 37); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 2) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 2, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, false, false, false, false, false, false, true, true] + ); + assert_eq!(invgen.cache_misses(), 37); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 3) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 3, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, false, true, false, true, false, true, false, true] + ); + assert_eq!(invgen.cache_misses(), 37); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 4) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 4, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [true, true, true, false, false, false, false, false, false, true] + ); + assert_eq!(invgen.cache_misses(), 37); + + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 5) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 5, &bits); + debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + + assert_eq!( + bits, + [false, false, true, true, true, true, true, true, true, true] + ); + assert_eq!(invgen.cache_misses(), 37); +} From a2c912db208e4df6c71ecdb3c52a24f801d8bcec Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 15:09:37 -0700 Subject: [PATCH 1188/1400] CRC: move monitor_signers to its own mod Signed-off-by: Jacinta Ferrant --- stacks-signer/src/cli.rs | 4 +- stacks-signer/src/client/stacks_client.rs | 22 +- stacks-signer/src/lib.rs | 4 + stacks-signer/src/main.rs | 319 +-------------------- stacks-signer/src/monitor_signers.rs | 331 ++++++++++++++++++++++ stacks-signer/src/utils.rs | 24 ++ 6 files changed, 379 insertions(+), 325 deletions(-) create mode 100644 stacks-signer/src/monitor_signers.rs create mode 100644 stacks-signer/src/utils.rs diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 37e9218a9d..c691e7bb69 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -269,10 +269,10 @@ pub struct MonitorSignersArgs { /// Whether the node is mainnet. Default is false. #[arg(long, default_value = "false")] pub mainnet: bool, - /// Set the polling interval in seconds. Default is 60 seconds. + /// Set the polling interval in seconds. #[arg(long, short, default_value = "60")] pub interval: u64, - /// Max age in seconds before a signer message is considered stale. Default is 1200 seconds. + /// Max age in seconds before a signer message is considered stale. #[arg(long, short, default_value = "1200")] pub max_age: u64, } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 25431164fc..5898258f60 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -228,16 +228,18 @@ impl StacksClient { // Get the signer writers from the stacker-db to find the signer slot id let stackerdb_signer_slots = self.get_stackerdb_signer_slots(&signer_stackerdb_contract_id, signer_set)?; - let mut signer_slot_ids = HashMap::with_capacity(stackerdb_signer_slots.len()); - for (index, (address, _)) in stackerdb_signer_slots.into_iter().enumerate() { - signer_slot_ids.insert( - address, - SignerSlotID( - u32::try_from(index).expect("FATAL: number of signers exceeds u32::MAX"), - ), - ); - } - Ok(signer_slot_ids) + Ok(stackerdb_signer_slots + .into_iter() + .enumerate() + .map(|(index, (address, _))| { + ( + address, + SignerSlotID( + u32::try_from(index).expect("FATAL: number of signers exceeds u32::MAX"), + ), + ) + }) + .collect()) } /// Get the vote for a given round, reward cycle, and signer address diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index c61ae39731..9d8a22a320 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -29,12 +29,16 @@ pub mod cli; pub mod client; /// The configuration module for the signer pub mod config; +/// The signer monitor for observing signer behaviours in the network +pub mod monitor_signers; /// The monitoring server for the signer pub mod monitoring; /// The primary runloop for the signer pub mod runloop; /// The signer state module pub mod signerdb; +/// The util module for the signer +pub mod utils; /// The v0 implementation of the signer. This does not include WSTS support pub mod v0; /// The v1 implementation of the singer. This includes WSTS support diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 513382e843..5b118db646 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -26,41 +26,30 @@ extern crate serde; extern crate serde_json; extern crate toml; -use std::collections::HashMap; use std::io::{self, Write}; use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; use clap::Parser; -use clarity::codec::read_next; -use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; -use clarity::types::StacksEpochId; +use clarity::types::chainstate::StacksPublicKey; use clarity::util::sleep_ms; -use clarity::vm::types::QualifiedContractIdentifier; -use libsigner::v0::messages::{MessageSlotID, SignerMessage}; -use libsigner::{SignerSession, StackerDBSession}; +use libsigner::SignerSession; use libstackerdb::StackerDBChunkData; -use slog::{slog_debug, slog_error, slog_info, slog_warn}; +use slog::{slog_debug, slog_error}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::MessageSignature; -use stacks_common::{debug, error, info, warn}; +use stacks_common::{debug, error}; use stacks_signer::cli::{ Cli, Command, GenerateStackingSignatureArgs, GenerateVoteArgs, GetChunkArgs, GetLatestChunkArgs, MonitorSignersArgs, PutChunkArgs, RunSignerArgs, StackerDBArgs, VerifyVoteArgs, }; -use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; use stacks_signer::config::GlobalConfig; +use stacks_signer::monitor_signers::SignerMonitor; +use stacks_signer::utils::stackerdb_session; use stacks_signer::v0::SpawnedSigner; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; -/// Create a new stacker db session -fn stackerdb_session(host: &str, contract: QualifiedContractIdentifier) -> StackerDBSession { - let mut session = StackerDBSession::new(host, contract.clone()); - session.connect(host.to_string(), contract).unwrap(); - session -} - /// Write the chunk to stdout fn write_chunk_to_stdout(chunk_opt: Option>) { if let Some(chunk) = chunk_opt.as_ref() { @@ -209,302 +198,6 @@ fn handle_monitor_signers(args: MonitorSignersArgs) { } } -struct SignerMonitor { - stacks_client: StacksClient, - cycle_state: RewardCycleState, - args: MonitorSignersArgs, -} - -#[derive(Debug, Default, Clone)] -struct RewardCycleState { - signers_slots: HashMap, - signers_keys: HashMap, - signers_addresses: HashMap, - signers_weights: HashMap, - slot_ids: Vec, - /// Reward cycle is not known until the first successful call to the node - reward_cycle: Option, -} - -impl SignerMonitor { - fn new(args: MonitorSignersArgs) -> Self { - url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); - let stacks_client = StacksClient::new( - StacksPrivateKey::new(), // We don't need a private key to read - args.host.clone(), - "FOO".to_string(), // We don't care about authorized paths. Just accessing public info - args.mainnet, - ); - Self { - stacks_client, - cycle_state: RewardCycleState::default(), - args, - } - } - - fn refresh_state(&mut self) -> Result { - let reward_cycle = self - .stacks_client - .get_current_reward_cycle_info()? - .reward_cycle; - if Some(reward_cycle) == self.cycle_state.reward_cycle { - // The reward cycle has not changed. Nothing to refresh. - return Ok(false); - } - self.cycle_state.reward_cycle = Some(reward_cycle); - - self.cycle_state.signers_keys.clear(); - self.cycle_state.signers_addresses.clear(); - - self.cycle_state.signers_slots = - self.stacks_client.get_parsed_signer_slots(reward_cycle)?; - - let entries = self - .stacks_client - .get_reward_set_signers(reward_cycle)? - .unwrap_or_else(|| { - panic!("No signers found for the current reward cycle {reward_cycle}") - }); - for entry in entries { - let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) - .expect("Failed to convert signing key to StacksPublicKey"); - let stacks_address = StacksAddress::p2pkh(self.args.mainnet, &public_key); - self.cycle_state - .signers_keys - .insert(stacks_address, public_key); - self.cycle_state - .signers_weights - .insert(stacks_address, entry.weight); - } - for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { - self.cycle_state - .signers_addresses - .insert(*slot_id, *signer_address); - } - - for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { - self.cycle_state - .signers_addresses - .insert(*slot_id, *signer_address); - self.cycle_state.slot_ids.push(slot_id.0); - } - Ok(true) - } - - fn print_missing_signers(&self, missing_signers: &[StacksAddress]) { - if missing_signers.is_empty() { - return; - } - let formatted_signers = missing_signers - .iter() - .map(|addr| format!("{addr}")) - .collect::>() - .join(", "); - let formatted_keys = self - .cycle_state - .signers_keys - .iter() - .filter_map(|(addr, key)| { - if missing_signers.contains(addr) { - Some(format!("0x{}", key.to_hex())) - } else { - None - } - }) - .collect::>() - .join(", "); - let missing_weight = missing_signers - .iter() - .map(|addr| self.cycle_state.signers_weights.get(addr).unwrap()) - .sum::(); - let total_weight = self.cycle_state.signers_weights.values().sum::(); - let percentage_missing = missing_weight as f64 / total_weight as f64 * 100.00; - warn!( - "Missing messages for {} of {} signer(s). Missing {percentage_missing:.2}% of signing weight ({missing_weight}/{total_weight})", missing_signers.len(), self.cycle_state.signers_addresses.len(); - "signer_addresses" => formatted_signers, - "signer_keys" => formatted_keys - ); - } - - fn print_stale_signers(&self, stale_signers: &[StacksAddress]) { - if stale_signers.is_empty() { - return; - } - let formatted_signers = stale_signers - .iter() - .map(|addr| format!("{addr}")) - .collect::>() - .join(", "); - let formatted_keys = self - .cycle_state - .signers_keys - .iter() - .filter_map(|(addr, key)| { - if stale_signers.contains(addr) { - Some(format!("0x{}", key.to_hex())) - } else { - None - } - }) - .collect::>() - .join(", "); - warn!( - "No new updates from {} of {} signer(s) in over {} seconds", - stale_signers.len(), - self.cycle_state.signers_addresses.len(), - self.args.max_age; - "signer_addresses" => formatted_signers, - "signer_keys" => formatted_keys - ); - } - - fn print_unexpected_messages( - &self, - unexpected_messages: &HashMap, - ) { - if unexpected_messages.is_empty() { - return; - } - let formatted_signers = unexpected_messages - .iter() - .map(|(addr, (msg, slot))| { - format!("(address: {addr}, slot_id: {slot}, message: {msg:?})") - }) - .collect::>() - .join(", "); - let formatted_keys = self - .cycle_state - .signers_keys - .iter() - .filter_map(|(addr, key)| { - if unexpected_messages.contains_key(addr) { - Some(format!("0x{}", key.to_hex())) - } else { - None - } - }) - .collect::>() - .join(", "); - warn!( - "Unexpected messages from {} of {} signer(s).", - unexpected_messages.len(), - self.cycle_state.signers_addresses.len(); - "signer_addresses" => formatted_signers, - "signer_keys" => formatted_keys - ); - } - - /// Start monitoring the signers stackerdb slots for expected new messages - pub fn start(&mut self) -> Result<(), ClientError> { - self.refresh_state()?; - let nmb_signers = self.cycle_state.signers_keys.len(); - let interval_ms = self.args.interval * 1000; - let reward_cycle = self - .cycle_state - .reward_cycle - .expect("BUG: reward cycle not set"); - let contract = - MessageSlotID::BlockResponse.stacker_db_contract(self.args.mainnet, reward_cycle); - info!( - "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", - self.args.interval, self.args.max_age - ); - let mut session = stackerdb_session(&self.args.host, contract); - info!("Confirming messages for {nmb_signers} registered signers"; - "signer_addresses" => self.cycle_state.signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") - ); - let mut last_messages = HashMap::with_capacity(nmb_signers); - let mut last_updates = HashMap::with_capacity(nmb_signers); - loop { - info!("Polling signers stackerdb for new messages..."); - let mut missing_signers = Vec::with_capacity(nmb_signers); - let mut stale_signers = Vec::with_capacity(nmb_signers); - let mut unexpected_messages = HashMap::new(); - - if self.refresh_state()? { - let reward_cycle = self - .cycle_state - .reward_cycle - .expect("BUG: reward cycle not set"); - let contract = MessageSlotID::BlockResponse - .stacker_db_contract(self.args.mainnet, reward_cycle); - info!( - "Reward cycle has changed to {reward_cycle}. Updating stacker db session to StackerDB contract {contract}.", - ); - session = stackerdb_session(&self.args.host, contract); - // Clear the last messages and signer last update times. - last_messages.clear(); - last_updates.clear(); - } - let new_messages: Vec<_> = session - .get_latest_chunks(&self.cycle_state.slot_ids)? - .into_iter() - .map(|chunk_opt| { - chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) - }) - .collect(); - - for (signer_message_opt, slot_id) in - new_messages.into_iter().zip(&self.cycle_state.slot_ids) - { - let signer_slot_id = SignerSlotID(*slot_id); - let signer_address = *self - .cycle_state - .signers_addresses - .get(&signer_slot_id) - .expect("BUG: missing signer address for given slot id"); - let Some(signer_message) = signer_message_opt else { - missing_signers.push(signer_address); - continue; - }; - if let Some(last_message) = last_messages.get(&signer_slot_id) { - if last_message == &signer_message { - continue; - } - } - let epoch = self.stacks_client.get_node_epoch()?; - if epoch < StacksEpochId::Epoch25 { - return Err(ClientError::UnsupportedStacksFeature(format!("Monitoring signers is only supported for Epoch 2.5 and later. Current epoch: {epoch:?}"))); - } - if (epoch == StacksEpochId::Epoch25 - && !matches!(signer_message, SignerMessage::MockSignature(_))) - || (epoch > StacksEpochId::Epoch25 - && !matches!(signer_message, SignerMessage::BlockResponse(_))) - { - unexpected_messages.insert(signer_address, (signer_message, signer_slot_id)); - continue; - } - last_messages.insert(signer_slot_id, signer_message); - last_updates.insert(signer_slot_id, std::time::Instant::now()); - } - for (slot_id, last_update_time) in last_updates.iter() { - if last_update_time.elapsed().as_secs() > self.args.max_age { - let address = self - .cycle_state - .signers_addresses - .get(slot_id) - .expect("BUG: missing signer address for given slot id"); - stale_signers.push(*address); - } - } - if missing_signers.is_empty() - && stale_signers.is_empty() - && unexpected_messages.is_empty() - { - info!( - "All {} signers are sending messages as expected.", - nmb_signers - ); - } else { - self.print_missing_signers(&missing_signers); - self.print_stale_signers(&stale_signers); - self.print_unexpected_messages(&unexpected_messages); - } - sleep_ms(interval_ms); - } - } -} - fn main() { let cli = Cli::parse(); diff --git a/stacks-signer/src/monitor_signers.rs b/stacks-signer/src/monitor_signers.rs new file mode 100644 index 0000000000..fdddef64ea --- /dev/null +++ b/stacks-signer/src/monitor_signers.rs @@ -0,0 +1,331 @@ +use std::collections::HashMap; + +use clarity::codec::read_next; +use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; +use clarity::types::StacksEpochId; +use clarity::util::sleep_ms; +use libsigner::v0::messages::{MessageSlotID, SignerMessage}; +use libsigner::SignerSession; +use slog::{slog_info, slog_warn}; +use stacks_common::{info, warn}; + +use crate::cli::MonitorSignersArgs; +use crate::client::{ClientError, SignerSlotID, StacksClient}; +use crate::utils::stackerdb_session; + +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +/// The `SignerMonitor` struct is used to monitor the signers stackerdb slots for expected new messages +pub struct SignerMonitor { + /// The client being used to monitor stackerdb messages + stacks_client: StacksClient, + /// The current view of the reward cycle + cycle_state: RewardCycleState, + /// The arguments used to configure the monitor + args: MonitorSignersArgs, +} + +#[derive(Debug, Default, Clone)] +/// The `RewardCycleState` struct is used to store the current reward cycle view +pub struct RewardCycleState { + signers_slots: HashMap, + signers_keys: HashMap, + signers_addresses: HashMap, + signers_weights: HashMap, + slot_ids: Vec, + /// Reward cycle is not known until the first successful call to the node + reward_cycle: Option, +} + +impl SignerMonitor { + /// Create a new `SignerMonitor` instance from the given command line args + pub fn new(args: MonitorSignersArgs) -> Self { + url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); + let stacks_client = StacksClient::new( + StacksPrivateKey::new(), // We don't need a private key to read + args.host.clone(), + "FOO".to_string(), // We don't care about authorized paths. Just accessing public info + args.mainnet, + ); + Self { + stacks_client, + cycle_state: RewardCycleState::default(), + args, + } + } + + fn refresh_state(&mut self) -> Result { + let reward_cycle = self + .stacks_client + .get_current_reward_cycle_info()? + .reward_cycle; + if Some(reward_cycle) == self.cycle_state.reward_cycle { + // The reward cycle has not changed. Nothing to refresh. + return Ok(false); + } + self.cycle_state.reward_cycle = Some(reward_cycle); + + self.cycle_state.signers_keys.clear(); + self.cycle_state.signers_addresses.clear(); + + self.cycle_state.signers_slots = + self.stacks_client.get_parsed_signer_slots(reward_cycle)?; + + let entries = self + .stacks_client + .get_reward_set_signers(reward_cycle)? + .unwrap_or_else(|| { + panic!("No signers found for the current reward cycle {reward_cycle}") + }); + for entry in entries { + let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + .expect("Failed to convert signing key to StacksPublicKey"); + let stacks_address = StacksAddress::p2pkh(self.args.mainnet, &public_key); + self.cycle_state + .signers_keys + .insert(stacks_address, public_key); + self.cycle_state + .signers_weights + .insert(stacks_address, entry.weight); + } + for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { + self.cycle_state + .signers_addresses + .insert(*slot_id, *signer_address); + } + + for (signer_address, slot_id) in self.cycle_state.signers_slots.iter() { + self.cycle_state + .signers_addresses + .insert(*slot_id, *signer_address); + self.cycle_state.slot_ids.push(slot_id.0); + } + Ok(true) + } + + fn print_missing_signers(&self, missing_signers: &[StacksAddress]) { + if missing_signers.is_empty() { + return; + } + let formatted_signers = missing_signers + .iter() + .map(|addr| format!("{addr}")) + .collect::>() + .join(", "); + let formatted_keys = self + .cycle_state + .signers_keys + .iter() + .filter_map(|(addr, key)| { + if missing_signers.contains(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); + let missing_weight = missing_signers + .iter() + .map(|addr| self.cycle_state.signers_weights.get(addr).unwrap()) + .sum::(); + let total_weight = self.cycle_state.signers_weights.values().sum::(); + let percentage_missing = missing_weight as f64 / total_weight as f64 * 100.00; + warn!( + "Missing messages for {} of {} signer(s). Missing {percentage_missing:.2}% of signing weight ({missing_weight}/{total_weight})", missing_signers.len(), self.cycle_state.signers_addresses.len(); + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys + ); + } + + fn print_stale_signers(&self, stale_signers: &[StacksAddress]) { + if stale_signers.is_empty() { + return; + } + let formatted_signers = stale_signers + .iter() + .map(|addr| format!("{addr}")) + .collect::>() + .join(", "); + let formatted_keys = self + .cycle_state + .signers_keys + .iter() + .filter_map(|(addr, key)| { + if stale_signers.contains(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); + warn!( + "No new updates from {} of {} signer(s) in over {} seconds", + stale_signers.len(), + self.cycle_state.signers_addresses.len(), + self.args.max_age; + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys + ); + } + + fn print_unexpected_messages( + &self, + unexpected_messages: &HashMap, + ) { + if unexpected_messages.is_empty() { + return; + } + let formatted_signers = unexpected_messages + .iter() + .map(|(addr, (msg, slot))| { + format!("(address: {addr}, slot_id: {slot}, message: {msg:?})") + }) + .collect::>() + .join(", "); + let formatted_keys = self + .cycle_state + .signers_keys + .iter() + .filter_map(|(addr, key)| { + if unexpected_messages.contains_key(addr) { + Some(format!("0x{}", key.to_hex())) + } else { + None + } + }) + .collect::>() + .join(", "); + warn!( + "Unexpected messages from {} of {} signer(s).", + unexpected_messages.len(), + self.cycle_state.signers_addresses.len(); + "signer_addresses" => formatted_signers, + "signer_keys" => formatted_keys + ); + } + + /// Start monitoring the signers stackerdb slots for expected new messages + pub fn start(&mut self) -> Result<(), ClientError> { + self.refresh_state()?; + let nmb_signers = self.cycle_state.signers_keys.len(); + let interval_ms = self.args.interval * 1000; + let reward_cycle = self + .cycle_state + .reward_cycle + .expect("BUG: reward cycle not set"); + let contract = + MessageSlotID::BlockResponse.stacker_db_contract(self.args.mainnet, reward_cycle); + info!( + "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", + self.args.interval, self.args.max_age + ); + let mut session = stackerdb_session(&self.args.host, contract); + info!("Confirming messages for {nmb_signers} registered signers"; + "signer_addresses" => self.cycle_state.signers_addresses.values().map(|addr| format!("{addr}")).collect::>().join(", ") + ); + let mut last_messages = HashMap::with_capacity(nmb_signers); + let mut last_updates = HashMap::with_capacity(nmb_signers); + loop { + info!("Polling signers stackerdb for new messages..."); + let mut missing_signers = Vec::with_capacity(nmb_signers); + let mut stale_signers = Vec::with_capacity(nmb_signers); + let mut unexpected_messages = HashMap::new(); + + if self.refresh_state()? { + let reward_cycle = self + .cycle_state + .reward_cycle + .expect("BUG: reward cycle not set"); + let contract = MessageSlotID::BlockResponse + .stacker_db_contract(self.args.mainnet, reward_cycle); + info!( + "Reward cycle has changed to {reward_cycle}. Updating stacker db session to StackerDB contract {contract}.", + ); + session = stackerdb_session(&self.args.host, contract); + // Clear the last messages and signer last update times. + last_messages.clear(); + last_updates.clear(); + } + let new_messages: Vec<_> = session + .get_latest_chunks(&self.cycle_state.slot_ids)? + .into_iter() + .map(|chunk_opt| { + chunk_opt.and_then(|data| read_next::(&mut &data[..]).ok()) + }) + .collect(); + + for (signer_message_opt, slot_id) in + new_messages.into_iter().zip(&self.cycle_state.slot_ids) + { + let signer_slot_id = SignerSlotID(*slot_id); + let signer_address = *self + .cycle_state + .signers_addresses + .get(&signer_slot_id) + .expect("BUG: missing signer address for given slot id"); + let Some(signer_message) = signer_message_opt else { + missing_signers.push(signer_address); + continue; + }; + if let Some(last_message) = last_messages.get(&signer_slot_id) { + if last_message == &signer_message { + continue; + } + } + let epoch = self.stacks_client.get_node_epoch()?; + if epoch < StacksEpochId::Epoch25 { + return Err(ClientError::UnsupportedStacksFeature(format!("Monitoring signers is only supported for Epoch 2.5 and later. Current epoch: {epoch:?}"))); + } + if (epoch == StacksEpochId::Epoch25 + && !matches!(signer_message, SignerMessage::MockSignature(_))) + || (epoch > StacksEpochId::Epoch25 + && !matches!(signer_message, SignerMessage::BlockResponse(_))) + { + unexpected_messages.insert(signer_address, (signer_message, signer_slot_id)); + continue; + } + last_messages.insert(signer_slot_id, signer_message); + last_updates.insert(signer_slot_id, std::time::Instant::now()); + } + for (slot_id, last_update_time) in last_updates.iter() { + if last_update_time.elapsed().as_secs() > self.args.max_age { + let address = self + .cycle_state + .signers_addresses + .get(slot_id) + .expect("BUG: missing signer address for given slot id"); + stale_signers.push(*address); + } + } + if missing_signers.is_empty() + && stale_signers.is_empty() + && unexpected_messages.is_empty() + { + info!( + "All {} signers are sending messages as expected.", + nmb_signers + ); + } else { + self.print_missing_signers(&missing_signers); + self.print_stale_signers(&stale_signers); + self.print_unexpected_messages(&unexpected_messages); + } + sleep_ms(interval_ms); + } + } +} diff --git a/stacks-signer/src/utils.rs b/stacks-signer/src/utils.rs new file mode 100644 index 0000000000..955177e02d --- /dev/null +++ b/stacks-signer/src/utils.rs @@ -0,0 +1,24 @@ +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use clarity::vm::types::QualifiedContractIdentifier; +use libsigner::{SignerSession, StackerDBSession}; + +/// Create a new stacker db session +pub fn stackerdb_session(host: &str, contract: QualifiedContractIdentifier) -> StackerDBSession { + let mut session = StackerDBSession::new(host, contract.clone()); + session.connect(host.to_string(), contract).unwrap(); + session +} From 970b8c431334ae1bef5de9ea02a832433d95e5d4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 20 Sep 2024 15:11:54 -0700 Subject: [PATCH 1189/1400] CRC: get mainnet flag from stacks_client directly Signed-off-by: Jacinta Ferrant --- libsigner/src/v0/messages.rs | 33 ++++++++++++----------- stacks-signer/src/cli.rs | 3 --- stacks-signer/src/client/stacks_client.rs | 24 ++++++++++++++++- stacks-signer/src/monitor_signers.rs | 16 +++++------ 4 files changed, 48 insertions(+), 28 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index ae565207a7..7209398c1c 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -275,6 +275,8 @@ pub struct PeerInfo { pub pox_consensus: ConsensusHash, /// The server version pub server_version: String, + /// The network id + pub network_id: u32, } impl StacksMessageCodec for PeerInfo { @@ -287,6 +289,7 @@ impl StacksMessageCodec for PeerInfo { fd.write_all(self.server_version.as_bytes()) .map_err(CodecError::WriteError)?; write_next(fd, &self.pox_consensus)?; + write_next(fd, &self.network_id)?; Ok(()) } @@ -305,6 +308,7 @@ impl StacksMessageCodec for PeerInfo { ) })?; let pox_consensus = read_next::(fd)?; + let network_id = read_next(fd)?; Ok(Self { burn_block_height, stacks_tip_consensus_hash, @@ -312,6 +316,7 @@ impl StacksMessageCodec for PeerInfo { stacks_tip_height, server_version, pox_consensus, + network_id, }) } } @@ -321,18 +326,15 @@ impl StacksMessageCodec for PeerInfo { pub struct MockProposal { /// The view of the stacks node peer information at the time of the mock proposal pub peer_info: PeerInfo, - /// The chain id for the mock proposal - pub chain_id: u32, /// The miner's signature across the peer info signature: MessageSignature, } impl MockProposal { /// Create a new mock proposal data struct from the provided peer info, chain id, and private key. - pub fn new(peer_info: PeerInfo, chain_id: u32, stacks_private_key: &StacksPrivateKey) -> Self { + pub fn new(peer_info: PeerInfo, stacks_private_key: &StacksPrivateKey) -> Self { let mut sig = Self { signature: MessageSignature::empty(), - chain_id, peer_info, }; sig.sign(stacks_private_key) @@ -342,7 +344,8 @@ impl MockProposal { /// The signature hash for the mock proposal pub fn miner_signature_hash(&self) -> Sha256Sum { - let domain_tuple = make_structured_data_domain("mock-miner", "1.0.0", self.chain_id); + let domain_tuple = + make_structured_data_domain("mock-miner", "1.0.0", self.peer_info.network_id); let data_tuple = Value::Tuple( TupleData::from_data(vec![ ( @@ -375,7 +378,8 @@ impl MockProposal { /// The signature hash including the miner's signature. Used by signers. fn signer_signature_hash(&self) -> Sha256Sum { - let domain_tuple = make_structured_data_domain("mock-signer", "1.0.0", self.chain_id); + let domain_tuple = + make_structured_data_domain("mock-signer", "1.0.0", self.peer_info.network_id); let data_tuple = Value::Tuple( TupleData::from_data(vec![ ( @@ -413,18 +417,15 @@ impl MockProposal { impl StacksMessageCodec for MockProposal { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { self.peer_info.consensus_serialize(fd)?; - write_next(fd, &self.chain_id)?; write_next(fd, &self.signature)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { let peer_info = PeerInfo::consensus_deserialize(fd)?; - let chain_id = read_next::(fd)?; let signature = read_next::(fd)?; Ok(Self { peer_info, - chain_id, signature, }) } @@ -1024,6 +1025,12 @@ mod test { let stacks_tip_height = thread_rng().next_u64(); let server_version = "0.0.0".to_string(); let pox_consensus_byte: u8 = thread_rng().gen(); + let network_byte: u8 = thread_rng().gen_range(0..=1); + let network_id = if network_byte == 1 { + CHAIN_ID_TESTNET + } else { + CHAIN_ID_MAINNET + }; PeerInfo { burn_block_height, stacks_tip_consensus_hash: ConsensusHash([stacks_tip_consensus_byte; 20]), @@ -1031,19 +1038,13 @@ mod test { stacks_tip_height, server_version, pox_consensus: ConsensusHash([pox_consensus_byte; 20]), + network_id, } } fn random_mock_proposal() -> MockProposal { - let chain_byte: u8 = thread_rng().gen_range(0..=1); - let chain_id = if chain_byte == 1 { - CHAIN_ID_TESTNET - } else { - CHAIN_ID_MAINNET - }; let peer_info = random_peer_data(); MockProposal { peer_info, - chain_id, signature: MessageSignature::empty(), } } diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index c691e7bb69..3b74635cbc 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -266,9 +266,6 @@ pub struct MonitorSignersArgs { /// The Stacks node to connect to #[arg(long)] pub host: String, - /// Whether the node is mainnet. Default is false. - #[arg(long, default_value = "false")] - pub mainnet: bool, /// Set the polling interval in seconds. #[arg(long, short, default_value = "60")] pub interval: u64, diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 5898258f60..31301e8b0a 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -75,7 +75,7 @@ pub struct StacksClient { /// The chain we are interacting with chain_id: u32, /// Whether we are mainnet or not - mainnet: bool, + pub mainnet: bool, /// The Client used to make HTTP connects stacks_node_client: reqwest::blocking::Client, /// the auth password for the stacks node @@ -135,6 +135,28 @@ impl StacksClient { } } + /// Create a new signer StacksClient and attempt to connect to the stacks node to determine the version + pub fn try_from_host( + stacks_private_key: StacksPrivateKey, + node_host: String, + auth_password: String, + ) -> Result { + let mut stacks_client = Self::new(stacks_private_key, node_host, auth_password, true); + let pubkey = StacksPublicKey::from_private(&stacks_private_key); + let info = stacks_client.get_peer_info()?; + if info.network_id == CHAIN_ID_MAINNET { + stacks_client.mainnet = true; + stacks_client.chain_id = CHAIN_ID_MAINNET; + stacks_client.tx_version = TransactionVersion::Mainnet; + } else { + stacks_client.mainnet = false; + stacks_client.chain_id = CHAIN_ID_TESTNET; + stacks_client.tx_version = TransactionVersion::Testnet; + } + stacks_client.stacks_address = StacksAddress::p2pkh(stacks_client.mainnet, &pubkey); + Ok(stacks_client) + } + /// Get our signer address pub const fn get_signer_address(&self) -> &StacksAddress { &self.stacks_address diff --git a/stacks-signer/src/monitor_signers.rs b/stacks-signer/src/monitor_signers.rs index fdddef64ea..7f03e96b5d 100644 --- a/stacks-signer/src/monitor_signers.rs +++ b/stacks-signer/src/monitor_signers.rs @@ -51,15 +51,15 @@ pub struct RewardCycleState { } impl SignerMonitor { - /// Create a new `SignerMonitor` instance from the given command line args + /// Create a new `SignerMonitor` instance pub fn new(args: MonitorSignersArgs) -> Self { url::Url::parse(&format!("http://{}", args.host)).expect("Failed to parse node host"); - let stacks_client = StacksClient::new( + let stacks_client = StacksClient::try_from_host( StacksPrivateKey::new(), // We don't need a private key to read args.host.clone(), "FOO".to_string(), // We don't care about authorized paths. Just accessing public info - args.mainnet, - ); + ) + .expect("Failed to connect to provided host."); Self { stacks_client, cycle_state: RewardCycleState::default(), @@ -93,7 +93,7 @@ impl SignerMonitor { for entry in entries { let public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) .expect("Failed to convert signing key to StacksPublicKey"); - let stacks_address = StacksAddress::p2pkh(self.args.mainnet, &public_key); + let stacks_address = StacksAddress::p2pkh(self.stacks_client.mainnet, &public_key); self.cycle_state .signers_keys .insert(stacks_address, public_key); @@ -228,8 +228,8 @@ impl SignerMonitor { .cycle_state .reward_cycle .expect("BUG: reward cycle not set"); - let contract = - MessageSlotID::BlockResponse.stacker_db_contract(self.args.mainnet, reward_cycle); + let contract = MessageSlotID::BlockResponse + .stacker_db_contract(self.stacks_client.mainnet, reward_cycle); info!( "Monitoring signers stackerdb. Polling interval: {} secs, Max message age: {} secs, Reward cycle: {reward_cycle}, StackerDB contract: {contract}", self.args.interval, self.args.max_age @@ -252,7 +252,7 @@ impl SignerMonitor { .reward_cycle .expect("BUG: reward cycle not set"); let contract = MessageSlotID::BlockResponse - .stacker_db_contract(self.args.mainnet, reward_cycle); + .stacker_db_contract(self.stacks_client.mainnet, reward_cycle); info!( "Reward cycle has changed to {reward_cycle}. Updating stacker db session to StackerDB contract {contract}.", ); From 5d860c60b7b2cdb6c69c9e1466028904d240394a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sun, 22 Sep 2024 07:03:22 -0500 Subject: [PATCH 1190/1400] test: add response unit test for getsortition --- stackslib/src/net/api/tests/getsortition.rs | 72 ++++++++++++++++++++- 1 file changed, 69 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs index 8541b73eb6..e112fde4a0 100644 --- a/stackslib/src/net/api/tests/getsortition.rs +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -19,10 +19,14 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash}; use stacks_common::types::net::PeerHost; -use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier}; +use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier, SortitionInfo}; +use crate::net::api::tests::test_rpc; use crate::net::connection::ConnectionOptions; -use crate::net::http::{Error as HttpError, HttpRequestPreamble, HttpVersion}; -use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble}; +use crate::net::http::{ + Error as HttpError, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponsePayload, HttpVersion, +}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble, StacksHttpRequest}; use crate::net::Error as NetError; fn make_preamble(query: &str) -> HttpRequestPreamble { @@ -99,3 +103,65 @@ fn test_parse_request() { } } } + +#[test] +fn response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 33333); + + let request = StacksHttpRequest::new_for_peer( + addr.into(), + "GET".into(), + "/v3/sortitions".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data"); + let mut responses = test_rpc(function_name!(), vec![request]); + let HttpResponsePayload::JSON(response) = + responses.pop().unwrap().get_http_payload_ok().unwrap() + else { + panic!("Expected JSON response"); + }; + + info!("Response:\n{:#?}\n", response); + + let info_array = response.as_array().expect("Response should be array"); + assert_eq!( + info_array.len(), + 1, + "/v3/sortitions should return a single entry" + ); + + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 33333); + let request = StacksHttpRequest::new_for_peer( + addr.into(), + "GET".into(), + "/v3/sortitions/latest_and_last".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data"); + let mut responses = test_rpc(function_name!(), vec![request]); + let HttpResponsePayload::JSON(response) = + responses.pop().unwrap().get_http_payload_ok().unwrap() + else { + panic!("Expected JSON response"); + }; + + info!("Response:\n{:#?}\n", response); + + let info_array = response.as_array().expect("Response should be array"); + assert_eq!( + info_array.len(), + 2, + "/v3/sortitions/latest_and_last should return 2 entries" + ); + let first_entry: SortitionInfo = serde_json::from_value(info_array[0].clone()) + .expect("Response array elements should parse to SortitionInfo"); + let second_entry: SortitionInfo = serde_json::from_value(info_array[1].clone()) + .expect("Response array elements should parse to SortitionInfo"); + assert!(first_entry.was_sortition); + assert!(second_entry.was_sortition); + assert_eq!( + first_entry.last_sortition_ch.as_ref().unwrap(), + &second_entry.consensus_hash, + ); +} From 24bb08c71e09c1af3a1bf9f0d1b0afb2cc776936 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sun, 22 Sep 2024 20:51:16 -0400 Subject: [PATCH 1191/1400] fix: use `max_unspent_utxos` in `get_all_utxos` --- .../stacks-node/src/burnchains/bitcoin_regtest_controller.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 98f504cf99..a3a6201813 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -668,7 +668,7 @@ impl BitcoinRegtestController { max_conf.into(), filter_addresses.clone().into(), true.into(), - json!({ "minimumAmount": minimum_amount }), + json!({ "minimumAmount": minimum_amount, "maximumCount": self.config.burnchain.max_unspent_utxos }), ], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), From 2140b4433aebc75ab099e3044c6d1b6fbce22a68 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Sep 2024 14:46:57 +0000 Subject: [PATCH 1192/1400] fix: typo Co-authored-by: Brice Dobry --- stackslib/src/net/neighbors/comms.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index f3e160ff57..ed0e03f5c6 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -520,7 +520,7 @@ impl NeighborComms for PeerNetworkComms { .map(|event_ref| *event_ref) } - /// Remove a connecting neighbor because it conected + /// Remove a connecting neighbor because it connected fn remove_connecting(&mut self, network: &PeerNetwork, nk: &NK) { self.connecting.remove(&nk.to_neighbor_key(network)); } From 70c8656ef2308881a7f89952a2dc445a1d3662ec Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 23 Sep 2024 08:18:27 -0700 Subject: [PATCH 1193/1400] CRC: fix build error and move copyright text to top of monitor_signers file Signed-off-by: Jacinta Ferrant --- stacks-signer/src/monitor_signers.rs | 30 ++++++++++++++-------------- testnet/stacks-node/src/neon_node.rs | 4 ++-- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/stacks-signer/src/monitor_signers.rs b/stacks-signer/src/monitor_signers.rs index 7f03e96b5d..4bc017fa27 100644 --- a/stacks-signer/src/monitor_signers.rs +++ b/stacks-signer/src/monitor_signers.rs @@ -1,18 +1,3 @@ -use std::collections::HashMap; - -use clarity::codec::read_next; -use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; -use clarity::types::StacksEpochId; -use clarity::util::sleep_ms; -use libsigner::v0::messages::{MessageSlotID, SignerMessage}; -use libsigner::SignerSession; -use slog::{slog_info, slog_warn}; -use stacks_common::{info, warn}; - -use crate::cli::MonitorSignersArgs; -use crate::client::{ClientError, SignerSlotID, StacksClient}; -use crate::utils::stackerdb_session; - // Copyright (C) 2020-2024 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify @@ -28,6 +13,21 @@ use crate::utils::stackerdb_session; // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::HashMap; + +use clarity::codec::read_next; +use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; +use clarity::types::StacksEpochId; +use clarity::util::sleep_ms; +use libsigner::v0::messages::{MessageSlotID, SignerMessage}; +use libsigner::SignerSession; +use slog::{slog_info, slog_warn}; +use stacks_common::{info, warn}; + +use crate::cli::MonitorSignersArgs; +use crate::client::{ClientError, SignerSlotID, StacksClient}; +use crate::utils::stackerdb_session; + /// The `SignerMonitor` struct is used to monitor the signers stackerdb slots for expected new messages pub struct SignerMonitor { /// The client being used to monitor stackerdb messages diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 793605d1c4..dcfa855c9b 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2268,6 +2268,7 @@ impl BlockMinerThread { stacks_tip_height, pox_consensus, server_version, + network_id: self.config.get_burnchain_config().chain_id, } } @@ -2392,8 +2393,7 @@ impl BlockMinerThread { return Ok(()); } let election_sortition = last_winner_snapshot.consensus_hash; - let mock_proposal = - MockProposal::new(peer_info, self.config.burnchain.chain_id, &mining_key); + let mock_proposal = MockProposal::new(peer_info, &mining_key); info!("Sending mock proposal to stackerdb: {mock_proposal:?}"); From 21defc635e94ecac03583fcef9437fec15a4e533 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Sep 2024 16:32:45 +0000 Subject: [PATCH 1194/1400] fix: typo Co-authored-by: Brice Dobry --- stackslib/src/net/inv/nakamoto.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 9be77949b1..68fd750077 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -150,7 +150,7 @@ impl InvGenerator { /// messages. /// /// If found, then return the ancestor block ID represented in `self.processed_tenures`. - /// If not, then reutrn None. + /// If not, then return None. pub(crate) fn find_ancestor_processed_tenures( &self, chainstate: &StacksChainState, From 896cae0b0a47fc50162ce88143e1200c8d6d769d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Sep 2024 12:46:33 -0400 Subject: [PATCH 1195/1400] fix: error and then panic --- stackslib/src/chainstate/stacks/db/accounts.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 105d3ed516..7c81410e87 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -415,9 +415,11 @@ impl StacksChainState { ) { clarity_tx .with_clarity_db(|ref mut db| { - let next_nonce = cur_nonce - .checked_add(1) - .unwrap_or_else(|| panic!("OUT OF NONCES")); + let next_nonce = cur_nonce.checked_add(1).unwrap_or_else(|| { + error!("OUT OF NONCES"); + panic!(); + }); + db.set_account_nonce(&principal, next_nonce)?; Ok(()) }) From 89b9d84c7f6283f92fee828f292258547e0f35f7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Sep 2024 13:07:28 -0400 Subject: [PATCH 1196/1400] fix: get_mut() --> get() --- stackslib/src/net/inv/nakamoto.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 68fd750077..15940e23be 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -218,7 +218,7 @@ impl InvGenerator { }; // this tip has a known table - if let Some(loaded_tenure_info) = tenure_infos.get_mut(tenure_id_consensus_hash) { + if let Some(loaded_tenure_info) = tenure_infos.get(tenure_id_consensus_hash) { // we've loaded this tenure info before for this tip return Ok(loaded_tenure_info.clone()); } else { From 091afbfab03d659020ec4b50db3ca40b3850bfba Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 23 Sep 2024 10:24:14 -0700 Subject: [PATCH 1197/1400] CRC: update v3_signer_endpoint test to confirm exact number of blocks Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/nakamoto/mod.rs | 23 +++-- .../src/tests/nakamoto_integrations.rs | 85 ++++++++++++++----- 2 files changed, 79 insertions(+), 29 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index c111f4f212..d006ae2184 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -3316,7 +3316,7 @@ impl NakamotoChainState { StacksPublicKey::recover_to_pubkey(signer_sighash.bits(), &signer_signature) .map_err(|e| ChainstateError::InvalidStacksBlock(e.to_string()))?; let sql = "INSERT INTO signer_stats(public_key,reward_cycle) VALUES(?1,?2) ON CONFLICT(public_key,reward_cycle) DO UPDATE SET blocks_signed=blocks_signed+1"; - let params = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle]; + let params = params![signer_pubkey.to_hex(), reward_cycle]; tx.execute(sql, params)?; } Ok(()) @@ -3331,7 +3331,7 @@ impl NakamotoChainState { ) -> Result { let sql = "SELECT blocks_signed FROM signer_stats WHERE public_key = ?1 AND reward_cycle = ?2"; - let params = params![serde_json::to_string(&signer_pubkey).unwrap(), reward_cycle]; + let params = params![signer_pubkey.to_hex(), reward_cycle]; chainstate_db .query_row(sql, params, |row| row.get("blocks_signed")) .optional() @@ -4135,12 +4135,19 @@ impl NakamotoChainState { if let Some(signer_calculation) = signer_set_calc { Self::write_reward_set(chainstate_tx, &new_block_id, &signer_calculation.reward_set)?; - let cycle_number = pox_constants - .reward_cycle_of_prepare_phase( - first_block_height.into(), - chain_tip_burn_header_height.into(), - ) - .or_else(|| reward_cycle.map(|cycle| cycle + 1)); + let cycle_number = if let Some(cycle) = pox_constants.reward_cycle_of_prepare_phase( + first_block_height.into(), + chain_tip_burn_header_height.into(), + ) { + Some(cycle) + } else { + pox_constants + .block_height_to_reward_cycle( + first_block_height.into(), + chain_tip_burn_header_height.into(), + ) + .map(|cycle| cycle + 1) + }; if let Some(cycle) = cycle_number { reward_set_data = Some(RewardSetData::new( diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5981660e50..5085608702 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -8025,11 +8025,23 @@ fn v3_signer_api_endpoint() { let (mut conf, _miner_account) = naka_neon_integration_conf(None); let password = "12345".to_string(); conf.connection_options.auth_token = Some(password.clone()); + conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let stacker_sk = setup_stacker(&mut conf); let signer_sk = Secp256k1PrivateKey::new(); let signer_addr = tests::to_addr(&signer_sk); let signer_pubkey = Secp256k1PublicKey::from_private(&signer_sk); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + send_amt + send_fee, + ); conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); // only subscribe to the block proposal events test_observer::spawn(); @@ -8070,16 +8082,13 @@ fn v3_signer_api_endpoint() { ); info!("------------------------- Reached Epoch 3.0 -------------------------"); - blind_signer(&conf, &signers, proposals_submitted); - wait_for_first_naka_block_commit(60, &commits_submitted); - // TODO (hack) instantiate the sortdb in the burnchain _ = btc_regtest_controller.sortdb_mut(); info!("------------------------- Setup finished, run test -------------------------"); - let naka_tenures = 20; + let naka_tenures = conf.burnchain.pox_reward_length.unwrap().into(); let pre_naka_reward_cycle = 1; let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -8102,11 +8111,23 @@ fn v3_signer_api_endpoint() { let blocks_signed_pre_naka = get_v3_signer(&signer_pubkey, pre_naka_reward_cycle); assert_eq!(blocks_signed_pre_naka, 0); - // Keep track of reward cycles encountered - let mut reward_cycles = HashSet::new(); + let block_height = btc_regtest_controller.get_headers_height(); + let first_reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + + let second_reward_cycle = first_reward_cycle.saturating_add(1); + let second_reward_cycle_start = btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(second_reward_cycle) + .saturating_sub(1); + + let nmb_naka_blocks_in_first_cycle = second_reward_cycle_start - block_height; + let nmb_naka_blocks_in_second_cycle = naka_tenures - nmb_naka_blocks_in_first_cycle; // Mine some nakamoto tenures - for _ in 0..naka_tenures { + for _i in 0..naka_tenures { next_block_and_mine_commit( &mut btc_regtest_controller, 60, @@ -8114,23 +8135,45 @@ fn v3_signer_api_endpoint() { &commits_submitted, ) .unwrap(); - let block_height = btc_regtest_controller.get_headers_height(); - let reward_cycle = btc_regtest_controller - .get_burnchain() - .block_height_to_reward_cycle(block_height) - .unwrap(); - reward_cycles.insert(reward_cycle); } + let block_height = btc_regtest_controller.get_headers_height(); + let reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); - // Make sure we got a couple cycles - assert!(reward_cycles.len() > 1); - assert!(!reward_cycles.contains(&pre_naka_reward_cycle)); + assert_eq!(reward_cycle, second_reward_cycle); - // Since we have only one signer, it must be signing at least 1 block per reward cycle - for reward_cycle in reward_cycles.into_iter() { - let blocks_signed = get_v3_signer(&signer_pubkey, reward_cycle); - assert_ne!(blocks_signed, 0); - } + // Assert that we mined a single block (the commit op) per tenure + let nmb_signed_first_cycle = get_v3_signer(&signer_pubkey, first_reward_cycle); + let nmb_signed_second_cycle = get_v3_signer(&signer_pubkey, second_reward_cycle); + + assert_eq!(nmb_signed_first_cycle, nmb_naka_blocks_in_first_cycle); + assert_eq!(nmb_signed_second_cycle, nmb_naka_blocks_in_second_cycle); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra stacks block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + wait_for(30, || { + Ok(coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed() + > blocks_processed_before) + }) + .unwrap(); + // Assert that we mined an additional block in the second cycle + assert_eq!( + get_v3_signer(&signer_pubkey, second_reward_cycle), + nmb_naka_blocks_in_second_cycle + 1 + ); info!("------------------------- Test finished, clean up -------------------------"); From 5bf852e3e7b86f2d5eb9c5b254c3bf5f7f03a994 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 23 Sep 2024 10:26:27 -0700 Subject: [PATCH 1198/1400] Merge remote-tracking branch 'origin/develop' into feat/monitor-signers-cli-command --- .github/workflows/bitcoin-tests.yml | 7 +- .../api/core-node/get_sortitions.example.json | 15 + ...t_sortitions_latest_and_prior.example.json | 28 ++ docs/rpc/openapi.yaml | 41 ++ stacks-signer/src/chainstate.rs | 35 +- stacks-signer/src/client/stacks_client.rs | 63 +-- stacks-signer/src/tests/chainstate.rs | 1 - stackslib/src/chainstate/nakamoto/mod.rs | 5 +- stackslib/src/net/api/getsortition.rs | 229 +++++---- stackslib/src/net/api/tests/getsortition.rs | 72 ++- .../burnchains/bitcoin_regtest_controller.rs | 2 +- .../src/nakamoto_node/sign_coordinator.rs | 12 +- .../stacks-node/src/tests/bitcoin_regtest.rs | 35 +- .../src/tests/nakamoto_integrations.rs | 438 +++++++++++++++++- .../src/tests/neon_integrations.rs | 72 ++- testnet/stacks-node/src/tests/signer/mod.rs | 94 +++- testnet/stacks-node/src/tests/signer/v0.rs | 433 +++++++++-------- 17 files changed, 1175 insertions(+), 407 deletions(-) create mode 100644 docs/rpc/api/core-node/get_sortitions.example.json create mode 100644 docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index e618eedebe..2d02b17669 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -75,9 +75,13 @@ jobs: - tests::neon_integrations::vote_for_aggregate_key_burn_op_test - tests::neon_integrations::mock_miner_replay - tests::neon_integrations::listunspent_max_utxos + - tests::neon_integrations::bitcoin_reorg_flap + - tests::neon_integrations::bitcoin_reorg_flap_with_follower + - tests::neon_integrations::start_stop_bitcoind - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration + - tests::nakamoto_integrations::simple_neon_integration_with_flash_blocks_on_epoch_3 - tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb @@ -122,9 +126,6 @@ jobs: - tests::nakamoto_integrations::follower_bootup_across_multiple_cycles - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - # Do not run this one until we figure out why it fails in CI - # - tests::neon_integrations::bitcoin_reorg_flap - # - tests::neon_integrations::bitcoin_reorg_flap_with_follower # TODO: enable these once v1 signer is supported by a new nakamoto epoch # - tests::signer::v1::dkg # - tests::signer::v1::sign_request_rejected diff --git a/docs/rpc/api/core-node/get_sortitions.example.json b/docs/rpc/api/core-node/get_sortitions.example.json new file mode 100644 index 0000000000..a56fd887b1 --- /dev/null +++ b/docs/rpc/api/core-node/get_sortitions.example.json @@ -0,0 +1,15 @@ +[ + { + "burn_block_hash": "0x046f54cd1924a5d80fc3b8186d0334b7521acae90f9e136e2bee680c720d0e83", + "burn_block_height": 231, + "burn_header_timestamp": 1726797570, + "sortition_id": "0x8a5116b7b4306dc4f6db290d1adfff9e1347f3e921bb793fc4c33e2ff05056e2", + "parent_sortition_id": "0xdaf479110cf859e58c56b6ae941f8a14e7c7992c57027183dfbda4a4b820897c", + "consensus_hash": "0x8d2c51db737597a93191f49bcdc9c7bb44b90892", + "was_sortition": true, + "miner_pk_hash160": "0x6bc51b33e9f3626944eb879147e18111581f8f9b", + "stacks_parent_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "last_sortition_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "committed_block_hash": "0xeea47d6d639c565027110e192e308fb11656183d5c077bcd718d830652800183" + } +] diff --git a/docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json b/docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json new file mode 100644 index 0000000000..db970637ed --- /dev/null +++ b/docs/rpc/api/core-node/get_sortitions_latest_and_prior.example.json @@ -0,0 +1,28 @@ +[ + { + "burn_block_hash": "0x046f54cd1924a5d80fc3b8186d0334b7521acae90f9e136e2bee680c720d0e83", + "burn_block_height": 231, + "burn_header_timestamp": 1726797570, + "sortition_id": "0x8a5116b7b4306dc4f6db290d1adfff9e1347f3e921bb793fc4c33e2ff05056e2", + "parent_sortition_id": "0xdaf479110cf859e58c56b6ae941f8a14e7c7992c57027183dfbda4a4b820897c", + "consensus_hash": "0x8d2c51db737597a93191f49bcdc9c7bb44b90892", + "was_sortition": true, + "miner_pk_hash160": "0x6bc51b33e9f3626944eb879147e18111581f8f9b", + "stacks_parent_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "last_sortition_ch": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "committed_block_hash": "0xeea47d6d639c565027110e192e308fb11656183d5c077bcd718d830652800183" + }, + { + "burn_block_hash": "0x496ff02cb63a4850d0bdee5fab69284b6eb0392b4538e1c462f82362c5becfa4", + "burn_block_height": 230, + "burn_header_timestamp": 1726797570, + "sortition_id": "0xdaf479110cf859e58c56b6ae941f8a14e7c7992c57027183dfbda4a4b820897c", + "parent_sortition_id": "0xf9058692055cbd879d7f71e566e44b905a887b2b182407ed596b5d6499ceae2a", + "consensus_hash": "0x697357c72da55b759b1d6b721676c92c69f0b490", + "was_sortition": true, + "miner_pk_hash160": "0x6bc51b33e9f3626944eb879147e18111581f8f9b", + "stacks_parent_ch": "0xf7d1bd7d9d5c5a5c368402b6ef9510bd014d70f7", + "last_sortition_ch": "0xf7d1bd7d9d5c5a5c368402b6ef9510bd014d70f7", + "committed_block_hash": "0x36ee5f7f7271de1c1d4cd830e36320b51e01605547621267ae6e9b4e9b10f95e" + } +] diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 3d4249329e..5547d3bcb6 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -675,3 +675,44 @@ paths: schema: type: string + /v3/sortitions/{lookup_kind}/{lookup}: + get: + summary: Fetch information about evaluated burnchain blocks (i.e., sortitions). + tags: + - Blocks + operationId: get_sortitions + description: + Fetch sortition information about a burnchain block. If the `lookup_kind` and `lookup` parameters are empty, it will return information about the latest burn block. + responses: + "200": + description: Information for the burn block or in the case of `latest_and_last`, multiple burn blocks + content: + application/json: + examples: + Latest: + description: A single element list is returned when just one sortition is requested + value: + $ref: ./api/core-node/get_sortitions.example.json + LatestAndLast: + description: Sortition information about the latest burn block with a winning miner, and the previous such burn block. + value: + $ref: ./api/core-node/get_sortitions_latest_and_prior.example.json + parameters: + - name: lookup_kind + in: path + description: |- + The style of lookup that should be performed. If not given, the most recent burn block processed will be returned. + Otherwise, the `lookup_kind` should be one of the following strings: + * `consensus` - find the burn block using the consensus hash supplied in the `lookup` field. + * `burn_height` - find the burn block using the burn block height supplied in the `lookup` field. + * `burn` - find the burn block using the burn block hash supplied in the `lookup` field. + * `latest_and_last` - return information about the latest burn block with a winning miner *and* the previous such burn block + required: false + schema: + type: string + - name: lookup + in: path + description: The value to use for the lookup if `lookup_kind` is `consensus`, `burn_height`, or `burn` + required: false + schema: + type: string diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4012fd48a0..4bbb9741a5 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -25,7 +25,7 @@ use stacks_common::types::chainstate::{ConsensusHash, StacksPublicKey}; use stacks_common::util::hash::Hash160; use stacks_common::{info, warn}; -use crate::client::{ClientError, StacksClient}; +use crate::client::{ClientError, CurrentAndLastSortition, StacksClient}; use crate::config::SignerConfig; use crate::signerdb::{BlockState, SignerDb}; @@ -138,8 +138,6 @@ pub struct SortitionsView { pub last_sortition: Option, /// the current successful sortition (this corresponds to the "current" miner slot) pub cur_sortition: SortitionState, - /// the hash at which the sortitions view was fetched - pub latest_consensus_hash: ConsensusHash, /// configuration settings for evaluating proposals pub config: ProposalEvalConfig, } @@ -608,42 +606,21 @@ impl SortitionsView { config: ProposalEvalConfig, client: &StacksClient, ) -> Result { - let latest_state = client.get_latest_sortition()?; - let latest_ch = latest_state.consensus_hash; - - // figure out what cur_sortition will be set to. - // if the latest sortition wasn't successful, query the last one that was. - let latest_success = if latest_state.was_sortition { - latest_state - } else { - info!("Latest state wasn't a sortition: {latest_state:?}"); - let last_sortition_ch = latest_state - .last_sortition_ch - .as_ref() - .ok_or_else(|| ClientError::NoSortitionOnChain)?; - client.get_sortition(last_sortition_ch)? - }; - - // now, figure out what `last_sortition` will be set to. - let last_sortition = latest_success - .last_sortition_ch - .as_ref() - .map(|ch| client.get_sortition(ch)) - .transpose()?; + let CurrentAndLastSortition { + current_sortition, + last_sortition, + } = client.get_current_and_last_sortition()?; - let cur_sortition = SortitionState::try_from(latest_success)?; + let cur_sortition = SortitionState::try_from(current_sortition)?; let last_sortition = last_sortition .map(SortitionState::try_from) .transpose() .ok() .flatten(); - let latest_consensus_hash = latest_ch; - Ok(Self { cur_sortition, last_sortition, - latest_consensus_hash, config, }) } diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 31301e8b0a..7b490144fc 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -1,5 +1,3 @@ -use std::collections::{HashMap, VecDeque}; - // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -15,6 +13,8 @@ use std::collections::{HashMap, VecDeque}; // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::{HashMap, VecDeque}; + use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ @@ -88,6 +88,15 @@ struct GetStackersErrorResp { err_msg: String, } +/// Result from fetching current and last sortition: +/// two sortition infos +pub struct CurrentAndLastSortition { + /// the latest winning sortition in the current burnchain fork + pub current_sortition: SortitionInfo, + /// the last winning sortition prior to `current_sortition`, if there was one + pub last_sortition: Option, +} + impl From<&GlobalConfig> for StacksClient { fn from(config: &GlobalConfig) -> Self { Self { @@ -531,10 +540,10 @@ impl StacksClient { Ok(tenures) } - /// Get the sortition information for the latest sortition - pub fn get_latest_sortition(&self) -> Result { - debug!("stacks_node_client: Getting latest sortition..."); - let path = self.sortition_info_path(); + /// Get the current winning sortition and the last winning sortition + pub fn get_current_and_last_sortition(&self) -> Result { + debug!("stacks_node_client: Getting current and prior sortition..."); + let path = format!("{}/latest_and_last", self.sortition_info_path()); let timer = crate::monitoring::new_rpc_call_timer(&path, &self.http_origin); let send_request = || { self.stacks_node_client.get(&path).send().map_err(|e| { @@ -547,29 +556,29 @@ impl StacksClient { if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } - let sortition_info = response.json()?; - Ok(sortition_info) - } - - /// Get the sortition information for a given sortition - pub fn get_sortition(&self, ch: &ConsensusHash) -> Result { - debug!("stacks_node_client: Getting sortition with consensus hash {ch}..."); - let path = format!("{}/consensus/{}", self.sortition_info_path(), ch.to_hex()); - let timer_label = format!("{}/consensus/:consensus_hash", self.sortition_info_path()); - let timer = crate::monitoring::new_rpc_call_timer(&timer_label, &self.http_origin); - let send_request = || { - self.stacks_node_client.get(&path).send().map_err(|e| { - warn!("Signer failed to request sortition"; "consensus_hash" => %ch, "err" => ?e); - e - }) + let mut info_list: VecDeque = response.json()?; + let Some(current_sortition) = info_list.pop_front() else { + return Err(ClientError::UnexpectedResponseFormat( + "Empty SortitionInfo returned".into(), + )); }; - let response = send_request()?; - timer.stop_and_record(); - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); + if !current_sortition.was_sortition { + return Err(ClientError::UnexpectedResponseFormat( + "'Current' SortitionInfo returned which was not a winning sortition".into(), + )); } - let sortition_info = response.json()?; - Ok(sortition_info) + let last_sortition = if current_sortition.last_sortition_ch.is_some() { + let Some(last_sortition) = info_list.pop_back() else { + return Err(ClientError::UnexpectedResponseFormat("'Current' SortitionInfo has `last_sortition_ch` field, but corresponding data not returned".into())); + }; + Some(last_sortition) + } else { + None + }; + Ok(CurrentAndLastSortition { + current_sortition, + last_sortition, + }) } /// Get the current peer info data from the stacks node diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index f79bcadc3f..a390c27edc 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -82,7 +82,6 @@ fn setup_test_environment( }); let view = SortitionsView { - latest_consensus_hash: cur_sortition.consensus_hash, cur_sortition, last_sortition, config: ProposalEvalConfig { diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 756212ee54..24f92ad02b 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -734,6 +734,7 @@ impl NakamotoBlockHeader { write_next(fd, &self.tx_merkle_root)?; write_next(fd, &self.state_index_root)?; write_next(fd, &self.timestamp)?; + write_next(fd, &self.pox_treatment)?; Ok(Sha512Trunc256Sum::from_hasher(hasher)) } @@ -1876,7 +1877,7 @@ impl NakamotoChainState { "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; let connected_sort_id = get_ancestor_sort_id(&handle, parent_burn_view_sn.block_height, &handle.context.chain_tip)? @@ -1888,7 +1889,7 @@ impl NakamotoChainState { "stacks_block_id" => %next_ready_block.header.block_id(), "parent_block_id" => %next_ready_block.header.parent_block_id ); - ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) })?; if connected_sort_id != parent_burn_view_sn.sortition_id { warn!( diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 5e0557ca26..7b594530c2 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -28,6 +28,7 @@ use stacks_common::util::HexError; use {serde, serde_json}; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as ChainError; @@ -51,10 +52,13 @@ pub enum QuerySpecifier { BurnchainHeaderHash(BurnchainHeaderHash), BlockHeight(u64), Latest, + /// Fetch the latest sortition *which was a winning sortition* and that sortition's + /// last sortition, returning two SortitionInfo structs. + LatestAndLast, } pub static RPC_SORTITION_INFO_PATH: &str = "/v3/sortitions"; -static PATH_REGEX: &str = "^/v3/sortitions(/(?P[a-z_]{1,15})/(?P[0-9a-f]{1,64}))?$"; +static PATH_REGEX: &str = "^/v3/sortitions(/(?P[a-z_]{1,15})(/(?P[0-9a-f]{1,64}))?)?$"; /// Struct for sortition information returned via the GetSortition API call #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] @@ -112,6 +116,7 @@ impl TryFrom<(&str, &str)> for QuerySpecifier { value.1 }; match value.0 { + "latest_and_last" => Ok(Self::LatestAndLast), "consensus" => Ok(Self::ConsensusHash( ConsensusHash::from_hex(hex_str).map_err(|e| Error::DecodeError(e.to_string()))?, )), @@ -141,6 +146,74 @@ impl GetSortitionHandler { query: QuerySpecifier::Latest, } } + + fn get_sortition_info( + sortition_sn: BlockSnapshot, + sortdb: &SortitionDB, + ) -> Result { + let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = + if !sortition_sn.sortition { + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let last_sortition = + handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; + (None, None, None, Some(last_sortition.consensus_hash)) + } else { + let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? + .ok_or_else(|| { + error!( + "Failed to load block commit from Sortition DB for snapshot with a winning block txid"; + "sortition_id" => %sortition_sn.sortition_id, + "txid" => %sortition_sn.winning_block_txid, + ); + ChainError::NoSuchBlockError + })?; + let handle = sortdb.index_handle(&sortition_sn.sortition_id); + let stacks_parent_sn = handle + .get_block_snapshot_by_height(block_commit.parent_block_ptr.into())? + .ok_or_else(|| { + warn!( + "Failed to load the snapshot of the winning block commits parent"; + "sortition_id" => %sortition_sn.sortition_id, + "txid" => %sortition_sn.winning_block_txid, + ); + ChainError::NoSuchBlockError + })?; + + // try to figure out what the last snapshot in this fork was with a successful + // sortition. + // optimization heuristic: short-circuit the load if its just `stacks_parent_sn` + let last_sortition_ch = if stacks_parent_sn.sortition { + stacks_parent_sn.consensus_hash.clone() + } else { + // we actually need to perform the marf lookup + let last_sortition = handle.get_last_snapshot_with_sortition( + sortition_sn.block_height.saturating_sub(1), + )?; + last_sortition.consensus_hash + }; + + ( + sortition_sn.miner_pk_hash.clone(), + Some(stacks_parent_sn.consensus_hash), + Some(block_commit.block_header_hash), + Some(last_sortition_ch), + ) + }; + + Ok(SortitionInfo { + burn_block_hash: sortition_sn.burn_header_hash, + burn_block_height: sortition_sn.block_height, + burn_header_timestamp: sortition_sn.burn_header_timestamp, + sortition_id: sortition_sn.sortition_id, + parent_sortition_id: sortition_sn.parent_sortition_id, + consensus_hash: sortition_sn.consensus_hash, + was_sortition: sortition_sn.sortition, + miner_pk_hash160, + stacks_parent_ch, + last_sortition_ch, + committed_block_hash, + }) + } } /// Decode the HTTP request impl HttpRequest for GetSortitionHandler { @@ -169,9 +242,15 @@ impl HttpRequest for GetSortitionHandler { let req_contents = HttpRequestContents::new().query_string(query); self.query = QuerySpecifier::Latest; - if let (Some(key), Some(value)) = (captures.name("key"), captures.name("value")) { - self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; - } + match (captures.name("key"), captures.name("value")) { + (Some(key), None) => { + self.query = QuerySpecifier::try_from((key.as_str(), ""))?; + } + (Some(key), Some(value)) => { + self.query = QuerySpecifier::try_from((key.as_str(), value.as_str()))?; + } + _ => {} + }; Ok(req_contents) } @@ -194,81 +273,37 @@ impl RPCRequestHandler for GetSortitionHandler { _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let result = - node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { - let query_result = match self.query { - QuerySpecifier::Latest => { + let result = node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { + let query_result = match self.query { + QuerySpecifier::Latest => Ok(Some(network.burnchain_tip.clone())), + QuerySpecifier::ConsensusHash(ref consensus_hash) => { + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) + } + QuerySpecifier::BurnchainHeaderHash(ref burn_hash) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot(burn_hash) + } + QuerySpecifier::BlockHeight(burn_height) => { + let handle = sortdb.index_handle_at_tip(); + handle.get_block_snapshot_by_height(burn_height) + } + QuerySpecifier::LatestAndLast => { + if network.burnchain_tip.sortition { + // optimization: if the burn chain tip had a sortition, just return that Ok(Some(network.burnchain_tip.clone())) - }, - QuerySpecifier::ConsensusHash(ref consensus_hash) => { - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), consensus_hash) - }, - QuerySpecifier::BurnchainHeaderHash(ref burn_hash) => { - let handle = sortdb.index_handle_at_tip(); - handle.get_block_snapshot(burn_hash) - }, - QuerySpecifier::BlockHeight(burn_height) => { - let handle = sortdb.index_handle_at_tip(); - handle.get_block_snapshot_by_height(burn_height) - }, - }; - let sortition_sn = query_result? - .ok_or_else(|| ChainError::NoSuchBlockError)?; - - let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = if !sortition_sn.sortition { - let handle = sortdb.index_handle(&sortition_sn.sortition_id); - let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; - (None, None, None, Some(last_sortition.consensus_hash)) - } else { - let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)? - .ok_or_else(|| { - error!( - "Failed to load block commit from Sortition DB for snapshot with a winning block txid"; - "sortition_id" => %sortition_sn.sortition_id, - "txid" => %sortition_sn.winning_block_txid, - ); - ChainError::NoSuchBlockError - })?; - let handle = sortdb.index_handle(&sortition_sn.sortition_id); - let stacks_parent_sn = handle.get_block_snapshot_by_height(block_commit.parent_block_ptr.into())? - .ok_or_else(|| { - warn!( - "Failed to load the snapshot of the winning block commits parent"; - "sortition_id" => %sortition_sn.sortition_id, - "txid" => %sortition_sn.winning_block_txid, - ); - ChainError::NoSuchBlockError - })?; - - // try to figure out what the last snapshot in this fork was with a successful - // sortition. - // optimization heuristic: short-circuit the load if its just `stacks_parent_sn` - let last_sortition_ch = if sortition_sn.num_sortitions == stacks_parent_sn.num_sortitions + 1 { - stacks_parent_sn.consensus_hash.clone() } else { - // we actually need to perform the marf lookup - let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height.saturating_sub(1))?; - last_sortition.consensus_hash - }; - - (sortition_sn.miner_pk_hash.clone(), Some(stacks_parent_sn.consensus_hash), Some(block_commit.block_header_hash), - Some(last_sortition_ch)) - }; - - Ok(SortitionInfo { - burn_block_hash: sortition_sn.burn_header_hash, - burn_block_height: sortition_sn.block_height, - burn_header_timestamp: sortition_sn.burn_header_timestamp, - sortition_id: sortition_sn.sortition_id, - parent_sortition_id: sortition_sn.parent_sortition_id, - consensus_hash: sortition_sn.consensus_hash, - was_sortition: sortition_sn.sortition, - miner_pk_hash160, - stacks_parent_ch, - last_sortition_ch, - committed_block_hash, - }) - }); + // we actually need to perform a marf lookup to find that last snapshot + // with a sortition + let handle = sortdb.index_handle_at_tip(); + let last_sortition = handle + .get_last_snapshot_with_sortition(network.burnchain_tip.block_height)?; + Ok(Some(last_sortition)) + } + } + }; + let sortition_sn = query_result?.ok_or_else(|| ChainError::NoSuchBlockError)?; + Self::get_sortition_info(sortition_sn, sortdb) + }); let block = match result { Ok(block) => block, @@ -290,8 +325,44 @@ impl RPCRequestHandler for GetSortitionHandler { } }; + let last_sortition_ch = block.last_sortition_ch.clone(); + let mut info_list = vec![block]; + if self.query == QuerySpecifier::LatestAndLast { + // if latest **and** last are requested, lookup the sortition info for last_sortition_ch + if let Some(last_sortition_ch) = last_sortition_ch { + let result = node.with_node_state(|_, sortdb, _, _, _| { + let last_sortition_sn = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &last_sortition_ch, + )? + .ok_or_else(|| ChainError::NoSuchBlockError)?; + Self::get_sortition_info(last_sortition_sn, sortdb) + }); + let last_block = match result { + Ok(block) => block, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("Could not find snapshot for the `last_sortition_ch`({last_sortition_ch})\n")), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load snapshot for `last_sortition_ch`({last_sortition_ch}): {:?}\n", &e); + warn!("{msg}"); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + info_list.push(last_block); + } + } + let preamble = HttpResponsePreamble::ok_json(&preamble); - let result = HttpResponseContents::try_from_json(&block)?; + let result = HttpResponseContents::try_from_json(&info_list)?; Ok((preamble, result)) } } @@ -302,7 +373,7 @@ impl HttpResponse for GetSortitionHandler { preamble: &HttpResponsePreamble, body: &[u8], ) -> Result { - let sortition_info: SortitionInfo = parse_json(preamble, body)?; + let sortition_info: Vec = parse_json(preamble, body)?; Ok(HttpResponsePayload::try_from_json(sortition_info)?) } } diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs index 8541b73eb6..e112fde4a0 100644 --- a/stackslib/src/net/api/tests/getsortition.rs +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -19,10 +19,14 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash}; use stacks_common::types::net::PeerHost; -use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier}; +use crate::net::api::getsortition::{GetSortitionHandler, QuerySpecifier, SortitionInfo}; +use crate::net::api::tests::test_rpc; use crate::net::connection::ConnectionOptions; -use crate::net::http::{Error as HttpError, HttpRequestPreamble, HttpVersion}; -use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble}; +use crate::net::http::{ + Error as HttpError, HttpRequestContents, HttpRequestPreamble, HttpResponse, + HttpResponsePayload, HttpVersion, +}; +use crate::net::httpcore::{RPCRequestHandler, StacksHttp, StacksHttpPreamble, StacksHttpRequest}; use crate::net::Error as NetError; fn make_preamble(query: &str) -> HttpRequestPreamble { @@ -99,3 +103,65 @@ fn test_parse_request() { } } } + +#[test] +fn response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 33333); + + let request = StacksHttpRequest::new_for_peer( + addr.into(), + "GET".into(), + "/v3/sortitions".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data"); + let mut responses = test_rpc(function_name!(), vec![request]); + let HttpResponsePayload::JSON(response) = + responses.pop().unwrap().get_http_payload_ok().unwrap() + else { + panic!("Expected JSON response"); + }; + + info!("Response:\n{:#?}\n", response); + + let info_array = response.as_array().expect("Response should be array"); + assert_eq!( + info_array.len(), + 1, + "/v3/sortitions should return a single entry" + ); + + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 33333); + let request = StacksHttpRequest::new_for_peer( + addr.into(), + "GET".into(), + "/v3/sortitions/latest_and_last".into(), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data"); + let mut responses = test_rpc(function_name!(), vec![request]); + let HttpResponsePayload::JSON(response) = + responses.pop().unwrap().get_http_payload_ok().unwrap() + else { + panic!("Expected JSON response"); + }; + + info!("Response:\n{:#?}\n", response); + + let info_array = response.as_array().expect("Response should be array"); + assert_eq!( + info_array.len(), + 2, + "/v3/sortitions/latest_and_last should return 2 entries" + ); + let first_entry: SortitionInfo = serde_json::from_value(info_array[0].clone()) + .expect("Response array elements should parse to SortitionInfo"); + let second_entry: SortitionInfo = serde_json::from_value(info_array[1].clone()) + .expect("Response array elements should parse to SortitionInfo"); + assert!(first_entry.was_sortition); + assert!(second_entry.was_sortition); + assert_eq!( + first_entry.last_sortition_ch.as_ref().unwrap(), + &second_entry.consensus_hash, + ); +} diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 98f504cf99..994636c97c 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -2831,7 +2831,7 @@ impl BitcoinRPCRequest { Ok(()) } - fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { + pub fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { let request = BitcoinRPCRequest::build_rpc_request(&config, &payload); let timeout = Duration::from_secs(u64::from(config.burnchain.timeout)); diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 29a64cfb27..1ac2618a53 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -886,11 +886,6 @@ impl SignCoordinator { ); continue; } - if !gathered_signatures.contains_key(&slot_id) { - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); - } if Self::fault_injection_ignore_signatures() { warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; @@ -906,6 +901,12 @@ impl SignCoordinator { continue; } + if !gathered_signatures.contains_key(&slot_id) { + total_weight_signed = total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } + info!("SignCoordinator: Signature Added to block"; "block_signer_sighash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), @@ -986,7 +987,6 @@ impl SignCoordinator { } }; } - // After gathering all signatures, return them if we've hit the threshold if total_weight_signed >= self.weight_threshold { info!("SignCoordinator: Received enough signatures. Continuing."; diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 6619152f9f..621f92aa47 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -11,6 +11,7 @@ use stacks::core::StacksEpochId; use stacks_common::util::hash::hex_bytes; use super::PUBLISH_CONTRACT; +use crate::burnchains::bitcoin_regtest_controller::BitcoinRPCRequest; use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::to_addr; @@ -19,12 +20,14 @@ use crate::Config; #[derive(Debug)] pub enum BitcoinCoreError { SpawnFailed(String), + StopFailed(String), } impl std::fmt::Display for BitcoinCoreError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::SpawnFailed(msg) => write!(f, "bitcoind spawn failed: {msg}"), + Self::StopFailed(msg) => write!(f, "bitcoind stop failed: {msg}"), } } } @@ -109,25 +112,25 @@ impl BitcoinCoreController { pub fn stop_bitcoind(&mut self) -> Result<(), BitcoinCoreError> { if let Some(_) = self.bitcoind_process.take() { - let mut command = Command::new("bitcoin-cli"); - command.stdout(Stdio::piped()).arg("-rpcconnect=127.0.0.1"); - - self.add_rpc_cli_args(&mut command); - - command.arg("stop"); - - let mut process = match command.spawn() { - Ok(child) => child, - Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{e:?}"))), + let payload = BitcoinRPCRequest { + method: "stop".to_string(), + params: vec![], + id: "stacks".to_string(), + jsonrpc: "2.0".to_string(), }; - let mut out_reader = BufReader::new(process.stdout.take().unwrap()); - let mut line = String::new(); - while let Ok(bytes_read) = out_reader.read_line(&mut line) { - if bytes_read == 0 { - break; + let res = BitcoinRPCRequest::send(&self.config, payload) + .map_err(|e| BitcoinCoreError::StopFailed(format!("{e:?}")))?; + + if let Some(err) = res.get("error") { + if !err.is_null() { + return Err(BitcoinCoreError::StopFailed(format!("{err}"))); } - eprintln!("{line}"); + } else { + return Err(BitcoinCoreError::StopFailed(format!( + "Invalid response: {:?}", + res + ))); } } Ok(()) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 4cd5ceb997..314200e748 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -930,6 +930,161 @@ pub fn boot_to_epoch_3( info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); } +/// Boot the chain to just before the Epoch 3.0 boundary to allow for flash blocks +/// This function is similar to `boot_to_epoch_3`, but it stops at epoch 3 start height - 2, +/// allowing for flash blocks to occur when the epoch changes. +/// +/// * `stacker_sks` - private keys for sending large `stack-stx` transactions to activate pox-4 +/// * `signer_sks` - corresponding signer keys for the stackers +pub fn boot_to_pre_epoch_3_boundary( + naka_conf: &Config, + blocks_processed: &Arc, + stacker_sks: &[StacksPrivateKey], + signer_sks: &[StacksPrivateKey], + self_signing: &mut Option<&mut TestSigners>, + btc_regtest_controller: &mut BitcoinRegtestController, +) { + assert_eq!(stacker_sks.len(), signer_sks.len()); + + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let current_height = btc_regtest_controller.get_headers_height(); + info!( + "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; + "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), + ); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, &blocks_processed); + // first mined stacks block + next_block_and_wait(btc_regtest_controller, &blocks_processed); + + let start_time = Instant::now(); + loop { + if start_time.elapsed() > Duration::from_secs(20) { + panic!("Timed out waiting for the stacks height to increment") + } + let stacks_height = get_chain_info(&naka_conf).stacks_tip_height; + if stacks_height >= 1 { + break; + } + thread::sleep(Duration::from_millis(100)); + } + // stack enough to activate pox-4 + + let block_height = btc_regtest_controller.get_headers_height(); + let reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + + for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(&stacker_sk).bytes, + ); + let pox_addr_tuple: clarity::vm::Value = + pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &signer_sk, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + 12_u128, + u128::MAX, + 1, + ) + .unwrap() + .to_rsv(); + + let signer_pk = StacksPublicKey::from_private(signer_sk); + + let stacking_tx = tests::make_contract_call( + &stacker_sk, + 0, + 1000, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(block_height as u128), + clarity::vm::Value::UInt(12), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) + .unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + } + + // Update TestSigner with `signer_sks` if self-signing + if let Some(ref mut signers) = self_signing { + signers.signer_keys = signer_sks.to_vec(); + } + + let prepare_phase_start = btc_regtest_controller + .get_burnchain() + .pox_constants + .prepare_phase_start( + btc_regtest_controller.get_burnchain().first_block_height, + reward_cycle, + ); + + // Run until the prepare phase + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + prepare_phase_start, + &naka_conf, + ); + + // We need to vote on the aggregate public key if this test is self signing + if let Some(signers) = self_signing { + // Get the aggregate key + let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); + let aggregate_public_key = + clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) + .expect("Failed to serialize aggregate public key"); + let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); + let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); + // Vote on the aggregate public key + for signer_sk in signer_sks_unique.values() { + let signer_index = + get_signer_index(&signer_set, &Secp256k1PublicKey::from_private(signer_sk)) + .unwrap(); + let voting_tx = tests::make_contract_call( + signer_sk, + 0, + 300, + &StacksAddress::burn_address(false), + SIGNERS_VOTING_NAME, + SIGNERS_VOTING_FUNCTION_NAME, + &[ + clarity::vm::Value::UInt(u128::try_from(signer_index).unwrap()), + aggregate_public_key.clone(), + clarity::vm::Value::UInt(0), + clarity::vm::Value::UInt(reward_cycle as u128 + 1), + ], + ); + submit_tx(&http_origin, &voting_tx); + } + } + + run_until_burnchain_height( + btc_regtest_controller, + &blocks_processed, + epoch_3.start_height - 2, + &naka_conf, + ); + + info!("Bootstrapped to one block before Epoch 3.0 boundary, Epoch 2.x miner should continue for one more block"); +} + fn get_signer_index( stacker_set: &GetStackersResponse, signer_key: &Secp256k1PublicKey, @@ -1521,6 +1676,287 @@ fn simple_neon_integration() { run_loop_thread.join().unwrap(); } +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, +/// having flash blocks when epoch updates and expects everything to work normally, +/// then switches to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// This test makes three assertions: +/// * 30 blocks are mined after 3.0 starts. This is enough to mine across 2 reward cycles +/// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 +/// * The final chain tip is a nakamoto block +fn simple_neon_integration_with_flash_blocks_on_epoch_3() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 100; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + send_amt * 2 + send_fee, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_pre_epoch_3_boundary( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let block_height_before_mining = tip.block_height; + + // Mine 3 Bitcoin blocks rapidly without waiting for Stacks blocks to be processed. + // These blocks won't be considered "mined" until the next_block_and_wait call. + for _i in 0..3 { + btc_regtest_controller.build_next_block(1); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Verify that the canonical burn chain tip hasn't advanced yet + assert_eq!( + tip.block_height, + btc_regtest_controller.get_headers_height() - 1 + ); + assert_eq!(tip.block_height, block_height_before_mining); + } + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + // Mine a new block and wait for it to be processed. + // This should update the canonical burn chain tip to include all 4 new blocks. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // Verify that the burn chain tip has advanced by 4 blocks + assert_eq!( + tip.block_height, + block_height_before_mining + 4, + "Burn chain tip should have advanced by 4 blocks" + ); + + assert_eq!( + tip.block_height, + btc_regtest_controller.get_headers_height() - 1 + ); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // Mine 15 nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + } + + // Submit a TX + let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + let mut mempool = naka_conf + .connect_mempool_db() + .expect("Database failure opening mempool"); + + mempool + .submit_raw( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + transfer_tx.clone(), + &ExecutionCost::max_value(), + &StacksEpochId::Epoch30, + ) + .unwrap(); + + // Mine 15 more nakamoto tenures + for _i in 0..15 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + signer_vote_if_needed( + &btc_regtest_controller, + &naka_conf, + &[sender_signer_sk], + &signers, + ); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + // assert that the transfer tx was observed + let transfer_tx_included = test_observer::get_blocks() + .into_iter() + .find(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + .is_some() + }) + .is_some(); + + assert!( + transfer_tx_included, + "Nakamoto node failed to include the transfer tx" + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 30); + + // Check that we have the expected burn blocks + // We expect to have around the blocks 220-230 and 234 onwards, with a gap of 3 blocks for the flash blocks + let bhh = u64::from(tip.burn_header_height); + + // Get the Epoch 3.0 activation height (in terms of Bitcoin block height) + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_start_height = epoch_3.start_height; + + // Find the gap in burn blocks + let mut gap_start = 0; + let mut gap_end = 0; + for i in 220..=bhh { + if test_observer::contains_burn_block_range(i..=i).is_err() { + if gap_start == 0 { + gap_start = i; + } + gap_end = i; + } else if gap_start != 0 { + break; + } + } + + // Verify that there's a gap of exactly 3 blocks + assert_eq!( + gap_end - gap_start + 1, + 3, + "Expected a gap of exactly 3 burn blocks due to flash blocks, found gap from {} to {}", + gap_start, + gap_end + ); + + // Verify that the gap includes the Epoch 3.0 activation height + assert!( + gap_start <= epoch_3_start_height && epoch_3_start_height <= gap_end, + "Expected the gap ({}..={}) to include the Epoch 3.0 activation height ({})", + gap_start, + gap_end, + epoch_3_start_height + ); + + // Verify blocks before and after the gap + test_observer::contains_burn_block_range(220..=(gap_start - 1)).unwrap(); + test_observer::contains_burn_block_range((gap_end + 1)..=bhh).unwrap(); + + info!("Verified burn block ranges, including expected gap for flash blocks"); + info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {}", epoch_3_start_height); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + #[test] #[ignore] /// This test spins up a nakamoto-neon node. @@ -5517,7 +5953,7 @@ fn signer_chainstate() { let time_start = Instant::now(); let proposal = loop { let proposal = get_latest_block_proposal(&naka_conf, &sortdb).unwrap(); - if proposal.0.header.consensus_hash == sortitions_view.latest_consensus_hash { + if proposal.0.header.consensus_hash == sortitions_view.cur_sortition.consensus_hash { break proposal; } if time_start.elapsed() > Duration::from_secs(20) { diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index d5098915ef..3dd299c861 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12398,6 +12398,10 @@ fn bitcoin_reorg_flap() { channel.stop_chains_coordinator(); } +/// Advance the bitcoin chain and wait for the miner and any followers to +/// process the next block. +/// NOTE: This only works if the followers are mock-mining, or else the counter +/// will not be updated. fn next_block_and_wait_all( btc_controller: &mut BitcoinRegtestController, miner_blocks_processed: &Arc, @@ -12447,7 +12451,7 @@ fn bitcoin_reorg_flap_with_follower() { } let (conf, _miner_account) = neon_integration_test_conf(); - let timeout = None; + let timeout = Some(Duration::from_secs(60)); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -12461,10 +12465,12 @@ fn bitcoin_reorg_flap_with_follower() { eprintln!("Chain bootstrapped..."); let mut miner_run_loop = neon::RunLoop::new(conf.clone()); + let run_loop_stopper = miner_run_loop.get_termination_switch(); let miner_blocks_processed = miner_run_loop.get_blocks_processed_arc(); let miner_channel = miner_run_loop.get_coordinator_channel().unwrap(); let mut follower_conf = conf.clone(); + follower_conf.node.mock_mining = true; follower_conf.events_observers.clear(); follower_conf.node.working_dir = format!("{}-follower", &conf.node.working_dir); follower_conf.node.seed = vec![0x01; 32]; @@ -12483,7 +12489,7 @@ fn bitcoin_reorg_flap_with_follower() { follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); - thread::spawn(move || miner_run_loop.start(None, 0)); + let run_loop_thread = thread::spawn(move || miner_run_loop.start(None, 0)); wait_for_runloop(&miner_blocks_processed); // figure out the started node's port @@ -12499,23 +12505,20 @@ fn bitcoin_reorg_flap_with_follower() { ); let mut follower_run_loop = neon::RunLoop::new(follower_conf.clone()); + let follower_run_loop_stopper = follower_run_loop.get_termination_switch(); let follower_blocks_processed = follower_run_loop.get_blocks_processed_arc(); let follower_channel = follower_run_loop.get_coordinator_channel().unwrap(); - thread::spawn(move || follower_run_loop.start(None, 0)); + let follower_thread = thread::spawn(move || follower_run_loop.start(None, 0)); wait_for_runloop(&follower_blocks_processed); eprintln!("Follower bootup complete!"); // first block wakes up the run loop - next_block_and_wait_all( - &mut btc_regtest_controller, - &miner_blocks_processed, - &[], - timeout, - ); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &miner_blocks_processed, 60); - // first block will hold our VRF registration + // next block will hold our VRF registration + // Note that the follower will not see its block processed counter bumped here next_block_and_wait_all( &mut btc_regtest_controller, &miner_blocks_processed, @@ -12609,9 +12612,11 @@ fn bitcoin_reorg_flap_with_follower() { assert_eq!(miner_channel.get_sortitions_processed(), 225); assert_eq!(follower_channel.get_sortitions_processed(), 225); - btcd_controller.stop_bitcoind().unwrap(); - miner_channel.stop_chains_coordinator(); - follower_channel.stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + follower_run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + follower_thread.join().unwrap(); } /// Tests the following: @@ -12841,3 +12846,44 @@ fn listunspent_max_utxos() { let utxos = res.expect("Failed to get utxos"); assert_eq!(utxos.num_utxos(), 10); } + +#[test] +#[ignore] +/// Test out stopping bitcoind and restarting it +fn start_stop_bitcoind() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = neon_integration_test_conf(); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + conf.node.prometheus_bind = Some(prom_bind.clone()); + + conf.burnchain.max_rbf = 1000000; + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + btcd_controller + .stop_bitcoind() + .expect("Failed to stop bitcoind"); + + thread::sleep(Duration::from_secs(5)); + + btcd_controller + .start_bitcoind() + .expect("Failed to start bitcoind"); + + btcd_controller + .stop_bitcoind() + .expect("Failed to stop bitcoind"); +} diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 0b38a79234..a48677e0f6 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -37,6 +37,7 @@ use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; use clarity::vm::types::PrincipalData; +use libsigner::v0::messages::{BlockResponse, RejectCode, SignerMessage}; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -46,7 +47,9 @@ use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; -use stacks::types::chainstate::StacksAddress; +use stacks::types::chainstate::{StacksAddress, StacksPublicKey}; +use stacks::types::PublicKey; +use stacks::util::hash::MerkleHashFunc; use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; @@ -678,6 +681,76 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Result<(), String> { + // Make sure that ALL signers accepted the block proposal + wait_for(timeout_secs, || { + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(( + hash, + signature, + ))) => { + if hash == *signer_signature_hash + && expected_signers.iter().any(|pk| { + pk.verify(hash.bits(), &signature) + .expect("Failed to verify signature") + }) + { + Some(signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == expected_signers.len()) + }) + } + + pub fn wait_for_block_rejections( + &self, + timeout_secs: u64, + expected_signers: &[StacksPublicKey], + ) -> Result<(), String> { + wait_for(timeout_secs, || { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + let block_rejections = stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { + let rejected_pubkey = rejection + .recover_public_key() + .expect("Failed to recover public key from rejection"); + if expected_signers.contains(&rejected_pubkey) { + Some(rejection) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(block_rejections.len() == expected_signers.len()) + }) + } } fn setup_stx_btc_node ()>( @@ -748,9 +821,22 @@ fn setup_stx_btc_node ()>( info!("Make new BitcoinRegtestController"); let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - info!("Bootstraping..."); - // Should be 201 for other tests? - btc_regtest_controller.bootstrap_chain_to_pks(195, btc_miner_pubkeys); + let epoch_2_5_start = usize::try_from( + naka_conf + .burnchain + .epochs + .as_ref() + .unwrap() + .iter() + .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch25) + .unwrap() + .start_height, + ) + .expect("Failed to get epoch 2.5 start height"); + let bootstrap_block = epoch_2_5_start - 6; + + info!("Bootstraping to block {bootstrap_block}..."); + btc_regtest_controller.bootstrap_chain_to_pks(bootstrap_block, btc_miner_pubkeys); info!("Chain bootstrapped..."); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2c5c8484ca..9f9f8d1a41 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2209,26 +2209,39 @@ fn signers_broadcast_signed_blocks() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); - sleep_ms(10_000); - + let info_before = get_chain_info(&signer_test.running_nodes.conf); + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); signer_test.mine_nakamoto_block(Duration::from_secs(30)); - sleep_ms(10_000); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + wait_for(30, || { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + let info = get_chain_info(&signer_test.running_nodes.conf); + debug!( + "blocks_mined: {},{}, stacks_tip_height: {},{}", + blocks_mined, blocks_before, info.stacks_tip_height, info_before.stacks_tip_height + ); + Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for first nakamoto block to be mined"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - let signer_pushed_before = signer_test .running_nodes .nakamoto_blocks_signer_pushed .load(Ordering::SeqCst); - let info_before = get_chain_info(&signer_test.running_nodes.conf); - // submit a tx so that the miner will mine a block + // submit a tx so that the miner will mine a blockn let sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); @@ -2236,26 +2249,16 @@ fn signers_broadcast_signed_blocks() { debug!("Transaction sent; waiting for block-mining"); - let start = Instant::now(); - let duration = 60; - loop { - let blocks_mined = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); + wait_for(30, || { let signer_pushed = signer_test .running_nodes .nakamoto_blocks_signer_pushed .load(Ordering::SeqCst); - + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); - if blocks_mined > blocks_before - && signer_pushed > signer_pushed_before - && info.stacks_tip_height > info_before.stacks_tip_height - { - break; - } - debug!( "blocks_mined: {},{}, signers_pushed: {},{}, stacks_tip_height: {},{}", blocks_mined, @@ -2265,12 +2268,11 @@ fn signers_broadcast_signed_blocks() { info.stacks_tip_height, info_before.stacks_tip_height ); - - std::thread::sleep(Duration::from_millis(100)); - if start.elapsed() >= Duration::from_secs(duration) { - panic!("Timed out"); - } - } + Ok(blocks_mined > blocks_before + && info.stacks_tip_height > info_before.stacks_tip_height + && signer_pushed > signer_pushed_before) + }) + .expect("Timed out waiting for second nakamoto block to be mined"); signer_test.shutdown(); } @@ -3547,7 +3549,7 @@ fn partial_tenure_fork() { } let num_signers = 5; - let max_nakamoto_tenures = 20; + let max_nakamoto_tenures = 30; let inter_blocks_per_tenure = 5; // setup sender + recipient for a test stx transfer @@ -3590,6 +3592,22 @@ fn partial_tenure_fork() { config.node.local_peer_seed = btc_miner_1_seed.clone(); config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + // Increase the reward cycle length to avoid missing a prepare phase + // while we are intentionally forking. + config.burnchain.pox_reward_length = Some(40); + config.burnchain.pox_prepare_length = Some(10); + + // Move epoch 2.5 and 3.0 earlier, so we have more time for the + // test before re-stacking is required. + if let Some(epochs) = config.burnchain.epochs.as_mut() { + epochs[6].end_height = 131; + epochs[7].start_height = 131; + epochs[7].end_height = 166; + epochs[8].start_height = 166; + } else { + panic!("Expected epochs to be set"); + } }, Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), None, @@ -3663,8 +3681,8 @@ fn partial_tenure_fork() { let mut min_miner_2_tenures = u64::MAX; let mut ignore_block = 0; - while !(miner_1_tenures >= min_miner_1_tenures && miner_2_tenures >= min_miner_2_tenures) { - if btc_blocks_mined > max_nakamoto_tenures { + while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { + if btc_blocks_mined >= max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } @@ -3820,7 +3838,7 @@ fn partial_tenure_fork() { Err(e) => { if e.to_string().contains("TooMuchChaining") { info!("TooMuchChaining error, skipping block"); - continue; + break; } else { panic!("Failed to submit tx: {}", e); } @@ -3934,33 +3952,41 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let send_fee = 180; let nmb_txs = 2; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let short_timeout_secs = 20; let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let long_timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(20); signer_test.boot_to_epoch_3(); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let blocks_before = mined_blocks.load(Ordering::SeqCst); - let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout_secs, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for stacks block N to be mined"); sender_nonce += 1; let info_after = signer_test.stacks_client.get_peer_info().unwrap(); assert_eq!( @@ -3970,15 +3996,17 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + signer_test + .wait_for_block_acceptance( + short_timeout_secs, + &block_n.signer_signature_hash, + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N"); info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); // Make half of the signers reject the block proposal by the miner to ensure its marked globally rejected - let rejecting_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .take(num_signers / 2) - .collect(); + let rejecting_signers: Vec<_> = all_signers.iter().cloned().take(num_signers / 2).collect(); TEST_REJECT_ALL_BLOCK_PROPOSAL .lock() .unwrap() @@ -3988,42 +4016,13 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} to mine block N+1"); - let start_time = Instant::now(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - loop { - let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections = stackerdb_events - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { - let rejected_pubkey = rejection - .recover_public_key() - .expect("Failed to recover public key from rejection"); - if rejecting_signers.contains(&rejected_pubkey) - && rejection.reason_code == RejectCode::TestingDirective - { - Some(rejection) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - if block_rejections.len() == rejecting_signers.len() { - break; - } - assert!( - start_time.elapsed() < long_timeout, - "FAIL: Test timed out while waiting for block proposal rejections", - ); - } + // We cannot gaurantee that ALL signers will reject due to the testing directive as we may hit majority first..So ensure that we only assert that up to the threshold number rejected + signer_test + .wait_for_block_rejections(short_timeout_secs, &rejecting_signers) + .expect("Timed out waiting for block rejection of N+1"); assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst)); let info_after = signer_test.stacks_client.get_peer_info().unwrap(); @@ -4039,13 +4038,17 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .lock() .unwrap() .replace(Vec::new()); - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + wait_for(short_timeout_secs, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height + && test_observer::get_mined_nakamoto_blocks().last().unwrap() != block_n_1) + }) + .expect("Timed out waiting for stacks block N+1' to be mined"); let blocks_after = mined_blocks.load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before + 1); @@ -4055,14 +4058,6 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { info_before.stacks_tip_height + 1 ); // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' - let start_time = Instant::now(); - while test_observer::get_mined_nakamoto_blocks().last().unwrap() == block_n_1 { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1_prime = nakamoto_blocks.last().unwrap(); assert_eq!( @@ -4070,6 +4065,14 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { block_n_1_prime.block_hash ); assert_ne!(block_n_1_prime, block_n_1); + // Verify that all signers accepted the new block proposal + signer_test + .wait_for_block_acceptance( + short_timeout_secs, + &block_n_1_prime.signer_signature_hash, + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+1'"); } #[test] @@ -4104,37 +4107,48 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { let send_amt = 100; let send_fee = 180; let nmb_txs = 3; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new( num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let long_timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(30); + let short_timeout = 30; signer_test.boot_to_epoch_3(); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block + + // submit a tx so that the miner will mine a stacks block N let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { - let info_after = signer_test + + wait_for(short_timeout, || { + Ok(signer_test .stacks_client .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + .expect("Failed to get peer info") + .stacks_tip_height + > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block to be mined and processed"); + .expect("Timed out waiting for N to be mined and processed"); - sender_nonce += 1; let info_after = signer_test .stacks_client .get_peer_info() @@ -4143,27 +4157,22 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); - // Ensure that the block was accepted globally so the stacks tip has not advanced to N + // Ensure that the block was accepted globally so the stacks tip has advanced to N let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); - // Make less than 30% of the signers reject the block to ensure it is marked globally accepted - let rejecting_signers: Vec<_> = signer_test - .signer_stacks_private_keys + // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted + let rejecting_signers: Vec<_> = all_signers .iter() - .map(StacksPublicKey::from_private) + .cloned() .take(num_signers * 3 / 10) .collect(); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -4171,58 +4180,34 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .unwrap() .replace(rejecting_signers.clone()); test_observer::clear(); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} in to mine block N+1"); - let start_time = Instant::now(); + + // submit a tx so that the miner will mine a stacks block N+1 let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - wait_for(short_timeout.as_secs(), || { - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N+1"); + + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block to be mined and processed"); - loop { - let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections = stackerdb_events - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { - let rejected_pubkey = rejection - .recover_public_key() - .expect("Failed to recover public key from rejection"); - if rejecting_signers.contains(&rejected_pubkey) - && rejection.reason_code == RejectCode::TestingDirective - { - Some(rejection) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - if block_rejections.len() == rejecting_signers.len() { - break; - } - assert!( - start_time.elapsed() < long_timeout, - "FAIL: Test timed out while waiting for block proposal rejections", - ); - } + .expect("Timed out waiting for stacks block N+1 to be mined"); + + signer_test + .wait_for_block_rejections(short_timeout, &rejecting_signers) + .expect("Timed out waiting for block rejection of N+1"); + // Assert the block was mined let info_after = signer_test .stacks_client @@ -4233,23 +4218,23 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers - rejecting_signers.len()); - // Ensure that the block was accepted globally so the stacks tip has advanced to N+1 + // Ensure that the block was still accepted globally so the stacks tip has advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1 = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); assert_ne!(block_n_1, block_n); + signer_test + .wait_for_block_acceptance( + short_timeout, + &block_n_1.signer_signature_hash, + &all_signers[num_signers * 3 / 10 + 1..], + ) + .expect("Timed out waiting for block acceptance of N+1"); + info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); + // Ensure that all signers accept the block proposal N+2 let info_before = signer_test.stacks_client.get_peer_info().unwrap(); let blocks_before = mined_blocks.load(Ordering::SeqCst); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -4257,18 +4242,21 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .unwrap() .replace(Vec::new()); + // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N+2"); - wait_for(short_timeout.as_secs(), || { - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) }) - .expect("Timed out waiting for block to be mined and processed"); + .expect("Timed out waiting for stacks block N+2 to be mined"); let blocks_after = mined_blocks.load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before + 1); @@ -4277,20 +4265,20 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height, ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_2 = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); assert_ne!(block_n_2, block_n_1); + + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance( + short_timeout, + &block_n_2.signer_signature_hash, + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+2"); } #[test] @@ -4330,8 +4318,13 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { num_signers, vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], ); + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(30); + let short_timeout = 30; signer_test.boot_to_epoch_3(); info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); @@ -4340,13 +4333,15 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); + // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4355,27 +4350,24 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }) .expect("Timed out waiting for block to be mined and processed"); - sender_nonce += 1; + // Ensure that the block was accepted globally so the stacks tip has advanced to N let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - assert_eq!( info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); // Make more than >70% of the signers ignore the block proposal to ensure it it is not globally accepted/rejected - let ignoring_signers: Vec<_> = signer_test - .signer_stacks_private_keys + let ignoring_signers: Vec<_> = all_signers .iter() - .map(StacksPublicKey::from_private) + .cloned() .take(num_signers * 7 / 10) .collect(); TEST_IGNORE_ALL_BLOCK_PROPOSALS @@ -4384,16 +4376,19 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .replace(ignoring_signers.clone()); // Clear the stackerdb chunks test_observer::clear(); + + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to attempt to mine block N+1"); let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - wait_for(short_timeout.as_secs(), || { + wait_for(short_timeout, || { let ignored_signers = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) @@ -4413,6 +4408,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { Ok(ignored_signers.len() + ignoring_signers.len() == num_signers) }) .expect("FAIL: Timed out waiting for block proposal acceptance"); + let blocks_after = mined_blocks.load(Ordering::SeqCst); let info_after = signer_test .stacks_client @@ -4420,13 +4416,14 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .expect("Failed to get peer info"); assert_eq!(blocks_after, blocks_before); assert_eq!(info_after, info_before); - // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1 + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1 = nakamoto_blocks.last().unwrap(); assert_ne!(block_n_1, block_n); assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); info!("------------------------- Starting Tenure B -------------------------"); + // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and( @@ -4438,23 +4435,19 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { }, ) .unwrap(); + info!( "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" ); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - // submit a tx so that the miner will mine a stacks block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout.as_secs(), || { + TEST_IGNORE_ALL_BLOCK_PROPOSALS + .lock() + .unwrap() + .replace(Vec::new()); + wait_for(short_timeout, || { let info_after = signer_test .stacks_client .get_peer_info() @@ -4471,15 +4464,6 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nmb_signatures = signer_test - .stacks_client - .get_tenure_tip(&info_after.stacks_tip_consensus_hash) - .expect("Failed to get tip") - .as_stacks_nakamoto() - .expect("Not a Nakamoto block") - .signer_signature - .len(); - assert_eq!(nmb_signatures, num_signers); // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); @@ -4489,6 +4473,11 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { block_n_1_prime.block_hash ); assert_ne!(block_n_1_prime, block_n); + + // Make sure that ALL signers accepted the block proposal even though they signed a conflicting one in prior tenure + signer_test + .wait_for_block_acceptance(30, &block_n_1_prime.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N+1'"); } #[test] @@ -4736,7 +4725,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Allowing miner to accept block responses again. "); TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); - info!("Allowing singers to broadcast block N+1 to the miner"); + info!("Allowing signers to broadcast block N+1 to the miner"); TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); // Assert the N+1' block was rejected From b734407d50cbbad6f41a128a2d0ae766cb6dac6a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 23 Sep 2024 13:26:26 -0700 Subject: [PATCH 1199/1400] CRC: add response testing and fix corresponding bug Signed-off-by: Jacinta Ferrant --- stackslib/src/net/api/getsigner.rs | 4 +- stackslib/src/net/api/tests/getsigner.rs | 53 +++++++++++++++++++ .../src/tests/nakamoto_integrations.rs | 9 ++-- 3 files changed, 60 insertions(+), 6 deletions(-) diff --git a/stackslib/src/net/api/getsigner.rs b/stackslib/src/net/api/getsigner.rs index 90bcc796bf..a09c051b24 100644 --- a/stackslib/src/net/api/getsigner.rs +++ b/stackslib/src/net/api/getsigner.rs @@ -151,7 +151,7 @@ impl RPCRequestHandler for GetSignerRequestHandler { ) }); - let response = match result { + let blocks_signed = match result { Ok(response) => response, Err(error) => { return StacksHttpResponse::new_error( @@ -163,6 +163,8 @@ impl RPCRequestHandler for GetSignerRequestHandler { } }; + let response = GetSignerResponse { blocks_signed }; + let mut preamble = HttpResponsePreamble::ok_json(&preamble); preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); let body = HttpResponseContents::try_from_json(&response)?; diff --git a/stackslib/src/net/api/tests/getsigner.rs b/stackslib/src/net/api/tests/getsigner.rs index 92e30057d7..ffaa486f27 100644 --- a/stackslib/src/net/api/tests/getsigner.rs +++ b/stackslib/src/net/api/tests/getsigner.rs @@ -94,3 +94,56 @@ fn test_try_parse_request() { } } } + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let test_observer = TestEventObserver::new(); + let rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); + // Copy pasta of the test setup values + let cycle_num = 5; + let public_key = StacksPublicKey::from_hex( + "0243311589af63c2adda04fcd7792c038a05c12a4fe40351b3eb1612ff6b2e5a0e", + ) + .unwrap(); + + let random_private_key = StacksPrivateKey::new(); + let random_public_key = StacksPublicKey::from_private(&random_private_key); + + let nakamoto_chain_tip = rpc_test.canonical_tip.clone(); + + let mut requests = vec![]; + + // Query existing signer + let info = StacksHttpRequest::new_getsigner( + addr.into(), + &public_key, + cycle_num, + TipRequest::SpecificTip(nakamoto_chain_tip), + ); + requests.push(info); + + // query random signer that doesn't exist + let request = StacksHttpRequest::new_getsigner( + addr.into(), + &random_public_key, + cycle_num, + TipRequest::SpecificTip(nakamoto_chain_tip), + ); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // Existing signer + let response = responses.remove(0); + info!("response: {:?}", &response); + let signer_response = response.decode_signer().unwrap(); + assert_eq!(signer_response.blocks_signed, 40); + + // Signer doesn't exist so it should not have signed anything + let response = responses.remove(0); + info!("response: {:?}", &response); + let signer_response = response.decode_signer().unwrap(); + assert_eq!(signer_response.blocks_signed, 0); +} diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 51e73751eb..4964c199cf 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -67,6 +67,7 @@ use stacks::core::{ use stacks::libstackerdb::SlotMetadata; use stacks::net::api::callreadonly::CallReadOnlyRequestBody; use stacks::net::api::get_tenures_fork_info::TenureForkingInfo; +use stacks::net::api::getsigner::GetSignerResponse; use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, @@ -8555,11 +8556,9 @@ fn v3_signer_api_endpoint() { info!("Send request: GET {url}"); reqwest::blocking::get(url) .unwrap_or_else(|e| panic!("GET request failed: {e}")) - .text() - .inspect(|response| info!("Recieved response: GET {url} -> {response}")) - .expect("Empty response") - .parse::() - .unwrap_or_else(|e| panic!("Failed to parse response as `u64`: {e}")) + .json::() + .unwrap() + .blocks_signed }; // Check reward cycle 1, should be 0 (pre-nakamoto) From 3d9e13ab9d7fb625275105175f69cc9df7f1ca6d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 23 Sep 2024 19:21:52 -0400 Subject: [PATCH 1200/1400] docs: fix openapi.yaml --- docs/rpc/openapi.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index 611ec1cb7b..c4dd06721c 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -731,7 +731,6 @@ paths: description: Hex-encoded compressed Secp256k1 public key of signer schema: type: string - parameters: - name: cycle_number in: path required: true From c8cda2945aa1dcb34f0669278c0cd7c73006cba8 Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 24 Sep 2024 15:52:47 +0200 Subject: [PATCH 1201/1400] fix: change pox-set-offset behavior to match the correct order --- pox-locking/src/events.rs | 5 +++-- stackslib/src/chainstate/stacks/boot/pox_4_tests.rs | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index 0a1dc9d3c4..f757b54913 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -117,9 +117,10 @@ fn create_event_info_data_code( // `prepare_offset` is 1 or 0, depending on whether current execution is in a prepare phase or not // // "is-in-next-pox-set" == effective-height <= (reward-length - prepare-length) - // "<=" since the txs of the first block of the prepare phase are considered in the pox-set + // "<" since the txs of the first block of the prepare phase are NOT considered in the pox-set, + // the pox-set is locked in the first block of the prepare phase, before the transactions of that block are run. let pox_set_offset = r#" - (pox-set-offset (if (<= + (pox-set-offset (if (< (mod (- %height% (var-get first-burnchain-block-height)) (var-get pox-reward-cycle-length)) (- (var-get pox-reward-cycle-length) (var-get pox-prepare-cycle-length)) ) u0 u1)) diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 0968cc4de3..8fee5bd5b3 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -2462,7 +2462,7 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } - // produce blocks until the we're 1 before the prepare phase (first block of prepare-phase not yet mined) + // produce blocks until the we're 1 before the prepare phase (first block of prepare-phase not yet mined, whatever txs we create now won't be included in the reward set) while !burnchain.is_in_prepare_phase(get_tip(peer.sortdb.as_ref()).block_height + 1) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); } @@ -2519,7 +2519,7 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { let steph_stacking_receipt = txs.get(&steph_stacking.txid()).unwrap().clone(); assert_eq!(steph_stacking_receipt.events.len(), 2); let steph_stacking_op_data = HashMap::from([ - ("start-cycle-id", Value::UInt(next_cycle)), + ("start-cycle-id", Value::UInt(next_cycle + 1)), // +1 because steph stacked in the block before the prepare phase (too late) ( "end-cycle-id", Value::some(Value::UInt(next_cycle + steph_lock_period)).unwrap(), From 476f47f2383c11454596ed670f93d3832e005317 Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 24 Sep 2024 16:24:13 +0200 Subject: [PATCH 1202/1400] chore: update comment --- pox-locking/src/events.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index f757b54913..04e3955dad 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -116,7 +116,7 @@ fn create_event_info_data_code( // If a given burn block height is in a prepare phase, then the stacker will be in the _next_ reward cycle, so bump the cycle by 1 // `prepare_offset` is 1 or 0, depending on whether current execution is in a prepare phase or not // - // "is-in-next-pox-set" == effective-height <= (reward-length - prepare-length) + // "is-in-next-pox-set" == effective-height < (reward-length - prepare-length) // "<" since the txs of the first block of the prepare phase are NOT considered in the pox-set, // the pox-set is locked in the first block of the prepare phase, before the transactions of that block are run. let pox_set_offset = r#" From 2f64e1fa7fe19bd8035fc1c548b9f1451bb51f62 Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 24 Sep 2024 16:24:51 +0200 Subject: [PATCH 1203/1400] chore: update comment --- pox-locking/src/events.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index 04e3955dad..2e80ff8761 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -116,7 +116,7 @@ fn create_event_info_data_code( // If a given burn block height is in a prepare phase, then the stacker will be in the _next_ reward cycle, so bump the cycle by 1 // `prepare_offset` is 1 or 0, depending on whether current execution is in a prepare phase or not // - // "is-in-next-pox-set" == effective-height < (reward-length - prepare-length) + // "is-in-next-pox-set" == effective-height < (cycle-length - prepare-length) // "<" since the txs of the first block of the prepare phase are NOT considered in the pox-set, // the pox-set is locked in the first block of the prepare phase, before the transactions of that block are run. let pox_set_offset = r#" From d1281115b8bf7523543206b3711eae59a48fe015 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 24 Sep 2024 11:35:51 -0400 Subject: [PATCH 1204/1400] test: add `BITCOIND_TEST=1` for running tests in VSCode --- .vscode/settings.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index e648ed3e54..ab8db95f5d 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,4 +1,7 @@ { "lldb.adapterType": "native", - "lldb.launch.sourceLanguages": ["rust"] + "lldb.launch.sourceLanguages": ["rust"], + "rust-analyzer.runnables.extraEnv": { + "BITCOIND_TEST": "1" + } } \ No newline at end of file From 3260a2ccc32d229db36bf62c09e55c7658769e0e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 24 Sep 2024 09:18:08 -0700 Subject: [PATCH 1205/1400] Add signing_in_0th_tenure_of_reward_cycle test Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 113 +++++++++++++++++++++ 2 files changed, 114 insertions(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a6d4dff460..8986594e0e 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -114,6 +114,7 @@ jobs: - tests::signer::v0::partial_tenure_fork - tests::signer::v0::mine_2_nakamoto_reward_cycles - tests::signer::v0::signer_set_rollover + - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9f9f8d1a41..9dd8b96165 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -35,6 +35,7 @@ use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, S use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; +use stacks::net::api::getsigner::GetSignerResponse; use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; @@ -4813,3 +4814,115 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); assert_ne!(block_n_2, block_n); } + +#[test] +#[ignore] +/// Test that signers can successfully sign a block proposal in the 0th tenure of a reward cycle +/// This ensures there is no race condition in the /v2/pox endpoint which could prevent it from updating +/// on time, possibly triggering an "off by one" like behaviour in the 0th tenure. +/// +fn signing_in_0th_tenure_of_reward_cycle() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + ); + let signer_public_keys = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); + let long_timeout = Duration::from_secs(200); + signer_test.boot_to_epoch_3(); + let curr_reward_cycle = signer_test.get_current_reward_cycle(); + let next_reward_cycle = curr_reward_cycle + 1; + // Mine until the boundary of the first full Nakamoto reward cycles (epoch 3 starts in the middle of one) + let next_reward_cycle_height_boundary = signer_test + .running_nodes + .btc_regtest_controller + .get_burnchain() + .reward_cycle_to_block_height(next_reward_cycle) + .saturating_sub(1); + + info!("------------------------- Advancing to {next_reward_cycle} Boundary at Block {next_reward_cycle_height_boundary} -------------------------"); + signer_test.run_until_burnchain_height_nakamoto( + long_timeout, + next_reward_cycle_height_boundary, + num_signers, + ); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let get_v3_signer = |pubkey: &Secp256k1PublicKey, reward_cycle: u64| { + let url = &format!( + "{http_origin}/v3/signer/{pk}/{reward_cycle}", + pk = pubkey.to_hex() + ); + info!("Send request: GET {url}"); + reqwest::blocking::get(url) + .unwrap_or_else(|e| panic!("GET request failed: {e}")) + .json::() + .unwrap() + .blocks_signed + }; + + assert_eq!(signer_test.get_current_reward_cycle(), curr_reward_cycle); + + for signer in &signer_public_keys { + let blocks_signed = get_v3_signer(&signer, next_reward_cycle); + assert_eq!(blocks_signed, 0); + } + + info!("------------------------- Enter Reward Cycle {next_reward_cycle} -------------------------"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(true), + ) + .unwrap(); + + for signer in &signer_public_keys { + let blocks_signed = get_v3_signer(&signer, next_reward_cycle); + assert_eq!(blocks_signed, 0); + } + + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + // submit a tx so that the miner will mine a stacks block in the 0th block of the new reward cycle + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let _tx = submit_tx(&http_origin, &transfer_tx); + + wait_for(30, || { + Ok(signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst) + > blocks_before) + }) + .unwrap(); + + for signer in &signer_public_keys { + let blocks_signed = get_v3_signer(&signer, next_reward_cycle); + assert_eq!(blocks_signed, 1); + } + assert_eq!(signer_test.get_current_reward_cycle(), next_reward_cycle); +} From ca392482a17b9e9183ec4dbc09352f0f43e5bec3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Sep 2024 13:45:59 -0400 Subject: [PATCH 1206/1400] fix: query nakamoto burnchain operations by tenure-start block ID, and only do so on tenure-start (not tenure-extend) --- stackslib/src/chainstate/nakamoto/mod.rs | 174 +++++++++++++++++++++-- 1 file changed, 160 insertions(+), 14 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 53b1c48555..e97fefafff 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::fs; use std::ops::{Deref, DerefMut, Range}; use std::path::PathBuf; @@ -104,7 +104,9 @@ use crate::clarity_vm::clarity::{ ClarityInstance, ClarityTransactionConnection, Error as ClarityError, PreCommitClarityBlock, }; use crate::clarity_vm::database::SortitionDBRef; -use crate::core::{BOOT_BLOCK_HASH, NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD}; +use crate::core::{ + BOOT_BLOCK_HASH, BURNCHAIN_TX_SEARCH_WINDOW, NAKAMOTO_SIGNER_BLOCK_APPROVAL_THRESHOLD, +}; use crate::net::stackerdb::{StackerDBConfig, MINER_SLOT_COUNT}; use crate::net::Error as net_error; use crate::util_lib::boot; @@ -3251,14 +3253,18 @@ impl NakamotoChainState { if let Some(block_reward) = block_reward { StacksChainState::insert_miner_payment_schedule(headers_tx.deref_mut(), block_reward)?; } - StacksChainState::store_burnchain_txids( - headers_tx.deref(), - &index_block_hash, - burn_stack_stx_ops, - burn_transfer_stx_ops, - burn_delegate_stx_ops, - burn_vote_for_aggregate_key_ops, - )?; + + // NOTE: this is a no-op if the block isn't a tenure-start block + if new_tenure { + StacksChainState::store_burnchain_txids( + headers_tx.deref(), + &index_block_hash, + burn_stack_stx_ops, + burn_transfer_stx_ops, + burn_delegate_stx_ops, + burn_vote_for_aggregate_key_ops, + )?; + } if let Some(matured_miner_payouts) = mature_miner_payouts_opt { let rewarded_miner_block_id = StacksBlockId::new( @@ -3360,6 +3366,145 @@ impl NakamotoChainState { .map_err(ChainstateError::from) } + /// Find all of the TXIDs of Stacks-on-burnchain operations processed in the given Stacks fork. + /// In Nakamoto, we index these TXIDs by the tenure-start block ID + pub(crate) fn get_burnchain_txids_in_ancestor_tenures( + conn: &mut SDBI, + tip_consensus_hash: &ConsensusHash, + tip_block_hash: &BlockHeaderHash, + search_window: u64, + ) -> Result, ChainstateError> { + let tip = StacksBlockId::new(tip_consensus_hash, tip_block_hash); + let mut cursor = tip_consensus_hash.clone(); + let mut ret = HashSet::new(); + for _ in 0..search_window { + let Some(tenure_start_block_id) = conn.get_tenure_start_block_id(&tip, &cursor)? else { + break; + }; + let txids = StacksChainState::get_burnchain_txids_for_block( + conn.sqlite(), + &tenure_start_block_id, + )?; + ret.extend(txids.into_iter()); + + let Some(parent_tenure_id) = conn.get_parent_tenure_consensus_hash(&tip, &cursor)? + else { + break; + }; + + cursor = parent_tenure_id; + } + Ok(ret) + } + + /// Get all Stacks-on-burnchain operations that we haven't processed yet + pub(crate) fn get_stacks_on_burnchain_operations( + conn: &mut SDBI, + parent_consensus_hash: &ConsensusHash, + parent_block_hash: &BlockHeaderHash, + sortdb_conn: &Connection, + burn_tip: &BurnchainHeaderHash, + burn_tip_height: u64, + ) -> Result< + ( + Vec, + Vec, + Vec, + Vec, + ), + ChainstateError, + > { + let cur_epoch = SortitionDB::get_stacks_epoch(sortdb_conn, burn_tip_height)? + .expect("FATAL: no epoch defined for current burnchain tip height"); + + // only consider transactions in Stacks 3.0 + if cur_epoch.epoch_id < StacksEpochId::Epoch30 { + return Ok((vec![], vec![], vec![], vec![])); + } + + let epoch_start_height = cur_epoch.start_height; + + let search_window: u8 = + if epoch_start_height + u64::from(BURNCHAIN_TX_SEARCH_WINDOW) > burn_tip_height { + burn_tip_height + .saturating_sub(epoch_start_height) + .try_into() + .expect("FATAL: search window exceeds u8") + } else { + BURNCHAIN_TX_SEARCH_WINDOW + }; + + debug!( + "Search the last {} sortitions for burnchain-hosted stacks operations before {} ({})", + search_window, burn_tip, burn_tip_height + ); + let ancestor_burnchain_header_hashes = SortitionDB::get_ancestor_burnchain_header_hashes( + sortdb_conn, + burn_tip, + search_window.into(), + )?; + let processed_burnchain_txids = + NakamotoChainState::get_burnchain_txids_in_ancestor_tenures( + conn, + parent_consensus_hash, + parent_block_hash, + search_window.into(), + )?; + + // Find the *new* transactions -- the ones that we *haven't* seen in this Stacks + // fork yet. Note that we search for the ones that we have seen by searching back + // `BURNCHAIN_TX_SEARCH_WINDOW` tenures, whose sortitions may span more + // than `BURNCHAIN_TX_SEARCH_WINDOW` burnchain blocks. The inclusion of txids for + // burnchain transactions in the latter query is not a problem, because these txids + // are used to *exclude* transactions from the last `BURNCHAIN_TX_SEARCH_WINDOW` + // burnchain blocks. These excluded txids, if they were mined outside of this + // window, are *already* excluded. + + let mut all_stacking_burn_ops = vec![]; + let mut all_transfer_burn_ops = vec![]; + let mut all_delegate_burn_ops = vec![]; + let mut all_vote_for_aggregate_key_ops = vec![]; + + // go from oldest burn header hash to newest + for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() { + let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh)?; + let transfer_ops = SortitionDB::get_transfer_stx_ops(sortdb_conn, ancestor_bhh)?; + let delegate_ops = SortitionDB::get_delegate_stx_ops(sortdb_conn, ancestor_bhh)?; + let vote_for_aggregate_key_ops = + SortitionDB::get_vote_for_aggregate_key_ops(sortdb_conn, ancestor_bhh)?; + + for stacking_op in stacking_ops.into_iter() { + if !processed_burnchain_txids.contains(&stacking_op.txid) { + all_stacking_burn_ops.push(stacking_op); + } + } + + for transfer_op in transfer_ops.into_iter() { + if !processed_burnchain_txids.contains(&transfer_op.txid) { + all_transfer_burn_ops.push(transfer_op); + } + } + + for delegate_op in delegate_ops.into_iter() { + if !processed_burnchain_txids.contains(&delegate_op.txid) { + all_delegate_burn_ops.push(delegate_op); + } + } + + for vote_op in vote_for_aggregate_key_ops.into_iter() { + if !processed_burnchain_txids.contains(&vote_op.txid) { + all_vote_for_aggregate_key_ops.push(vote_op); + } + } + } + Ok(( + all_stacking_burn_ops, + all_transfer_burn_ops, + all_delegate_burn_ops, + all_vote_for_aggregate_key_ops, + )) + } + /// Begin block-processing and return all of the pre-processed state within a /// `SetupBlockResult`. /// @@ -3432,10 +3577,11 @@ impl NakamotoChainState { }; let (stacking_burn_ops, transfer_burn_ops, delegate_burn_ops, vote_for_agg_key_ops) = - if new_tenure || tenure_extend { - StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops( - chainstate_tx, - &parent_index_hash, + if new_tenure { + NakamotoChainState::get_stacks_on_burnchain_operations( + chainstate_tx.as_tx(), + &parent_consensus_hash, + &parent_header_hash, sortition_dbconn.sqlite_conn(), &burn_header_hash, burn_header_height.into(), From 4e5518114e39cff6b0dcaf08b42587d0e8f9555d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Sep 2024 13:46:33 -0400 Subject: [PATCH 1207/1400] chore: document the 2.x-nature of loading burnchain operations for Stacks --- stackslib/src/chainstate/stacks/db/mod.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index dfba727a3e..857bfaead4 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -867,6 +867,8 @@ const CHAINSTATE_SCHEMA_3: &'static [&'static str] = &[ // proessed r#" CREATE TABLE burnchain_txids( + -- in epoch 2.x, this is the index block hash of the Stacks block. + -- in epoch 3.x, this is the index block hash of the tenure-start block. index_block_hash TEXT PRIMARY KEY, -- this is a JSON-encoded list of txids txids TEXT NOT NULL @@ -2494,7 +2496,7 @@ impl StacksChainState { } /// Get the burnchain txids for a given index block hash - fn get_burnchain_txids_for_block( + pub(crate) fn get_burnchain_txids_for_block( conn: &Connection, index_block_hash: &StacksBlockId, ) -> Result, Error> { @@ -2516,6 +2518,7 @@ impl StacksChainState { } /// Get the txids of the burnchain operations applied in the past N Stacks blocks. + /// Only works for epoch 2.x pub fn get_burnchain_txids_in_ancestors( conn: &Connection, index_block_hash: &StacksBlockId, @@ -2532,7 +2535,10 @@ impl StacksChainState { Ok(ret) } - /// Store all on-burnchain STX operations' txids by index block hash + /// Store all on-burnchain STX operations' txids by index block hash. + /// `index_block_hash` is the tenure-start block. + /// * For epoch 2.x, this is simply the block ID + /// * for epoch 3.x and later, this is the first block in the tenure. pub fn store_burnchain_txids( tx: &DBTx, index_block_hash: &StacksBlockId, From 8ddddae02251dbc6ad5e8a6ddc6cb1e2cbab046a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 24 Sep 2024 11:11:36 -0700 Subject: [PATCH 1208/1400] Add positive integer for pox_sync_sample_secs and wait_on_interim_blocks for multiple_miners* tests Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/mod.rs | 3 -- stacks-signer/src/client/stacks_client.rs | 16 +++--- .../src/tests/nakamoto_integrations.rs | 2 +- testnet/stacks-node/src/tests/signer/mod.rs | 12 ++--- testnet/stacks-node/src/tests/signer/v0.rs | 49 ++++++++++--------- 5 files changed, 37 insertions(+), 45 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 5dedfe82e3..dd8b1527cc 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -89,9 +89,6 @@ pub enum ClientError { /// Invalid response from the stacks node #[error("Invalid response from the stacks node: {0}")] InvalidResponse(String), - /// A successful sortition has not occurred yet - #[error("The Stacks chain has not processed any successful sortitions yet")] - NoSortitionOnChain, /// A successful sortition's info response should be parseable into a SortitionState #[error("A successful sortition's info response should be parseable into a SortitionState")] UnexpectedSortitionInfo, diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 7b490144fc..ea84835292 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -33,7 +33,7 @@ use blockstack_lib::net::api::get_tenures_fork_info::{ use blockstack_lib::net::api::getaccount::AccountEntryResponse; use blockstack_lib::net::api::getpoxinfo::RPCPoxInfoData; use blockstack_lib::net::api::getsortition::{SortitionInfo, RPC_SORTITION_INFO_PATH}; -use blockstack_lib::net::api::getstackers::{GetStackersErrors, GetStackersResponse}; +use blockstack_lib::net::api::getstackers::GetStackersResponse; use blockstack_lib::net::api::postblock::StacksBlockAcceptedData; use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; use blockstack_lib::net::api::postblock_v3; @@ -84,6 +84,7 @@ pub struct StacksClient { #[derive(Deserialize)] struct GetStackersErrorResp { + #[allow(dead_code)] err_type: String, err_msg: String, } @@ -655,14 +656,11 @@ impl StacksClient { warn!("Failed to parse the GetStackers error response: {e}"); backoff::Error::permanent(e.into()) })?; - if error_data.err_type == GetStackersErrors::NOT_AVAILABLE_ERR_TYPE { - Err(backoff::Error::permanent(ClientError::NoSortitionOnChain)) - } else { - warn!("Got error response ({status}): {}", error_data.err_msg); - Err(backoff::Error::permanent(ClientError::RequestFailure( - status, - ))) - } + + warn!("Got error response ({status}): {}", error_data.err_msg); + Err(backoff::Error::permanent(ClientError::RequestFailure( + status, + ))) }; let stackers_response = retry_with_exponential_backoff::<_, ClientError, GetStackersResponse>(send_request)?; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b95eed7fa5..665d880457 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -584,7 +584,7 @@ pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress conf.burnchain.magic_bytes = MagicBytes::from(['T' as u8, '3' as u8].as_ref()); conf.burnchain.poll_time_secs = 1; - conf.node.pox_sync_sample_secs = 0; + conf.node.pox_sync_sample_secs = 5; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a25a010465..421ffbb53f 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -375,14 +375,10 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Date: Tue, 24 Sep 2024 13:38:17 -0700 Subject: [PATCH 1209/1400] feat: add integration test for stx-transfer and delegate burn ops in Nakamoto --- .github/workflows/bitcoin-tests.yml | 2 +- .../src/tests/nakamoto_integrations.rs | 170 +++++++++++++++++- 2 files changed, 167 insertions(+), 5 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a6d4dff460..839ada1ef9 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -114,7 +114,7 @@ jobs: - tests::signer::v0::partial_tenure_fork - tests::signer::v0::mine_2_nakamoto_reward_cycles - tests::signer::v0::signer_set_rollover - - tests::nakamoto_integrations::stack_stx_burn_op_integration_test + - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state - tests::nakamoto_integrations::check_block_times diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b95eed7fa5..bd669c7180 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -34,7 +34,8 @@ use rand::RngCore; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ - BlockstackOperationType, PreStxOp, StackStxOp, VoteForAggregateKeyOp, + BlockstackOperationType, DelegateStxOp, PreStxOp, StackStxOp, TransferStxOp, + VoteForAggregateKeyOp, }; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::coordinator::OnChainRewardSetProvider; @@ -4012,7 +4013,17 @@ fn follower_bootup_across_multiple_cycles() { #[test] #[ignore] -fn stack_stx_burn_op_integration_test() { +/// Test out various burn operations being processed in Nakamoto. +/// +/// There are 4 burn ops submitted: +/// +/// - stx-transfer +/// - delegate-stx +/// - stack-stx +/// +/// Additionally, a stack-stx without a signer key is submitted, which should +/// not be processed in Nakamoto. +fn burn_ops_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -4027,10 +4038,26 @@ fn stack_stx_burn_op_integration_test() { let signer_sk_2 = Secp256k1PrivateKey::new(); let signer_addr_2 = tests::to_addr(&signer_sk_2); + let stacker_sk_1 = Secp256k1PrivateKey::new(); + let stacker_addr_1 = tests::to_addr(&stacker_sk_1); + + let stacker_sk_2 = Secp256k1PrivateKey::new(); + let stacker_addr_2 = tests::to_addr(&stacker_sk_2); + let mut signers = TestSigners::new(vec![signer_sk_1.clone()]); let stacker_sk = setup_stacker(&mut naka_conf); + // Add the initial balances to the other accounts + naka_conf.add_initial_balance( + PrincipalData::from(stacker_addr_1.clone()).to_string(), + 1000000, + ); + naka_conf.add_initial_balance( + PrincipalData::from(stacker_addr_2.clone()).to_string(), + 1000000, + ); + test_observer::spawn(); let observer_port = test_observer::EVENT_OBSERVER_PORT; naka_conf.events_observers.insert(EventObserverConfig { @@ -4135,7 +4162,49 @@ fn stack_stx_burn_op_integration_test() { .is_ok(), "Pre-stx operation should submit successfully" ); - info!("Submitted 2 pre-stx ops at block {block_height}, mining a few blocks..."); + + let mut miner_signer_3 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); + info!("Submitting third pre-stx op"); + let pre_stx_op_3 = PreStxOp { + output: stacker_addr_1.clone(), + txid: Txid([0u8; 32]), + vtxindex: 0, + block_height: 0, + burn_header_hash: BurnchainHeaderHash([0u8; 32]), + }; + assert!( + btc_regtest_controller + .submit_operation( + StacksEpochId::Epoch30, + BlockstackOperationType::PreStx(pre_stx_op_3), + &mut miner_signer_3, + 1 + ) + .is_ok(), + "Pre-stx operation should submit successfully" + ); + + info!("Submitting fourth pre-stx op"); + let mut miner_signer_4 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); + let pre_stx_op_4 = PreStxOp { + output: stacker_addr_2.clone(), + txid: Txid([0u8; 32]), + vtxindex: 0, + block_height: 0, + burn_header_hash: BurnchainHeaderHash([0u8; 32]), + }; + assert!( + btc_regtest_controller + .submit_operation( + StacksEpochId::Epoch30, + BlockstackOperationType::PreStx(pre_stx_op_4), + &mut miner_signer_4, + 1 + ) + .is_ok(), + "Pre-stx operation should submit successfully" + ); + info!("Submitted 4 pre-stx ops at block {block_height}, mining a few blocks..."); // Mine until the next prepare phase let block_height = btc_regtest_controller.get_headers_height(); @@ -4216,6 +4285,8 @@ fn stack_stx_burn_op_integration_test() { let mut signer_burnop_signer_1 = BurnchainOpSigner::new(signer_sk_1.clone(), false); let mut signer_burnop_signer_2 = BurnchainOpSigner::new(signer_sk_2.clone(), false); + let mut stacker_burnop_signer_1 = BurnchainOpSigner::new(stacker_sk_1.clone(), false); + let mut stacker_burnop_signer_2 = BurnchainOpSigner::new(stacker_sk_2.clone(), false); info!( "Before stack-stx op, signer 1 total: {}", @@ -4247,6 +4318,55 @@ fn stack_stx_burn_op_integration_test() { info!("Signer 1 addr: {}", signer_addr_1.to_b58()); info!("Signer 2 addr: {}", signer_addr_2.to_b58()); + info!("Submitting transfer STX op"); + let transfer_stx_op = TransferStxOp { + sender: stacker_addr_1.clone(), + recipient: stacker_addr_2.clone(), + transfered_ustx: 10000, + memo: vec![], + txid: Txid([0u8; 32]), + vtxindex: 0, + block_height: 0, + burn_header_hash: BurnchainHeaderHash([0u8; 32]), + }; + assert!( + btc_regtest_controller + .submit_operation( + StacksEpochId::Epoch30, + BlockstackOperationType::TransferStx(transfer_stx_op), + &mut stacker_burnop_signer_1, + 1 + ) + .is_ok(), + "Transfer STX operation should submit successfully" + ); + + info!("Submitting delegate STX op"); + let del_stx_op = DelegateStxOp { + sender: stacker_addr_2.clone(), + delegate_to: stacker_addr_1.clone(), + reward_addr: None, + delegated_ustx: 100_000, + // to be filled in + txid: Txid([0u8; 32]), + vtxindex: 0, + block_height: 0, + burn_header_hash: BurnchainHeaderHash([0u8; 32]), + until_burn_height: None, + }; + + assert!( + btc_regtest_controller + .submit_operation( + StacksEpochId::Epoch30, + BlockstackOperationType::DelegateStx(del_stx_op), + &mut stacker_burnop_signer_2, + 1 + ) + .is_ok(), + "Delegate STX operation should submit successfully" + ); + let pox_info = get_pox_info(&http_origin).unwrap(); let min_stx = pox_info.next_cycle.min_threshold_ustx; @@ -4318,6 +4438,8 @@ fn stack_stx_burn_op_integration_test() { } let mut stack_stx_found = false; + let mut transfer_stx_found = false; + let mut delegate_stx_found = false; let mut stack_stx_burn_op_tx_count = 0; let blocks = test_observer::get_blocks(); info!("stack event observer num blocks: {:?}", blocks.len()); @@ -4332,6 +4454,45 @@ fn stack_stx_burn_op_integration_test() { if raw_tx == "0x00" { info!("Found a burn op: {:?}", tx); let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); + if burnchain_op.contains_key("transfer_stx") { + let transfer_stx_obj = burnchain_op.get("transfer_stx").unwrap(); + let sender_obj = transfer_stx_obj.get("sender").unwrap(); + let sender = sender_obj.get("address").unwrap().as_str().unwrap(); + let recipient_obj = transfer_stx_obj.get("recipient").unwrap(); + let recipient = recipient_obj.get("address").unwrap().as_str().unwrap(); + let transfered_ustx = transfer_stx_obj + .get("transfered_ustx") + .unwrap() + .as_u64() + .unwrap(); + assert_eq!(sender, stacker_addr_1.to_string()); + assert_eq!(recipient, stacker_addr_2.to_string()); + assert_eq!(transfered_ustx, 10000); + info!( + "Transfer STX op: sender: {}, recipient: {}, transfered_ustx: {}", + sender, recipient, transfered_ustx + ); + transfer_stx_found = true; + continue; + } + if burnchain_op.contains_key("delegate_stx") { + info!("Got delegate STX op: {:?}", burnchain_op); + let delegate_stx_obj = burnchain_op.get("delegate_stx").unwrap(); + let sender_obj = delegate_stx_obj.get("sender").unwrap(); + let sender = sender_obj.get("address").unwrap().as_str().unwrap(); + let delegate_to_obj = delegate_stx_obj.get("delegate_to").unwrap(); + let delegate_to = delegate_to_obj.get("address").unwrap().as_str().unwrap(); + let delegated_ustx = delegate_stx_obj + .get("delegated_ustx") + .unwrap() + .as_u64() + .unwrap(); + assert_eq!(sender, stacker_addr_2.to_string()); + assert_eq!(delegate_to, stacker_addr_1.to_string()); + assert_eq!(delegated_ustx, 100_000); + delegate_stx_found = true; + continue; + } if !burnchain_op.contains_key("stack_stx") { warn!("Got unexpected burnchain op: {:?}", burnchain_op); panic!("unexpected btc transaction type"); @@ -4378,7 +4539,8 @@ fn stack_stx_burn_op_integration_test() { stack_stx_burn_op_tx_count, 1, "Stack-stx tx without a signer_key shouldn't have been submitted" ); - + assert!(transfer_stx_found, "Expected transfer STX op"); + assert!(delegate_stx_found, "Expected delegate STX op"); let sortdb = btc_regtest_controller.sortdb_mut(); let sortdb_conn = sortdb.conn(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb_conn).unwrap(); From ed09847f9727b4d6de467eabfb575b624b7b3ca7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Sep 2024 17:21:37 -0400 Subject: [PATCH 1210/1400] chore: add unit tests for all burn ops in nakamoto --- .../chainstate/nakamoto/coordinator/tests.rs | 377 +++++++++++++++++- 1 file changed, 373 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index e56e55754c..cf016adb7d 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -28,17 +28,20 @@ use stacks_common::consts::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, SIGNER_SLOTS_PER_USER, }; use stacks_common::types::chainstate::{ - StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, + BurnchainHeaderHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, }; -use stacks_common::types::{Address, StacksEpoch, StacksEpochId}; +use stacks_common::types::{Address, StacksEpoch, StacksEpochId, StacksPublicKeyBuffer}; use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::vrf::VRFProof; use wsts::curve::point::Point; -use crate::burnchains::PoxConstants; +use crate::burnchains::{PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; -use crate::chainstate::burn::operations::{BlockstackOperationType, LeaderBlockCommitOp}; +use crate::chainstate::burn::operations::{ + BlockstackOperationType, DelegateStxOp, LeaderBlockCommitOp, StackStxOp, TransferStxOp, + VoteForAggregateKeyOp, +}; use crate::chainstate::coordinator::tests::{p2pkh_from, pox_addr_from}; use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::nakamoto::fault_injection::*; @@ -58,6 +61,7 @@ use crate::chainstate::stacks::boot::test::{ }; use crate::chainstate::stacks::boot::{MINERS_NAME, SIGNERS_NAME}; use crate::chainstate::stacks::db::{MinerPaymentTxFees, StacksAccount, StacksChainState}; +use crate::chainstate::stacks::events::TransactionOrigin; use crate::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, TenureChangeCause, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, @@ -2595,3 +2599,368 @@ fn process_next_nakamoto_block_deadlock() { // Wait for the blocker and miner threads to finish miner_thread.join().unwrap(); } + +/// Test stacks-on-burnchain op discovery and usage +#[test] +fn test_stacks_on_burnchain_ops() { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + let recipient_private_key = StacksPrivateKey::from_seed(&[3]); + let recipient_addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&recipient_private_key)], + ) + .unwrap(); + + let agg_private_key = StacksPrivateKey::from_seed(&[4]); + let agg_addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&agg_private_key)], + ) + .unwrap(); + + // make enough signers and signing keys so we can create a block and a malleablized block that + // are both valid + let (mut test_signers, test_stackers) = TestStacker::multi_signing_set(&[ + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, + ]); + let observer = TestEventObserver::new(); + let mut peer = boot_nakamoto( + function_name!(), + vec![(addr.into(), 100_000_000)], + &mut test_signers, + &test_stackers, + Some(&observer), + ); + + let mut all_blocks: Vec = vec![]; + let mut all_burn_ops = vec![]; + let mut consensus_hashes = vec![]; + let mut fee_counts = vec![]; + let stx_miner_key = peer.miner.nakamoto_miner_key(); + + let mut extra_burn_ops = vec![]; + let mut bitpatterns = HashMap::new(); // map consensus hash to txid bit pattern + + let cur_reward_cycle = peer + .config + .burnchain + .block_height_to_reward_cycle(peer.get_burn_block_height()) + .unwrap(); + + peer.refresh_burnchain_view(); + let first_stacks_height = peer.network.stacks_tip.height; + + for i in 0..10 { + peer.refresh_burnchain_view(); + let block_height = peer.get_burn_block_height(); + + // parent tip + let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); + + let (mut burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + + let mut new_burn_ops = vec![]; + new_burn_ops.push(BlockstackOperationType::DelegateStx(DelegateStxOp { + sender: addr.clone(), + delegate_to: recipient_addr.clone(), + reward_addr: None, + delegated_ustx: 1, + until_burn_height: None, + + // mocked + txid: Txid([i as u8; 32]), + vtxindex: 1, + block_height: block_height + 1, + burn_header_hash: BurnchainHeaderHash([0x00; 32]), + })); + new_burn_ops.push(BlockstackOperationType::StackStx(StackStxOp { + sender: addr.clone(), + reward_addr: PoxAddress::Standard( + recipient_addr.clone(), + Some(AddressHashMode::SerializeP2PKH), + ), + stacked_ustx: 1, + num_cycles: 1, + signer_key: Some(StacksPublicKeyBuffer::from_public_key( + &StacksPublicKey::from_private(&recipient_private_key), + )), + max_amount: Some(1), + auth_id: Some(i as u32), + + // mocked + txid: Txid([(i as u8) | 0x80; 32]), + vtxindex: 2, + block_height: block_height + 1, + burn_header_hash: BurnchainHeaderHash([0x00; 32]), + })); + new_burn_ops.push(BlockstackOperationType::TransferStx(TransferStxOp { + sender: addr.clone(), + recipient: recipient_addr.clone(), + transfered_ustx: 1, + memo: vec![0x2], + + // mocked + txid: Txid([(i as u8) | 0x40; 32]), + vtxindex: 3, + block_height: block_height + 1, + burn_header_hash: BurnchainHeaderHash([0x00; 32]), + })); + new_burn_ops.push(BlockstackOperationType::VoteForAggregateKey( + VoteForAggregateKeyOp { + sender: addr.clone(), + aggregate_key: StacksPublicKeyBuffer::from_public_key( + &StacksPublicKey::from_private(&agg_private_key), + ), + round: i as u32, + reward_cycle: cur_reward_cycle + 1, + signer_index: 1, + signer_key: StacksPublicKeyBuffer::from_public_key(&StacksPublicKey::from_private( + &recipient_private_key, + )), + + // mocked + txid: Txid([(i as u8) | 0xc0; 32]), + vtxindex: 4, + block_height: block_height + 1, + burn_header_hash: BurnchainHeaderHash([0x00; 32]), + }, + )); + + extra_burn_ops.push(new_burn_ops.clone()); + burn_ops.append(&mut new_burn_ops); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + bitpatterns.insert(consensus_hash.clone(), i as u8); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + debug!("Next burnchain block: {}", &consensus_hash); + + // make sure all our burnchain ops are processed and stored. + let burn_tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); + let ancestor_burnchain_header_hashes = SortitionDB::get_ancestor_burnchain_header_hashes( + peer.sortdb().conn(), + &burn_tip.burn_header_hash, + 6, + ) + .unwrap(); + let processed_burnchain_txids = + NakamotoChainState::get_burnchain_txids_in_ancestor_tenures( + &mut peer.chainstate().index_conn(), + &stacks_tip_ch, + &stacks_tip_bh, + 6, + ) + .unwrap(); + + let mut expected_burnchain_txids = HashSet::new(); + for j in (i as u64).saturating_sub(6)..i { + expected_burnchain_txids.insert(Txid([j as u8; 32])); + expected_burnchain_txids.insert(Txid([(j as u8) | 0x80; 32])); + expected_burnchain_txids.insert(Txid([(j as u8) | 0x40; 32])); + expected_burnchain_txids.insert(Txid([(j as u8) | 0xc0; 32])); + } + assert_eq!(processed_burnchain_txids, expected_burnchain_txids); + + // do a stx transfer in each block to a given recipient + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + let blocks_and_sizes = peer.make_nakamoto_tenure( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() < 10 { + let mut txs = vec![]; + + debug!("\n\nProduce block {}\n\n", blocks_so_far.len()); + + let account = get_account(chainstate, sortdb, &addr); + + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + txs.push(stx_transfer); + + let last_block_opt = blocks_so_far + .last() + .as_ref() + .map(|(block, _size, _cost)| block.header.block_id()); + + let mut final_txs = vec![]; + if let Some(last_block) = last_block_opt.as_ref() { + let tenure_extension = tenure_change.extend( + consensus_hash.clone(), + last_block.clone(), + blocks_so_far.len() as u32, + ); + let tenure_extension_tx = + miner.make_nakamoto_tenure_change(tenure_extension.clone()); + final_txs.push(tenure_extension_tx); + } + final_txs.append(&mut txs); + final_txs + } else { + vec![] + } + }, + ); + + let fees = blocks_and_sizes + .iter() + .map(|(block, _, _)| { + block + .txs + .iter() + .map(|tx| tx.get_tx_fee() as u128) + .sum::() + }) + .sum::(); + + consensus_hashes.push(consensus_hash); + fee_counts.push(fees); + let mut blocks: Vec = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + // check that our tenure-extends have been getting applied + let (highest_tenure, sort_tip) = { + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let tenure = NakamotoChainState::get_ongoing_tenure( + &mut chainstate.index_conn(), + &sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(), + ) + .unwrap() + .unwrap(); + (tenure, tip) + }; + + let last_block = blocks.last().as_ref().cloned().unwrap(); + assert_eq!( + highest_tenure.tenure_id_consensus_hash, + last_block.header.consensus_hash + ); + assert_eq!( + highest_tenure.burn_view_consensus_hash, + sort_tip.consensus_hash + ); + assert!(last_block.header.consensus_hash == sort_tip.consensus_hash); + assert_eq!(highest_tenure.coinbase_height, 12 + i); + assert_eq!(highest_tenure.cause, TenureChangeCause::Extended); + assert_eq!( + highest_tenure.num_blocks_confirmed, + (blocks.len() as u32) - 1 + ); + + all_blocks.append(&mut blocks); + all_burn_ops.push(burn_ops); + } + + // check receipts for burn ops + let mut observed_burn_txids = HashSet::new(); + let observed_blocks = observer.get_blocks(); + for block in observed_blocks.into_iter() { + let block_height = block.metadata.anchored_header.height(); + if block_height < first_stacks_height { + continue; + } + + let mut is_tenure_start = false; + let mut block_burn_txids = HashSet::new(); + for receipt in block.receipts.into_iter() { + match receipt.transaction { + TransactionOrigin::Burn(op) => { + block_burn_txids.insert(op.txid().clone()); + } + TransactionOrigin::Stacks(tx) => { + if let TransactionPayload::TenureChange(txp) = &tx.payload { + if txp.cause == TenureChangeCause::BlockFound { + is_tenure_start = true; + } + } + } + } + } + + // no burnchain blocks processed for non-tenure-start blocks + if !is_tenure_start { + assert_eq!(block_burn_txids.len(), 0); + continue; + } + + // this tenure-start block only processed "new" burnchain ops + let mut expected_burnchain_txids = HashSet::new(); + let bitpattern = *bitpatterns.get(&block.metadata.consensus_hash).unwrap(); + expected_burnchain_txids.insert(Txid([bitpattern; 32])); + expected_burnchain_txids.insert(Txid([bitpattern | 0x80; 32])); + expected_burnchain_txids.insert(Txid([bitpattern | 0x40; 32])); + expected_burnchain_txids.insert(Txid([bitpattern | 0xc0; 32])); + + debug!("At block {}: {:?}", block_height, &block_burn_txids); + debug!("Expected: {:?}", &expected_burnchain_txids); + assert_eq!(block_burn_txids, expected_burnchain_txids); + + observed_burn_txids.extend(block_burn_txids.into_iter()); + } + + // all extra burn ops are represented + for extra_burn_ops_per_block in extra_burn_ops.into_iter() { + for extra_burn_op in extra_burn_ops_per_block.into_iter() { + assert!(observed_burn_txids.contains(&extra_burn_op.txid())); + } + } + + let tip = { + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 111 + ); + + peer.check_nakamoto_migration(); + peer.check_malleablized_blocks(all_blocks, 2); +} From 425ba9b6d3cb6f7511e29a9088cff08464db96bd Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 24 Sep 2024 14:46:12 -0700 Subject: [PATCH 1211/1400] feat: ensure burn ops on included in tenure_change block --- .../src/tests/nakamoto_integrations.rs | 48 ++++++++++++++++++- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index bd669c7180..17b829557f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4044,6 +4044,10 @@ fn burn_ops_integration_test() { let stacker_sk_2 = Secp256k1PrivateKey::new(); let stacker_addr_2 = tests::to_addr(&stacker_sk_2); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let mut sender_nonce = 0; + let mut signers = TestSigners::new(vec![signer_sk_1.clone()]); let stacker_sk = setup_stacker(&mut naka_conf); @@ -4057,6 +4061,10 @@ fn burn_ops_integration_test() { PrincipalData::from(stacker_addr_2.clone()).to_string(), 1000000, ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + 100_000_000, + ); test_observer::spawn(); let observer_port = test_observer::EVENT_OBSERVER_PORT; @@ -4426,7 +4434,8 @@ fn burn_ops_integration_test() { info!("Submitted 2 stack STX ops at height {block_height}, mining a few blocks..."); - // the second block should process the vote, after which the balances should be unchanged + // the second block should process the ops + // Also mine 2 interim blocks to ensure the stack-stx ops are not processed in them for _i in 0..2 { next_block_and_mine_commit( &mut btc_regtest_controller, @@ -4435,6 +4444,29 @@ fn burn_ops_integration_test() { &commits_submitted, ) .unwrap(); + for interim_block_ix in 0..2 { + info!("Mining interim block {interim_block_ix}"); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, 200, &stacker_addr_1.into(), 10000); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + loop { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + if blocks_processed > blocks_processed_before { + break; + } + thread::sleep(Duration::from_millis(100)); + } + } } let mut stack_stx_found = false; @@ -4449,10 +4481,12 @@ fn burn_ops_integration_test() { "stack event observer num transactions: {:?}", transactions.len() ); - for tx in transactions.iter() { + let mut block_has_tenure_change = false; + for tx in transactions.iter().rev() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { info!("Found a burn op: {:?}", tx); + assert!(block_has_tenure_change, "Block should have a tenure change"); let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); if burnchain_op.contains_key("transfer_stx") { let transfer_stx_obj = burnchain_op.get("transfer_stx").unwrap(); @@ -4472,6 +4506,7 @@ fn burn_ops_integration_test() { "Transfer STX op: sender: {}, recipient: {}, transfered_ustx: {}", sender, recipient, transfered_ustx ); + assert!(!transfer_stx_found, "Transfer STX op should be unique"); transfer_stx_found = true; continue; } @@ -4490,6 +4525,7 @@ fn burn_ops_integration_test() { assert_eq!(sender, stacker_addr_2.to_string()); assert_eq!(delegate_to, stacker_addr_1.to_string()); assert_eq!(delegated_ustx, 100_000); + assert!(!delegate_stx_found, "Delegate STX op should be unique"); delegate_stx_found = true; continue; } @@ -4529,8 +4565,16 @@ fn burn_ops_integration_test() { .expect_result_ok() .expect("Expected OK result for stack-stx op"); + assert!(!stack_stx_found, "Stack STX op should be unique"); stack_stx_found = true; stack_stx_burn_op_tx_count += 1; + } else { + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + if let TransactionPayload::TenureChange(_tenure_change) = parsed.payload { + block_has_tenure_change = true; + } } } } From f394e644aed108236764c8ab7733a2c5892f2daa Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 24 Sep 2024 17:12:30 -0500 Subject: [PATCH 1212/1400] feat: add 2 heuristics to miner for nakamoto * for the first block in a tenure, just mine an empty block * estimate the time it takes to eval a tx, and see if it will interfere with block deadline --- stackslib/src/chainstate/stacks/miner.rs | 52 +++++++++++++++++++++++- stackslib/src/core/mempool.rs | 49 ++++++++++++++++++++-- 2 files changed, 96 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 78d6a47781..cf6e83b484 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -18,6 +18,7 @@ use std::collections::{HashMap, HashSet}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; use std::thread::ThreadId; +use std::time::Instant; use std::{cmp, fs, mem}; use clarity::vm::analysis::{CheckError, CheckErrors}; @@ -2211,6 +2212,15 @@ impl StacksBlockBuilder { ); } + // nakamoto miner tenure start heuristic: + // mine an empty block so you can start your tenure quickly! + if let Some(tx) = initial_txs.first() { + if matches!(&tx.payload, TransactionPayload::TenureChange(_)) { + debug!("Nakamoto miner heuristic: during tenure change blocks, produce a fast short block to begin tenure"); + return Ok((false, tx_events)); + } + } + mempool.reset_nonce_cache()?; mempool.estimate_tx_rates(100, &block_limit, &stacks_epoch_id)?; @@ -2221,6 +2231,7 @@ impl StacksBlockBuilder { let mut invalidated_txs = vec![]; let mut to_drop_and_blacklist = vec![]; + let mut update_timings = vec![]; let deadline = ts_start + u128::from(max_miner_time_ms); let mut num_txs = 0; @@ -2250,10 +2261,27 @@ impl StacksBlockBuilder { if block_limit_hit == BlockLimitFunction::LIMIT_REACHED { return Ok(None); } - if get_epoch_time_ms() >= deadline { + let time_now = get_epoch_time_ms(); + if time_now >= deadline { debug!("Miner mining time exceeded ({} ms)", max_miner_time_ms); return Ok(None); } + if let Some(time_estimate) = txinfo.metadata.time_estimate_ms { + if time_now.saturating_add(time_estimate.into()) > deadline { + debug!("Mining tx would cause us to exceed our deadline, skipping"; + "txid" => %txinfo.tx.txid(), + "deadline" => deadline, + "now" => time_now, + "estimate" => time_estimate); + return Ok(Some( + TransactionResult::skipped( + &txinfo.tx, + "Transaction would exceed deadline.".into(), + ) + .convert_to_event(), + )); + } + } // skip transactions early if we can if considered.contains(&txinfo.tx.txid()) { @@ -2303,6 +2331,7 @@ impl StacksBlockBuilder { considered.insert(txinfo.tx.txid()); num_considered += 1; + let tx_start = Instant::now(); let tx_result = builder.try_mine_tx_with_len( epoch_tx, &txinfo.tx, @@ -2314,6 +2343,21 @@ impl StacksBlockBuilder { let result_event = tx_result.convert_to_event(); match tx_result { TransactionResult::Success(TransactionSuccess { receipt, .. }) => { + if txinfo.metadata.time_estimate_ms.is_none() { + // use i64 to avoid running into issues when storing in + // rusqlite. + let time_estimate_ms: i64 = tx_start + .elapsed() + .as_millis() + .try_into() + .unwrap_or_else(|_| i64::MAX); + let time_estimate_ms: u64 = time_estimate_ms + .try_into() + // should be unreachable + .unwrap_or_else(|_| 0); + update_timings.push((txinfo.tx.txid(), time_estimate_ms)); + } + num_txs += 1; if update_estimator { if let Err(e) = estimator.notify_event( @@ -2386,6 +2430,12 @@ impl StacksBlockBuilder { }, ); + if !update_timings.is_empty() { + if let Err(e) = mempool.update_tx_time_estimates(&update_timings) { + warn!("Error while updating time estimates for mempool"; "err" => ?e); + } + } + if to_drop_and_blacklist.len() > 0 { let _ = mempool.drop_and_blacklist_txs(&to_drop_and_blacklist); } diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 0dff4796dc..9b917b608a 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -460,6 +460,7 @@ pub struct MemPoolTxMetadata { pub last_known_origin_nonce: Option, pub last_known_sponsor_nonce: Option, pub accept_time: u64, + pub time_estimate_ms: Option, } impl MemPoolTxMetadata { @@ -594,6 +595,7 @@ impl FromRow for MemPoolTxMetadata { let sponsor_nonce = u64::from_column(row, "sponsor_nonce")?; let last_known_sponsor_nonce = u64::from_column(row, "last_known_sponsor_nonce")?; let last_known_origin_nonce = u64::from_column(row, "last_known_origin_nonce")?; + let time_estimate_ms: Option = row.get("time_estimate_ms")?; Ok(MemPoolTxMetadata { txid, @@ -609,6 +611,7 @@ impl FromRow for MemPoolTxMetadata { last_known_origin_nonce, last_known_sponsor_nonce, accept_time, + time_estimate_ms, }) } } @@ -624,10 +627,7 @@ impl FromRow for MemPoolTxInfo { return Err(db_error::ParseError); } - Ok(MemPoolTxInfo { - tx: tx, - metadata: md, - }) + Ok(MemPoolTxInfo { tx, metadata: md }) } } @@ -803,6 +803,16 @@ const MEMPOOL_SCHEMA_6_NONCES: &'static [&'static str] = &[ "#, ]; +const MEMPOOL_SCHEMA_7_TIME_ESTIMATES: &'static [&'static str] = &[ + r#" + -- ALLOW NULL + ALTER TABLE mempool ADD COLUMN time_estimate_ms NUMBER; + "#, + r#" + INSERT INTO schema_version (version) VALUES (7) + "#, +]; + const MEMPOOL_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS by_txid ON mempool(txid);", "CREATE INDEX IF NOT EXISTS by_height ON mempool(height);", @@ -1287,6 +1297,9 @@ impl MemPoolDB { MemPoolDB::instantiate_nonces(tx)?; } 6 => { + MemPoolDB::instantiate_schema_7(tx)?; + } + 7 => { break; } _ => { @@ -1363,6 +1376,16 @@ impl MemPoolDB { Ok(()) } + /// Add the nonce table + #[cfg_attr(test, mutants::skip)] + fn instantiate_schema_7(tx: &DBTx) -> Result<(), db_error> { + for sql_exec in MEMPOOL_SCHEMA_7_TIME_ESTIMATES { + tx.execute_batch(sql_exec)?; + } + + Ok(()) + } + #[cfg_attr(test, mutants::skip)] pub fn db_path(chainstate_root_path: &str) -> Result { let mut path = PathBuf::from(chainstate_root_path); @@ -2650,6 +2673,24 @@ impl MemPoolDB { Ok(()) } + /// Drop and blacklist transactions, so we don't re-broadcast them or re-fetch them. + /// Do *NOT* remove them from the bloom filter. This will cause them to continue to be + /// reported as present, which is exactly what we want because we don't want these transactions + /// to be seen again (so we don't want anyone accidentally "helpfully" pushing them to us, nor + /// do we want the mempool sync logic to "helpfully" re-discover and re-download them). + pub fn update_tx_time_estimates(&mut self, txs: &[(Txid, u64)]) -> Result<(), db_error> { + let sql = "UPDATE mempool SET time_estimate_ms = ? WHERE txid = ?"; + let mempool_tx = self.tx_begin()?; + for (txid, time_estimate_ms) in txs.iter() { + mempool_tx + .tx + .execute(sql, params![time_estimate_ms, txid])?; + } + mempool_tx.commit()?; + + Ok(()) + } + /// Drop and blacklist transactions, so we don't re-broadcast them or re-fetch them. /// Do *NOT* remove them from the bloom filter. This will cause them to continue to be /// reported as present, which is exactly what we want because we don't want these transactions From cdb539446ef3b4234c0bd74dbd78552cd300a918 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 24 Sep 2024 20:31:12 -0500 Subject: [PATCH 1213/1400] use * in mempool SELECT --- stackslib/src/core/mempool.rs | 16 +--------------- stackslib/src/core/tests/mod.rs | 2 +- 2 files changed, 2 insertions(+), 16 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 9b917b608a..28560d9a80 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -2015,21 +2015,7 @@ impl MemPoolDB { nonce: u64, ) -> Result, db_error> { let sql = format!( - "SELECT - txid, - origin_address, - origin_nonce, - sponsor_address, - sponsor_nonce, - tx_fee, - length, - consensus_hash, - block_header_hash, - height, - accept_time, - last_known_sponsor_nonce, - last_known_origin_nonce - FROM mempool WHERE {0}_address = ?1 AND {0}_nonce = ?2", + "SELECT * FROM mempool WHERE {0}_address = ?1 AND {0}_nonce = ?2", if is_origin { "origin" } else { "sponsor" } ); let args = params![addr.to_string(), u64_to_sql(nonce)?]; diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 72b29cc097..01fcac9e89 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -1381,7 +1381,7 @@ fn mempool_do_not_replace_tx() { .unwrap_err(); assert!(match err_resp { MemPoolRejection::ConflictingNonceInMempool => true, - _ => false, + e => panic!("Failed: {e:?}"), }); assert!(MemPoolDB::db_has_tx(&mempool_tx, &prior_txid).unwrap()); From b995e274f9e50f477269bdba6a2c0cf3c7463a5b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 25 Sep 2024 08:38:50 -0500 Subject: [PATCH 1214/1400] chore: fix signer_set_rollover test --- stackslib/src/core/mempool.rs | 6 +----- testnet/stacks-node/src/tests/signer/v0.rs | 8 ++++++++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 28560d9a80..61306a9764 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -2659,11 +2659,7 @@ impl MemPoolDB { Ok(()) } - /// Drop and blacklist transactions, so we don't re-broadcast them or re-fetch them. - /// Do *NOT* remove them from the bloom filter. This will cause them to continue to be - /// reported as present, which is exactly what we want because we don't want these transactions - /// to be seen again (so we don't want anyone accidentally "helpfully" pushing them to us, nor - /// do we want the mempool sync logic to "helpfully" re-discover and re-download them). + /// Update the time estimates for the supplied txs in the mempool db pub fn update_tx_time_estimates(&mut self, txs: &[(Txid, u64)]) -> Result<(), db_error> { let sql = "UPDATE mempool SET time_estimate_ms = ? WHERE txid = ?"; let mempool_tx = self.tx_begin()?; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9f9f8d1a41..13c3acce44 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2974,6 +2974,7 @@ fn signer_set_rollover() { .running_nodes .btc_regtest_controller .get_headers_height(); + let accounts_to_check: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); for stacker_sk in new_signer_private_keys.iter() { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, @@ -3017,6 +3018,13 @@ fn signer_set_rollover() { submit_tx(&http_origin, &stacking_tx); } + wait_for(60, || { + Ok(accounts_to_check + .iter() + .all(|acct| get_account(&http_origin, acct).nonce >= 1)) + }) + .expect("Timed out waiting for stacking txs to be mined"); + signer_test.mine_nakamoto_block(short_timeout); let next_reward_cycle = reward_cycle.saturating_add(1); From 8dd4771fd87cd4aceebd7b9641ba5bb729ee32db Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 08:08:03 -0700 Subject: [PATCH 1215/1400] CRC: remove potential race condition in signing_in_0th_tenure_of_reward_cycle Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 134 +++++++-------------- 1 file changed, 43 insertions(+), 91 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9dd8b96165..2ec72082a6 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1992,35 +1992,29 @@ fn end_of_tenure() { std::thread::sleep(Duration::from_millis(100)); } - while signer_test.get_current_reward_cycle() != final_reward_cycle { - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 10, - || Ok(true), - ) - .unwrap(); - assert!( - start_time.elapsed() <= short_timeout, - "Timed out waiting to enter the next reward cycle" - ); - std::thread::sleep(Duration::from_millis(100)); - } + wait_for(short_timeout.as_secs(), || { + let result = signer_test.get_current_reward_cycle() == final_reward_cycle; + if !result { + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + } + Ok(result) + }) + .expect("Timed out waiting to enter the next reward cycle"); - while test_observer::get_burn_blocks() - .last() - .unwrap() - .get("burn_block_height") - .unwrap() - .as_u64() - .unwrap() - < final_reward_cycle_height_boundary + 1 - { - assert!( - start_time.elapsed() <= short_timeout, - "Timed out waiting for burn block events" - ); - std::thread::sleep(Duration::from_millis(100)); - } + wait_for(short_timeout.as_secs(), || { + let blocks = test_observer::get_burn_blocks() + .last() + .unwrap() + .get("burn_block_height") + .unwrap() + .as_u64() + .unwrap(); + Ok(blocks > final_reward_cycle_height_boundary) + }) + .expect("Timed out waiting for burn block events"); signer_test.wait_for_cycle(30, final_reward_cycle); @@ -2078,21 +2072,11 @@ fn retry_on_rejection() { let burnchain = signer_test.running_nodes.conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); - loop { - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); - - sleep_ms(10_000); - + wait_for(30, || { let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - if tip.sortition { - break; - } - } + Ok(tip.sortition) + }) + .expect("Timed out waiting for sortition"); // mine a nakamoto block let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -2534,12 +2518,10 @@ fn mock_sign_epoch_25() { { let mut mock_block_mesage = None; let mock_poll_time = Instant::now(); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); let current_burn_block_height = signer_test .running_nodes .btc_regtest_controller @@ -2747,12 +2729,10 @@ fn multiple_miners_mock_sign_epoch_25() { { let mut mock_block_mesage = None; let mock_poll_time = Instant::now(); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); let current_burn_block_height = signer_test .running_nodes .btc_regtest_controller @@ -4539,21 +4519,11 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let burnchain = signer_test.running_nodes.conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); - loop { - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); - - sleep_ms(10_000); - + wait_for(30, || { let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - if tip.sortition { - break; - } - } + Ok(tip.sortition) + }) + .expect("Timed out waiting for sortition"); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; @@ -4833,15 +4803,7 @@ fn signing_in_0th_tenure_of_reward_cycle() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 100; - let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); let signer_public_keys = signer_test .signer_stacks_private_keys .iter() @@ -4888,28 +4850,18 @@ fn signing_in_0th_tenure_of_reward_cycle() { } info!("------------------------- Enter Reward Cycle {next_reward_cycle} -------------------------"); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(true), - ) - .unwrap(); - for signer in &signer_public_keys { let blocks_signed = get_v3_signer(&signer, next_reward_cycle); assert_eq!(blocks_signed, 0); } - let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined .load(Ordering::SeqCst); - - // submit a tx so that the miner will mine a stacks block in the 0th block of the new reward cycle - let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - let _tx = submit_tx(&http_origin, &transfer_tx); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); wait_for(30, || { Ok(signer_test From 541d13b4cc69a15ae3767499e19e99455064506e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 25 Sep 2024 10:11:10 -0500 Subject: [PATCH 1216/1400] fix: bug in the tenure extending logic -- only include tenure change tx in the first block after extension --- stackslib/src/chainstate/stacks/miner.rs | 2 +- testnet/stacks-node/src/nakamoto_node/miner.rs | 14 ++++++++++---- .../stacks-node/src/tests/nakamoto_integrations.rs | 8 +++++--- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index cf6e83b484..d3298855da 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2216,7 +2216,7 @@ impl StacksBlockBuilder { // mine an empty block so you can start your tenure quickly! if let Some(tx) = initial_txs.first() { if matches!(&tx.payload, TransactionPayload::TenureChange(_)) { - debug!("Nakamoto miner heuristic: during tenure change blocks, produce a fast short block to begin tenure"); + info!("Nakamoto miner heuristic: during tenure change blocks, produce a fast short block to begin tenure"); return Ok((false, tx_events)); } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ecc30a9c19..5e3f72ee20 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1260,6 +1260,12 @@ impl BlockMinerThread { tenure_change_tx: None, }); }; + if self.last_block_mined.is_some() { + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let mut payload = TenureChangePayload { @@ -1289,10 +1295,10 @@ impl BlockMinerThread { &parent_block_id, ) .map_err(NakamotoNodeError::MiningFailure)?; - debug!("Miner: Extending tenure"; - "burn_view_consensus_hash" => %burn_view_consensus_hash, - "parent_block_id" => %parent_block_id, - "num_blocks_so_far" => num_blocks_so_far, + info!("Miner: Extending tenure"; + "burn_view_consensus_hash" => %burn_view_consensus_hash, + "parent_block_id" => %parent_block_id, + "num_blocks_so_far" => num_blocks_so_far, ); payload = payload.extend( *burn_view_consensus_hash, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b95eed7fa5..870a00719a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6382,7 +6382,8 @@ fn continue_tenure_extend() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let prom_bind = format!("{}:{}", "127.0.0.1", 6000); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let http_origin = naka_conf.node.data_url.clone(); let sender_sk = Secp256k1PrivateKey::new(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); @@ -6571,12 +6572,13 @@ fn continue_tenure_extend() { &signers, ); - wait_for(5, || { + wait_for(25, || { let blocks_processed = coord_channel .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); - Ok(blocks_processed > blocks_processed_before) + let sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(blocks_processed > blocks_processed_before && sender_nonce >= 1) }) .unwrap(); From 93603978cdb8a3f64239e436fe37685a016145d7 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 08:19:50 -0700 Subject: [PATCH 1217/1400] Do not use a test_observer in boot_to_epoch_3 to enable use with multi node tests Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/mod.rs | 8 ++++---- testnet/stacks-node/src/tests/signer/v0.rs | 6 ++++-- testnet/stacks-node/src/tests/signer/v1.rs | 6 ++++-- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 421ffbb53f..b7f39feba6 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -63,7 +63,6 @@ use wsts::state_machine::PublicKeys; use super::nakamoto_integrations::wait_for; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; -use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::neon::{Counters, TestFlag}; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; @@ -364,9 +363,10 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest MinedNakamotoBlockEvent { + fn mine_nakamoto_block(&mut self, timeout: Duration) { let commits_submitted = self.running_nodes.commits_submitted.clone(); let mined_block_time = Instant::now(); + let info_before = self.stacks_client.get_peer_info().unwrap(); next_block_and_mine_commit( &mut self.running_nodes.btc_regtest_controller, timeout.as_secs(), @@ -376,7 +376,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest info_before.stacks_tip_height) }) .unwrap(); let mined_block_elapsed_time = mined_block_time.elapsed(); @@ -384,7 +385,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { agg_key: &Point, timeout: Duration, ) -> MinedNakamotoBlockEvent { - let new_block = self.mine_nakamoto_block(timeout); + self.mine_nakamoto_block(timeout); + let new_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); let signer_sighash = new_block.signer_signature_hash.clone(); let signature = self.wait_for_confirmed_block_v1(&signer_sighash, timeout); assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes())); @@ -1130,7 +1131,8 @@ fn sign_after_signer_reboot() { info!("------------------------- Test Mine Block after restart -------------------------"); - let last_block = signer_test.mine_nakamoto_block(timeout); + signer_test.mine_nakamoto_block(timeout); + let last_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); let proposed_signer_signature_hash = signer_test .wait_for_validate_ok_response(short_timeout) .signer_signature_hash; From cb2f4909344b21c14b0b5ec3a761f73c8eb246a3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 25 Sep 2024 11:30:02 -0400 Subject: [PATCH 1218/1400] fix: don't ban peers for sending us nakamoto blocks we can't yet handle --- stackslib/src/net/relay.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 94b63d0382..d262d07c42 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1725,9 +1725,6 @@ impl Relayer { "Failed to validate Nakamoto blocks pushed from {:?}: {:?}", neighbor_key, &e ); - - // punish this peer - bad_neighbors.push((*neighbor_key).clone()); break; } From 82be35a8be28a66d3b46c08405bdd659fbc1c2a1 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 10:02:48 -0700 Subject: [PATCH 1219/1400] Remove wsts from testnet/stacks-node Signed-off-by: Jacinta Ferrant --- Cargo.lock | 1 - libsigner/src/signer_set.rs | 132 +- stacks-signer/src/client/mod.rs | 134 +- stacks-signer/src/client/stackerdb.rs | 2 +- stacks-signer/src/config.rs | 17 - stacks-signer/src/lib.rs | 4 +- stacks-signer/src/runloop.rs | 16 +- stacks-signer/src/v0/signer.rs | 48 +- stacks-signer/src/v1/coordinator.rs | 230 --- stacks-signer/src/v1/mod.rs | 29 - stacks-signer/src/v1/signer.rs | 1764 ----------------- stacks-signer/src/v1/stackerdb_manager.rs | 326 --- testnet/stacks-node/Cargo.toml | 2 - .../stacks-node/src/nakamoto_node/miner.rs | 16 +- .../src/nakamoto_node/sign_coordinator.rs | 428 +--- .../src/tests/nakamoto_integrations.rs | 25 +- testnet/stacks-node/src/tests/signer/mod.rs | 123 +- testnet/stacks-node/src/tests/signer/v0.rs | 6 +- testnet/stacks-node/src/tests/signer/v1.rs | 1155 ----------- 19 files changed, 121 insertions(+), 4337 deletions(-) delete mode 100644 stacks-signer/src/v1/coordinator.rs delete mode 100644 stacks-signer/src/v1/mod.rs delete mode 100644 stacks-signer/src/v1/signer.rs delete mode 100644 stacks-signer/src/v1/stackerdb_manager.rs delete mode 100644 testnet/stacks-node/src/tests/signer/v1.rs diff --git a/Cargo.lock b/Cargo.lock index b9b45849d3..e1d78fec15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3437,7 +3437,6 @@ dependencies = [ "tracing-subscriber", "url", "warp", - "wsts", ] [[package]] diff --git a/libsigner/src/signer_set.rs b/libsigner/src/signer_set.rs index fdcb857faf..f47ac454aa 100644 --- a/libsigner/src/signer_set.rs +++ b/libsigner/src/signer_set.rs @@ -13,125 +13,77 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::{BTreeMap, HashMap}; + use blockstack_lib::chainstate::stacks::boot::NakamotoSignerEntry; -use hashbrown::{HashMap, HashSet}; use stacks_common::types::chainstate::{StacksAddress, StacksPublicKey}; -use wsts::curve::ecdsa; -use wsts::curve::point::{Compressed, Point}; -use wsts::state_machine::PublicKeys; -/// A reward set parsed into the structures required by WSTS party members and coordinators. +/// A reward set parsed into relevant structures #[derive(Debug, Clone)] pub struct SignerEntries { - /// The signer addresses mapped to signer id - pub signer_ids: HashMap, - /// The signer ids mapped to public key and key ids mapped to public keys - pub public_keys: PublicKeys, - /// The signer ids mapped to key ids - pub signer_key_ids: HashMap>, - /// The signer ids mapped to wsts public keys - pub signer_public_keys: HashMap, - /// The signer ids mapped to a hash set of key ids - /// The wsts coordinator uses a hash set for each signer since it needs to do lots of lookups - pub coordinator_key_ids: HashMap>, + /// The signer addresses mapped to signer ID + pub signer_addr_to_id: HashMap, + /// The signer IDs mapped to addresses. Uses a BTreeMap to ensure *reward cycle order* + pub signer_id_to_addr: BTreeMap, + /// signer ID mapped to public key + pub signer_id_to_pk: HashMap, + /// public_key mapped to signer ID + pub signer_pk_to_id: HashMap, + /// The signer public keys + pub signer_pks: Vec, + /// The signer addresses + pub signer_addresses: Vec, + /// The signer address mapped to signing weight + pub signer_addr_to_weight: HashMap, } /// Parsing errors for `SignerEntries` #[derive(Debug)] pub enum Error { /// A member of the signing set has a signing key buffer - /// which does not represent a ecdsa public key. + /// which does not represent a valid Stacks public key BadSignerPublicKey(String), /// The number of signers was greater than u32::MAX SignerCountOverflow, } impl SignerEntries { - /// Try to parse the reward set defined by `NakamotoSignEntry` into the structures required - /// by WSTS party members and coordinators. + /// Try to parse the reward set defined by `NakamotoSignEntry` into the SignerEntries struct pub fn parse(is_mainnet: bool, reward_set: &[NakamotoSignerEntry]) -> Result { - let mut weight_end = 1; - let mut signer_key_ids = HashMap::with_capacity(reward_set.len()); - let mut signer_public_keys = HashMap::with_capacity(reward_set.len()); - let mut coordinator_key_ids = HashMap::with_capacity(4000); - let mut signer_ids = HashMap::with_capacity(reward_set.len()); - let mut wsts_signers = HashMap::new(); - let mut wsts_key_ids = HashMap::new(); + let mut signer_pk_to_id = HashMap::with_capacity(reward_set.len()); + let mut signer_id_to_pk = HashMap::with_capacity(reward_set.len()); + let mut signer_addr_to_id = HashMap::with_capacity(reward_set.len()); + let mut signer_pks = Vec::with_capacity(reward_set.len()); + let mut signer_id_to_addr = BTreeMap::new(); + let mut signer_addr_to_weight = HashMap::new(); + let mut signer_addresses = Vec::with_capacity(reward_set.len()); for (i, entry) in reward_set.iter().enumerate() { let signer_id = u32::try_from(i).map_err(|_| Error::SignerCountOverflow)?; - let ecdsa_pk = - ecdsa::PublicKey::try_from(entry.signing_key.as_slice()).map_err(|e| { - Error::BadSignerPublicKey(format!( - "Failed to convert signing key to ecdsa::PublicKey: {e}" - )) - })?; - let signer_public_key = Point::try_from(&Compressed::from(ecdsa_pk.to_bytes())) - .map_err(|e| { - Error::BadSignerPublicKey(format!( - "Failed to convert signing key to wsts::Point: {e}" - )) - })?; - let stacks_public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + let signer_public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) .map_err(|e| { Error::BadSignerPublicKey(format!( "Failed to convert signing key to StacksPublicKey: {e}" )) })?; - let stacks_address = StacksAddress::p2pkh(is_mainnet, &stacks_public_key); - signer_ids.insert(stacks_address, signer_id); - - signer_public_keys.insert(signer_id, signer_public_key); - let weight_start = weight_end; - weight_end = weight_start + entry.weight; - let key_ids: HashSet = (weight_start..weight_end).collect(); - for key_id in key_ids.iter() { - wsts_key_ids.insert(*key_id, ecdsa_pk); - } - signer_key_ids.insert(signer_id, (weight_start..weight_end).collect()); - coordinator_key_ids.insert(signer_id, key_ids); - wsts_signers.insert(signer_id, ecdsa_pk); + let stacks_address = StacksAddress::p2pkh(is_mainnet, &signer_public_key); + signer_addr_to_id.insert(stacks_address, signer_id); + signer_id_to_pk.insert(signer_id, signer_public_key); + signer_pk_to_id.insert(signer_public_key, signer_id); + signer_pks.push(signer_public_key); + signer_id_to_addr.insert(signer_id, stacks_address); + signer_addr_to_weight.insert(stacks_address, entry.weight); + signer_addresses.push(stacks_address); } Ok(Self { - signer_ids, - public_keys: PublicKeys { - signers: wsts_signers, - key_ids: wsts_key_ids, - }, - signer_key_ids, - signer_public_keys, - coordinator_key_ids, + signer_addr_to_id, + signer_id_to_pk, + signer_pk_to_id, + signer_pks, + signer_id_to_addr, + signer_addr_to_weight, + signer_addresses, }) } - - /// Return the number of Key IDs in the WSTS group signature - pub fn count_keys(&self) -> Result { - self.public_keys - .key_ids - .len() - .try_into() - .map_err(|_| Error::SignerCountOverflow) - } - - /// Return the number of Key IDs in the WSTS group signature - pub fn count_signers(&self) -> Result { - self.public_keys - .signers - .len() - .try_into() - .map_err(|_| Error::SignerCountOverflow) - } - - /// Return the number of Key IDs required to sign a message with the WSTS group signature - pub fn get_signing_threshold(&self) -> Result { - let num_keys = self.count_keys()?; - Ok((num_keys as f64 * 7_f64 / 10_f64).ceil() as u32) - } - - /// Return the number of Key IDs required to sign a message with the WSTS group signature - pub fn get_dkg_threshold(&self) -> Result { - let num_keys = self.count_keys()?; - Ok((num_keys as f64 * 9_f64 / 10_f64).ceil() as u32) - } } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 5dedfe82e3..c36f73a3f9 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -123,6 +123,7 @@ where #[cfg(test)] pub(crate) mod tests { + use std::collections::{BTreeMap, HashMap}; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpListener}; @@ -138,20 +139,16 @@ pub(crate) mod tests { use clarity::vm::costs::ExecutionCost; use clarity::vm::types::TupleData; use clarity::vm::Value as ClarityValue; - use hashbrown::{HashMap, HashSet}; use libsigner::SignerEntries; use rand::distributions::Standard; use rand::{thread_rng, Rng}; - use rand_core::{OsRng, RngCore}; + use rand_core::RngCore; use stacks_common::types::chainstate::{ BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; use stacks_common::types::{StacksEpochId, StacksPublicKeyBuffer}; use stacks_common::util::hash::{Hash160, Sha256Sum}; - use wsts::curve::ecdsa; - use wsts::curve::point::{Compressed, Point}; - use wsts::curve::scalar::Scalar; - use wsts::state_machine::PublicKeys; + use wsts::curve::point::Point; use super::*; use crate::config::{GlobalConfig, SignerConfig}; @@ -456,112 +453,59 @@ pub(crate) mod tests { /// Generate a signer config with the given number of signers and keys where the first signer is /// obtained from the provided global config - pub fn generate_signer_config( - config: &GlobalConfig, - num_signers: u32, - num_keys: u32, - ) -> SignerConfig { + pub fn generate_signer_config(config: &GlobalConfig, num_signers: u32) -> SignerConfig { assert!( num_signers > 0, "Cannot generate 0 signers...Specify at least 1 signer." ); - assert!( - num_keys > 0, - "Cannot generate 0 keys for the provided signers...Specify at least 1 key." - ); - let mut public_keys = PublicKeys { - signers: HashMap::new(), - key_ids: HashMap::new(), - }; + + let weight_per_signer = 100 / num_signers; + let mut remaining_weight = 100 % num_signers; + let reward_cycle = thread_rng().next_u64(); - let rng = &mut OsRng; - let num_keys = num_keys / num_signers; - let remaining_keys = num_keys % num_signers; - let mut coordinator_key_ids = HashMap::new(); - let mut signer_key_ids = HashMap::new(); - let mut signer_ids = HashMap::new(); - let mut start_key_id = 1u32; - let mut end_key_id = start_key_id; - let mut signer_public_keys = HashMap::new(); - let mut signer_slot_ids = vec![]; - let ecdsa_private_key = config.ecdsa_private_key; - let ecdsa_public_key = - ecdsa::PublicKey::new(&ecdsa_private_key).expect("Failed to create ecdsa public key"); - // Key ids start from 1 hence the wrapping adds everywhere + + let mut signer_pk_to_id = HashMap::new(); + let mut signer_id_to_pk = HashMap::new(); + let mut signer_addr_to_id = HashMap::new(); + let mut signer_pks = Vec::new(); + let mut signer_slot_ids = Vec::new(); + let mut signer_id_to_addr = BTreeMap::new(); + let mut signer_addr_to_weight = HashMap::new(); + let mut signer_addresses = Vec::new(); + for signer_id in 0..num_signers { - end_key_id = if signer_id.wrapping_add(1) == num_signers { - end_key_id.wrapping_add(remaining_keys) + let private_key = if signer_id == 0 { + config.stacks_private_key } else { - end_key_id.wrapping_add(num_keys) + StacksPrivateKey::new() }; - if signer_id == 0 { - public_keys.signers.insert(signer_id, ecdsa_public_key); - let signer_public_key = - Point::try_from(&Compressed::from(ecdsa_public_key.to_bytes())).unwrap(); - signer_public_keys.insert(signer_id, signer_public_key); - public_keys.signers.insert(signer_id, ecdsa_public_key); - for k in start_key_id..end_key_id { - public_keys.key_ids.insert(k, ecdsa_public_key); - coordinator_key_ids - .entry(signer_id) - .or_insert(HashSet::new()) - .insert(k); - signer_key_ids - .entry(signer_id) - .or_insert(Vec::new()) - .push(k); - } - start_key_id = end_key_id; - let address = StacksAddress::p2pkh( - false, - &StacksPublicKey::from_slice(ecdsa_public_key.to_bytes().as_slice()) - .expect("Failed to create stacks public key"), - ); - signer_slot_ids.push(SignerSlotID(signer_id)); - signer_ids.insert(address, signer_id); - - continue; - } - let private_key = Scalar::random(rng); - let public_key = ecdsa::PublicKey::new(&private_key).unwrap(); - let signer_public_key = - Point::try_from(&Compressed::from(public_key.to_bytes())).unwrap(); - signer_public_keys.insert(signer_id, signer_public_key); - public_keys.signers.insert(signer_id, public_key); - for k in start_key_id..end_key_id { - public_keys.key_ids.insert(k, public_key); - coordinator_key_ids - .entry(signer_id) - .or_insert(HashSet::new()) - .insert(k); - signer_key_ids - .entry(signer_id) - .or_insert(Vec::new()) - .push(k); - } - let address = StacksAddress::p2pkh( - false, - &StacksPublicKey::from_slice(public_key.to_bytes().as_slice()) - .expect("Failed to create stacks public key"), - ); + let public_key = StacksPublicKey::from_private(&private_key); + + signer_id_to_pk.insert(signer_id, public_key); + signer_pk_to_id.insert(public_key, signer_id); + let address = StacksAddress::p2pkh(false, &public_key); + signer_addr_to_id.insert(address, signer_id); + signer_pks.push(public_key); signer_slot_ids.push(SignerSlotID(signer_id)); - signer_ids.insert(address, signer_id); - start_key_id = end_key_id; + signer_id_to_addr.insert(signer_id, address); + signer_addr_to_weight.insert(address, weight_per_signer + remaining_weight); + signer_addresses.push(address); + remaining_weight = 0; // The first signer gets the extra weight if there is any. All other signers only get the weight_per_signer } SignerConfig { reward_cycle, signer_id: 0, signer_slot_id: SignerSlotID(rand::thread_rng().gen_range(0..num_signers)), // Give a random signer slot id between 0 and num_signers - key_ids: signer_key_ids.get(&0).cloned().unwrap_or_default(), signer_entries: SignerEntries { - public_keys, - coordinator_key_ids, - signer_key_ids, - signer_ids, - signer_public_keys, + signer_addr_to_id, + signer_id_to_pk, + signer_pk_to_id, + signer_pks, + signer_id_to_addr, + signer_addr_to_weight, + signer_addresses, }, signer_slot_ids, - ecdsa_private_key: config.ecdsa_private_key, stacks_private_key: config.stacks_private_key, node_host: config.node_host.to_string(), mainnet: config.network.is_mainnet(), diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index f2b574ef4f..b3f6528232 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -257,7 +257,7 @@ mod tests { Some(9000), ); let config = GlobalConfig::load_from_str(&signer_config[0]).unwrap(); - let signer_config = generate_signer_config(&config, 5, 20); + let signer_config = generate_signer_config(&config, 5); let mut stackerdb = StackerDB::from(&signer_config); let header = NakamotoBlockHeader::empty(); diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 66cf5a5f7d..802c362b86 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -29,9 +29,7 @@ use stacks_common::address::{ }; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; -use stacks_common::types::PrivateKey; use stacks_common::util::hash::Hash160; -use wsts::curve::scalar::Scalar; use crate::client::SignerSlotID; @@ -122,14 +120,10 @@ pub struct SignerConfig { pub signer_id: u32, /// The signer stackerdb slot id (may be different from signer_id) pub signer_slot_id: SignerSlotID, - /// This signer's key ids - pub key_ids: Vec, /// The registered signers for this reward cycle pub signer_entries: SignerEntries, /// The signer slot ids of all signers registered for this reward cycle pub signer_slot_ids: Vec, - /// The Scalar representation of the private key for signer communication - pub ecdsa_private_key: Scalar, /// The private key for this signer pub stacks_private_key: StacksPrivateKey, /// The node host for this signer @@ -166,8 +160,6 @@ pub struct GlobalConfig { pub node_host: String, /// endpoint to the event receiver pub endpoint: SocketAddr, - /// The Scalar representation of the private key for signer communication - pub ecdsa_private_key: Scalar, /// The signer's Stacks private key pub stacks_private_key: StacksPrivateKey, /// The signer's Stacks address @@ -295,14 +287,6 @@ impl TryFrom for GlobalConfig { raw_data.stacks_private_key.clone(), ) })?; - - let ecdsa_private_key = - Scalar::try_from(&stacks_private_key.to_bytes()[..32]).map_err(|_| { - ConfigError::BadField( - "stacks_private_key".to_string(), - raw_data.stacks_private_key.clone(), - ) - })?; let stacks_public_key = StacksPublicKey::from_private(&stacks_private_key); let signer_hash = Hash160::from_data(stacks_public_key.to_bytes_compressed().as_slice()); let stacks_address = @@ -341,7 +325,6 @@ impl TryFrom for GlobalConfig { node_host: raw_data.node_host, endpoint, stacks_private_key, - ecdsa_private_key, stacks_address, network: raw_data.network, event_timeout, diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 9d8a22a320..8bac540e7a 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -39,10 +39,8 @@ pub mod runloop; pub mod signerdb; /// The util module for the signer pub mod utils; -/// The v0 implementation of the signer. This does not include WSTS support +/// The v0 implementation of the signer. pub mod v0; -/// The v1 implementation of the singer. This includes WSTS support -pub mod v1; #[cfg(test)] mod tests; diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 4a22c15bb7..d8d159a086 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -301,7 +301,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo ); return Ok(None); }; - let Some(signer_id) = signer_entries.signer_ids.get(current_addr) else { + let Some(signer_id) = signer_entries.signer_addr_to_id.get(current_addr) else { warn!( "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}." ); @@ -310,20 +310,13 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo info!( "Signer #{signer_id} ({current_addr}) is registered for reward cycle {reward_cycle}." ); - let key_ids = signer_entries - .signer_key_ids - .get(signer_id) - .cloned() - .unwrap_or_default(); Ok(Some(SignerConfig { reward_cycle, signer_id: *signer_id, signer_slot_id: *signer_slot_id, - key_ids, signer_entries, signer_slot_ids: signer_slot_ids.into_values().collect(), first_proposal_burn_block_timing: self.config.first_proposal_burn_block_timing, - ecdsa_private_key: self.config.ecdsa_private_key, stacks_private_key: self.config.stacks_private_key, node_host: self.config.node_host.to_string(), mainnet: self.config.network.is_mainnet(), @@ -608,8 +601,11 @@ mod tests { } let parsed_entries = SignerEntries::parse(false, &signer_entries).unwrap(); - assert_eq!(parsed_entries.signer_ids.len(), nmb_signers); - let mut signer_ids = parsed_entries.signer_ids.into_values().collect::>(); + assert_eq!(parsed_entries.signer_id_to_pk.len(), nmb_signers); + let mut signer_ids = parsed_entries + .signer_id_to_pk + .into_keys() + .collect::>(); signer_ids.sort(); assert_eq!( signer_ids, diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index fa34cc4b42..7c94ec908c 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -12,7 +12,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; @@ -81,7 +81,7 @@ pub struct Signer { /// The reward cycle this signer belongs to pub reward_cycle: u64, /// Reward set signer addresses and their weights - pub signer_weights: HashMap, + pub signer_weights: HashMap, /// SignerDB for state management pub signer_db: SignerDb, /// Configuration for proposal evaluation @@ -292,40 +292,13 @@ impl From for Signer { SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); let proposal_config = ProposalEvalConfig::from(&signer_config); - // compute signer addresses *in reward cycle order* - let signer_ids_and_addrs: BTreeMap<_, _> = signer_config - .signer_entries - .signer_ids - .iter() - .map(|(addr, id)| (*id, *addr)) - .collect(); - - let signer_addresses: Vec<_> = signer_ids_and_addrs.into_values().collect(); - - let signer_weights = signer_addresses - .iter() - .map(|addr| { - let Some(signer_id) = signer_config.signer_entries.signer_ids.get(addr) else { - panic!("Malformed config: no signer ID for {}", addr); - }; - let Some(key_ids) = signer_config.signer_entries.signer_key_ids.get(signer_id) - else { - panic!( - "Malformed config: no key IDs for signer ID {} ({})", - signer_id, addr - ); - }; - (*addr, key_ids.len()) - }) - .collect(); - Self { private_key: signer_config.stacks_private_key, stackerdb, mainnet: signer_config.mainnet, signer_id: signer_config.signer_id, - signer_addresses, - signer_weights, + signer_addresses: signer_config.signer_entries.signer_addresses.clone(), + signer_weights: signer_config.signer_entries.signer_addr_to_weight.clone(), signer_slot_ids: signer_config.signer_slot_ids.clone(), reward_cycle: signer_config.reward_cycle, signer_db, @@ -679,22 +652,17 @@ impl Signer { &self, addrs: impl Iterator, ) -> u32 { - let signing_weight = addrs.fold(0usize, |signing_weight, stacker_address| { + addrs.fold(0u32, |signing_weight, stacker_address| { let stacker_weight = self.signer_weights.get(stacker_address).unwrap_or(&0); signing_weight.saturating_add(*stacker_weight) - }); - u32::try_from(signing_weight) - .unwrap_or_else(|_| panic!("FATAL: signing weight exceeds u32::MAX")) + }) } /// Compute the total signing weight fn compute_signature_total_weight(&self) -> u32 { - let total_weight = self - .signer_weights + self.signer_weights .values() - .fold(0usize, |acc, val| acc.saturating_add(*val)); - u32::try_from(total_weight) - .unwrap_or_else(|_| panic!("FATAL: total weight exceeds u32::MAX")) + .fold(0u32, |acc, val| acc.saturating_add(*val)) } /// Handle an observed rejection from another signer diff --git a/stacks-signer/src/v1/coordinator.rs b/stacks-signer/src/v1/coordinator.rs deleted file mode 100644 index 7fc2d238c4..0000000000 --- a/stacks-signer/src/v1/coordinator.rs +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::time::Instant; - -use blockstack_lib::chainstate::burn::ConsensusHashExtensions; -use slog::slog_debug; -use stacks_common::debug; -use stacks_common::types::chainstate::ConsensusHash; -use stacks_common::util::hash::Sha256Sum; -use wsts::curve::ecdsa; -use wsts::state_machine::PublicKeys; - -/// TODO: test this value and adjust as necessary. Maybe make configurable? -pub const COORDINATOR_OPERATION_TIMEOUT_SECS: u64 = 300; - -/// TODO: test this value and adjust as necessary. Maybe make configurable? -pub const COORDINATOR_TENURE_TIMEOUT_SECS: u64 = 600; - -/// The coordinator selector -#[derive(Clone, Debug)] -pub struct CoordinatorSelector { - /// The ordered list of potential coordinators for a specific consensus hash - coordinator_ids: Vec, - /// The current coordinator id - coordinator_id: u32, - /// The current coordinator index into the coordinator ids list - coordinator_index: usize, - /// The last message received time for the current coordinator - pub last_message_time: Option, - /// The time the coordinator started its tenure - tenure_start: Instant, - /// The public keys of the coordinators - public_keys: PublicKeys, -} - -impl From for CoordinatorSelector { - /// Create a new Coordinator selector from the given list of public keys - fn from(public_keys: PublicKeys) -> Self { - let coordinator_ids = - Self::calculate_coordinator_ids(&public_keys, &ConsensusHash::empty()); - let coordinator_id = *coordinator_ids - .first() - .expect("FATAL: No registered signers"); - let coordinator_index = 0; - let last_message_time = None; - let tenure_start = Instant::now(); - Self { - coordinator_ids, - coordinator_id, - coordinator_index, - last_message_time, - tenure_start, - public_keys, - } - } -} - -/// Whether or not to rotate to new coordinators in `update_coordinator` -const ROTATE_COORDINATORS: bool = false; - -impl CoordinatorSelector { - /// Update the coordinator id - fn update_coordinator(&mut self, new_coordinator_ids: Vec) { - self.last_message_time = None; - self.coordinator_index = if new_coordinator_ids != self.coordinator_ids { - // We have advanced our block height and should select from the new list - let mut new_index: usize = 0; - self.coordinator_ids = new_coordinator_ids; - let new_coordinator_id = *self - .coordinator_ids - .first() - .expect("FATAL: No registered signers"); - if ROTATE_COORDINATORS && new_coordinator_id == self.coordinator_id { - // If the newly selected coordinator is the same as the current and we have more than one available, advance immediately to the next - if self.coordinator_ids.len() > 1 { - new_index = new_index.saturating_add(1); - } - } - new_index - } else if ROTATE_COORDINATORS { - self.coordinator_index.saturating_add(1) % self.coordinator_ids.len() - } else { - self.coordinator_index - }; - self.coordinator_id = *self - .coordinator_ids - .get(self.coordinator_index) - .expect("FATAL: Invalid number of registered signers"); - self.tenure_start = Instant::now(); - self.last_message_time = None; - } - - /// Check the coordinator timeouts and update the selected coordinator accordingly - /// Returns the resulting coordinator ID. (Note: it may be unchanged) - pub fn refresh_coordinator(&mut self, pox_consensus_hash: &ConsensusHash) -> u32 { - let new_coordinator_ids = - Self::calculate_coordinator_ids(&self.public_keys, pox_consensus_hash); - if let Some(time) = self.last_message_time { - if time.elapsed().as_secs() > COORDINATOR_OPERATION_TIMEOUT_SECS { - // We have not received a message in a while from this coordinator. - // We should consider the operation finished and use a new coordinator id. - self.update_coordinator(new_coordinator_ids); - } - } else if self.tenure_start.elapsed().as_secs() > COORDINATOR_TENURE_TIMEOUT_SECS - || new_coordinator_ids != self.coordinator_ids - { - // Our tenure has been exceeded or we have advanced our block height and should select from the new list - self.update_coordinator(new_coordinator_ids); - } - self.coordinator_id - } - - /// Get the current coordinator id and public key - pub fn get_coordinator(&self) -> (u32, ecdsa::PublicKey) { - ( - self.coordinator_id, - *self - .public_keys - .signers - .get(&self.coordinator_id) - .expect("FATAL: missing public key for selected coordinator id"), - ) - } - - /// Calculate the ordered list of coordinator ids by comparing the provided public keys - pub fn calculate_coordinator_ids( - public_keys: &PublicKeys, - pox_consensus_hash: &ConsensusHash, - ) -> Vec { - debug!("Using pox_consensus_hash {pox_consensus_hash:?} for selecting coordinator"); - // Create combined hash of each signer's public key with pox_consensus_hash - let mut selection_ids = public_keys - .signers - .iter() - .map(|(&id, pk)| { - let pk_bytes = pk.to_bytes(); - let mut buffer = - Vec::with_capacity(pk_bytes.len() + pox_consensus_hash.as_bytes().len()); - buffer.extend_from_slice(&pk_bytes[..]); - buffer.extend_from_slice(pox_consensus_hash.as_bytes()); - let digest = Sha256Sum::from_data(&buffer).as_bytes().to_vec(); - (id, digest) - }) - .collect::>(); - - // Sort the selection IDs based on the hash - selection_ids.sort_by_key(|(_, hash)| hash.clone()); - // Return only the ids - selection_ids.iter().map(|(id, _)| *id).collect() - } -} -#[cfg(test)] -mod tests { - use super::*; - use crate::client::tests::{generate_random_consensus_hash, generate_signer_config}; - use crate::config::GlobalConfig; - - #[test] - fn calculate_coordinator_different_consensus_hashes_produces_unique_results() { - let number_of_tests = 5; - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); - let public_keys = generate_signer_config(&config, 10, 4000) - .signer_entries - .public_keys; - let mut results = Vec::new(); - - for _ in 0..number_of_tests { - let result = CoordinatorSelector::calculate_coordinator_ids( - &public_keys, - &generate_random_consensus_hash(), - ); - results.push(result); - } - - // Check that not all coordinator IDs are the same - let all_ids_same = results.iter().all(|ids| ids == &results[0]); - assert!(!all_ids_same, "Not all coordinator IDs should be the same"); - } - - fn generate_calculate_coordinator_test_results( - random_consensus: bool, - count: usize, - ) -> Vec> { - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); - let public_keys = generate_signer_config(&config, 10, 4000) - .signer_entries - .public_keys; - let mut results = Vec::new(); - let same_hash = generate_random_consensus_hash(); - for _ in 0..count { - let hash = if random_consensus { - generate_random_consensus_hash() - } else { - same_hash - }; - let result = CoordinatorSelector::calculate_coordinator_ids(&public_keys, &hash); - results.push(result); - } - results - } - - #[test] - fn calculate_coordinator_results_should_vary_or_match_based_on_hash() { - let results_with_random_hash = generate_calculate_coordinator_test_results(true, 5); - let all_ids_same = results_with_random_hash - .iter() - .all(|ids| ids == &results_with_random_hash[0]); - assert!(!all_ids_same, "Not all coordinator IDs should be the same"); - - let results_with_static_hash = generate_calculate_coordinator_test_results(false, 5); - let all_ids_same = results_with_static_hash - .iter() - .all(|ids| ids == &results_with_static_hash[0]); - assert!(all_ids_same, "All coordinator IDs should be the same"); - } -} diff --git a/stacks-signer/src/v1/mod.rs b/stacks-signer/src/v1/mod.rs deleted file mode 100644 index ed1d980016..0000000000 --- a/stacks-signer/src/v1/mod.rs +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use libsigner::v1::messages::SignerMessage; - -use crate::v1::signer::Signer; - -/// The coordinator selector for the signer -pub mod coordinator; -/// The signer module for processing events -pub mod signer; -/// The stackerdb module for sending messages between signers and miners -pub mod stackerdb_manager; - -/// A v1 spawned signer -pub type SpawnedSigner = crate::SpawnedSigner; diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs deleted file mode 100644 index aa8fcfb0d2..0000000000 --- a/stacks-signer/src/v1/signer.rs +++ /dev/null @@ -1,1764 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . -use std::collections::VecDeque; -use std::fmt::Debug; -use std::path::PathBuf; -use std::sync::mpsc::Sender; -use std::time::Instant; - -use blockstack_lib::chainstate::burn::ConsensusHashExtensions; -use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; -use blockstack_lib::chainstate::nakamoto::NakamotoBlock; -use blockstack_lib::chainstate::stacks::boot::SIGNERS_VOTING_FUNCTION_NAME; -use blockstack_lib::chainstate::stacks::StacksTransaction; -use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; -use blockstack_lib::util_lib::db::Error as DBError; -use hashbrown::HashSet; -use libsigner::v1::messages::{ - BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, -}; -use libsigner::{BlockProposal, SignerEvent}; -use rand_core::OsRng; -use slog::{slog_debug, slog_error, slog_info, slog_warn}; -use stacks_common::codec::{read_next, StacksMessageCodec}; -use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::Sha512Trunc256Sum; -use stacks_common::{debug, error, info, warn}; -use wsts::common::Signature; -use wsts::curve::keys::PublicKey; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; -use wsts::net::{Message, NonceRequest, Packet, SignatureShareRequest}; -use wsts::state_machine::coordinator::fire::Coordinator as FireCoordinator; -use wsts::state_machine::coordinator::{ - Config as CoordinatorConfig, Coordinator, State as CoordinatorState, -}; -use wsts::state_machine::signer::Signer as SignerStateMachine; -use wsts::state_machine::{OperationResult, SignError}; -use wsts::traits::Signer as _; -use wsts::v2; - -use super::stackerdb_manager::StackerDBManager; -use crate::chainstate::SortitionsView; -use crate::client::{ClientError, SignerSlotID, StacksClient}; -use crate::config::SignerConfig; -use crate::runloop::{RunLoopCommand, SignerCommand, SignerResult}; -use crate::signerdb::{BlockInfo, NakamotoBlockVote, SignerDb}; -use crate::v1::coordinator::CoordinatorSelector; -use crate::Signer as SignerTrait; - -/// The specific operations that a signer can perform -#[derive(PartialEq, Eq, Debug, Clone)] -pub enum Operation { - /// A DKG operation - Dkg, - /// A Sign operation - Sign, -} - -/// The Signer state -#[derive(PartialEq, Eq, Debug, Clone)] -pub enum State { - /// The signer is uninitialized and should read stackerdb to restore state - Uninitialized, - /// The signer is idle, waiting for messages and commands - Idle, - /// The signer is executing a DKG or Sign round - OperationInProgress(Operation), -} - -/// The stacks signer registered for the reward cycle -#[derive(Debug)] -pub struct Signer { - /// The coordinator for inbound messages for a specific reward cycle - pub coordinator: FireCoordinator, - /// The signing round used to sign messages for a specific reward cycle - pub state_machine: SignerStateMachine, - /// the state of the signer - pub state: State, - /// Received Commands that need to be processed - pub commands: VecDeque, - /// The stackerdb client session manager - pub stackerdb_manager: StackerDBManager, - /// Whether the signer is a mainnet signer or not - pub mainnet: bool, - /// The signer id - pub signer_id: u32, - /// The signer slot ids for the signers in the reward cycle - pub signer_slot_ids: Vec, - /// The addresses of other signers - pub signer_addresses: Vec, - /// The signer slot ids for the signers in the NEXT reward cycle - pub next_signer_slot_ids: Vec, - /// The addresses of the signers for the NEXT reward cycle - pub next_signer_addresses: Vec, - /// The reward cycle this signer belongs to - pub reward_cycle: u64, - /// The default tx fee in uSTX to use when the epoch is pre Nakamoto (Epoch 3.0). - pub tx_fee_ustx: u64, - /// If estimating the tx fee, the max tx fee in uSTX to use when the epoch is pre Nakamoto (Epoch 3.0) - /// If None, will not cap the fee. - pub max_tx_fee_ustx: Option, - /// The coordinator info for the signer - pub coordinator_selector: CoordinatorSelector, - /// The approved key registered to the contract - pub approved_aggregate_public_key: Option, - /// The current active miner's key (if we know it!) - pub miner_key: Option, - /// Signer DB path - pub db_path: PathBuf, - /// SignerDB for state management - pub signer_db: SignerDb, -} - -impl std::fmt::Display for Signer { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "Cycle #{} Signer #{}(C:{})", - self.reward_cycle, - self.signer_id, - self.coordinator_selector.get_coordinator().0, - ) - } -} - -impl SignerTrait for Signer { - /// Create a new signer from the given configuration - fn new(config: SignerConfig) -> Self { - Self::from(config) - } - - /// Return the reward cycle of the signer - fn reward_cycle(&self) -> u64 { - self.reward_cycle - } - - /// Process the event - fn process_event( - &mut self, - stacks_client: &StacksClient, - _sortition_state: &mut Option, - event: Option<&SignerEvent>, - res: &Sender>, - current_reward_cycle: u64, - ) { - let event_parity = match event { - Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2), - // Block proposal events do have reward cycles, but each proposal has its own cycle, - // and the vec could be heterogeneous, so, don't differentiate. - Some(SignerEvent::MinerMessages(..)) - | Some(SignerEvent::NewBurnBlock { .. }) - | Some(SignerEvent::StatusCheck) - | None => None, - Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), - }; - let other_signer_parity = (self.reward_cycle + 1) % 2; - if event_parity == Some(other_signer_parity) { - return; - } - if self.approved_aggregate_public_key.is_none() { - if let Err(e) = self.refresh_dkg(stacks_client, res, current_reward_cycle) { - error!("{self}: failed to refresh DKG: {e}"); - } - } - self.refresh_coordinator(); - if self.approved_aggregate_public_key.is_none() { - if let Err(e) = self.refresh_dkg(stacks_client, res, current_reward_cycle) { - error!("{self}: failed to refresh DKG: {e}"); - } - } - self.refresh_coordinator(); - debug!("{self}: Processing event: {event:?}"); - let Some(event) = event else { - // No event. Do nothing. - debug!("{self}: No event received"); - return; - }; - match event { - SignerEvent::BlockValidationResponse(block_validate_response) => { - info!("{self}: Received a block proposal result from the stacks node..."); - self.handle_block_validate_response( - stacks_client, - block_validate_response, - res, - current_reward_cycle, - ) - } - SignerEvent::SignerMessages(signer_set, messages) => { - if *signer_set != self.stackerdb_manager.get_signer_set() { - debug!("{self}: Received a signer message for a reward cycle that does not belong to this signer. Ignoring..."); - return; - } - debug!( - "{self}: Received {} messages from the other signers...", - messages.len() - ); - self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); - } - SignerEvent::MinerMessages(messages, miner_key) => { - let miner_key = PublicKey::try_from(miner_key.to_bytes_compressed().as_slice()) - .expect("FATAL: could not convert from StacksPublicKey to PublicKey"); - self.miner_key = Some(miner_key); - if current_reward_cycle != self.reward_cycle { - // There is not point in processing blocks if we are not the current reward cycle (we can never actually contribute to signing these blocks) - debug!("{self}: Received a proposed block, but this signer's reward cycle is not the current one ({current_reward_cycle}). Ignoring..."); - return; - } - debug!( - "{self}: Received {} messages from the miner", - messages.len(); - "miner_key" => ?miner_key, - ); - self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); - } - SignerEvent::StatusCheck => { - debug!("{self}: Received a status check event.") - } - SignerEvent::NewBurnBlock { - burn_height, - burn_header_hash, - received_time, - } => { - info!("{self}: Received a new burn block event for block height {burn_height}"); - if let Err(e) = - self.signer_db - .insert_burn_block(burn_header_hash, *burn_height, received_time) - { - error!( - "Failed to write burn block event to signerdb"; - "err" => ?e, - "burn_header_hash" => %burn_header_hash, - "burn_height" => burn_height - ); - panic!("Failed to write burn block event to signerdb"); - } - } - } - } - - fn process_command( - &mut self, - stacks_client: &StacksClient, - current_reward_cycle: u64, - command: Option, - ) { - if let Some(command) = command { - let reward_cycle = command.reward_cycle; - if self.reward_cycle != reward_cycle { - warn!( - "{self}: not registered for reward cycle {reward_cycle}. Ignoring command: {command:?}" - ); - } else { - info!( - "{self}: Queuing an external runloop command ({:?}): {command:?}", - self.state_machine.public_keys.signers.get(&self.signer_id) - ); - self.commands.push_back(command.command); - } - } - self.process_next_command(stacks_client, current_reward_cycle); - } - - fn has_unprocessed_blocks(&self) -> bool { - self.signer_db - .has_unprocessed_blocks(self.reward_cycle) - .unwrap_or_else(|e| { - error!("{self}: Failed to check if there are pending blocks: {e:?}"); - // Assume there are pending blocks to prevent premature cleanup - true - }) - } -} - -impl Signer { - /// Attempt to process the next command in the queue, and update state accordingly - fn process_next_command(&mut self, stacks_client: &StacksClient, current_reward_cycle: u64) { - match &self.state { - State::Uninitialized => { - // We cannot process any commands until we have restored our state - warn!("{self}: Cannot process commands until state is restored. Waiting..."); - } - State::Idle => { - let Some(command) = self.commands.front() else { - debug!("{self}: Nothing to process. Waiting for command..."); - return; - }; - let coordinator_id = if matches!(command, SignerCommand::Dkg) { - // We cannot execute a DKG command if we are not the coordinator - Some(self.get_coordinator_dkg().0) - } else { - self.get_coordinator_sign(current_reward_cycle).0 - }; - if coordinator_id != Some(self.signer_id) { - debug!( - "{self}: Coordinator is {coordinator_id:?}. Will not process any commands...", - ); - return; - } - let command = self - .commands - .pop_front() - .expect("BUG: Already asserted that the command queue was not empty"); - self.execute_command(stacks_client, &command); - } - State::OperationInProgress(op) => { - // We cannot execute the next command until the current one is finished... - debug!( - "{self}: Waiting for {op:?} operation to finish. Coordinator state = {:?}", - self.coordinator.state - ); - } - } - } - /// Return the current coordinator. - /// If the current reward cycle is the active reward cycle, this is the miner, - /// so the first element of the tuple will be None (because the miner does not have a signer index). - /// Otherwise, the coordinator is the signer with the index returned by the coordinator selector. - fn get_coordinator_sign(&self, current_reward_cycle: u64) -> (Option, PublicKey) { - if self.reward_cycle == current_reward_cycle { - let Some(ref cur_miner) = self.miner_key else { - error!( - "Signer #{}: Could not lookup current miner while in active reward cycle", - self.signer_id - ); - let selected = self.coordinator_selector.get_coordinator(); - return (Some(selected.0), selected.1); - }; - // coordinator is the current miner. - (None, *cur_miner) - } else { - let selected = self.coordinator_selector.get_coordinator(); - (Some(selected.0), selected.1) - } - } - - /// Refresh the next signer data from the given configuration data - #[allow(dead_code)] - fn update_signer(&mut self, new_signer_config: &SignerConfig) { - self.next_signer_addresses = new_signer_config - .signer_entries - .signer_ids - .keys() - .copied() - .collect(); - self.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); - } - - /// Get the current coordinator for executing DKG - /// This will always use the coordinator selector to determine the coordinator - fn get_coordinator_dkg(&self) -> (u32, PublicKey) { - self.coordinator_selector.get_coordinator() - } - - /// Read stackerdb messages in case the signer was started late or restarted and missed incoming DKG messages - pub fn read_dkg_stackerdb_messages( - &mut self, - stacks_client: &StacksClient, - res: &Sender>, - current_reward_cycle: u64, - ) -> Result<(), ClientError> { - if self.state != State::Uninitialized { - // We should only read stackerdb if we are uninitialized - return Ok(()); - } - let ordered_packets = self - .stackerdb_manager - .get_dkg_packets(&self.signer_slot_ids)? - .iter() - .filter_map(|packet| { - let coordinator_pubkey = if Self::is_dkg_message(&packet.msg) { - self.get_coordinator_dkg().1 - } else { - debug!( - "{self}: Received a non-DKG message in the DKG message queue. Ignoring it." - ); - return None; - }; - self.verify_packet(stacks_client, packet.clone(), &coordinator_pubkey) - }) - .collect::>(); - // We successfully read stackerdb so we are no longer uninitialized - self.state = State::Idle; - debug!( - "{self}: Processing {} DKG messages from stackerdb: {ordered_packets:?}", - ordered_packets.len() - ); - self.handle_packets(stacks_client, res, &ordered_packets, current_reward_cycle); - Ok(()) - } -} - -impl From for Signer { - fn from(signer_config: SignerConfig) -> Self { - let mut stackerdb_manager = StackerDBManager::from(&signer_config); - - let num_signers = signer_config - .signer_entries - .count_signers() - .expect("FATAL: Too many registered signers to fit in a u32"); - let num_keys = signer_config - .signer_entries - .count_keys() - .expect("FATAL: Too many key ids to fit in a u32"); - let threshold = signer_config - .signer_entries - .get_signing_threshold() - .expect("FATAL: Too many key ids to fit in a u32"); - let dkg_threshold = signer_config - .signer_entries - .get_dkg_threshold() - .expect("FATAL: Too many key ids to fit in a u32"); - - let coordinator_config = CoordinatorConfig { - threshold, - dkg_threshold, - num_signers, - num_keys, - message_private_key: signer_config.ecdsa_private_key, - dkg_public_timeout: signer_config.dkg_public_timeout, - dkg_private_timeout: signer_config.dkg_private_timeout, - dkg_end_timeout: signer_config.dkg_end_timeout, - nonce_timeout: signer_config.nonce_timeout, - sign_timeout: signer_config.sign_timeout, - signer_key_ids: signer_config.signer_entries.coordinator_key_ids, - signer_public_keys: signer_config.signer_entries.signer_public_keys, - }; - - let coordinator = FireCoordinator::new(coordinator_config); - let coordinator_selector = - CoordinatorSelector::from(signer_config.signer_entries.public_keys.clone()); - - debug!( - "Reward cycle #{} Signer #{}: initial coordinator is signer {}", - signer_config.reward_cycle, - signer_config.signer_id, - coordinator_selector.get_coordinator().0 - ); - let signer_db = - SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); - - let mut state_machine = SignerStateMachine::new( - threshold, - num_signers, - num_keys, - signer_config.signer_id, - signer_config.key_ids, - signer_config.ecdsa_private_key, - signer_config.signer_entries.public_keys, - ); - - if let Some(state) = load_encrypted_signer_state( - &mut stackerdb_manager, - signer_config.signer_slot_id, - &state_machine.network_private_key, - ).or_else(|err| { - warn!("Failed to load encrypted signer state from StackerDB, falling back to SignerDB: {err}"); - load_encrypted_signer_state( - &signer_db, - signer_config.reward_cycle, - &state_machine.network_private_key) - }).expect("Failed to load encrypted signer state from both StackerDB and SignerDB") { - state_machine.signer = state; - }; - - Self { - coordinator, - state_machine, - state: State::Uninitialized, - commands: VecDeque::new(), - stackerdb_manager, - mainnet: signer_config.mainnet, - signer_id: signer_config.signer_id, - signer_addresses: signer_config - .signer_entries - .signer_ids - .into_keys() - .collect(), - signer_slot_ids: signer_config.signer_slot_ids.clone(), - next_signer_slot_ids: vec![], - next_signer_addresses: vec![], - reward_cycle: signer_config.reward_cycle, - tx_fee_ustx: signer_config.tx_fee_ustx, - max_tx_fee_ustx: signer_config.max_tx_fee_ustx, - coordinator_selector, - approved_aggregate_public_key: None, - miner_key: None, - db_path: signer_config.db_path, - signer_db, - } - } -} - -impl Signer { - /// Refresh the coordinator selector - pub fn refresh_coordinator(&mut self) { - // TODO: do not use an empty consensus hash - let pox_consensus_hash = ConsensusHash::empty(); - let old_coordinator_id = self.coordinator_selector.get_coordinator().0; - let updated_coordinator_id = self - .coordinator_selector - .refresh_coordinator(&pox_consensus_hash); - if old_coordinator_id != updated_coordinator_id { - debug!( - "{self}: Coordinator updated. Resetting state to Idle."; - "old_coordinator_id" => {old_coordinator_id}, - "updated_coordinator_id" => {updated_coordinator_id}, - "pox_consensus_hash" => %pox_consensus_hash - ); - self.coordinator.state = CoordinatorState::Idle; - self.state = State::Idle; - } - } - - /// Finish an operation and update the coordinator selector accordingly - fn finish_operation(&mut self) { - self.state = State::Idle; - self.coordinator_selector.last_message_time = None; - } - - /// Update operation - fn update_operation(&mut self, operation: Operation) { - self.state = State::OperationInProgress(operation); - self.coordinator_selector.last_message_time = Some(Instant::now()); - } - - /// Execute the given command and update state accordingly - fn execute_command(&mut self, stacks_client: &StacksClient, command: &SignerCommand) { - match command { - SignerCommand::Dkg => { - crate::monitoring::increment_commands_processed("dkg"); - if self.approved_aggregate_public_key.is_some() { - debug!("Reward cycle #{} Signer #{}: Already have an aggregate key. Ignoring DKG command.", self.reward_cycle, self.signer_id); - return; - } - let vote_round = match stacks_client.get_last_round(self.reward_cycle) { - Ok(last_round) => last_round, - Err(e) => { - error!("{self}: Unable to perform DKG. Failed to get last round from stacks node: {e:?}"); - return; - } - }; - // The dkg id will increment internally following "start_dkg_round" so do not increment it here - self.coordinator.current_dkg_id = vote_round.unwrap_or(0); - info!( - "{self}: Starting DKG vote"; - "round" => self.coordinator.current_dkg_id.wrapping_add(1), - "cycle" => self.reward_cycle, - ); - match self.coordinator.start_dkg_round() { - Ok(msg) => { - let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); - debug!("{self}: ACK: {ack:?}",); - self.update_operation(Operation::Dkg); - } - Err(e) => { - error!("{self}: Failed to start DKG: {e:?}",); - return; - } - } - self.update_operation(Operation::Dkg); - } - SignerCommand::Sign { - block_proposal, - is_taproot, - merkle_root, - } => { - crate::monitoring::increment_commands_processed("sign"); - if self.approved_aggregate_public_key.is_none() { - debug!("{self}: Cannot sign a block without an approved aggregate public key. Ignore it."); - return; - } - let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); - let mut block_info = self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - .unwrap_or_else(|_| Some(BlockInfo::from(block_proposal.clone()))) - .unwrap_or_else(|| BlockInfo::from(block_proposal.clone())); - if block_info.signed_over { - debug!("{self}: Received a sign command for a block we are already signing over. Ignore it."); - return; - } - info!("{self}: Signing block"; - "block_consensus_hash" => %block_proposal.block.header.consensus_hash, - "block_height" => block_proposal.block.header.chain_length, - "pre_sign_block_id" => %block_proposal.block.block_id(), - ); - match self.coordinator.start_signing_round( - &block_proposal.serialize_to_vec(), - *is_taproot, - *merkle_root, - ) { - Ok(msg) => { - let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); - debug!("{self}: ACK: {ack:?}",); - block_info.signed_over = true; - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|e| { - error!("{self}: Failed to insert block in DB: {e:?}"); - }); - self.update_operation(Operation::Sign); - } - Err(e) => { - error!("{self}: Failed to start signing block: {e:?}",); - return; - } - } - self.update_operation(Operation::Sign); - } - } - } - - /// Handle the block validate response returned from our prior calls to submit a block for validation - fn handle_block_validate_response( - &mut self, - stacks_client: &StacksClient, - block_validate_response: &BlockValidateResponse, - res: &Sender>, - current_reward_cycle: u64, - ) { - let mut block_info = match block_validate_response { - BlockValidateResponse::Ok(block_validate_ok) => { - crate::monitoring::increment_block_validation_responses(true); - let signer_signature_hash = block_validate_ok.signer_signature_hash; - // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { - Ok(Some(block_info)) => block_info, - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); - return; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}",); - return; - } - }; - let is_valid = self.verify_block_transactions(stacks_client, &block_info.block); - block_info.valid = Some(is_valid); - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - info!( - "{self}: Treating block validation for block {} as valid: {:?}", - &block_info.block.block_id(), - block_info.valid - ); - block_info - } - BlockValidateResponse::Reject(block_validate_reject) => { - crate::monitoring::increment_block_validation_responses(false); - let signer_signature_hash = block_validate_reject.signer_signature_hash; - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { - Ok(Some(block_info)) => block_info, - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); - return; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}"); - return; - } - }; - block_info.valid = Some(false); - // Submit a rejection response to the .signers contract for miners - // to observe so they know to send another block and to prove signers are doing work); - warn!("{self}: Broadcasting a block rejection due to stacks node validation failure..."); - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_validate_reject.clone().into()) - { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } - block_info - } - }; - if let Some(mut nonce_request) = block_info.ext.take_nonce_request() { - debug!("{self}: Received a block validate response from the stacks node for a block we already received a nonce request for. Responding to the nonce request..."); - // We have received validation from the stacks node. Determine our vote and update the request message - self.determine_vote(&mut block_info, &mut nonce_request); - // Send the nonce request through with our vote - let packet = Packet { - msg: Message::NonceRequest(nonce_request), - sig: vec![], - }; - self.handle_packets(stacks_client, res, &[packet], current_reward_cycle); - } - info!( - "{self}: Received a block validate response"; - "block_hash" => block_info.block.header.block_hash(), - "valid" => block_info.valid, - "signed_over" => block_info.signed_over, - ); - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - } - - /// Handle signer messages submitted to signers stackerdb - fn handle_signer_messages( - &mut self, - stacks_client: &StacksClient, - res: &Sender>, - messages: &[SignerMessage], - current_reward_cycle: u64, - ) { - let packets: Vec = messages - .iter() - .filter_map(|msg| match msg { - SignerMessage::DkgResults { .. } - | SignerMessage::BlockResponse(_) - | SignerMessage::EncryptedSignerState(_) - | SignerMessage::Transactions(_) => None, - // TODO: if a signer tries to trigger DKG and we already have one set in the contract, ignore the request. - SignerMessage::Packet(packet) => { - let coordinator_pubkey = if Self::is_dkg_message(&packet.msg) { - self.get_coordinator_dkg().1 - } else { - self.get_coordinator_sign(current_reward_cycle).1 - }; - self.verify_packet(stacks_client, packet.clone(), &coordinator_pubkey) - } - }) - .collect(); - self.handle_packets(stacks_client, res, &packets, current_reward_cycle); - } - - /// Helper function for determining if the provided message is a DKG specific message - fn is_dkg_message(msg: &Message) -> bool { - matches!( - msg, - Message::DkgBegin(_) - | Message::DkgEnd(_) - | Message::DkgEndBegin(_) - | Message::DkgPrivateBegin(_) - | Message::DkgPrivateShares(_) - | Message::DkgPublicShares(_) - ) - } - - /// Process inbound packets as both a signer and a coordinator - /// Will send outbound packets and operation results as appropriate - fn handle_packets( - &mut self, - stacks_client: &StacksClient, - res: &Sender>, - packets: &[Packet], - current_reward_cycle: u64, - ) { - if let Ok(packets_len) = packets.len().try_into() { - crate::monitoring::increment_inbound_packets(packets_len); - } - let signer_outbound_messages = self - .state_machine - .process_inbound_messages(packets) - .unwrap_or_else(|e| { - error!("{self}: Failed to process inbound messages as a signer: {e:?}",); - vec![] - }); - - // Next process the message as the coordinator - let (coordinator_outbound_messages, operation_results) = if self.reward_cycle - != current_reward_cycle - { - self.coordinator - .process_inbound_messages(packets) - .unwrap_or_else(|e| { - error!("{self}: Failed to process inbound messages as a coordinator: {e:?}"); - (vec![], vec![]) - }) - } else { - (vec![], vec![]) - }; - - if !operation_results.is_empty() { - // We have finished a signing or DKG round, either successfully or due to error. - // Regardless of the why, update our state to Idle as we should not expect the operation to continue. - self.process_operation_results(stacks_client, &operation_results); - self.send_operation_results(res, operation_results); - self.finish_operation(); - } else if !packets.is_empty() { - // We have received a message. Update our state accordingly - // Let us be extra explicit in case a new state type gets added to wsts' state machine - match &self.coordinator.state { - CoordinatorState::Idle => {} - CoordinatorState::DkgPublicDistribute - | CoordinatorState::DkgPublicGather - | CoordinatorState::DkgPrivateDistribute - | CoordinatorState::DkgPrivateGather - | CoordinatorState::DkgEndDistribute - | CoordinatorState::DkgEndGather => { - self.update_operation(Operation::Dkg); - } - CoordinatorState::NonceRequest(_, _) - | CoordinatorState::NonceGather(_, _) - | CoordinatorState::SigShareRequest(_, _) - | CoordinatorState::SigShareGather(_, _) => { - self.update_operation(Operation::Sign); - } - } - } - - if packets - .iter() - .any(|packet| matches!(packet.msg, Message::DkgEnd(_))) - { - debug!("{self}: Saving signer state"); - self.save_signer_state() - .unwrap_or_else(|_| panic!("{self}: Failed to save signer state")); - } - self.send_outbound_messages(signer_outbound_messages); - self.send_outbound_messages(coordinator_outbound_messages); - } - - /// Validate a signature share request, updating its message where appropriate. - /// If the request is for a block it has already agreed to sign, it will overwrite the message with the agreed upon value - /// Returns whether the request is valid or not. - fn validate_signature_share_request(&self, request: &mut SignatureShareRequest) -> bool { - let Some(block_vote): Option = read_next(&mut &request.message[..]).ok() - else { - // We currently reject anything that is not a block vote - debug!( - "{self}: Received a signature share request for an unknown message stream. Reject it.", - ); - return false; - }; - - match self - .signer_db - .block_lookup(self.reward_cycle, &block_vote.signer_signature_hash) - .unwrap_or_else(|_| panic!("{self}: Failed to connect to DB")) - .map(|b| b.vote) - { - Some(Some(vote)) => { - // Overwrite with our agreed upon value in case another message won majority or the coordinator is trying to cheat... - debug!( - "{self}: Set vote (rejected = {}) to {vote:?}", block_vote.rejected; - "requested_sighash" => %block_vote.signer_signature_hash, - ); - request.message = vote.serialize_to_vec(); - true - } - Some(None) => { - // We never agreed to sign this block. Reject it. - // This can happen if the coordinator received enough votes to sign yes - // or no on a block before we received validation from the stacks node. - debug!( - "{self}: Received a signature share request for a block we never agreed to sign. Ignore it."; - "requested_sighash" => %block_vote.signer_signature_hash, - ); - false - } - None => { - // We will only sign across block hashes or block hashes + b'n' byte for - // blocks we have seen a Nonce Request for (and subsequent validation) - // We are missing the context here necessary to make a decision. Reject the block - debug!( - "{self}: Received a signature share request from an unknown block. Reject it."; - "requested_sighash" => %block_vote.signer_signature_hash, - ); - false - } - } - } - - /// Validate a nonce request, updating its message appropriately. - /// If the request is for a block, we will update the request message - /// as either a hash indicating a vote no or the signature hash indicating a vote yes - /// Returns whether the request is valid or not - fn validate_nonce_request( - &mut self, - stacks_client: &StacksClient, - nonce_request: &mut NonceRequest, - ) -> Option { - let Some(block_proposal) = - BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()).ok() - else { - // We currently reject anything that is not a valid block proposal - warn!("{self}: Received a nonce request for an unknown message stream. Reject it.",); - return None; - }; - if block_proposal.reward_cycle != self.reward_cycle { - // We are not signing for this reward cycle. Reject the block - warn!( - "{self}: Received a nonce request for a different reward cycle. Reject it."; - "requested_reward_cycle" => block_proposal.reward_cycle, - ); - return None; - } - // TODO: could add a check to ignore an old burn block height if we know its oudated. Would require us to store the burn block height we last saw on the side. - let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); - let Some(mut block_info) = self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - .expect("Failed to connect to signer DB") - else { - debug!( - "{self}: received a nonce request for a new block. Submit block for validation. "; - "signer_sighash" => %signer_signature_hash, - ); - let block_info = BlockInfo::new_v1_with_request(block_proposal, nonce_request.clone()); - stacks_client - .submit_block_for_validation(block_info.block.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to submit block for validation: {e:?}",); - }); - return Some(block_info); - }; - - if block_info.valid.is_none() { - // We have not yet received validation from the stacks node. Cache the request and wait for validation - debug!("{self}: We have yet to receive validation from the stacks node for a nonce request. Cache the nonce request and wait for block validation..."); - block_info - .ext - .set_nonce_request(nonce_request.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to set nonce_request: {e:?}",); - }); - return Some(block_info); - } - - self.determine_vote(&mut block_info, nonce_request); - Some(block_info) - } - - /// Verify the transactions in a block are as expected - fn verify_block_transactions( - &mut self, - stacks_client: &StacksClient, - block: &NakamotoBlock, - ) -> bool { - let next_reward_cycle = self.reward_cycle.wrapping_add(1); - let approved_aggregate_public_key = stacks_client - .get_approved_aggregate_key(next_reward_cycle) - .unwrap_or(None); - if approved_aggregate_public_key.is_some() { - // We do not enforce a block contain any transactions except the aggregate votes when it is NOT already set for the upcoming signers' reward cycle - // Otherwise it is a waste of block space and time to enforce as the desired outcome has been reached. - debug!("{self}: Already have an aggregate key for the next signer set's reward cycle ({}). Skipping transaction verification...", next_reward_cycle); - return true; - } - if let Ok(expected_transactions) = self.get_expected_transactions(stacks_client) { - //It might be worth building a hashset of the blocks' txids and checking that against the expected transaction's txid. - let block_tx_hashset = block.txs.iter().map(|tx| tx.txid()).collect::>(); - // Ensure the block contains the transactions we expect - let missing_transactions = expected_transactions - .into_iter() - .filter_map(|tx| { - if !block_tx_hashset.contains(&tx.txid()) { - debug!("{self}: expected txid {} is in the block", &tx.txid()); - Some(tx) - } else { - debug!("{self}: missing expected txid {}", &tx.txid()); - None - } - }) - .collect::>(); - let is_valid = missing_transactions.is_empty(); - if !is_valid { - debug!("{self}: Broadcasting a block rejection due to missing expected transactions..."); - let block_rejection = BlockRejection::new( - block.header.signer_signature_hash(), - RejectCode::MissingTransactions(missing_transactions), - ); - // Submit signature result to miners to observe - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_rejection.into()) - { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } - } - is_valid - } else { - // Failed to connect to the stacks node to get transactions. Cannot validate the block. Reject it. - debug!("{self}: Broadcasting a block rejection due to signer connectivity issues...",); - let block_rejection = BlockRejection::new( - block.header.signer_signature_hash(), - RejectCode::ConnectivityIssues, - ); - // Submit signature result to miners to observe - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_rejection.into()) - { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } - false - } - } - - /// Get transactions from stackerdb for the given addresses and account nonces, filtering out any malformed transactions - fn get_signer_transactions( - &mut self, - nonces: &std::collections::HashMap, - ) -> Result, ClientError> { - let transactions: Vec<_> = self - .stackerdb_manager - .get_current_transactions()? - .into_iter() - .filter_map(|tx| { - if !NakamotoSigners::valid_vote_transaction(nonces, &tx, self.mainnet) { - return None; - } - Some(tx) - }) - .collect(); - Ok(transactions) - } - - /// Get the transactions that should be included in the block, filtering out any invalid transactions - fn get_expected_transactions( - &mut self, - stacks_client: &StacksClient, - ) -> Result, ClientError> { - if self.next_signer_slot_ids.is_empty() { - debug!("{self}: No next signers. Skipping transaction retrieval.",); - return Ok(vec![]); - } - // Get all the account nonces for the next signers - let account_nonces = self.get_account_nonces(stacks_client, &self.next_signer_addresses); - let transactions: Vec<_> = self - .stackerdb_manager - .get_next_transactions(&self.next_signer_slot_ids)?; - let mut filtered_transactions = std::collections::HashMap::new(); - NakamotoSigners::update_filtered_transactions( - &mut filtered_transactions, - &account_nonces, - self.mainnet, - transactions, - ); - // We only allow enforcement of one special cased transaction per signer address per block - Ok(filtered_transactions.into_values().collect()) - } - - /// Determine the vote for a block and update the block info and nonce request accordingly - fn determine_vote(&self, block_info: &mut BlockInfo, nonce_request: &mut NonceRequest) { - let rejected = !block_info.valid.unwrap_or(false); - if rejected { - debug!("{self}: Rejecting block {}", block_info.block.block_id()); - } else { - debug!("{self}: Accepting block {}", block_info.block.block_id()); - } - let block_vote = NakamotoBlockVote { - signer_signature_hash: block_info.block.header.signer_signature_hash(), - rejected: !block_info.valid.unwrap_or(false), - }; - let block_vote_bytes = block_vote.serialize_to_vec(); - // Cache our vote - block_info.vote = Some(block_vote); - nonce_request.message = block_vote_bytes; - } - - /// Verify a chunk is a valid wsts packet. Returns the packet if it is valid, else None. - /// NOTE: The packet will be updated if the signer wishes to respond to NonceRequest - /// and SignatureShareRequests with a different message than what the coordinator originally sent. - /// This is done to prevent a malicious coordinator from sending a different message than what was - /// agreed upon and to support the case where the signer wishes to reject a block by voting no - fn verify_packet( - &mut self, - stacks_client: &StacksClient, - mut packet: Packet, - coordinator_public_key: &PublicKey, - ) -> Option { - // We only care about verified wsts packets. Ignore anything else. - if packet.verify(&self.state_machine.public_keys, coordinator_public_key) { - match &mut packet.msg { - Message::SignatureShareRequest(request) => { - if !self.validate_signature_share_request(request) { - return None; - } - } - Message::NonceRequest(request) => { - let Some(updated_block_info) = - self.validate_nonce_request(stacks_client, request) - else { - warn!("Failed to validate and parse nonce request"); - return None; - }; - self.signer_db - .insert_block(&updated_block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - let process_request = updated_block_info.vote.is_some(); - if !process_request { - debug!("Failed to validate nonce request"); - return None; - } - } - _ => { - // Nothing to do for other message types - } - } - Some(packet) - } else { - debug!( - "{self}: Failed to verify wsts packet with {}: {packet:?}", - coordinator_public_key - ); - None - } - } - - /// Processes the operation results, broadcasting block acceptance or rejection messages - /// and DKG vote results accordingly - fn process_operation_results( - &mut self, - stacks_client: &StacksClient, - operation_results: &[OperationResult], - ) { - for operation_result in operation_results { - // Signers only every trigger non-taproot signing rounds over blocks. Ignore SignTaproot results - match operation_result { - OperationResult::Sign(signature) => { - crate::monitoring::increment_operation_results("sign"); - info!("{self}: Received signature result"); - self.process_signature(signature); - } - OperationResult::SignTaproot(_) => { - crate::monitoring::increment_operation_results("sign_taproot"); - debug!("{self}: Received a signature result for a taproot signature. Nothing to broadcast as we currently sign blocks with a FROST signature."); - } - OperationResult::Dkg(aggregate_key) => { - crate::monitoring::increment_operation_results("dkg"); - self.process_dkg(stacks_client, aggregate_key); - } - OperationResult::SignError(e) => { - crate::monitoring::increment_operation_results("sign_error"); - warn!("{self}: Received a Sign error: {e:?}"); - self.process_sign_error(e); - } - OperationResult::DkgError(e) => { - crate::monitoring::increment_operation_results("dkg_error"); - warn!("{self}: Received a DKG error: {e:?}"); - // TODO: process these errors and track malicious signers to report - } - } - } - } - - /// Process a dkg result by broadcasting a vote to the stacks node - fn process_dkg(&mut self, stacks_client: &StacksClient, dkg_public_key: &Point) { - let mut dkg_results_bytes = vec![]; - debug!( - "{self}: Received DKG result. Broadcasting vote to the stacks node..."; - "dkg_public_key" => %dkg_public_key - ); - if let Err(e) = SignerMessage::serialize_dkg_result( - &mut dkg_results_bytes, - dkg_public_key, - self.coordinator.party_polynomials.iter(), - ) { - error!("{}: Failed to serialize DKGResults message for StackerDB, will continue operating.", self.signer_id; - "error" => %e); - } else if let Err(e) = self - .stackerdb_manager - .send_message_bytes_with_retry(&MessageSlotID::DkgResults, dkg_results_bytes) - { - error!("{}: Failed to send DKGResults message to StackerDB, will continue operating.", self.signer_id; - "error" => %e); - } - - // Get our current nonce from the stacks node and compare it against what we have sitting in the stackerdb instance - let signer_address = stacks_client.get_signer_address(); - // Retreieve ALL account nonces as we may have transactions from other signers in our stackerdb slot that we care about - let account_nonces = self.get_account_nonces(stacks_client, &self.signer_addresses); - let account_nonce = account_nonces.get(signer_address).unwrap_or(&0); - let signer_transactions = self - .get_signer_transactions(&account_nonces) - .map_err(|e| { - error!("{self}: Unable to get signer transactions: {e:?}."); - }) - .unwrap_or_default(); - // If we have a transaction in the stackerdb slot, we need to increment the nonce hence the +1, else should use the account nonce - let next_nonce = signer_transactions - .first() - .map(|tx| tx.get_origin_nonce().wrapping_add(1)) - .unwrap_or(*account_nonce); - let epoch = stacks_client - .get_node_epoch() - .unwrap_or(StacksEpochId::Epoch24); - match self.build_dkg_vote(stacks_client, &epoch, next_nonce, *dkg_public_key) { - Ok(new_transaction) => { - if let Err(e) = self.broadcast_dkg_vote( - stacks_client, - epoch, - signer_transactions, - new_transaction, - ) { - warn!( - "{self}: Failed to broadcast DKG public key vote ({dkg_public_key:?}): {e:?}" - ); - } - } - Err(e) => { - warn!( - "{self}: Failed to build DKG public key vote ({dkg_public_key:?}) transaction: {e:?}." - ); - } - } - } - - /// Build a signed DKG vote transaction - fn build_dkg_vote( - &mut self, - stacks_client: &StacksClient, - epoch: &StacksEpochId, - nonce: u64, - dkg_public_key: Point, - ) -> Result { - let mut unsigned_tx = stacks_client.build_unsigned_vote_for_aggregate_public_key( - self.stackerdb_manager.get_signer_slot_id().0, - self.coordinator.current_dkg_id, - dkg_public_key, - self.reward_cycle, - nonce, - )?; - let tx_fee = if epoch < &StacksEpochId::Epoch30 { - info!("{self}: in pre Epoch 3.0 cycles, must set a transaction fee for the DKG vote."); - let fee = if let Some(max_fee) = self.max_tx_fee_ustx { - let estimated_fee = stacks_client - .get_medium_estimated_fee_ustx(&unsigned_tx) - .map_err(|e| { - warn!("{self}: unable to estimate fee for DKG vote transaction: {e:?}."); - e - }) - .unwrap_or(self.tx_fee_ustx); - std::cmp::min(estimated_fee, max_fee) - } else { - self.tx_fee_ustx - }; - debug!("{self}: Using a fee of {fee} uSTX for DKG vote transaction."); - fee - } else { - 0 - }; - unsigned_tx.set_tx_fee(tx_fee); - stacks_client.sign_transaction(unsigned_tx) - } - - // Get the account nonces for the provided list of signer addresses - fn get_account_nonces( - &self, - stacks_client: &StacksClient, - signer_addresses: &[StacksAddress], - ) -> std::collections::HashMap { - let mut account_nonces = std::collections::HashMap::with_capacity(signer_addresses.len()); - for address in signer_addresses { - let Ok(account_nonce) = stacks_client.get_account_nonce(address) else { - warn!("{self}: Unable to get account nonce for address: {address}."); - continue; - }; - account_nonces.insert(*address, account_nonce); - } - account_nonces - } - - /// broadcast the dkg vote transaction according to the current epoch - fn broadcast_dkg_vote( - &mut self, - stacks_client: &StacksClient, - epoch: StacksEpochId, - mut signer_transactions: Vec, - new_transaction: StacksTransaction, - ) -> Result<(), ClientError> { - let txid = new_transaction.txid(); - if self.approved_aggregate_public_key.is_some() { - // We do not enforce a block contain any transactions except the aggregate votes when it is NOT already set - info!( - "{self}: Already has an approved aggregate key. Do not broadcast the transaction ({txid:?})." - ); - return Ok(()); - } - if epoch >= StacksEpochId::Epoch30 { - debug!("{self}: Received a DKG result while in epoch 3.0. Broadcast the transaction only to stackerDB."); - } else if epoch == StacksEpochId::Epoch25 { - debug!("{self}: Received a DKG result while in epoch 2.5. Broadcast the transaction to the mempool."); - stacks_client.submit_transaction(&new_transaction)?; - info!("{self}: Submitted DKG vote transaction ({txid:?}) to the mempool"); - } else { - debug!("{self}: Received a DKG result, but are in an unsupported epoch. Do not broadcast the transaction ({}).", new_transaction.txid()); - return Ok(()); - } - // For all Pox-4 epochs onwards, broadcast the results also to stackerDB for other signers/miners to observe - signer_transactions.push(new_transaction); - let signer_message = SignerMessage::Transactions(signer_transactions); - self.stackerdb_manager - .send_message_with_retry(signer_message)?; - crate::monitoring::increment_dkg_votes_submitted(); - info!("{self}: Broadcasted DKG vote transaction ({txid}) to stacker DB"); - Ok(()) - } - - /// Process a signature from a signing round by deserializing the signature and - /// broadcasting an appropriate Reject or Approval message to stackerdb - fn process_signature(&mut self, signature: &Signature) { - // Deserialize the signature result and broadcast an appropriate Reject or Approval message to stackerdb - let message = self.coordinator.get_message(); - let Some(block_vote): Option = read_next(&mut &message[..]).ok() else { - debug!("{self}: Received a signature result for a non-block. Nothing to broadcast."); - return; - }; - - let block_submission = if block_vote.rejected { - crate::monitoring::increment_block_responses_sent(false); - // We signed a rejection message. Return a rejection message - BlockResponse::rejected(block_vote.signer_signature_hash, signature.clone()) - } else { - crate::monitoring::increment_block_responses_sent(true); - // we agreed to sign the block hash. Return an approval message - BlockResponse::accepted(block_vote.signer_signature_hash, signature.clone()) - }; - - // Submit signature result to miners to observe - info!("{self}: Submit block response: {block_submission}"); - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_submission.into()) - { - warn!("{self}: Failed to send block submission to stacker-db: {e:?}"); - } - } - - /// Process a sign error from a signing round, broadcasting a rejection message to stackerdb accordingly - fn process_sign_error(&mut self, e: &SignError) { - let message = self.coordinator.get_message(); - // We do not sign across blocks, but across their hashes. however, the first sign request is always across the block - // so we must handle this case first - - let block: NakamotoBlock = read_next(&mut &message[..]).ok().unwrap_or({ - // This is not a block so maybe its across its hash - let Some(block_vote): Option = read_next(&mut &message[..]).ok() - else { - // This is not a block vote either. We cannot process this error - debug!( - "{self}: Received a signature error for a non-block. Nothing to broadcast." - ); - return; - }; - let Some(block_info) = self - .signer_db - .block_lookup(self.reward_cycle, &block_vote.signer_signature_hash) - .unwrap_or_else(|_| panic!("{self}: Failed to connect to signer DB")) - else { - debug!( - "{self}: Received a signature result for a block we have not seen before. Ignoring..." - ); - return; - }; - block_info.block - }); - let block_rejection = - BlockRejection::new(block.header.signer_signature_hash(), RejectCode::from(e)); - debug!("{self}: Broadcasting block rejection: {block_rejection:?}"); - // Submit signature result to miners to observe - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_rejection.into()) - { - warn!("{self}: Failed to send block rejection submission to stacker-db: {e:?}"); - } - } - - /// Persist signer state in both SignerDB and StackerDB - fn save_signer_state(&mut self) -> Result<(), PersistenceError> { - let rng = &mut OsRng; - - let state = self.state_machine.signer.save(); - let serialized_state = serde_json::to_vec(&state)?; - - let encrypted_state = encrypt( - &self.state_machine.network_private_key, - &serialized_state, - rng, - )?; - - let signerdb_result = self.save_signer_state_in_signerdb(&encrypted_state); - let stackerdb_result = self.save_signer_state_in_stackerdb(encrypted_state); - - if let Err(err) = &signerdb_result { - warn!("{self}: Failed to persist state in SignerDB: {err}"); - } - - if let Err(err) = &stackerdb_result { - warn!("{self}: Failed to persist state in StackerDB: {err}"); - - stackerdb_result - } else { - signerdb_result - } - } - - /// Persist signer state in SignerDB - fn save_signer_state_in_signerdb( - &self, - encrypted_state: &[u8], - ) -> Result<(), PersistenceError> { - self.signer_db - .insert_encrypted_signer_state(self.reward_cycle, encrypted_state)?; - Ok(()) - } - - /// Persist signer state in StackerDB - /// TODO: this is a no-op until the number of signer slots can be expanded - fn save_signer_state_in_stackerdb( - &mut self, - _encrypted_state: Vec, - ) -> Result<(), PersistenceError> { - /* - * This is a no-op until the number of signer slots can be expanded to 14 - * - let message = SignerMessage::EncryptedSignerState(encrypted_state); - self.stackerdb_manager.send_message_with_retry(message)?; - */ - Ok(()) - } - - /// Send any operation results across the provided channel - fn send_operation_results( - &mut self, - res: &Sender>, - operation_results: Vec, - ) { - let nmb_results = operation_results.len(); - match res.send(operation_results.into_iter().map(|r| r.into()).collect()) { - Ok(_) => { - debug!("{self}: Successfully sent {nmb_results} operation result(s)") - } - Err(e) => { - warn!("{self}: Failed to send {nmb_results} operation results: {e:?}"); - } - } - } - - /// Sending all provided packets through stackerdb with a retry - fn send_outbound_messages(&mut self, outbound_messages: Vec) { - debug!( - "{self}: Sending {} messages to other stacker-db instances.", - outbound_messages.len() - ); - for msg in outbound_messages { - let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); - if let Ok(ack) = ack { - debug!("{self}: send outbound ACK: {ack:?}"); - } else { - warn!("{self}: Failed to send message to stacker-db instance: {ack:?}"); - } - } - } - - /// Refresh DKG and queue it if required - pub fn refresh_dkg( - &mut self, - stacks_client: &StacksClient, - res: &Sender>, - current_reward_cycle: u64, - ) -> Result<(), ClientError> { - // First attempt to retrieve the aggregate key from the contract. - self.update_approved_aggregate_key(stacks_client)?; - if self.approved_aggregate_public_key.is_some() { - return Ok(()); - } - // Check stackerdb for any missed DKG messages to catch up our state. - self.read_dkg_stackerdb_messages(stacks_client, res, current_reward_cycle)?; - // Check if we should still queue DKG - if !self.should_queue_dkg(stacks_client)? { - return Ok(()); - } - // Because there could be a slight delay in reading pending transactions and a key being approved by the contract, - // check one last time if the approved key was set since we finished the should queue dkg call - self.update_approved_aggregate_key(stacks_client)?; - if self.approved_aggregate_public_key.is_some() { - return Ok(()); - } - if self.commands.front() != Some(&SignerCommand::Dkg) { - info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command..."); - self.commands.push_front(SignerCommand::Dkg); - } else { - debug!("{self}: DKG command already queued..."); - } - Ok(()) - } - - /// Overwrites the approved aggregate key to the value in the contract, updating state accordingly - pub fn update_approved_aggregate_key( - &mut self, - stacks_client: &StacksClient, - ) -> Result<(), ClientError> { - let old_dkg = self.approved_aggregate_public_key; - self.approved_aggregate_public_key = - stacks_client.get_approved_aggregate_key(self.reward_cycle)?; - if self.approved_aggregate_public_key.is_some() { - // TODO: this will never work as is. We need to have stored our party shares on the side etc for this particular aggregate key. - // Need to update state to store the necessary info, check against it to see if we have participated in the winning round and - // then overwrite our value accordingly. Otherwise, we will be locked out of the round and should not participate. - let internal_dkg = self.coordinator.aggregate_public_key; - if internal_dkg != self.approved_aggregate_public_key { - warn!("{self}: we do not support changing the internal DKG key yet. Expected {internal_dkg:?} got {:?}", self.approved_aggregate_public_key); - } - self.coordinator - .set_aggregate_public_key(self.approved_aggregate_public_key); - if old_dkg != self.approved_aggregate_public_key { - warn!( - "{self}: updated DKG value from {old_dkg:?} to {:?}.", - self.approved_aggregate_public_key - ); - } - match self.state { - State::OperationInProgress(Operation::Dkg) => { - debug!( - "{self}: DKG has already been set. Aborting DKG operation {}.", - self.coordinator.current_dkg_id - ); - self.finish_operation(); - } - State::Uninitialized => { - // If we successfully load the DKG value, we are fully initialized - self.state = State::Idle; - } - _ => { - // do nothing - } - } - } - Ok(()) - } - - /// Should DKG be queued to the current signer's command queue - /// This assumes that no key has been approved by the contract yet - pub fn should_queue_dkg(&mut self, stacks_client: &StacksClient) -> Result { - if self.state != State::Idle - || self.signer_id != self.get_coordinator_dkg().0 - || self.commands.front() == Some(&SignerCommand::Dkg) - { - // We are not the coordinator, we are in the middle of an operation, or we have already queued DKG. Do not attempt to queue DKG - return Ok(false); - } - let signer_address = stacks_client.get_signer_address(); - let account_nonces = self.get_account_nonces(stacks_client, &[*signer_address]); - let old_transactions = self.get_signer_transactions(&account_nonces).map_err(|e| { - warn!("{self}: Failed to get old signer transactions: {e:?}. May trigger DKG unnecessarily"); - }).unwrap_or_default(); - // Check if we have an existing vote transaction for the same round and reward cycle - for transaction in old_transactions.iter() { - let params = - NakamotoSigners::parse_vote_for_aggregate_public_key(transaction).unwrap_or_else(|| panic!("BUG: {self}: Received an invalid {SIGNERS_VOTING_FUNCTION_NAME} transaction in an already filtered list: {transaction:?}")); - if Some(params.aggregate_key) == self.coordinator.aggregate_public_key - && params.voting_round == self.coordinator.current_dkg_id - { - debug!("{self}: Not triggering a DKG round. Already have a pending vote transaction."; - "txid" => %transaction.txid(), - "aggregate_key" => %params.aggregate_key, - "voting_round" => params.voting_round - ); - return Ok(false); - } - } - if let Some(aggregate_key) = stacks_client.get_vote_for_aggregate_public_key( - self.coordinator.current_dkg_id, - self.reward_cycle, - *signer_address, - )? { - let Some(round_weight) = stacks_client - .get_round_vote_weight(self.reward_cycle, self.coordinator.current_dkg_id)? - else { - // This only will happen if somehow we registered as a signer and were granted no weight which should not really ever happen. - error!("{self}: already voted for DKG, but no round vote weight found. We either have no voting power or the contract is corrupted."; - "voting_round" => self.coordinator.current_dkg_id, - "aggregate_key" => %aggregate_key - ); - return Ok(false); - }; - let threshold_weight = stacks_client.get_vote_threshold_weight(self.reward_cycle)?; - if round_weight < threshold_weight { - // The threshold weight has not been met yet. We should wait for more votes to arrive. - // TODO: this should be on a timeout of some kind. We should not wait forever for the threshold to be met. - // See https://github.com/stacks-network/stacks-core/issues/4568 - debug!("{self}: Not triggering a DKG round. Weight threshold has not been met yet. Waiting for more votes to arrive."; - "voting_round" => self.coordinator.current_dkg_id, - "aggregate_key" => %aggregate_key, - "round_weight" => round_weight, - "threshold_weight" => threshold_weight - ); - return Ok(false); - } - } else { - // Have I already voted, but the vote is still pending in StackerDB? Check stackerdb for the same round number and reward cycle vote transaction - // Only get the account nonce of THIS signer as we only care about our own votes, not other signer votes - let account_nonce = stacks_client.get_account_nonce(signer_address).unwrap_or(0); - let old_transactions = self.stackerdb_manager.get_current_transactions()?; - // Check if we have an existing vote transaction for the same round and reward cycle - for transaction in old_transactions.iter() { - // We should not consider other signer transactions and should ignore invalid transaction versions - if transaction.origin_address() != *signer_address - || transaction.is_mainnet() != self.mainnet - { - continue; - } - let Some(params) = - NakamotoSigners::parse_vote_for_aggregate_public_key(transaction) - else { - continue; - }; - let Some(dkg_public_key) = self.coordinator.aggregate_public_key else { - break; - }; - if params.aggregate_key == dkg_public_key - && params.voting_round == self.coordinator.current_dkg_id - && params.reward_cycle == self.reward_cycle - { - let origin_nonce = transaction.get_origin_nonce(); - if origin_nonce < account_nonce { - // We have already voted, but our vote nonce is outdated. Resubmit vote with updated transaction - warn!("{self}: DKG vote submitted with invalid nonce ({origin_nonce} < {account_nonce}). Resubmitting vote."); - self.process_dkg(stacks_client, &dkg_public_key); - } else { - debug!("{self}: Already have a pending DKG vote in StackerDB. Waiting for it to be confirmed."; - "txid" => %transaction.txid(), - "aggregate_key" => %params.aggregate_key, - "voting_round" => params.voting_round, - "reward_cycle" => params.reward_cycle, - "nonce" => origin_nonce - ); - } - return Ok(false); - } - } - } - Ok(true) - } -} - -fn load_encrypted_signer_state( - storage: S, - id: S::IdType, - private_key: &Scalar, -) -> Result, PersistenceError> { - if let Some(encrypted_state) = storage.get_encrypted_signer_state(id)? { - let serialized_state = decrypt(private_key, &encrypted_state)?; - let state = serde_json::from_slice(&serialized_state) - .expect("Failed to deserialize decryoted state"); - Ok(Some(v2::Signer::load(&state))) - } else { - Ok(None) - } -} - -trait SignerStateStorage { - type IdType; - - fn get_encrypted_signer_state( - self, - signer_config: Self::IdType, - ) -> Result>, PersistenceError>; -} - -impl SignerStateStorage for &mut StackerDBManager { - type IdType = SignerSlotID; - - fn get_encrypted_signer_state( - self, - id: Self::IdType, - ) -> Result>, PersistenceError> { - Ok(self.get_encrypted_signer_state(id)?) - } -} - -impl SignerStateStorage for &SignerDb { - type IdType = u64; - fn get_encrypted_signer_state( - self, - id: Self::IdType, - ) -> Result>, PersistenceError> { - Ok(self.get_encrypted_signer_state(id)?) - } -} - -fn encrypt( - private_key: &Scalar, - msg: &[u8], - rng: &mut impl rand_core::CryptoRngCore, -) -> Result, EncryptionError> { - wsts::util::encrypt(derive_encryption_key(private_key).as_bytes(), msg, rng) - .map_err(|_| EncryptionError::Encrypt) -} - -fn decrypt(private_key: &Scalar, encrypted_msg: &[u8]) -> Result, EncryptionError> { - wsts::util::decrypt(derive_encryption_key(private_key).as_bytes(), encrypted_msg) - .map_err(|_| EncryptionError::Decrypt) -} - -fn derive_encryption_key(private_key: &Scalar) -> Sha512Trunc256Sum { - let mut prefixed_key = "SIGNER_STATE_ENCRYPTION_KEY/".as_bytes().to_vec(); - prefixed_key.extend_from_slice(&private_key.to_bytes()); - - Sha512Trunc256Sum::from_data(&prefixed_key) -} - -/// Error stemming from a persistence operation -#[derive(Debug, thiserror::Error)] -pub enum PersistenceError { - /// Encryption error - #[error("{0}")] - Encryption(#[from] EncryptionError), - /// Database error - #[error("Database operation failed: {0}")] - DBError(#[from] DBError), - /// Serialization error - #[error("JSON serialization failed: {0}")] - JsonSerializationError(#[from] serde_json::Error), - /// StackerDB client error - #[error("StackerDB client error: {0}")] - StackerDBClientError(#[from] ClientError), -} - -/// Error stemming from a persistence operation -#[derive(Debug, thiserror::Error)] -pub enum EncryptionError { - /// Encryption failed - #[error("Encryption operation failed")] - Encrypt, - /// Decryption failed - #[error("Encryption operation failed")] - Decrypt, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn encrypted_messages_should_be_possible_to_decrypt() { - let msg = "Nobody's gonna know".as_bytes(); - let key = Scalar::random(&mut OsRng); - - let encrypted = encrypt(&key, msg, &mut OsRng).unwrap(); - - assert_ne!(encrypted, msg); - - let decrypted = decrypt(&key, &encrypted).unwrap(); - - assert_eq!(decrypted, msg); - } -} diff --git a/stacks-signer/src/v1/stackerdb_manager.rs b/stacks-signer/src/v1/stackerdb_manager.rs deleted file mode 100644 index cf5e484022..0000000000 --- a/stacks-signer/src/v1/stackerdb_manager.rs +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . -// -use blockstack_lib::chainstate::stacks::StacksTransaction; -use clarity::types::chainstate::StacksPrivateKey; -use libsigner::v1::messages::{MessageSlotID, SignerMessage}; -use libsigner::{SignerSession, StackerDBSession}; -use libstackerdb::StackerDBChunkAckData; -use slog::{slog_debug, slog_error, slog_warn}; -use stacks_common::codec::read_next; -use stacks_common::{debug, error, warn}; -use wsts::net::Packet; - -use crate::client::stackerdb::StackerDB; -use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID}; -use crate::config::SignerConfig; - -/// The session manager for communicating with the .signers contracts for the current and next reward cycle -#[derive(Debug)] -pub struct StackerDBManager { - /// The stacker-db transaction msg session for the NEXT reward cycle - next_transaction_session: StackerDBSession, - /// The stacker-db sessions for each signer set and message type. - stackerdb: StackerDB, -} - -impl From<&SignerConfig> for StackerDBManager { - fn from(config: &SignerConfig) -> Self { - let stackerdb = StackerDB::from(config); - let next_transaction_session = StackerDBSession::new( - &config.node_host, - MessageSlotID::Transactions - .stacker_db_contract(config.mainnet, config.reward_cycle.wrapping_add(1)), - ); - Self { - next_transaction_session, - stackerdb, - } - } -} -impl StackerDBManager { - /// Create a new StackerDB Manager - pub fn new( - host: &str, - stacks_private_key: StacksPrivateKey, - is_mainnet: bool, - reward_cycle: u64, - signer_slot_id: SignerSlotID, - ) -> Self { - let stackerdb = StackerDB::new( - host, - stacks_private_key, - is_mainnet, - reward_cycle, - signer_slot_id, - ); - let next_transaction_session = StackerDBSession::new( - host, - MessageSlotID::Transactions - .stacker_db_contract(is_mainnet, reward_cycle.wrapping_add(1)), - ); - Self { - next_transaction_session, - stackerdb, - } - } - - /// Send a message to the stackerdb with retry - pub fn send_message_with_retry( - &mut self, - message: SignerMessage, - ) -> Result { - self.stackerdb.send_message_with_retry(message) - } - - /// Sends message (as a raw msg ID and bytes) to the .signers stacker-db with an - /// exponential backoff retry - pub fn send_message_bytes_with_retry( - &mut self, - msg_id: &MessageSlotID, - message_bytes: Vec, - ) -> Result { - self.stackerdb - .send_message_bytes_with_retry(msg_id, message_bytes) - } - - /// Get the ordered DKG packets from stackerdb for the signer slot IDs. - pub fn get_dkg_packets( - &mut self, - signer_ids: &[SignerSlotID], - ) -> Result, ClientError> { - let packet_slots = &[ - MessageSlotID::DkgBegin, - MessageSlotID::DkgPublicShares, - MessageSlotID::DkgPrivateBegin, - MessageSlotID::DkgPrivateShares, - MessageSlotID::DkgEndBegin, - MessageSlotID::DkgEnd, - ]; - let slot_ids = signer_ids.iter().map(|id| id.0).collect::>(); - let mut packets = vec![]; - for packet_slot in packet_slots { - let session = self - .stackerdb - .get_session_mut(packet_slot) - .ok_or(ClientError::NotConnected)?; - let messages = StackerDB::get_messages(session, &slot_ids)?; - for message in messages { - let SignerMessage::Packet(packet) = message else { - warn!("Found an unexpected type in a packet slot {packet_slot}"); - continue; - }; - packets.push(packet); - } - } - Ok(packets) - } - - /// Get the transactions from stackerdb for the signers - fn get_transactions( - transactions_session: &mut StackerDBSession, - signer_ids: &[SignerSlotID], - ) -> Result, ClientError> { - let slot_ids = signer_ids.iter().map(|id| id.0).collect::>(); - let messages = StackerDB::get_messages(transactions_session, &slot_ids)?; - let mut transactions = vec![]; - for message in messages { - let SignerMessage::Transactions(chunk_transactions) = message else { - warn!("Signer wrote an unexpected type to the transactions slot"); - continue; - }; - transactions.extend(chunk_transactions); - } - Ok(transactions) - } - - /// Get this signer's latest transactions from stackerdb - pub fn get_current_transactions(&mut self) -> Result, ClientError> { - let signer_slot_id = self.get_signer_slot_id(); - let Some(transactions_session) = - self.stackerdb.get_session_mut(&MessageSlotID::Transactions) - else { - return Err(ClientError::NotConnected); - }; - Self::get_transactions(transactions_session, &[signer_slot_id]) - } - - /// Get the latest signer transactions from signer ids for the next reward cycle - pub fn get_next_transactions( - &mut self, - signer_ids: &[SignerSlotID], - ) -> Result, ClientError> { - debug!("Getting latest chunks from stackerdb for the following signers: {signer_ids:?}",); - Self::get_transactions(&mut self.next_transaction_session, signer_ids) - } - - /// Get the encrypted state for the given signer - pub fn get_encrypted_signer_state( - &mut self, - signer_id: SignerSlotID, - ) -> Result>, ClientError> { - debug!("Getting the persisted encrypted state for signer {signer_id}"); - let Some(state_session) = self - .stackerdb - .get_session_mut(&MessageSlotID::EncryptedSignerState) - else { - return Err(ClientError::NotConnected); - }; - - let send_request = || { - state_session - .get_latest_chunks(&[signer_id.0]) - .map_err(backoff::Error::transient) - }; - - let Some(chunk) = retry_with_exponential_backoff(send_request)?.pop().ok_or( - ClientError::UnexpectedResponseFormat(format!( - "Missing response for state session request for signer {}", - signer_id - )), - )? - else { - debug!("No persisted state for signer {signer_id}"); - return Ok(None); - }; - - if chunk.is_empty() { - debug!("Empty persisted state for signer {signer_id}"); - return Ok(None); - } - - let SignerMessage::EncryptedSignerState(state) = - read_next::(&mut chunk.as_slice())? - else { - error!("Wrong message type stored in signer state slot for signer {signer_id}"); - return Ok(None); - }; - - Ok(Some(state)) - } - - /// Retrieve the signer set this stackerdb client is attached to - pub fn get_signer_set(&self) -> u32 { - self.stackerdb.get_signer_set() - } - - /// Retrieve the signer slot ID - pub fn get_signer_slot_id(&self) -> SignerSlotID { - self.stackerdb.get_signer_slot_id() - } -} - -#[cfg(test)] -mod tests { - use std::thread::spawn; - use std::time::Duration; - - use blockstack_lib::chainstate::stacks::{ - TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, - TransactionSmartContract, TransactionVersion, - }; - use blockstack_lib::util_lib::strings::StacksString; - use clarity::codec::StacksMessageCodec; - use clarity::types::chainstate::StacksPrivateKey; - use libstackerdb::StackerDBChunkAckData; - - use super::*; - use crate::client::tests::{generate_signer_config, mock_server_from_config, write_response}; - use crate::config::GlobalConfig; - - #[test] - fn get_signer_transactions_should_succeed() { - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); - let signer_config = generate_signer_config(&config, 5, 20); - let mut manager = StackerDBManager::from(&signer_config); - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - - let signer_message = SignerMessage::Transactions(vec![tx.clone()]); - let message = signer_message.serialize_to_vec(); - - let signer_slot_ids = vec![SignerSlotID(0), SignerSlotID(1)]; - let h = spawn(move || manager.get_next_transactions(&signer_slot_ids)); - let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); - response_bytes.extend(message); - let mock_server = mock_server_from_config(&config); - write_response(mock_server, response_bytes.as_slice()); - - let signer_message = SignerMessage::Transactions(vec![]); - let message = signer_message.serialize_to_vec(); - let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); - response_bytes.extend(message); - let mock_server = mock_server_from_config(&config); - write_response(mock_server, response_bytes.as_slice()); - - let transactions = h.join().unwrap().unwrap(); - assert_eq!(transactions, vec![tx]); - } - - #[test] - fn send_signer_message_should_succeed() { - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-1.toml").unwrap(); - let signer_config = generate_signer_config(&config, 5, 20); - let mut stackerdb = StackerDBManager::from(&signer_config); - - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - - let signer_message = SignerMessage::Transactions(vec![tx]); - let ack = StackerDBChunkAckData { - accepted: true, - reason: None, - metadata: None, - code: None, - }; - let mock_server = mock_server_from_config(&config); - let h = spawn(move || stackerdb.send_message_with_retry(signer_message)); - let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); - let payload = serde_json::to_string(&ack).expect("Failed to serialize ack"); - response_bytes.extend(payload.as_bytes()); - std::thread::sleep(Duration::from_millis(500)); - write_response(mock_server, response_bytes.as_slice()); - assert_eq!(ack, h.join().unwrap().unwrap()); - } -} diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 19165db0a8..0b9b59a0e7 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -24,7 +24,6 @@ stacks-common = { path = "../../stacks-common" } chrono = "0.4.19" regex = "1" libsigner = { path = "../../libsigner" } -wsts = { workspace = true } url = "2.1.0" rand = { workspace = true } rand_core = { workspace = true } @@ -48,7 +47,6 @@ stacks = { package = "stackslib", path = "../../stackslib", features = ["default stacks-signer = { path = "../../stacks-signer", features = ["testing"] } tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } -wsts = {workspace = true} mutants = "0.0.3" tiny_http = "0.12.0" http-types = "2.12" diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ecc30a9c19..a2f949a8cc 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -50,7 +50,6 @@ use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::vrf::VRFProof; -use wsts::curve::scalar::Scalar; use super::relayer::RelayerThread; use super::sign_coordinator::SignCoordinator; @@ -290,7 +289,6 @@ impl BlockMinerThread { let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::NetError(e)))?; - let mut attempts = 0; // now, actually run this tenure loop { #[cfg(test)] @@ -371,11 +369,9 @@ impl BlockMinerThread { if let Some(mut new_block) = new_block { Self::fault_injection_block_broadcast_stall(&new_block); - let (reward_set, signer_signature) = match self.gather_signatures( - &mut new_block, - &mut stackerdbs, - &mut attempts, - ) { + let (reward_set, signer_signature) = match self + .gather_signatures(&mut new_block, &mut stackerdbs) + { Ok(x) => x, Err(e) => match e { NakamotoNodeError::StacksTipChanged => { @@ -523,7 +519,6 @@ impl BlockMinerThread { &mut self, new_block: &mut NakamotoBlock, stackerdbs: &mut StackerDBs, - attempts: &mut u64, ) -> Result<(RewardSet, Vec), NakamotoNodeError> { let Some(miner_privkey) = self.config.miner.mining_key else { return Err(NakamotoNodeError::MinerConfigurationFailed( @@ -557,7 +552,6 @@ impl BlockMinerThread { }) })?; - let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); let reward_set = self.load_signer_set()?; if self.config.get_node_config(false).mock_mining { @@ -566,7 +560,7 @@ impl BlockMinerThread { let mut coordinator = SignCoordinator::new( &reward_set, - miner_privkey_as_scalar, + miner_privkey, &self.config, self.globals.should_keep_running.clone(), ) @@ -583,10 +577,8 @@ impl BlockMinerThread { )) })?; - *attempts += 1; let signature = coordinator.run_sign_v0( new_block, - *attempts, &tip, &self.burnchain, &sort_db, diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 35d578c0f1..ee01298422 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -21,7 +21,6 @@ use std::time::Duration; use hashbrown::{HashMap, HashSet}; use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0}; -use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -31,8 +30,6 @@ use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NA use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::Error as ChainstateError; -#[cfg(any(test, feature = "testing"))] -use stacks::chainstate::stacks::ThresholdSignature; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::stackerdb::StackerDBs; use stacks::types::PublicKey; @@ -42,15 +39,6 @@ use stacks::util_lib::boot::boot_code_id; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; -use wsts::common::PolyCommitment; -#[cfg(any(test, feature = "testing"))] -use wsts::curve::ecdsa; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; -use wsts::state_machine::coordinator::fire::Coordinator as FireCoordinator; -use wsts::state_machine::coordinator::{Config as CoordinatorConfig, Coordinator}; -use wsts::state_machine::PublicKeys; -use wsts::v2::Aggregator; use super::Error as NakamotoNodeError; use crate::event_dispatcher::STACKER_DB_CHANNEL; @@ -72,11 +60,8 @@ static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); /// is used by Nakamoto miners to act as the coordinator for the blocks they /// produce. pub struct SignCoordinator { - coordinator: FireCoordinator, receiver: Option>, - message_key: Scalar, - #[cfg(any(test, feature = "testing"))] - wsts_public_keys: PublicKeys, + message_key: StacksPrivateKey, is_mainnet: bool, miners_session: StackerDBSession, signer_entries: HashMap, @@ -86,20 +71,6 @@ pub struct SignCoordinator { pub next_signer_bitvec: BitVec<4000>, } -pub struct NakamotoSigningParams { - /// total number of signers - pub num_signers: u32, - /// total number of keys - pub num_keys: u32, - /// threshold of keys needed to form a valid signature - pub threshold: u32, - /// map of signer_id to controlled key_ids - pub signer_key_ids: HashMap>, - /// ECDSA public keys as Point objects indexed by signer_id - pub signer_public_keys: HashMap, - pub wsts_public_keys: PublicKeys, -} - impl Drop for SignCoordinator { fn drop(&mut self) { STACKER_DB_CHANNEL.replace_receiver(self.receiver.take().expect( @@ -108,112 +79,13 @@ impl Drop for SignCoordinator { } } -impl NakamotoSigningParams { - pub fn parse( - is_mainnet: bool, - reward_set: &[NakamotoSignerEntry], - ) -> Result { - let parsed = SignerEntries::parse(is_mainnet, reward_set).map_err(|e| { - ChainstateError::InvalidStacksBlock(format!( - "Invalid Reward Set: Could not parse into WSTS structs: {e:?}" - )) - })?; - - let num_keys = parsed - .count_keys() - .expect("FATAL: more than u32::max() signers in the reward set"); - let num_signers = parsed - .count_signers() - .expect("FATAL: more than u32::max() signers in the reward set"); - let threshold = parsed - .get_signing_threshold() - .expect("FATAL: more than u32::max() signers in the reward set"); - - Ok(NakamotoSigningParams { - num_signers, - threshold, - num_keys, - signer_key_ids: parsed.coordinator_key_ids, - signer_public_keys: parsed.signer_public_keys, - wsts_public_keys: parsed.public_keys, - }) - } -} - -#[allow(dead_code)] -fn get_signer_commitments( - is_mainnet: bool, - reward_set: &[NakamotoSignerEntry], - stackerdbs: &StackerDBs, - reward_cycle: u64, - expected_aggregate_key: &Point, -) -> Result, ChainstateError> { - let commitment_contract = - MessageSlotID::DkgResults.stacker_db_contract(is_mainnet, reward_cycle); - let signer_set_len = u32::try_from(reward_set.len()) - .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set length exceeds u32".into()))?; - for signer_id in 0..signer_set_len { - let Some(signer_data) = stackerdbs.get_latest_chunk(&commitment_contract, signer_id)? - else { - warn!( - "Failed to fetch DKG result, will look for results from other signers."; - "signer_id" => signer_id - ); - continue; - }; - let Ok(SignerMessageV1::DkgResults { - aggregate_key, - party_polynomials, - }) = SignerMessageV1::consensus_deserialize(&mut signer_data.as_slice()) - else { - warn!( - "Failed to parse DKG result, will look for results from other signers."; - "signer_id" => signer_id, - ); - continue; - }; - - if &aggregate_key != expected_aggregate_key { - warn!( - "Aggregate key in DKG results does not match expected, will look for results from other signers."; - "expected" => %expected_aggregate_key, - "reported" => %aggregate_key, - ); - continue; - } - let computed_key = party_polynomials - .iter() - .fold(Point::default(), |s, (_, comm)| s + comm.poly[0]); - - if expected_aggregate_key != &computed_key { - warn!( - "Aggregate key computed from DKG results does not match expected, will look for results from other signers."; - "expected" => %expected_aggregate_key, - "computed" => %computed_key, - ); - continue; - } - - return Ok(party_polynomials); - } - error!( - "No valid DKG results found for the active signing set, cannot coordinate a group signature"; - "reward_cycle" => reward_cycle, - ); - Err(ChainstateError::InvalidStacksBlock( - "Failed to fetch DKG results for the active signer set".into(), - )) -} - impl SignCoordinator { /// * `reward_set` - the active reward set data, used to construct the signer /// set parameters. - /// * `message_key` - the signing key that the coordinator will use to sign messages - /// broadcasted to the signer set. this should be the miner's registered key. /// * `aggregate_public_key` - the active aggregate key for this cycle pub fn new( reward_set: &RewardSet, - message_key: Scalar, + message_key: StacksPrivateKey, config: &Config, keep_running: Arc, ) -> Result { @@ -224,6 +96,11 @@ impl SignCoordinator { return Err(ChainstateError::NoRegisteredSigners(0)); }; + let signer_entries = SignerEntries::parse(is_mainnet, reward_set_signers).map_err(|e| { + ChainstateError::InvalidStacksBlock(format!( + "Failed to parse NakamotoSignerEntries: {e:?}" + )) + })?; let rpc_socket = config .node .get_rpc_loopback() @@ -240,33 +117,11 @@ impl SignCoordinator { ) .expect("FATAL: unable to construct initial bitvec for signer set"); - let NakamotoSigningParams { - num_signers, - num_keys, - threshold, - signer_key_ids, - signer_public_keys, - wsts_public_keys, - } = NakamotoSigningParams::parse(is_mainnet, reward_set_signers.as_slice())?; debug!( "Initializing miner/coordinator"; - "num_signers" => num_signers, - "num_keys" => num_keys, - "threshold" => threshold, - "signer_key_ids" => ?signer_key_ids, - "signer_public_keys" => ?signer_public_keys, - "wsts_public_keys" => ?wsts_public_keys, + "num_signers" => signer_entries.signer_pks.len(), + "signer_public_keys" => ?signer_entries.signer_pks, ); - let coord_config = CoordinatorConfig { - num_signers, - num_keys, - threshold, - signer_key_ids, - signer_public_keys, - dkg_threshold: threshold, - message_private_key: message_key.clone(), - ..Default::default() - }; let total_weight = reward_set.total_signing_weight().map_err(|e| { warn!("Failed to calculate total weight for the reward set: {e:?}"); @@ -288,8 +143,6 @@ impl SignCoordinator { Ok((slot_id, signer)) }) .collect::, ChainstateError>>()?; - - let coordinator: FireCoordinator = FireCoordinator::new(coord_config); #[cfg(test)] { // In test mode, short-circuit spinning up the SignCoordinator if the TEST_SIGNING @@ -303,10 +156,8 @@ impl SignCoordinator { warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); } let sign_coordinator = Self { - coordinator, message_key, receiver: Some(receiver), - wsts_public_keys, is_mainnet, miners_session, next_signer_bitvec, @@ -325,11 +176,8 @@ impl SignCoordinator { } Ok(Self { - coordinator, - message_key, receiver: Some(receiver), - #[cfg(any(test, feature = "testing"))] - wsts_public_keys, + message_key, is_mainnet, miners_session, next_signer_bitvec, @@ -340,40 +188,6 @@ impl SignCoordinator { }) } - fn get_sign_id(burn_block_height: u64, burnchain: &Burnchain) -> u64 { - burnchain - .pox_constants - .reward_cycle_index(burnchain.first_block_height, burn_block_height) - .expect("FATAL: tried to initialize WSTS coordinator before first burn block height") - } - - /// Send a message over the miners contract using a `Scalar` private key - fn send_miners_message_scalar( - message_key: &Scalar, - sortdb: &SortitionDB, - tip: &BlockSnapshot, - stackerdbs: &StackerDBs, - message: M, - miner_slot_id: MinerSlotID, - is_mainnet: bool, - miners_session: &mut StackerDBSession, - election_sortition: &ConsensusHash, - ) -> Result<(), String> { - let mut miner_sk = StacksPrivateKey::from_slice(&message_key.to_bytes()).unwrap(); - miner_sk.set_compress_public(true); - Self::send_miners_message( - &miner_sk, - sortdb, - tip, - stackerdbs, - message, - miner_slot_id, - is_mainnet, - miners_session, - election_sortition, - ) - } - /// Send a message over the miners contract using a `StacksPrivateKey` pub fn send_miners_message( miner_sk: &StacksPrivateKey, @@ -425,221 +239,6 @@ impl SignCoordinator { } } - #[cfg_attr(test, mutants::skip)] - #[cfg(any(test, feature = "testing"))] - pub fn begin_sign_v1( - &mut self, - block: &NakamotoBlock, - burn_block_height: u64, - block_attempt: u64, - burn_tip: &BlockSnapshot, - burnchain: &Burnchain, - sortdb: &SortitionDB, - stackerdbs: &StackerDBs, - counters: &Counters, - election_sortiton: &ConsensusHash, - ) -> Result { - let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); - let sign_iter_id = block_attempt; - let reward_cycle_id = burnchain - .block_height_to_reward_cycle(burn_tip.block_height) - .expect("FATAL: tried to initialize coordinator before first burn block height"); - self.coordinator.current_sign_id = sign_id; - self.coordinator.current_sign_iter_id = sign_iter_id; - - let proposal_msg = BlockProposal { - block: block.clone(), - burn_height: burn_block_height, - reward_cycle: reward_cycle_id, - }; - - let block_bytes = proposal_msg.serialize_to_vec(); - let nonce_req_msg = self - .coordinator - .start_signing_round(&block_bytes, false, None) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to start signing round in FIRE coordinator: {e:?}" - )) - })?; - Self::send_miners_message_scalar::( - &self.message_key, - sortdb, - burn_tip, - &stackerdbs, - nonce_req_msg.into(), - MinerSlotID::BlockProposal, - self.is_mainnet, - &mut self.miners_session, - election_sortiton, - ) - .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; - counters.bump_naka_proposed_blocks(); - #[cfg(test)] - { - // In test mode, short-circuit waiting for the signers if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. - if let Some(_signatures) = - crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() - { - debug!("Short-circuiting waiting for signers, using test signature"); - return Ok(ThresholdSignature::empty()); - } - } - - let Some(ref mut receiver) = self.receiver else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Failed to obtain the StackerDB event receiver".into(), - )); - }; - - loop { - let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { - Ok(event) => event, - Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { - continue; - } - Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "StackerDB event receiver disconnected".into(), - )) - } - }; - - let is_signer_event = - event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); - if !is_signer_event { - debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); - continue; - } - let modified_slots = &event.modified_slots; - - // Update `next_signers_bitvec` with the slots that were modified in the event - modified_slots.iter().for_each(|chunk| { - if let Ok(slot_id) = chunk.slot_id.try_into() { - match &self.next_signer_bitvec.set(slot_id, true) { - Err(e) => { - warn!("Failed to set bitvec for next signer: {e:?}"); - } - _ => (), - }; - } else { - error!("FATAL: slot_id greater than u16, which should never happen."); - } - }); - - let Ok(signer_event) = SignerEvent::try_from(event).map_err(|e| { - warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); - }) else { - continue; - }; - let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { - debug!("Received signer event other than a signer message. Ignoring."); - continue; - }; - if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { - debug!("Received signer event for other reward cycle. Ignoring."); - continue; - }; - debug!("Miner/Coordinator: Received messages from signers"; "count" => messages.len()); - let coordinator_pk = ecdsa::PublicKey::new(&self.message_key).map_err(|_e| { - NakamotoNodeError::MinerSignatureError("Bad signing key for the FIRE coordinator") - })?; - let packets: Vec<_> = messages - .into_iter() - .filter_map(|msg| match msg { - SignerMessageV1::DkgResults { .. } - | SignerMessageV1::BlockResponse(_) - | SignerMessageV1::EncryptedSignerState(_) - | SignerMessageV1::Transactions(_) => None, - SignerMessageV1::Packet(packet) => { - debug!("Received signers packet: {packet:?}"); - if !packet.verify(&self.wsts_public_keys, &coordinator_pk) { - warn!("Failed to verify StackerDB packet: {packet:?}"); - None - } else { - Some(packet) - } - } - }) - .collect(); - let (outbound_msgs, op_results) = self - .coordinator - .process_inbound_messages(&packets) - .unwrap_or_else(|e| { - error!( - "Miner/Coordinator: Failed to process inbound message packets"; - "err" => ?e - ); - (vec![], vec![]) - }); - for operation_result in op_results.into_iter() { - match operation_result { - wsts::state_machine::OperationResult::Dkg { .. } - | wsts::state_machine::OperationResult::SignTaproot(_) - | wsts::state_machine::OperationResult::DkgError(_) => { - debug!("Ignoring unrelated operation result"); - } - wsts::state_machine::OperationResult::Sign(signature) => { - // check if the signature actually corresponds to our block? - let block_sighash = block.header.signer_signature_hash(); - let verified = signature.verify( - self.coordinator.aggregate_public_key.as_ref().unwrap(), - &block_sighash.0, - ); - let signature = ThresholdSignature(signature); - if !verified { - warn!( - "Processed signature but didn't validate over the expected block. Returning error."; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash - ); - return Err(NakamotoNodeError::SignerSignatureError( - "Signature failed to validate over the expected block".into(), - )); - } else { - info!( - "SignCoordinator: Generated a valid signature for the block"; - "next_signer_bitvec" => self.next_signer_bitvec.binary_str(), - ); - return Ok(signature); - } - } - wsts::state_machine::OperationResult::SignError(e) => { - return Err(NakamotoNodeError::SignerSignatureError(format!( - "Signing failed: {e:?}" - ))) - } - } - } - for msg in outbound_msgs { - match Self::send_miners_message_scalar::( - &self.message_key, - sortdb, - burn_tip, - stackerdbs, - msg.into(), - // TODO: note, in v1, we'll want to add a new slot, but for now, it just shares - // with the block proposal - MinerSlotID::BlockProposal, - self.is_mainnet, - &mut self.miners_session, - election_sortiton, - ) { - Ok(()) => { - debug!("Miner/Coordinator: sent outbound message."); - } - Err(e) => { - warn!( - "Miner/Coordinator: Failed to send message to StackerDB instance: {e:?}." - ); - } - }; - } - } - } - /// Do we ignore signer signatures? #[cfg(test)] fn fault_injection_ignore_signatures() -> bool { @@ -682,7 +281,6 @@ impl SignCoordinator { pub fn run_sign_v0( &mut self, block: &NakamotoBlock, - block_attempt: u64, burn_tip: &BlockSnapshot, burnchain: &Burnchain, sortdb: &SortitionDB, @@ -691,13 +289,9 @@ impl SignCoordinator { counters: &Counters, election_sortition: &ConsensusHash, ) -> Result, NakamotoNodeError> { - let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); - let sign_iter_id = block_attempt; let reward_cycle_id = burnchain .block_height_to_reward_cycle(burn_tip.block_height) .expect("FATAL: tried to initialize coordinator before first burn block height"); - self.coordinator.current_sign_id = sign_id; - self.coordinator.current_sign_iter_id = sign_iter_id; let block_proposal = BlockProposal { block: block.clone(), @@ -709,7 +303,7 @@ impl SignCoordinator { debug!("Sending block proposal message to signers"; "signer_signature_hash" => %block.header.signer_signature_hash(), ); - Self::send_miners_message_scalar::( + Self::send_miners_message::( &self.message_key, sortdb, burn_tip, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 17b829557f..c975bfebf9 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -28,8 +28,7 @@ use clarity::vm::{ClarityName, ClarityVersion, Value}; use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::SignerMessage as SignerMessageV0; -use libsigner::v1::messages::SignerMessage as SignerMessageV1; -use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use libsigner::{SignerSession, StackerDBSession}; use rand::RngCore; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -91,7 +90,6 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; -use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; @@ -441,27 +439,6 @@ pub fn get_latest_block_proposal( Ok((proposed_block, pubkey)) } -#[allow(dead_code)] -fn get_block_proposal_msg_v1( - miners_stackerdb: &mut StackerDBSession, - slot_id: u32, -) -> NakamotoBlock { - let message: SignerMessageV1 = miners_stackerdb - .get_latest(slot_id) - .expect("Failed to get latest chunk from the miner slot ID") - .expect("No chunk found"); - let SignerMessageV1::Packet(packet) = message else { - panic!("Expected a signer message packet. Got {message:?}"); - }; - let Message::NonceRequest(nonce_request) = packet.msg else { - panic!("Expected a nonce request. Got {:?}", packet.msg); - }; - let block_proposal = - BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()) - .expect("Failed to deserialize block proposal"); - block_proposal.block -} - pub fn read_and_sign_block_proposal( configs: &[&Config], signers: &TestSigners, diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a25a010465..91f9bc3282 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -13,7 +13,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . mod v0; -mod v1; use std::collections::HashSet; // Copyright (C) 2020-2024 Stacks Open Internet Foundation @@ -42,7 +41,7 @@ use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; -use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; +use stacks::chainstate::stacks::StacksPrivateKey; use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, @@ -54,12 +53,11 @@ use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum}; +use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; -use wsts::state_machine::PublicKeys; use super::nakamoto_integrations::wait_for; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; @@ -76,7 +74,7 @@ use crate::tests::neon_integrations::{ wait_for_runloop, }; use crate::tests::to_addr; -use crate::{BitcoinRegtestController, BurnchainController}; +use crate::BitcoinRegtestController; // Helper struct for holding the btc and stx neon nodes #[allow(dead_code)] @@ -110,8 +108,6 @@ pub struct SignerTest { pub signer_stacks_private_keys: Vec, // link to the stacks node pub stacks_client: StacksClient, - // Unique number used to isolate files created during the test - pub run_stamp: u16, /// The number of cycles to stack for pub num_stacking_cycles: u64, } @@ -224,7 +220,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest u64 { - let prepare_phase_len = self - .running_nodes - .conf - .get_burnchain() - .pox_constants - .prepare_length as u64; - let current_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1 - let curr_reward_cycle = self.get_current_reward_cycle(); - let next_reward_cycle = curr_reward_cycle.saturating_add(1); - let next_reward_cycle_height = self - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(next_reward_cycle); - let next_reward_cycle_reward_set_calculation = next_reward_cycle_height - .saturating_sub(prepare_phase_len) - .saturating_add(1); // +1 as the reward calculation occurs in the SECOND block of the prepare phase/ - - next_reward_cycle_reward_set_calculation.saturating_sub(current_block_height) - } - - fn nmb_blocks_to_reward_cycle_boundary(&mut self, reward_cycle: u64) -> u64 { - let current_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1 - let reward_cycle_height = self - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(reward_cycle); - reward_cycle_height.saturating_sub(current_block_height) - } - fn mine_nakamoto_block(&mut self, timeout: Duration) -> MinedNakamotoBlockEvent { let commits_submitted = self.running_nodes.commits_submitted.clone(); let mined_block_time = Instant::now(); @@ -421,20 +376,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest ThresholdSignature { - let block_obj = self.wait_for_confirmed_block_with_hash(block_signer_sighash, timeout); - let signer_signature_hex = block_obj.get("signer_signature").unwrap().as_str().unwrap(); - let signer_signature_bytes = hex_bytes(&signer_signature_hex[2..]).unwrap(); - let signer_signature = - ThresholdSignature::consensus_deserialize(&mut signer_signature_bytes.as_slice()) - .unwrap(); - signer_signature - } - /// Wait for a confirmed block and return a list of individual /// signer signatures fn wait_for_confirmed_block_v0( @@ -558,22 +499,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest SignerSlotID { - let valid_signer_set = - u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); - let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false); - - self.stacks_client - .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, valid_signer_set) - .expect("FATAL: failed to get signer slots from stackerdb") - .iter() - .position(|(address, _)| address == self.stacks_client.get_signer_address()) - .map(|pos| { - SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) - }) - .expect("FATAL: signer not registered") - } - fn get_signer_slots( &self, reward_cycle: u64, @@ -597,11 +522,11 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>() } - /// Get the wsts public keys for the given reward cycle - fn get_signer_public_keys(&self, reward_cycle: u64) -> PublicKeys { + /// Get the signer public keys for the given reward cycle + fn get_signer_public_keys(&self, reward_cycle: u64) -> Vec { let entries = self.get_reward_set_signers(reward_cycle); let entries = SignerEntries::parse(false, &entries).unwrap(); - entries.public_keys + entries.signer_pks } /// Get the signers for the given reward cycle @@ -630,42 +555,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest StacksPrivateKey { - let spawned_signer = self.spawned_signers.remove(signer_idx); - let signer_key = self.signer_stacks_private_keys.remove(signer_idx); - - spawned_signer.stop(); - signer_key - } - - /// (Re)starts a new signer runloop with the given private key - pub fn restart_signer(&mut self, signer_idx: usize, signer_private_key: StacksPrivateKey) { - let signer_config = build_signer_config_tomls( - &[signer_private_key], - &self.running_nodes.conf.node.rpc_bind, - Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. - &Network::Testnet, - "12345", // It worked sir, we have the combination! -Great, what's the combination? - self.run_stamp, - 3000 + signer_idx, - Some(100_000), - None, - Some(9000 + signer_idx), - ) - .pop() - .unwrap(); - - info!("Restarting signer"); - let config = SignerConfig::load_from_str(&signer_config).unwrap(); - let signer = SpawnedSigner::new(config); - self.spawned_signers.insert(signer_idx, signer); - } - pub fn shutdown(self) { self.running_nodes .coord_channel diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9f9f8d1a41..e6808a4b77 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2516,8 +2516,7 @@ fn mock_sign_epoch_25() { .iter() .map(|id| id.0) .collect(); - let signer_keys = signer_test.get_signer_public_keys(reward_cycle); - let signer_public_keys: Vec<_> = signer_keys.signers.into_values().collect(); + let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); assert_eq!(signer_slot_ids.len(), num_signers); let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); @@ -2731,8 +2730,7 @@ fn multiple_miners_mock_sign_epoch_25() { .iter() .map(|id| id.0) .collect(); - let signer_keys = signer_test.get_signer_public_keys(reward_cycle); - let signer_public_keys: Vec<_> = signer_keys.signers.into_values().collect(); + let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); assert_eq!(signer_slot_ids.len(), num_signers); let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs deleted file mode 100644 index 816db4c5dc..0000000000 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ /dev/null @@ -1,1155 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . -use std::collections::HashSet; -use std::sync::atomic::Ordering; -use std::time::{Duration, Instant}; -use std::{env, thread}; - -use clarity::boot_util::boot_code_id; -use clarity::vm::Value; -use libsigner::v1::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMessage}; -use libsigner::BlockProposal; -use rand::thread_rng; -use rand_core::RngCore; -use stacks::burnchains::Txid; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; -use stacks::chainstate::stacks::boot::{SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME}; -use stacks::chainstate::stacks::events::StackerDBChunksEvent; -use stacks::chainstate::stacks::miner::TransactionEvent; -use stacks::chainstate::stacks::{ - StacksPrivateKey, StacksTransaction, TransactionAnchorMode, TransactionAuth, - TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, -}; -use stacks::util_lib::strings::StacksString; -use stacks_common::bitvec::BitVec; -use stacks_common::codec::StacksMessageCodec; -use stacks_common::consts::CHAIN_ID_TESTNET; -use stacks_common::types::chainstate::{ - ConsensusHash, StacksAddress, StacksBlockId, StacksPublicKey, TrieHash, -}; -use stacks_common::util::hash::{hex_bytes, MerkleTree, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::MessageSignature; -use stacks_signer::client::{SignerSlotID, StacksClient}; -use stacks_signer::runloop::{RunLoopCommand, SignerCommand, SignerResult}; -use stacks_signer::v1::coordinator::CoordinatorSelector; -use stacks_signer::v1::stackerdb_manager::StackerDBManager; -use stacks_signer::v1::SpawnedSigner; -use tracing_subscriber::prelude::*; -use tracing_subscriber::{fmt, EnvFilter}; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; -use wsts::net::Message; -use wsts::state_machine::OperationResult; - -use super::SignerTest; -use crate::event_dispatcher::MinedNakamotoBlockEvent; -use crate::tests::nakamoto_integrations::{ - boot_to_epoch_3_reward_set, boot_to_epoch_3_reward_set_calculation_boundary, next_block_and, -}; -use crate::tests::neon_integrations::{next_block_and_wait, test_observer}; -use crate::tests::to_addr; -use crate::BurnchainController; - -impl SignerTest { - fn boot_to_epoch_3(&mut self, timeout: Duration) -> Point { - boot_to_epoch_3_reward_set( - &self.running_nodes.conf, - &self.running_nodes.blocks_processed, - &self.signer_stacks_private_keys, - &self.signer_stacks_private_keys, - &mut self.running_nodes.btc_regtest_controller, - Some(self.num_stacking_cycles), - ); - let dkg_vote = self.wait_for_dkg(timeout); - - // Advance and mine the DKG key block - self.run_until_epoch_3_boundary(); - - let reward_cycle = self.get_current_reward_cycle(); - let set_dkg = self - .stacks_client - .get_approved_aggregate_key(reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - assert_eq!(set_dkg, dkg_vote); - - let (vrfs_submitted, commits_submitted) = ( - self.running_nodes.vrfs_submitted.clone(), - self.running_nodes.commits_submitted.clone(), - ); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - info!("Successfully triggered first block to wake up the miner runloop."); - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); - info!("Ready to mine Nakamoto blocks!"); - set_dkg - } - - // Only call after already past the epoch 3.0 boundary - fn run_to_dkg(&mut self, timeout: Duration) -> Option { - let curr_reward_cycle = self.get_current_reward_cycle(); - let set_dkg = self - .stacks_client - .get_approved_aggregate_key(curr_reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - let nmb_blocks_to_mine_to_dkg = self.nmb_blocks_to_reward_set_calculation(); - let end_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1) // Must subtract 1 since get_headers_height returns current block height + 1 - .saturating_add(nmb_blocks_to_mine_to_dkg); - info!("Mining {nmb_blocks_to_mine_to_dkg} bitcoin block(s) to reach DKG calculation at bitcoin height {end_block_height}"); - for i in 1..=nmb_blocks_to_mine_to_dkg { - info!("Mining bitcoin block #{i} and nakamoto tenure of {nmb_blocks_to_mine_to_dkg}"); - self.mine_and_verify_confirmed_naka_block(&set_dkg, timeout); - } - if nmb_blocks_to_mine_to_dkg == 0 { - None - } else { - Some(self.wait_for_dkg(timeout)) - } - } - - // Only call after already past the epoch 3.0 boundary - fn run_until_burnchain_height_nakamoto( - &mut self, - timeout: Duration, - burnchain_height: u64, - ) -> Vec { - let mut points = vec![]; - let current_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height(); - let mut total_nmb_blocks_to_mine = burnchain_height.saturating_sub(current_block_height); - debug!("Mining {total_nmb_blocks_to_mine} Nakamoto block(s) to reach burnchain height {burnchain_height}"); - let mut nmb_blocks_to_reward_cycle = 0; - let mut blocks_to_dkg = self.nmb_blocks_to_reward_set_calculation(); - while total_nmb_blocks_to_mine > 0 && blocks_to_dkg > 0 { - if blocks_to_dkg > 0 && total_nmb_blocks_to_mine >= blocks_to_dkg { - let dkg = self.run_to_dkg(timeout); - total_nmb_blocks_to_mine -= blocks_to_dkg; - if dkg.is_some() { - points.push(dkg.unwrap()); - } - blocks_to_dkg = 0; - nmb_blocks_to_reward_cycle = self.nmb_blocks_to_reward_cycle_boundary( - self.get_current_reward_cycle().saturating_add(1), - ) - } - if total_nmb_blocks_to_mine >= nmb_blocks_to_reward_cycle { - let end_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1) // Must subtract 1 since get_headers_height returns current block height + 1 - .saturating_add(nmb_blocks_to_reward_cycle); - debug!("Mining {nmb_blocks_to_reward_cycle} Nakamoto block(s) to reach the next reward cycle boundary at {end_block_height}."); - for i in 1..=nmb_blocks_to_reward_cycle { - debug!("Mining Nakamoto block #{i} of {nmb_blocks_to_reward_cycle}"); - let curr_reward_cycle = self.get_current_reward_cycle(); - let set_dkg = self - .stacks_client - .get_approved_aggregate_key(curr_reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - self.mine_and_verify_confirmed_naka_block(&set_dkg, timeout); - } - total_nmb_blocks_to_mine -= nmb_blocks_to_reward_cycle; - nmb_blocks_to_reward_cycle = 0; - blocks_to_dkg = self.nmb_blocks_to_reward_set_calculation(); - } - } - for i in 1..=total_nmb_blocks_to_mine { - info!("Mining Nakamoto block #{i} of {total_nmb_blocks_to_mine} to reach {burnchain_height}"); - let curr_reward_cycle = self.get_current_reward_cycle(); - let set_dkg = self - .stacks_client - .get_approved_aggregate_key(curr_reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - self.mine_and_verify_confirmed_naka_block(&set_dkg, timeout); - } - points - } - - fn mine_and_verify_confirmed_naka_block( - &mut self, - agg_key: &Point, - timeout: Duration, - ) -> MinedNakamotoBlockEvent { - let new_block = self.mine_nakamoto_block(timeout); - let signer_sighash = new_block.signer_signature_hash.clone(); - let signature = self.wait_for_confirmed_block_v1(&signer_sighash, timeout); - assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes())); - new_block - } - - fn wait_for_dkg(&mut self, timeout: Duration) -> Point { - debug!("Waiting for DKG..."); - let mut key = Point::default(); - let dkg_now = Instant::now(); - for signer in self.spawned_signers.iter() { - let mut aggregate_public_key = None; - loop { - let results = signer - .res_recv - .recv_timeout(timeout) - .expect("failed to recv dkg results"); - for result in results { - match result { - SignerResult::OperationResult(OperationResult::Dkg(point)) => { - info!("Received aggregate_group_key {point}"); - aggregate_public_key = Some(point); - } - SignerResult::OperationResult(other) => { - panic!("{}", operation_panic_message(&other)) - } - SignerResult::StatusCheck(state) => { - panic!("Received status check result: {:?}", state); - } - } - } - if aggregate_public_key.is_some() || dkg_now.elapsed() > timeout { - break; - } - } - key = aggregate_public_key.expect(&format!( - "Failed to get aggregate public key within {timeout:?}" - )); - } - debug!("Finished waiting for DKG!"); - key - } - - fn generate_invalid_transactions(&self) -> Vec { - let host = self.running_nodes.conf.node.rpc_bind.clone(); - // Get the signer indices - let reward_cycle = self.get_current_reward_cycle(); - - let signer_private_key = self.signer_stacks_private_keys[0]; - - let vote_contract_id = boot_code_id(SIGNERS_VOTING_NAME, false); - let contract_addr = vote_contract_id.issuer.into(); - let contract_name = vote_contract_id.name.clone(); - - let signer_index = thread_rng().next_u64(); - let signer_index_arg = Value::UInt(signer_index as u128); - - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); - - let round = thread_rng().next_u64(); - let round_arg = Value::UInt(round as u128); - - let reward_cycle_arg = Value::UInt(reward_cycle as u128); - let valid_function_args = vec![ - signer_index_arg.clone(), - point_arg.clone(), - round_arg.clone(), - reward_cycle_arg.clone(), - ]; - - // Create a invalid transaction that is not a contract call - let invalid_not_contract_call = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: CHAIN_ID_TESTNET, - auth: TransactionAuth::from_p2pkh(&signer_private_key).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - let invalid_contract_address = StacksClient::build_unsigned_contract_call_transaction( - &StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&signer_private_key)), - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &valid_function_args, - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_contract_name = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - "bad-signers-contract-name".into(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &valid_function_args, - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_signers_vote_function = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - "some-other-function".into(), - &valid_function_args, - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_function_arg_signer_index = - StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &[ - point_arg.clone(), - point_arg.clone(), - round_arg.clone(), - reward_cycle_arg.clone(), - ], - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_function_arg_key = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &[ - signer_index_arg.clone(), - signer_index_arg.clone(), - round_arg.clone(), - reward_cycle_arg.clone(), - ], - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_function_arg_round = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &[ - signer_index_arg.clone(), - point_arg.clone(), - point_arg.clone(), - reward_cycle_arg.clone(), - ], - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_function_arg_reward_cycle = - StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &[ - signer_index_arg.clone(), - point_arg.clone(), - round_arg.clone(), - point_arg.clone(), - ], - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_nonce = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &valid_function_args, - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 0, // Old nonce - ) - .unwrap(); - - let invalid_stacks_client = StacksClient::new( - StacksPrivateKey::new(), - host, - "12345".to_string(), // That's amazing. I've got the same combination on my luggage! - false, - ); - let invalid_signer_tx = invalid_stacks_client - .build_unsigned_vote_for_aggregate_public_key(0, round, point, reward_cycle, 0) - .expect("FATAL: failed to build vote for aggregate public key"); - - let unsigned_txs = vec![ - invalid_nonce, - invalid_not_contract_call, - invalid_contract_name, - invalid_contract_address, - invalid_signers_vote_function, - invalid_function_arg_key, - invalid_function_arg_reward_cycle, - invalid_function_arg_round, - invalid_function_arg_signer_index, - invalid_signer_tx, - ]; - unsigned_txs - .into_iter() - .map(|unsigned| { - invalid_stacks_client - .sign_transaction(unsigned) - .expect("Failed to sign transaction") - }) - .collect() - } -} - -fn operation_panic_message(result: &OperationResult) -> String { - match result { - OperationResult::Sign(sig) => { - format!("Received Signature ({},{})", sig.R, sig.z) - } - OperationResult::SignTaproot(proof) => { - format!("Received SchnorrProof ({},{})", proof.r, proof.s) - } - OperationResult::DkgError(dkg_error) => { - format!("Received DkgError {:?}", dkg_error) - } - OperationResult::SignError(sign_error) => { - format!("Received SignError {}", sign_error) - } - OperationResult::Dkg(point) => { - format!("Received aggregate_group_key {point}") - } - } -} - -#[test] -#[ignore] -/// Test the signer can respond to external commands to perform DKG -fn dkg() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let timeout = Duration::from_secs(200); - let mut signer_test = SignerTest::new(10, vec![]); - info!("Boot to epoch 3.0 reward calculation..."); - boot_to_epoch_3_reward_set( - &signer_test.running_nodes.conf, - &signer_test.running_nodes.blocks_processed, - &signer_test.signer_stacks_private_keys, - &signer_test.signer_stacks_private_keys, - &mut signer_test.running_nodes.btc_regtest_controller, - Some(signer_test.num_stacking_cycles), - ); - - info!("Pox 4 activated and at epoch 3.0 reward set calculation (2nd block of its prepare phase)! Ready for signers to perform DKG and Sign!"); - // First wait for the automatically triggered DKG to complete - let key = signer_test.wait_for_dkg(timeout); - - info!("------------------------- Test DKG -------------------------"); - let reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); - - // Determine the coordinator of the current node height - info!("signer_runloop: spawn send commands to do dkg"); - let dkg_now = Instant::now(); - for signer in signer_test.spawned_signers.iter() { - signer - .cmd_send - .send(RunLoopCommand { - reward_cycle, - command: SignerCommand::Dkg, - }) - .expect("failed to send DKG command"); - } - let new_key = signer_test.wait_for_dkg(timeout); - let dkg_elapsed = dkg_now.elapsed(); - assert_ne!(new_key, key); - - info!("DKG Time Elapsed: {:.2?}", dkg_elapsed); -} - -#[test] -#[ignore] -/// Test the signer rejects requests to sign that do not come from a miner -fn sign_request_rejected() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - - info!("Creating invalid blocks to sign..."); - let header1 = NakamotoBlockHeader { - version: 1, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([0x04; 20]), - parent_block_id: StacksBlockId([0x05; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - timestamp: 8, - miner_signature: MessageSignature::empty(), - signer_signature: vec![], - pox_treatment: BitVec::zeros(1).unwrap(), - }; - let mut block1 = NakamotoBlock { - header: header1, - txs: vec![], - }; - let tx_merkle_root1 = { - let txid_vecs = block1 - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - - MerkleTree::::new(&txid_vecs).root() - }; - block1.header.tx_merkle_root = tx_merkle_root1; - - let header2 = NakamotoBlockHeader { - version: 1, - chain_length: 3, - burn_spent: 4, - consensus_hash: ConsensusHash([0x05; 20]), - parent_block_id: StacksBlockId([0x06; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), - state_index_root: TrieHash([0x08; 32]), - timestamp: 9, - miner_signature: MessageSignature::empty(), - signer_signature: vec![], - pox_treatment: BitVec::zeros(1).unwrap(), - }; - let mut block2 = NakamotoBlock { - header: header2, - txs: vec![], - }; - let tx_merkle_root2 = { - let txid_vecs = block2 - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - - MerkleTree::::new(&txid_vecs).root() - }; - block2.header.tx_merkle_root = tx_merkle_root2; - - let timeout = Duration::from_secs(200); - let mut signer_test: SignerTest = SignerTest::new(10, vec![]); - let _key = signer_test.boot_to_epoch_3(timeout); - - info!("------------------------- Test Sign -------------------------"); - let reward_cycle = signer_test.get_current_reward_cycle(); - let block_proposal_1 = BlockProposal { - block: block1.clone(), - burn_height: 0, - reward_cycle, - }; - let block_proposal_2 = BlockProposal { - block: block2.clone(), - burn_height: 0, - reward_cycle, - }; - // Determine the coordinator of the current node height - info!("signer_runloop: spawn send commands to do sign"); - let sign_now = Instant::now(); - let sign_command = RunLoopCommand { - reward_cycle, - command: SignerCommand::Sign { - block_proposal: block_proposal_1, - is_taproot: false, - merkle_root: None, - }, - }; - let sign_taproot_command = RunLoopCommand { - reward_cycle, - command: SignerCommand::Sign { - block_proposal: block_proposal_2, - is_taproot: true, - merkle_root: None, - }, - }; - for signer in signer_test.spawned_signers.iter() { - signer - .cmd_send - .send(sign_command.clone()) - .expect("failed to send sign command"); - signer - .cmd_send - .send(sign_taproot_command.clone()) - .expect("failed to send sign taproot command"); - } - - // Don't wait for signatures. Because the block miner is acting as - // the coordinator, signers won't directly sign commands issued by someone - // other than the miner. Rather, they'll just broadcast their rejections. - - let sign_elapsed = sign_now.elapsed(); - - info!("------------------------- Test Block Rejected -------------------------"); - - // Verify the signers rejected the proposed block - let t_start = Instant::now(); - let signer_message = loop { - assert!( - t_start.elapsed() < Duration::from_secs(30), - "Timed out while waiting for signers block response stacker db event" - ); - - let nakamoto_blocks = test_observer::get_stackerdb_chunks(); - if let Some(message) = find_block_response(nakamoto_blocks) { - break message; - } - thread::sleep(Duration::from_secs(1)); - }; - if let SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) = signer_message { - assert!(matches!( - rejection.reason_code, - RejectCode::ValidationFailed(_) - )); - } else { - panic!("Received unexpected message: {:?}", &signer_message); - } - info!("Sign Time Elapsed: {:.2?}", sign_elapsed); -} - -#[test] -#[ignore] -/// Test that a signer can be offline when a DKG round has commenced and -/// can rejoin the DKG round after it has restarted -fn delayed_dkg() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let timeout = Duration::from_secs(200); - let num_signers = 3; - let mut signer_test = SignerTest::new(num_signers, vec![]); - boot_to_epoch_3_reward_set_calculation_boundary( - &signer_test.running_nodes.conf, - &signer_test.running_nodes.blocks_processed, - &signer_test.signer_stacks_private_keys, - &signer_test.signer_stacks_private_keys, - &mut signer_test.running_nodes.btc_regtest_controller, - Some(signer_test.num_stacking_cycles), - ); - let reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); - let public_keys = signer_test.get_signer_public_keys(reward_cycle); - let coordinator_selector = CoordinatorSelector::from(public_keys); - let (_, coordinator_public_key) = coordinator_selector.get_coordinator(); - let coordinator_public_key = - StacksPublicKey::from_slice(coordinator_public_key.to_bytes().as_slice()).unwrap(); - let signer_slot_ids: Vec<_> = (0..num_signers) - .into_iter() - .map(|i| SignerSlotID(i as u32)) - .collect(); - let mut stackerdbs: Vec<_> = signer_slot_ids - .iter() - .map(|i| { - StackerDBManager::new( - &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // Doesn't matter what key we use. We are just reading, not writing - false, - reward_cycle, - *i, - ) - }) - .collect(); - info!("------------------------- Stop Signers -------------------------"); - let mut to_stop = None; - for (idx, key) in signer_test.signer_stacks_private_keys.iter().enumerate() { - let public_key = StacksPublicKey::from_private(key); - if public_key == coordinator_public_key { - // Do not stop the coordinator. We want coordinator to start a DKG round - continue; - } - // Only stop one signer - to_stop = Some(idx); - break; - } - let signer_idx = to_stop.expect("Failed to find a signer to stop"); - let signer_key = signer_test.stop_signer(signer_idx); - debug!( - "Removed signer {signer_idx} with key: {:?}, {}", - signer_key, - signer_key.to_hex() - ); - info!("------------------------- Start DKG -------------------------"); - info!("Waiting for DKG to start..."); - // Advance one more to trigger DKG - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - timeout.as_secs(), - || Ok(true), - ) - .expect("Failed to mine bitcoin block"); - // Do not proceed until we guarantee that DKG was triggered - let start_time = Instant::now(); - loop { - let stackerdb = stackerdbs.first_mut().unwrap(); - let dkg_packets: Vec<_> = stackerdb - .get_dkg_packets(&signer_slot_ids) - .expect("Failed to get dkg packets"); - let begin_packets: Vec<_> = dkg_packets - .iter() - .filter_map(|packet| { - if matches!(packet.msg, Message::DkgBegin(_)) { - Some(packet) - } else { - None - } - }) - .collect(); - if !begin_packets.is_empty() { - break; - } - assert!( - start_time.elapsed() < Duration::from_secs(30), - "Timed out waiting for DKG to be triggered" - ); - } - - info!("------------------------- Restart Stopped Signer -------------------------"); - - signer_test.restart_signer(signer_idx, signer_key); - - info!("------------------------- Wait for DKG -------------------------"); - let key = signer_test.wait_for_dkg(timeout); - let mut transactions = HashSet::with_capacity(num_signers); - let start_time = Instant::now(); - while transactions.len() < num_signers { - for stackerdb in stackerdbs.iter_mut() { - let current_transactions = stackerdb - .get_current_transactions() - .expect("Failed getting current transactions for signer slot id"); - for tx in current_transactions { - transactions.insert(tx.txid()); - } - } - assert!( - start_time.elapsed() < Duration::from_secs(30), - "Failed to retrieve pending vote transactions within timeout" - ); - } - - // Make sure transactions get mined - let start_time = Instant::now(); - while !transactions.is_empty() { - assert!( - start_time.elapsed() < Duration::from_secs(30), - "Failed to mine transactions within timeout" - ); - next_block_and_wait( - &mut signer_test.running_nodes.btc_regtest_controller, - &signer_test.running_nodes.blocks_processed, - ); - let blocks = test_observer::get_blocks(); - for block in blocks.iter() { - let txs = block.get("transactions").unwrap().as_array().unwrap(); - for tx in txs.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - transactions.remove(&parsed.txid()); - } - } - } - - // Make sure DKG did get set - assert_eq!( - key, - signer_test - .stacks_client - .get_approved_aggregate_key(reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found") - ); -} - -pub fn find_block_response(chunk_events: Vec) -> Option { - for event in chunk_events.into_iter() { - if event.contract_id.name.as_str() - == &format!("signers-1-{}", MessageSlotID::BlockResponse.to_u8()) - || event.contract_id.name.as_str() - == &format!("signers-0-{}", MessageSlotID::BlockResponse.to_u8()) - { - let Some(data) = event.modified_slots.first() else { - continue; - }; - let msg = SignerMessage::consensus_deserialize(&mut data.data.as_slice()).unwrap(); - return Some(msg); - } - } - None -} - -#[test] -#[ignore] -/// Test that a signer can respond to a miners request for a signature on a block proposal -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is advanced to epoch 2.5. forcibly triggering DKG to set the key correctly -/// The stacks node is next advanced to epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The node attempts to mine a Nakamoto block, sending a block to the observing signers via the -/// .miners stacker db instance. The signers submit the block to the stacks node for verification. -/// Upon receiving a Block Validation response approving the block, the signers perform a signing -/// round across its signature hash and return it back to the miner. -/// -/// Test Assertion: -/// Signers return an operation result containing a valid signature across the miner's Nakamoto block's signature hash. -/// Signers broadcasted a signature across the miner's proposed block back to the respective .signers-XXX-YYY contract. -/// Miner appends the signature to the block and finishes mininig it. -fn block_proposal() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); - let timeout = Duration::from_secs(30); - let short_timeout = Duration::from_secs(30); - - let key = signer_test.boot_to_epoch_3(timeout); - signer_test.mine_nakamoto_block(timeout); - - info!("------------------------- Test Block Proposal -------------------------"); - // Verify that the signers accepted the proposed block, sending back a validate ok response - let proposed_signer_signature_hash = signer_test - .wait_for_validate_ok_response(short_timeout) - .signer_signature_hash; - - info!("------------------------- Test Block Signed -------------------------"); - // Verify that the signers signed the proposed block - let signature = - signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, timeout); - assert!(signature - .0 - .verify(&key, proposed_signer_signature_hash.as_bytes())); - - // Test prometheus metrics response - #[cfg(feature = "monitoring_prom")] - { - let metrics_response = signer_test.get_signer_metrics(); - - // Because 5 signers are running in the same process, the prometheus metrics - // are incremented once for every signer. This is why we expect the metric to be - // `5`, even though there is only one block proposed. - let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); - assert!(metrics_response.contains(&expected_result)); - } - signer_test.shutdown(); -} - -#[test] -#[ignore] -/// Test that signers can handle a transition between Nakamoto reward cycles -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is advanced to epoch 2.5, triggering a DKG round. The stacks node is then advanced -/// to Epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The node mines 2 full Nakamoto reward cycles, sending blocks to observing signers to sign and return. -/// -/// Test Assertion: -/// Signers can perform DKG and sign blocks across Nakamoto reward cycles. -fn mine_2_nakamoto_reward_cycles() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let nmb_reward_cycles = 2; - let mut signer_test: SignerTest = SignerTest::new(5, vec![]); - let timeout = Duration::from_secs(200); - let first_dkg = signer_test.boot_to_epoch_3(timeout); - let curr_reward_cycle = signer_test.get_current_reward_cycle(); - // Mine 2 full Nakamoto reward cycles (epoch 3 starts in the middle of one, hence the + 1) - let next_reward_cycle = curr_reward_cycle.saturating_add(1); - let final_reward_cycle = next_reward_cycle.saturating_add(nmb_reward_cycles); - let final_reward_cycle_height_boundary = signer_test - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(final_reward_cycle) - .saturating_sub(1); - - info!("------------------------- Test Mine 2 Nakamoto Reward Cycles -------------------------"); - let dkgs = signer_test - .run_until_burnchain_height_nakamoto(timeout, final_reward_cycle_height_boundary); - assert_eq!(dkgs.len() as u64, nmb_reward_cycles.saturating_add(1)); // We will have mined the DKG vote for the following reward cycle - let last_dkg = dkgs - .last() - .expect(&format!( - "Failed to reach DKG for reward cycle {final_reward_cycle_height_boundary}" - )) - .clone(); - assert_ne!(first_dkg, last_dkg); - - let set_dkg = signer_test - .stacks_client - .get_approved_aggregate_key(final_reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - assert_eq!(set_dkg, last_dkg); - - let current_burnchain_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); - assert_eq!(current_burnchain_height, final_reward_cycle_height_boundary); - signer_test.shutdown(); -} - -#[test] -#[ignore] -/// Test that signers will accept a miners block proposal and sign it if it contains all expected transactions, -/// filtering invalid transactions from the block requirements -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is advanced to epoch 2.5, triggering a DKG round. The stacks node is then advanced -/// to Epoch 3.0 boundary to allow block signing. It then advances to the prepare phase of the next reward cycle -/// to enable Nakamoto signers to look at the next signer transactions to compare against a proposed block. -/// -/// Test Execution: -/// The node attempts to mine a Nakamoto tenure, sending a block to the observing signers via the -/// .miners stacker db instance. The signers submit the block to the stacks node for verification. -/// Upon receiving a Block Validation response approving the block, the signers verify that it contains -/// all of the NEXT signers' expected transactions, being sure to filter out any invalid transactions -/// from stackerDB as well. -/// -/// Test Assertion: -/// Miner proposes a block to the signers containing all expected transactions. -/// Signers broadcast block approval with a signature back to the waiting miner. -/// Miner includes the signers' signature in the block and finishes mining it. -fn filter_bad_transactions() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - // Advance to the prepare phase of a post epoch 3.0 reward cycle to force signers to look at the next signer transactions to compare against a proposed block - let mut signer_test: SignerTest = SignerTest::new(5, vec![]); - let timeout = Duration::from_secs(200); - let current_signers_dkg = signer_test.boot_to_epoch_3(timeout); - let next_signers_dkg = signer_test - .run_to_dkg(timeout) - .expect("Failed to run to DKG"); - assert_ne!(current_signers_dkg, next_signers_dkg); - - info!("------------------------- Submit Invalid Transactions -------------------------"); - - let signer_private_key = signer_test - .signer_stacks_private_keys - .iter() - .find(|pk| { - let addr = to_addr(pk); - addr == *signer_test.stacks_client.get_signer_address() - }) - .cloned() - .expect("Cannot find signer private key for signer id 1"); - let next_reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); - // Must submit to the NEXT reward cycle slots as they are the ones looked at by the CURRENT miners - let signer_index = signer_test.get_signer_index(next_reward_cycle); - let mut stackerdb = StackerDBManager::new( - &signer_test.running_nodes.conf.node.rpc_bind, - signer_private_key, - false, - next_reward_cycle, - signer_index, - ); - - debug!( - "Signer address is {}", - &signer_test.stacks_client.get_signer_address() - ); - - let invalid_txs = signer_test.generate_invalid_transactions(); - let invalid_txids: HashSet = invalid_txs.iter().map(|tx| tx.txid()).collect(); - - // Submit transactions to stackerdb for the signers and miners to pick up during block verification - stackerdb - .send_message_with_retry(SignerMessage::Transactions(invalid_txs)) - .expect("Failed to write expected transactions to stackerdb"); - - info!("------------------------- Verify Nakamoto Block Mined -------------------------"); - let mined_block_event = - signer_test.mine_and_verify_confirmed_naka_block(¤t_signers_dkg, timeout); - for tx_event in &mined_block_event.tx_events { - let TransactionEvent::Success(tx_success) = tx_event else { - panic!("Received unexpected transaction event"); - }; - // Since we never broadcast the "invalid" transaction to the mempool and the transaction did not come from a signer or had an invalid nonce - // the miner should never construct a block that contains them and signers should still approve it - assert!( - !invalid_txids.contains(&tx_success.txid), - "Miner included an invalid transaction in the block" - ); - } - signer_test.shutdown(); -} - -#[test] -#[ignore] -/// Test that signers will be able to continue their operations even if one signer is restarted. -/// -/// Test Setup: -/// The test spins up three stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is advanced to epoch 2.5, triggering a DKG round. The stacks node is then advanced -/// to Epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The signers sign one block as usual. -/// Then, one of the signers is restarted. -/// Finally, the signers sign another block with the restarted signer. -/// -/// Test Assertion: -/// The signers are able to produce a valid signature after one of them is restarted. -fn sign_after_signer_reboot() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let num_signers = 3; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); - let timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(30); - - let key = signer_test.boot_to_epoch_3(timeout); - - info!("------------------------- Test Mine Block -------------------------"); - - signer_test.mine_nakamoto_block(timeout); - let proposed_signer_signature_hash = signer_test - .wait_for_validate_ok_response(short_timeout) - .signer_signature_hash; - let signature = - signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); - - assert!( - signature.verify(&key, proposed_signer_signature_hash.0.as_slice()), - "Signature verification failed" - ); - - info!("------------------------- Restart one Signer -------------------------"); - let signer_key = signer_test.stop_signer(2); - debug!( - "Removed signer 2 with key: {:?}, {}", - signer_key, - signer_key.to_hex() - ); - signer_test.restart_signer(2, signer_key); - - info!("------------------------- Test Mine Block after restart -------------------------"); - - let last_block = signer_test.mine_nakamoto_block(timeout); - let proposed_signer_signature_hash = signer_test - .wait_for_validate_ok_response(short_timeout) - .signer_signature_hash; - let frost_signature = - signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); - - // Check that the latest block's bitvec is all 1's - assert_eq!( - last_block.signer_bitvec, - serde_json::to_value(BitVec::<4000>::ones(num_signers as u16).unwrap()) - .expect("Failed to serialize BitVec") - .as_str() - .expect("Failed to serialize BitVec") - ); - - assert!( - frost_signature.verify(&key, proposed_signer_signature_hash.0.as_slice()), - "Signature verification failed" - ); - - signer_test.shutdown(); -} From 4f14f4eda8e318e05a68461e776efed0f08813ee Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 10:23:54 -0700 Subject: [PATCH 1220/1400] Remove wsts from stacks-signer Signed-off-by: Jacinta Ferrant --- Cargo.lock | 1 - libsigner/src/libsigner.rs | 2 - libsigner/src/runloop.rs | 43 +- libsigner/src/tests/http.rs | 12 +- libsigner/src/tests/mod.rs | 41 +- libsigner/src/v1/messages.rs | 1869 ----------------- libsigner/src/v1/mod.rs | 17 - stacks-signer/Cargo.toml | 1 - stacks-signer/src/client/mod.rs | 36 - stacks-signer/src/client/stacks_client.rs | 243 +-- stacks-signer/src/lib.rs | 18 +- stacks-signer/src/runloop.rs | 57 +- stacks-signer/src/signerdb.rs | 51 +- stacks-signer/src/v0/signer.rs | 13 +- stackslib/src/chainstate/nakamoto/miner.rs | 4 +- .../stacks-node/src/nakamoto_node/miner.rs | 139 +- testnet/stacks-node/src/tests/signer/mod.rs | 11 +- 17 files changed, 74 insertions(+), 2484 deletions(-) delete mode 100644 libsigner/src/v1/messages.rs delete mode 100644 libsigner/src/v1/mod.rs diff --git a/Cargo.lock b/Cargo.lock index e1d78fec15..d16284fa5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3473,7 +3473,6 @@ dependencies = [ "tracing", "tracing-subscriber", "url", - "wsts", ] [[package]] diff --git a/libsigner/src/libsigner.rs b/libsigner/src/libsigner.rs index 0da4e68a8f..878d428bfc 100644 --- a/libsigner/src/libsigner.rs +++ b/libsigner/src/libsigner.rs @@ -44,8 +44,6 @@ mod session; mod signer_set; /// v0 signer related code pub mod v0; -/// v1 signer related code -pub mod v1; use std::cmp::Eq; use std::fmt::Debug; diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index d4710f82e6..0a5ed49a6d 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -41,7 +41,7 @@ const STDERR: i32 = 2; /// Trait describing the needful components of a top-level runloop. /// This is where the signer business logic would go. /// Implement this, and you get all the multithreaded setup for free. -pub trait SignerRunLoop { +pub trait SignerRunLoop { /// Hint to set how long to wait for new events fn set_event_timeout(&mut self, timeout: Duration); /// Getter for the event poll timeout @@ -49,12 +49,7 @@ pub trait SignerRunLoop { /// Run one pass of the event loop, given new Signer events discovered since the last pass. /// Returns Some(R) if this is the final pass -- the runloop evaluated to R /// Returns None to keep running. - fn run_one_pass( - &mut self, - event: Option>, - cmd: Option, - res: &Sender, - ) -> Option; + fn run_one_pass(&mut self, event: Option>, res: &Sender) -> Option; /// This is the main loop body for the signer. It continuously receives events from /// `event_recv`, polling for up to `self.get_event_timeout()` units of time. Once it has @@ -66,7 +61,6 @@ pub trait SignerRunLoop { fn main_loop( &mut self, event_recv: Receiver>, - command_recv: Receiver, result_send: Sender, mut event_stop_signaler: EVST, ) -> Option { @@ -81,11 +75,7 @@ pub trait SignerRunLoop { return None; } }; - // Do not block for commands - let next_command_opt = command_recv.try_recv().ok(); - if let Some(final_state) = - self.run_one_pass(next_event_opt, next_command_opt, &result_send) - { + if let Some(final_state) = self.run_one_pass(next_event_opt, &result_send) { info!("Runloop exit; signaling event-receiver to stop"); event_stop_signaler.send(); return Some(final_state); @@ -95,13 +85,11 @@ pub trait SignerRunLoop { } /// The top-level signer implementation -pub struct Signer { +pub struct Signer { /// the runloop itself signer_loop: Option, /// the event receiver to use event_receiver: Option, - /// the command receiver to use - command_receiver: Option>, /// the result sender to use result_sender: Option>, /// phantom data for the codec @@ -193,18 +181,12 @@ pub fn set_runloop_signal_handler(mut st }).expect("FATAL: failed to set signal handler"); } -impl Signer { +impl Signer { /// Create a new signer with the given runloop and event receiver. - pub fn new( - runloop: SL, - event_receiver: EV, - command_receiver: Receiver, - result_sender: Sender, - ) -> Signer { + pub fn new(runloop: SL, event_receiver: EV, result_sender: Sender) -> Signer { Signer { signer_loop: Some(runloop), event_receiver: Some(event_receiver), - command_receiver: Some(command_receiver), result_sender: Some(result_sender), phantom_data: PhantomData, } @@ -212,12 +194,11 @@ impl Signer { } impl< - CMD: Send + 'static, R: Send + 'static, T: SignerEventTrait + 'static, - SL: SignerRunLoop + Send + 'static, + SL: SignerRunLoop + Send + 'static, EV: EventReceiver + Send + 'static, - > Signer + > Signer { /// This is a helper function to spawn both the runloop and event receiver in their own /// threads. Advanced signers may not need this method, and instead opt to run the receiver @@ -234,10 +215,6 @@ impl< .event_receiver .take() .ok_or(EventError::AlreadyRunning)?; - let command_receiver = self - .command_receiver - .take() - .ok_or(EventError::AlreadyRunning)?; let result_sender = self .result_sender .take() @@ -266,9 +243,7 @@ impl< let runloop_thread = thread::Builder::new() .name(format!("signer_runloop:{bind_port}")) .stack_size(THREAD_STACK_SIZE) - .spawn(move || { - signer_loop.main_loop(event_recv, command_receiver, result_sender, stop_signaler) - }) + .spawn(move || signer_loop.main_loop(event_recv, result_sender, stop_signaler)) .map_err(|e| { error!("SignerRunLoop failed to start: {:?}", &e); ret_stop_signaler.send(); diff --git a/libsigner/src/tests/http.rs b/libsigner/src/tests/http.rs index d0f3887b45..b31fb042e8 100644 --- a/libsigner/src/tests/http.rs +++ b/libsigner/src/tests/http.rs @@ -25,7 +25,7 @@ use crate::http::{decode_http_body, decode_http_request, decode_http_response, r #[test] fn test_decode_http_request_ok() { - let tests = vec![ + let tests = [ ("GET /foo HTTP/1.1\r\nHost: localhost:6270\r\n\r\n", ("GET", "/foo", vec![("host", "localhost:6270")])), ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nFoo: Bar\r\n\r\n", @@ -61,7 +61,7 @@ fn test_decode_http_request_ok() { #[test] fn test_decode_http_request_err() { - let tests = vec![ + let tests = [ ( "GET /foo HTTP/1.1\r\n", EventError::Deserialize("".to_string()), @@ -99,7 +99,7 @@ fn test_decode_http_request_err() { #[test] fn test_decode_http_response_ok() { - let tests = vec![ + let tests = [ ("HTTP/1.1 200 OK\r\nContent-Type: application/octet-stream\r\nContent-Length: 123\r\nX-Request-ID: 0\r\n\r\n", vec![("content-type", "application/octet-stream"), ("content-length", "123"), ("x-request-id", "0")]), ("HTTP/1.1 200 Ok\r\nContent-Type: application/octet-stream\r\nTransfer-encoding: chunked\r\nX-Request-ID: 0\r\n\r\n", @@ -123,7 +123,7 @@ fn test_decode_http_response_ok() { #[test] fn test_decode_http_response_err() { - let tests = vec![ + let tests = [ ("HTTP/1.1 400 Bad Request\r\nContent-Type: application/json\r\nContent-Length: 456\r\nFoo: Bar\r\nX-Request-ID: 0\r\n\r\n", RPCError::HttpError(400)), ("HTTP/1.1 200", @@ -223,7 +223,7 @@ impl Write for MockHTTPSocket { #[test] fn test_run_http_request_with_body() { - let tests = vec![ + let tests = [ ("GET", "/test-no-content-type-and-no-body", None, vec![]), ( "GET", @@ -288,7 +288,7 @@ fn test_run_http_request_with_body() { #[test] fn test_run_http_request_no_body() { - let tests = vec![ + let tests = [ ("GET", "/test-no-content-type-and-no-body", None, vec![]), ( "GET", diff --git a/libsigner/src/tests/mod.rs b/libsigner/src/tests/mod.rs index fbe1e59089..f0361592ba 100644 --- a/libsigner/src/tests/mod.rs +++ b/libsigner/src/tests/mod.rs @@ -24,11 +24,16 @@ use std::time::Duration; use std::{mem, thread}; use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::chainstate::stacks::boot::SIGNERS_NAME; use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; use blockstack_lib::util_lib::boot::boot_code_id; +use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; +use clarity::util::hash::Sha512Trunc256Sum; +use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::QualifiedContractIdentifier; use libstackerdb::StackerDBChunkData; +use stacks_common::bitvec::BitVec; use stacks_common::codec::{ read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError, StacksMessageCodec, @@ -38,8 +43,8 @@ use stacks_common::util::sleep_ms; use wsts::net::{DkgBegin, Packet}; use crate::events::{SignerEvent, SignerEventTrait}; -use crate::v1::messages::SignerMessage; -use crate::{Signer, SignerEventReceiver, SignerRunLoop}; +use crate::v0::messages::{BlockRejection, SignerMessage}; +use crate::{BlockProposal, Signer, SignerEventReceiver, SignerRunLoop}; /// Simple runloop implementation. It receives `max_events` events and returns `events` from the /// last call to `run_one_pass` as its final state. @@ -63,7 +68,7 @@ enum Command { Empty, } -impl SignerRunLoop>, Command, T> for SimpleRunLoop { +impl SignerRunLoop>, T> for SimpleRunLoop { fn set_event_timeout(&mut self, timeout: Duration) { self.poll_timeout = timeout; } @@ -75,7 +80,6 @@ impl SignerRunLoop>, Command, T> for Sim fn run_one_pass( &mut self, event: Option>, - _cmd: Option, _res: &Sender>>, ) -> Option>> { debug!("Got event: {:?}", &event); @@ -99,16 +103,34 @@ impl SignerRunLoop>, Command, T> for Sim fn test_simple_signer() { let contract_id = NakamotoSigners::make_signers_db_contract_id(0, 0, false); let ev = SignerEventReceiver::new(false); - let (_cmd_send, cmd_recv) = channel(); let (res_send, _res_recv) = channel(); let max_events = 5; - let mut signer = Signer::new(SimpleRunLoop::new(max_events), ev, cmd_recv, res_send); + let mut signer = Signer::new(SimpleRunLoop::new(max_events), ev, res_send); let endpoint: SocketAddr = "127.0.0.1:30000".parse().unwrap(); let mut chunks = vec![]; + let block_proposal = BlockProposal { + block: NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 10, + burn_spent: 10, + consensus_hash: ConsensusHash([0; 20]), + parent_block_id: StacksBlockId([0; 32]), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: TrieHash([0; 32]), + timestamp: 11, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs: vec![], + }, + burn_height: 2, + reward_cycle: 1, + }; for i in 0..max_events { let privk = Secp256k1PrivateKey::new(); - let msg = wsts::net::Message::DkgBegin(DkgBegin { dkg_id: 0 }); - let message = SignerMessage::Packet(Packet { msg, sig: vec![] }); + let message = SignerMessage::BlockProposal(block_proposal.clone()); let message_bytes = message.serialize_to_vec(); let mut chunk = StackerDBChunkData::new(i as u32, 1, message_bytes); chunk.sign(&privk).unwrap(); @@ -178,10 +200,9 @@ fn test_simple_signer() { #[test] fn test_status_endpoint() { let ev = SignerEventReceiver::new(false); - let (_cmd_send, cmd_recv) = channel(); let (res_send, _res_recv) = channel(); let max_events = 1; - let mut signer = Signer::new(SimpleRunLoop::new(max_events), ev, cmd_recv, res_send); + let mut signer = Signer::new(SimpleRunLoop::new(max_events), ev, res_send); let endpoint: SocketAddr = "127.0.0.1:31000".parse().unwrap(); // simulate a node that's trying to push data diff --git a/libsigner/src/v1/messages.rs b/libsigner/src/v1/messages.rs deleted file mode 100644 index b412d9a66f..0000000000 --- a/libsigner/src/v1/messages.rs +++ /dev/null @@ -1,1869 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Messages in the signer-miner interaction have a multi-level hierarchy. -//! Signers send messages to each other through Packet messages. These messages, -//! as well as `BlockResponse`, `Transactions`, and `DkgResults` messages are stored -//! StackerDBs based on the `MessageSlotID` for the particular message type. This is a -//! shared identifier space between the four message kinds and their subtypes. -//! -//! These four message kinds are differentiated with a `SignerMessageTypePrefix` -//! and the `SignerMessage` enum. - -use std::fmt::{Debug, Display}; -use std::io::{Read, Write}; -use std::net::{SocketAddr, TcpListener, TcpStream}; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::mpsc::Sender; -use std::sync::Arc; - -use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; -use blockstack_lib::chainstate::nakamoto::NakamotoBlock; -use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; -use blockstack_lib::chainstate::stacks::{StacksTransaction, ThresholdSignature}; -use blockstack_lib::net::api::postblock_proposal::{ - BlockValidateReject, BlockValidateResponse, ValidateRejectCode, -}; -use blockstack_lib::util_lib::boot::boot_code_id; -use clarity::util::retry::BoundReader; -use clarity::vm::types::serialization::SerializationError; -use clarity::vm::types::QualifiedContractIdentifier; -use hashbrown::{HashMap, HashSet}; -use serde::{Deserialize, Serialize}; -use stacks_common::codec::{ - read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError, - StacksMessageCodec, -}; -use stacks_common::consts::SIGNER_SLOTS_PER_USER; -use stacks_common::util::hash::Sha512Trunc256Sum; -use tiny_http::{ - Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, -}; -use wsts::common::{PolyCommitment, PublicNonce, Signature, SignatureShare, TupleProof}; -use wsts::curve::point::{Compressed, Point}; -use wsts::curve::scalar::Scalar; -use wsts::net::{ - BadPrivateShare, DkgBegin, DkgEnd, DkgEndBegin, DkgFailure, DkgPrivateBegin, DkgPrivateShares, - DkgPublicShares, DkgStatus, Message, NonceRequest, NonceResponse, Packet, - SignatureShareRequest, SignatureShareResponse, -}; -use wsts::schnorr::ID; -use wsts::state_machine::{signer, SignError}; - -use crate::http::{decode_http_body, decode_http_request}; -use crate::{EventError, MessageSlotID as MessageSlotIDTrait, SignerMessage as SignerMessageTrait}; - -define_u8_enum!( -/// Enum representing the stackerdb message identifier: this is -/// the contract index in the signers contracts (i.e., X in signers-0-X) -MessageSlotID { - /// DkgBegin message - DkgBegin = 0, - /// DkgPrivateBegin - DkgPrivateBegin = 1, - /// DkgEndBegin - DkgEndBegin = 2, - /// DkgEnd - DkgEnd = 3, - /// DkgPublicshares - DkgPublicShares = 4, - /// DkgPrivateShares - DkgPrivateShares = 5, - /// NonceRequest - NonceRequest = 6, - /// NonceResponse - NonceResponse = 7, - /// SignatureShareRequest - SignatureShareRequest = 8, - /// SignatureShareResponse - SignatureShareResponse = 9, - /// Block proposal responses for miners to observe - BlockResponse = 10, - /// Transactions list for miners and signers to observe - Transactions = 11, - /// DKG Results - DkgResults = 12, - /// Persisted encrypted signer state containing DKG shares - EncryptedSignerState = 13 -}); - -impl MessageSlotIDTrait for MessageSlotID { - fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { - NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) - } - fn all() -> &'static [Self] { - MessageSlotID::ALL - } -} - -impl SignerMessageTrait for SignerMessage { - fn msg_id(&self) -> Option { - Some(self.msg_id()) - } -} - -define_u8_enum!( -/// Enum representing the signer message type prefix -SignerMessageTypePrefix { - /// A block response message - BlockResponse = 0, - /// A wsts packet message - Packet = 1, - /// A list of transactions that a signer cares about - Transactions = 2, - /// The results of a successful DKG - DkgResults = 3, - /// The encrypted state of the signer to be persisted - EncryptedSignerState = 4 -}); - -#[cfg_attr(test, mutants::skip)] -impl MessageSlotID { - /// Return the StackerDB contract corresponding to messages of this type - pub fn stacker_db_contract( - &self, - mainnet: bool, - reward_cycle: u64, - ) -> QualifiedContractIdentifier { - NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) - } - - /// Return the u32 identifier for the message slot (used to index the contract that stores it) - pub fn to_u32(&self) -> u32 { - self.to_u8().into() - } -} - -impl Display for MessageSlotID { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}({})", self, self.to_u8()) - } -} - -impl TryFrom for SignerMessageTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown signer message type prefix: {value}")) - }) - } -} - -impl From<&SignerMessage> for SignerMessageTypePrefix { - #[cfg_attr(test, mutants::skip)] - fn from(message: &SignerMessage) -> Self { - match message { - SignerMessage::Packet(_) => SignerMessageTypePrefix::Packet, - SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, - SignerMessage::Transactions(_) => SignerMessageTypePrefix::Transactions, - SignerMessage::DkgResults { .. } => SignerMessageTypePrefix::DkgResults, - SignerMessage::EncryptedSignerState(_) => SignerMessageTypePrefix::EncryptedSignerState, - } - } -} - -define_u8_enum!( -/// Enum representing the message type prefix -MessageTypePrefix { - /// DkgBegin message - DkgBegin = 0, - /// DkgPrivateBegin message - DkgPrivateBegin = 1, - /// DkgEndBegin message - DkgEndBegin = 2, - /// DkgEnd message - DkgEnd = 3, - /// DkgPublicShares message - DkgPublicShares = 4, - /// DkgPrivateShares message - DkgPrivateShares = 5, - /// NonceRequest message - NonceRequest = 6, - /// NonceResponse message - NonceResponse = 7, - /// SignatureShareRequest message - SignatureShareRequest = 8, - /// SignatureShareResponse message - SignatureShareResponse = 9 -}); - -impl From<&Message> for MessageTypePrefix { - fn from(msg: &Message) -> Self { - match msg { - Message::DkgBegin(_) => MessageTypePrefix::DkgBegin, - Message::DkgPrivateBegin(_) => MessageTypePrefix::DkgPrivateBegin, - Message::DkgEndBegin(_) => MessageTypePrefix::DkgEndBegin, - Message::DkgEnd(_) => MessageTypePrefix::DkgEnd, - Message::DkgPublicShares(_) => MessageTypePrefix::DkgPublicShares, - Message::DkgPrivateShares(_) => MessageTypePrefix::DkgPrivateShares, - Message::NonceRequest(_) => MessageTypePrefix::NonceRequest, - Message::NonceResponse(_) => MessageTypePrefix::NonceResponse, - Message::SignatureShareRequest(_) => MessageTypePrefix::SignatureShareRequest, - Message::SignatureShareResponse(_) => MessageTypePrefix::SignatureShareResponse, - } - } -} - -impl TryFrom for MessageTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown packet type prefix: {value}")) - }) - } -} - -define_u8_enum!( -/// Enum representing the reject code type prefix -RejectCodeTypePrefix { - /// Validation failed - ValidationFailed = 0, - /// Signed rejection - SignedRejection = 1, - /// Insufficient signers - InsufficientSigners = 2, - /// Missing transactions - MissingTransactions = 3, - /// Connectivity issues - ConnectivityIssues = 4, - /// Nonce timeout - NonceTimeout = 5, - /// Aggregator error - AggregatorError = 6 -}); - -impl TryFrom for RejectCodeTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown reject code type prefix: {value}")) - }) - } -} - -impl From<&RejectCode> for RejectCodeTypePrefix { - fn from(reject_code: &RejectCode) -> Self { - match reject_code { - RejectCode::ValidationFailed(_) => RejectCodeTypePrefix::ValidationFailed, - RejectCode::SignedRejection(_) => RejectCodeTypePrefix::SignedRejection, - RejectCode::InsufficientSigners(_) => RejectCodeTypePrefix::InsufficientSigners, - RejectCode::MissingTransactions(_) => RejectCodeTypePrefix::MissingTransactions, - RejectCode::ConnectivityIssues => RejectCodeTypePrefix::ConnectivityIssues, - RejectCode::NonceTimeout(_) => RejectCodeTypePrefix::NonceTimeout, - RejectCode::AggregatorError(_) => RejectCodeTypePrefix::AggregatorError, - } - } -} - -/// The messages being sent through the stacker db contracts -#[derive(Clone, PartialEq, Serialize, Deserialize)] -pub enum SignerMessage { - /// The signed/validated Nakamoto block for miners to observe - BlockResponse(BlockResponse), - /// DKG and Signing round data for other signers to observe - Packet(Packet), - /// The list of transactions for miners and signers to observe that this signer cares about - Transactions(Vec), - /// The results of a successful DKG - DkgResults { - /// The aggregate key from the DKG round - aggregate_key: Point, - /// The polynomial commits used to construct the aggregate key - party_polynomials: Vec<(u32, PolyCommitment)>, - }, - /// The encrypted state of the signer to be persisted - EncryptedSignerState(Vec), -} - -impl Debug for SignerMessage { - #[cfg_attr(test, mutants::skip)] - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::BlockResponse(b) => Debug::fmt(b, f), - Self::Packet(p) => Debug::fmt(p, f), - Self::Transactions(t) => f.debug_tuple("Transactions").field(t).finish(), - Self::DkgResults { - aggregate_key, - party_polynomials, - } => { - let party_polynomials: Vec<_> = party_polynomials - .iter() - .map(|(ix, commit)| (ix, commit.to_string())) - .collect(); - f.debug_struct("DkgResults") - .field("aggregate_key", &aggregate_key.to_string()) - .field("party_polynomials", &party_polynomials) - .finish() - } - Self::EncryptedSignerState(s) => { - f.debug_tuple("EncryptedSignerState").field(s).finish() - } - } - } -} - -impl SignerMessage { - /// Helper function to determine the slot ID for the provided stacker-db writer id - #[cfg_attr(test, mutants::skip)] - pub fn msg_id(&self) -> MessageSlotID { - match self { - Self::Packet(packet) => match packet.msg { - Message::DkgBegin(_) => MessageSlotID::DkgBegin, - Message::DkgPrivateBegin(_) => MessageSlotID::DkgPrivateBegin, - Message::DkgEndBegin(_) => MessageSlotID::DkgEndBegin, - Message::DkgEnd(_) => MessageSlotID::DkgEnd, - Message::DkgPublicShares(_) => MessageSlotID::DkgPublicShares, - Message::DkgPrivateShares(_) => MessageSlotID::DkgPrivateShares, - Message::NonceRequest(_) => MessageSlotID::NonceRequest, - Message::NonceResponse(_) => MessageSlotID::NonceResponse, - Message::SignatureShareRequest(_) => MessageSlotID::SignatureShareRequest, - Message::SignatureShareResponse(_) => MessageSlotID::SignatureShareResponse, - }, - Self::BlockResponse(_) => MessageSlotID::BlockResponse, - Self::Transactions(_) => MessageSlotID::Transactions, - Self::DkgResults { .. } => MessageSlotID::DkgResults, - Self::EncryptedSignerState(_) => MessageSlotID::EncryptedSignerState, - } - } -} - -impl SignerMessage { - /// Provide an interface for consensus serializing a DkgResults `SignerMessage` - /// without constructing the DkgResults struct (this eliminates a clone) - pub fn serialize_dkg_result<'a, W: Write, I>( - fd: &mut W, - aggregate_key: &Point, - party_polynomials: I, - ) -> Result<(), CodecError> - where - I: ExactSizeIterator + Iterator, - { - SignerMessageTypePrefix::DkgResults - .to_u8() - .consensus_serialize(fd)?; - Self::serialize_dkg_result_components(fd, aggregate_key, party_polynomials) - } - - /// Serialize the internal components of DkgResults (this eliminates a clone) - fn serialize_dkg_result_components<'a, W: Write, I>( - fd: &mut W, - aggregate_key: &Point, - party_polynomials: I, - ) -> Result<(), CodecError> - where - I: ExactSizeIterator + Iterator, - { - aggregate_key.inner_consensus_serialize(fd)?; - let polynomials_len: u32 = party_polynomials - .len() - .try_into() - .map_err(|_| CodecError::ArrayTooLong)?; - polynomials_len.consensus_serialize(fd)?; - for (party_id, polynomial) in party_polynomials { - party_id.consensus_serialize(fd)?; - polynomial.inner_consensus_serialize(fd)?; - } - Ok(()) - } -} - -impl StacksMessageCodec for SignerMessage { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(SignerMessageTypePrefix::from(self) as u8))?; - match self { - SignerMessage::Packet(packet) => { - packet.inner_consensus_serialize(fd)?; - } - SignerMessage::BlockResponse(block_response) => { - write_next(fd, block_response)?; - } - SignerMessage::Transactions(transactions) => { - write_next(fd, transactions)?; - } - SignerMessage::DkgResults { - aggregate_key, - party_polynomials, - } => { - Self::serialize_dkg_result_components( - fd, - aggregate_key, - party_polynomials.iter().map(|(a, b)| (a, b)), - )?; - } - SignerMessage::EncryptedSignerState(encrypted_state) => { - write_next(fd, encrypted_state)?; - } - }; - Ok(()) - } - - #[cfg_attr(test, mutants::skip)] - fn consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; - let type_prefix = SignerMessageTypePrefix::try_from(type_prefix_byte)?; - let message = match type_prefix { - SignerMessageTypePrefix::Packet => { - let packet = Packet::inner_consensus_deserialize(fd)?; - SignerMessage::Packet(packet) - } - SignerMessageTypePrefix::BlockResponse => { - let block_response = read_next::(fd)?; - SignerMessage::BlockResponse(block_response) - } - SignerMessageTypePrefix::Transactions => { - let transactions = read_next::, _>(fd)?; - SignerMessage::Transactions(transactions) - } - SignerMessageTypePrefix::DkgResults => { - let aggregate_key = Point::inner_consensus_deserialize(fd)?; - let party_polynomial_len = u32::consensus_deserialize(fd)?; - let mut party_polynomials = Vec::with_capacity( - party_polynomial_len - .try_into() - .expect("FATAL: u32 could not fit in usize"), - ); - for _ in 0..party_polynomial_len { - let party_id = u32::consensus_deserialize(fd)?; - let polynomial = PolyCommitment::inner_consensus_deserialize(fd)?; - party_polynomials.push((party_id, polynomial)); - } - Self::DkgResults { - aggregate_key, - party_polynomials, - } - } - SignerMessageTypePrefix::EncryptedSignerState => { - // Typically the size of the signer state is much smaller, but in the fully degenerate case the size of the persisted state is - // 2800 * 32 * 4 + C for some small constant C. - // To have some margin, we're expanding the left term with an additional factor 4 - let max_encrypted_state_size = 2800 * 32 * 4 * 4; - let mut bound_reader = BoundReader::from_reader(fd, max_encrypted_state_size); - let encrypted_state = read_next::<_, _>(&mut bound_reader)?; - SignerMessage::EncryptedSignerState(encrypted_state) - } - }; - Ok(message) - } -} - -/// Work around for the fact that a lot of the structs being desierialized are not defined in messages.rs -pub trait StacksMessageCodecExtensions: Sized { - /// Serialize the struct to the provided writer - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError>; - /// Deserialize the struct from the provided reader - fn inner_consensus_deserialize(fd: &mut R) -> Result; -} - -impl StacksMessageCodecExtensions for Scalar { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.to_bytes()) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let scalar_bytes: [u8; 32] = read_next(fd)?; - Ok(Scalar::from(scalar_bytes)) - } -} - -impl StacksMessageCodecExtensions for Point { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.compress().as_bytes().to_vec()) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let compressed_bytes: Vec = read_next(fd)?; - let compressed = Compressed::try_from(compressed_bytes.as_slice()) - .map_err(|e| CodecError::DeserializeError(e.to_string()))?; - Point::try_from(&compressed).map_err(|e| CodecError::DeserializeError(e.to_string())) - } -} - -impl StacksMessageCodecExtensions for PolyCommitment { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.id.inner_consensus_serialize(fd)?; - let commit_len: u32 = self - .poly - .len() - .try_into() - .map_err(|_| CodecError::ArrayTooLong)?; - commit_len.consensus_serialize(fd)?; - for poly in self.poly.iter() { - poly.inner_consensus_serialize(fd)?; - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let id = ID::inner_consensus_deserialize(fd)?; - let commit_len = u32::consensus_deserialize(fd)?; - let mut poly = Vec::with_capacity( - commit_len - .try_into() - .expect("FATAL: u32 could not fit in usize"), - ); - for _ in 0..commit_len { - poly.push(Point::inner_consensus_deserialize(fd)?); - } - Ok(Self { id, poly }) - } -} - -impl StacksMessageCodecExtensions for ID { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.id.inner_consensus_serialize(fd)?; - self.kG.inner_consensus_serialize(fd)?; - self.kca.inner_consensus_serialize(fd) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let id = Scalar::inner_consensus_deserialize(fd)?; - let k_g = Point::inner_consensus_deserialize(fd)?; - let kca = Scalar::inner_consensus_deserialize(fd)?; - Ok(Self { id, kG: k_g, kca }) - } -} - -#[allow(non_snake_case)] -impl StacksMessageCodecExtensions for TupleProof { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.R.inner_consensus_serialize(fd)?; - self.rB.inner_consensus_serialize(fd)?; - self.z.inner_consensus_serialize(fd) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let R = Point::inner_consensus_deserialize(fd)?; - let rB = Point::inner_consensus_deserialize(fd)?; - let z = Scalar::inner_consensus_deserialize(fd)?; - Ok(Self { R, rB, z }) - } -} - -impl StacksMessageCodecExtensions for BadPrivateShare { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.shared_key.inner_consensus_serialize(fd)?; - self.tuple_proof.inner_consensus_serialize(fd) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let shared_key = Point::inner_consensus_deserialize(fd)?; - let tuple_proof = TupleProof::inner_consensus_deserialize(fd)?; - Ok(Self { - shared_key, - tuple_proof, - }) - } -} - -impl StacksMessageCodecExtensions for HashSet { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(self.len() as u32))?; - for i in self { - write_next(fd, i)?; - } - Ok(()) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let mut set = Self::new(); - let len = read_next::(fd)?; - for _ in 0..len { - let i = read_next::(fd)?; - set.insert(i); - } - Ok(set) - } -} - -define_u8_enum!( -/// Enum representing the DKG failure type prefix -DkgFailureTypePrefix { - /// Bad state - BadState = 0, - /// Missing public shares - MissingPublicShares = 1, - /// Bad public shares - BadPublicShares = 2, - /// Missing private shares - MissingPrivateShares = 3, - /// Bad private shares - BadPrivateShares = 4 -}); - -impl TryFrom for DkgFailureTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown DKG failure type prefix: {value}")) - }) - } -} - -impl From<&DkgFailure> for DkgFailureTypePrefix { - fn from(failure: &DkgFailure) -> Self { - match failure { - DkgFailure::BadState => DkgFailureTypePrefix::BadState, - DkgFailure::MissingPublicShares(_) => DkgFailureTypePrefix::MissingPublicShares, - DkgFailure::BadPublicShares(_) => DkgFailureTypePrefix::BadPublicShares, - DkgFailure::MissingPrivateShares(_) => DkgFailureTypePrefix::MissingPrivateShares, - DkgFailure::BadPrivateShares(_) => DkgFailureTypePrefix::BadPrivateShares, - } - } -} - -impl StacksMessageCodecExtensions for DkgFailure { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(DkgFailureTypePrefix::from(self) as u8))?; - match self { - DkgFailure::BadState => { - // No additional data to serialize - } - DkgFailure::MissingPublicShares(shares) => { - shares.inner_consensus_serialize(fd)?; - } - DkgFailure::BadPublicShares(shares) => { - shares.inner_consensus_serialize(fd)?; - } - DkgFailure::MissingPrivateShares(shares) => { - shares.inner_consensus_serialize(fd)?; - } - DkgFailure::BadPrivateShares(shares) => { - write_next(fd, &(shares.len() as u32))?; - for (id, share) in shares { - write_next(fd, id)?; - share.inner_consensus_serialize(fd)?; - } - } - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let failure_type_prefix_byte = read_next::(fd)?; - let failure_type_prefix = DkgFailureTypePrefix::try_from(failure_type_prefix_byte)?; - let failure_type = match failure_type_prefix { - DkgFailureTypePrefix::BadState => DkgFailure::BadState, - DkgFailureTypePrefix::MissingPublicShares => { - let set = HashSet::::inner_consensus_deserialize(fd)?; - DkgFailure::MissingPublicShares(set) - } - DkgFailureTypePrefix::BadPublicShares => { - let set = HashSet::::inner_consensus_deserialize(fd)?; - DkgFailure::BadPublicShares(set) - } - DkgFailureTypePrefix::MissingPrivateShares => { - let set = HashSet::::inner_consensus_deserialize(fd)?; - DkgFailure::MissingPrivateShares(set) - } - DkgFailureTypePrefix::BadPrivateShares => { - let mut map = HashMap::new(); - let len = read_next::(fd)?; - for _ in 0..len { - let i = read_next::(fd)?; - let bad_share = BadPrivateShare::inner_consensus_deserialize(fd)?; - map.insert(i, bad_share); - } - DkgFailure::BadPrivateShares(map) - } - }; - Ok(failure_type) - } -} - -impl StacksMessageCodecExtensions for DkgBegin { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - Ok(DkgBegin { dkg_id }) - } -} - -impl StacksMessageCodecExtensions for DkgPrivateBegin { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_ids)?; - write_next(fd, &self.key_ids) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_ids = read_next::, _>(fd)?; - let key_ids = read_next::, _>(fd)?; - Ok(DkgPrivateBegin { - dkg_id, - signer_ids, - key_ids, - }) - } -} - -impl StacksMessageCodecExtensions for DkgEndBegin { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_ids)?; - write_next(fd, &self.key_ids) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_ids = read_next::, _>(fd)?; - let key_ids = read_next::, _>(fd)?; - Ok(DkgEndBegin { - dkg_id, - signer_ids, - key_ids, - }) - } -} - -define_u8_enum!( -/// Enum representing the DKG status type prefix -DkgStatusTypePrefix { - /// Success - Success = 0, - /// Failure - Failure = 1 -}); - -impl TryFrom for DkgStatusTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown DKG status type prefix: {value}")) - }) - } -} - -impl From<&DkgStatus> for DkgStatusTypePrefix { - fn from(status: &DkgStatus) -> Self { - match status { - DkgStatus::Success => DkgStatusTypePrefix::Success, - DkgStatus::Failure(_) => DkgStatusTypePrefix::Failure, - } - } -} - -impl StacksMessageCodecExtensions for DkgEnd { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &(DkgStatusTypePrefix::from(&self.status) as u8))?; - match &self.status { - DkgStatus::Success => { - // No additional data to serialize - } - DkgStatus::Failure(failure) => { - failure.inner_consensus_serialize(fd)?; - } - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let status_type_prefix_byte = read_next::(fd)?; - let status_type_prefix = DkgStatusTypePrefix::try_from(status_type_prefix_byte)?; - let status = match status_type_prefix { - DkgStatusTypePrefix::Success => DkgStatus::Success, - DkgStatusTypePrefix::Failure => { - let failure = DkgFailure::inner_consensus_deserialize(fd)?; - DkgStatus::Failure(failure) - } - }; - Ok(DkgEnd { - dkg_id, - signer_id, - status, - }) - } -} - -impl StacksMessageCodecExtensions for DkgPublicShares { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &(self.comms.len() as u32))?; - for (id, comm) in &self.comms { - write_next(fd, id)?; - comm.id.id.inner_consensus_serialize(fd)?; - comm.id.kG.inner_consensus_serialize(fd)?; - comm.id.kca.inner_consensus_serialize(fd)?; - write_next(fd, &(comm.poly.len() as u32))?; - for poly in comm.poly.iter() { - poly.inner_consensus_serialize(fd)? - } - } - Ok(()) - } - - #[allow(non_snake_case)] - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let num_shares = read_next::(fd)?; - let mut comms = Vec::new(); - for _ in 0..num_shares { - let id = read_next::(fd)?; - let scalar_id = Scalar::inner_consensus_deserialize(fd)?; - let kG = Point::inner_consensus_deserialize(fd)?; - let kca = Scalar::inner_consensus_deserialize(fd)?; - let num_poly_coeffs = read_next::(fd)?; - let mut poly = Vec::new(); - for _ in 0..num_poly_coeffs { - poly.push(Point::inner_consensus_deserialize(fd)?); - } - comms.push(( - id, - PolyCommitment { - id: ID { - id: scalar_id, - kG, - kca, - }, - poly, - }, - )); - } - Ok(DkgPublicShares { - dkg_id, - signer_id, - comms, - }) - } -} - -impl StacksMessageCodecExtensions for DkgPrivateShares { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &(self.shares.len() as u32))?; - for (id, share_map) in &self.shares { - write_next(fd, id)?; - write_next(fd, &(share_map.len() as u32))?; - for (id, share) in share_map { - write_next(fd, id)?; - write_next(fd, share)?; - } - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let num_shares = read_next::(fd)?; - let mut shares = Vec::new(); - for _ in 0..num_shares { - let id = read_next::(fd)?; - let num_share_map = read_next::(fd)?; - let mut share_map = HashMap::new(); - for _ in 0..num_share_map { - let id = read_next::(fd)?; - let share: Vec = read_next(fd)?; - share_map.insert(id, share); - } - shares.push((id, share_map)); - } - Ok(DkgPrivateShares { - dkg_id, - signer_id, - shares, - }) - } -} - -impl StacksMessageCodecExtensions for NonceRequest { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.sign_id)?; - write_next(fd, &self.sign_iter_id)?; - write_next(fd, &self.message)?; - write_next(fd, &(self.is_taproot as u8))?; - write_next(fd, &(self.merkle_root.is_some() as u8))?; - if let Some(merkle_root) = self.merkle_root { - write_next(fd, &merkle_root)?; - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let sign_id = read_next::(fd)?; - let sign_iter_id = read_next::(fd)?; - let message = read_next::, _>(fd)?; - let is_taproot = read_next::(fd)? != 0; - let has_merkle_root = read_next::(fd)? != 0; - let merkle_root = if has_merkle_root { - Some(read_next::<[u8; 32], _>(fd)?) - } else { - None - }; - - Ok(NonceRequest { - dkg_id, - sign_id, - sign_iter_id, - message, - is_taproot, - merkle_root, - }) - } -} - -impl StacksMessageCodecExtensions for NonceResponse { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.sign_id)?; - write_next(fd, &self.sign_iter_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &self.key_ids)?; - write_next(fd, &(self.nonces.len() as u32))?; - for nonce in &self.nonces { - nonce.D.inner_consensus_serialize(fd)?; - nonce.E.inner_consensus_serialize(fd)?; - } - write_next(fd, &self.message)?; - Ok(()) - } - - #[allow(non_snake_case)] - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let sign_id = read_next::(fd)?; - let sign_iter_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let key_ids = read_next::, _>(fd)?; - let num_nonces = read_next::(fd)?; - let mut nonces = Vec::new(); - for _ in 0..num_nonces { - let D = Point::inner_consensus_deserialize(fd)?; - let E = Point::inner_consensus_deserialize(fd)?; - nonces.push(PublicNonce { D, E }); - } - let message = read_next::, _>(fd)?; - - Ok(NonceResponse { - dkg_id, - sign_id, - sign_iter_id, - signer_id, - key_ids, - nonces, - message, - }) - } -} - -impl StacksMessageCodecExtensions for SignatureShareRequest { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.sign_id)?; - write_next(fd, &self.sign_iter_id)?; - write_next(fd, &(self.nonce_responses.len() as u32))?; - for nonce_response in &self.nonce_responses { - nonce_response.inner_consensus_serialize(fd)?; - } - write_next(fd, &self.message)?; - write_next(fd, &(self.is_taproot as u8))?; - write_next(fd, &(self.merkle_root.is_some() as u8))?; - if let Some(merkle_root) = self.merkle_root { - write_next(fd, &merkle_root)?; - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let sign_id = read_next::(fd)?; - let sign_iter_id = read_next::(fd)?; - let num_nonce_responses = read_next::(fd)?; - let mut nonce_responses = Vec::new(); - for _ in 0..num_nonce_responses { - nonce_responses.push(NonceResponse::inner_consensus_deserialize(fd)?); - } - let message = read_next::, _>(fd)?; - let is_taproot = read_next::(fd)? != 0; - let has_merkle_root = read_next::(fd)? != 0; - let merkle_root = if has_merkle_root { - Some(read_next::<[u8; 32], _>(fd)?) - } else { - None - }; - - Ok(SignatureShareRequest { - dkg_id, - sign_id, - sign_iter_id, - nonce_responses, - message, - is_taproot, - merkle_root, - }) - } -} - -impl StacksMessageCodecExtensions for SignatureShareResponse { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.sign_id)?; - write_next(fd, &self.sign_iter_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &(self.signature_shares.len() as u32))?; - for share in &self.signature_shares { - write_next(fd, &share.id)?; - share.z_i.inner_consensus_serialize(fd)?; - write_next(fd, &share.key_ids)?; - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let sign_id = read_next::(fd)?; - let sign_iter_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let num_shares = read_next::(fd)?; - let mut signature_shares = Vec::new(); - for _ in 0..num_shares { - let id = read_next::(fd)?; - let z_i = Scalar::inner_consensus_deserialize(fd)?; - let key_ids = read_next::, _>(fd)?; - signature_shares.push(SignatureShare { id, z_i, key_ids }); - } - Ok(SignatureShareResponse { - dkg_id, - sign_id, - sign_iter_id, - signer_id, - signature_shares, - }) - } -} - -impl StacksMessageCodecExtensions for Message { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(MessageTypePrefix::from(self) as u8))?; - match self { - Message::DkgBegin(dkg_begin) => { - dkg_begin.inner_consensus_serialize(fd)?; - } - Message::DkgPrivateBegin(dkg_private_begin) => { - dkg_private_begin.inner_consensus_serialize(fd)?; - } - Message::DkgEndBegin(dkg_end_begin) => { - dkg_end_begin.inner_consensus_serialize(fd)?; - } - Message::DkgEnd(dkg_end) => { - dkg_end.inner_consensus_serialize(fd)?; - } - Message::DkgPublicShares(dkg_public_shares) => { - dkg_public_shares.inner_consensus_serialize(fd)?; - } - Message::DkgPrivateShares(dkg_private_shares) => { - dkg_private_shares.inner_consensus_serialize(fd)?; - } - Message::NonceRequest(nonce_request) => { - nonce_request.inner_consensus_serialize(fd)?; - } - Message::NonceResponse(nonce_response) => { - nonce_response.inner_consensus_serialize(fd)?; - } - Message::SignatureShareRequest(signature_share_request) => { - signature_share_request.inner_consensus_serialize(fd)?; - } - Message::SignatureShareResponse(signature_share_response) => { - signature_share_response.inner_consensus_serialize(fd)?; - } - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; - let type_prefix = MessageTypePrefix::try_from(type_prefix_byte)?; - let message = match type_prefix { - MessageTypePrefix::DkgBegin => { - Message::DkgBegin(DkgBegin::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::DkgPrivateBegin => { - Message::DkgPrivateBegin(DkgPrivateBegin::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::DkgEndBegin => { - Message::DkgEndBegin(DkgEndBegin::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::DkgEnd => Message::DkgEnd(DkgEnd::inner_consensus_deserialize(fd)?), - MessageTypePrefix::DkgPublicShares => { - Message::DkgPublicShares(DkgPublicShares::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::DkgPrivateShares => { - Message::DkgPrivateShares(DkgPrivateShares::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::NonceRequest => { - Message::NonceRequest(NonceRequest::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::NonceResponse => { - Message::NonceResponse(NonceResponse::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::SignatureShareRequest => Message::SignatureShareRequest( - SignatureShareRequest::inner_consensus_deserialize(fd)?, - ), - MessageTypePrefix::SignatureShareResponse => Message::SignatureShareResponse( - SignatureShareResponse::inner_consensus_deserialize(fd)?, - ), - }; - Ok(message) - } -} - -impl StacksMessageCodecExtensions for Packet { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.msg.inner_consensus_serialize(fd)?; - write_next(fd, &self.sig)?; - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let msg = Message::inner_consensus_deserialize(fd)?; - let sig: Vec = read_next(fd)?; - Ok(Packet { msg, sig }) - } -} - -define_u8_enum!( -/// Enum representing the block response type prefix -BlockResponseTypePrefix { - /// Accepted - Accepted = 0, - /// Rejected - Rejected = 1 -}); - -impl TryFrom for BlockResponseTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown block response type prefix: {value}")) - }) - } -} - -impl From<&BlockResponse> for BlockResponseTypePrefix { - fn from(block_response: &BlockResponse) -> Self { - match block_response { - BlockResponse::Accepted(_) => BlockResponseTypePrefix::Accepted, - BlockResponse::Rejected(_) => BlockResponseTypePrefix::Rejected, - } - } -} - -/// The response that a signer sends back to observing miners -/// either accepting or rejecting a Nakamoto block with the corresponding reason -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub enum BlockResponse { - /// The Nakamoto block was accepted and therefore signed - Accepted((Sha512Trunc256Sum, ThresholdSignature)), - /// The Nakamoto block was rejected and therefore not signed - Rejected(BlockRejection), -} - -impl std::fmt::Display for BlockResponse { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - BlockResponse::Accepted(a) => { - write!( - f, - "BlockAccepted: signer_sighash = {}, signature = {}", - a.0, a.1 - ) - } - BlockResponse::Rejected(r) => { - write!( - f, - "BlockRejected: signer_sighash = {}, code = {}, reason = {}", - r.reason_code, r.reason, r.signer_signature_hash - ) - } - } - } -} - -impl BlockResponse { - /// Create a new accepted BlockResponse for the provided block signer signature hash and signature - pub fn accepted(hash: Sha512Trunc256Sum, sig: Signature) -> Self { - Self::Accepted((hash, ThresholdSignature(sig))) - } - - /// Create a new rejected BlockResponse for the provided block signer signature hash and signature - pub fn rejected(hash: Sha512Trunc256Sum, sig: Signature) -> Self { - Self::Rejected(BlockRejection::new( - hash, - RejectCode::SignedRejection(ThresholdSignature(sig)), - )) - } -} - -impl StacksMessageCodec for BlockResponse { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(BlockResponseTypePrefix::from(self) as u8))?; - match self { - BlockResponse::Accepted((hash, sig)) => { - write_next(fd, hash)?; - write_next(fd, sig)?; - } - BlockResponse::Rejected(rejection) => { - write_next(fd, rejection)?; - } - }; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; - let type_prefix = BlockResponseTypePrefix::try_from(type_prefix_byte)?; - let response = match type_prefix { - BlockResponseTypePrefix::Accepted => { - let hash = read_next::(fd)?; - let sig = read_next::(fd)?; - BlockResponse::Accepted((hash, sig)) - } - BlockResponseTypePrefix::Rejected => { - let rejection = read_next::(fd)?; - BlockResponse::Rejected(rejection) - } - }; - Ok(response) - } -} - -/// A rejection response from a signer for a proposed block -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct BlockRejection { - /// The reason for the rejection - pub reason: String, - /// The reason code for the rejection - pub reason_code: RejectCode, - /// The signer signature hash of the block that was rejected - pub signer_signature_hash: Sha512Trunc256Sum, -} - -impl BlockRejection { - /// Create a new BlockRejection for the provided block and reason code - pub fn new(signer_signature_hash: Sha512Trunc256Sum, reason_code: RejectCode) -> Self { - Self { - reason: reason_code.to_string(), - reason_code, - signer_signature_hash, - } - } -} - -impl StacksMessageCodec for BlockRejection { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.reason.as_bytes().to_vec())?; - write_next(fd, &self.reason_code)?; - write_next(fd, &self.signer_signature_hash)?; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let reason_bytes = read_next::, _>(fd)?; - let reason = String::from_utf8(reason_bytes).map_err(|e| { - CodecError::DeserializeError(format!("Failed to decode reason string: {:?}", &e)) - })?; - let reason_code = read_next::(fd)?; - let signer_signature_hash = read_next::(fd)?; - Ok(Self { - reason, - reason_code, - signer_signature_hash, - }) - } -} - -impl From for BlockRejection { - fn from(reject: BlockValidateReject) -> Self { - Self { - reason: reject.reason, - reason_code: RejectCode::ValidationFailed(reject.reason_code), - signer_signature_hash: reject.signer_signature_hash, - } - } -} - -/// This enum is used to supply a `reason_code` for block rejections -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub enum RejectCode { - /// RPC endpoint Validation failed - ValidationFailed(ValidateRejectCode), - /// Signers signed a block rejection - SignedRejection(ThresholdSignature), - /// Nonce timeout was reached - NonceTimeout(Vec), - /// Insufficient signers agreed to sign the block - InsufficientSigners(Vec), - /// An internal error occurred in the signer when aggregating the signaure - AggregatorError(String), - /// Missing the following expected transactions - MissingTransactions(Vec), - /// The block was rejected due to connectivity issues with the signer - ConnectivityIssues, -} - -impl From<&SignError> for RejectCode { - fn from(err: &SignError) -> Self { - match err { - SignError::NonceTimeout(_valid_signers, malicious_signers) => { - Self::NonceTimeout(malicious_signers.clone()) - } - SignError::InsufficientSigners(malicious_signers) => { - Self::InsufficientSigners(malicious_signers.clone()) - } - SignError::Aggregator(e) => Self::AggregatorError(e.to_string()), - } - } -} - -impl StacksMessageCodec for RejectCode { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(RejectCodeTypePrefix::from(self) as u8))?; - match self { - RejectCode::ValidationFailed(code) => write_next(fd, &(*code as u8))?, - RejectCode::SignedRejection(sig) => write_next(fd, sig)?, - RejectCode::InsufficientSigners(malicious_signers) - | RejectCode::NonceTimeout(malicious_signers) => write_next(fd, malicious_signers)?, - RejectCode::MissingTransactions(missing_transactions) => { - write_next(fd, missing_transactions)? - } - RejectCode::AggregatorError(reason) => write_next(fd, &reason.as_bytes().to_vec())?, - RejectCode::ConnectivityIssues => write_next(fd, &4u8)?, - }; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; - let type_prefix = RejectCodeTypePrefix::try_from(type_prefix_byte)?; - let code = match type_prefix { - RejectCodeTypePrefix::ValidationFailed => RejectCode::ValidationFailed( - ValidateRejectCode::try_from(read_next::(fd)?).map_err(|e| { - CodecError::DeserializeError(format!( - "Failed to decode validation reject code: {:?}", - &e - )) - })?, - ), - RejectCodeTypePrefix::SignedRejection => { - RejectCode::SignedRejection(read_next::(fd)?) - } - RejectCodeTypePrefix::InsufficientSigners => { - RejectCode::InsufficientSigners(read_next::, _>(fd)?) - } - RejectCodeTypePrefix::MissingTransactions => { - RejectCode::MissingTransactions(read_next::, _>(fd)?) - } - RejectCodeTypePrefix::NonceTimeout => { - RejectCode::NonceTimeout(read_next::, _>(fd)?) - } - RejectCodeTypePrefix::ConnectivityIssues => RejectCode::ConnectivityIssues, - RejectCodeTypePrefix::AggregatorError => { - let reason_bytes = read_next::, _>(fd)?; - let reason = String::from_utf8(reason_bytes).map_err(|e| { - CodecError::DeserializeError(format!( - "Failed to decode reason string: {:?}", - &e - )) - })?; - RejectCode::AggregatorError(reason) - } - }; - Ok(code) - } -} - -impl std::fmt::Display for RejectCode { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - RejectCode::ValidationFailed(code) => write!(f, "Validation failed: {:?}", code), - RejectCode::SignedRejection(sig) => { - write!(f, "A threshold number of signers rejected the block with the following signature: {:?}.", sig) - } - RejectCode::InsufficientSigners(malicious_signers) => write!( - f, - "Insufficient signers agreed to sign the block. The following signers are malicious: {:?}", - malicious_signers - ), - RejectCode::NonceTimeout(malicious_signers) => write!( - f, - "Nonce timeout occurred signers. The following signers are malicious: {:?}", - malicious_signers - ), - RejectCode::MissingTransactions(missing_transactions) => write!( - f, - "Missing the following expected transactions: {:?}", - missing_transactions.iter().map(|tx| tx.txid()).collect::>() - ), - RejectCode::ConnectivityIssues => write!( - f, - "The block was rejected due to connectivity issues with the signer." - ), - RejectCode::AggregatorError(reason) => write!( - f, - "An internal error occurred in the signer when aggregating the signaure: {:?}", - reason - ), - } - } -} - -impl From for SignerMessage { - fn from(packet: Packet) -> Self { - Self::Packet(packet) - } -} - -impl From for SignerMessage { - fn from(block_response: BlockResponse) -> Self { - Self::BlockResponse(block_response) - } -} - -impl From for SignerMessage { - fn from(block_rejection: BlockRejection) -> Self { - Self::BlockResponse(BlockResponse::Rejected(block_rejection)) - } -} - -impl From for SignerMessage { - fn from(rejection: BlockValidateReject) -> Self { - Self::BlockResponse(BlockResponse::Rejected(rejection.into())) - } -} - -#[cfg(test)] -mod test { - use blockstack_lib::chainstate::stacks::{ - TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, - TransactionSmartContract, TransactionVersion, - }; - use blockstack_lib::util_lib::strings::StacksString; - use rand::Rng; - use rand_core::OsRng; - use stacks_common::consts::CHAIN_ID_TESTNET; - use stacks_common::types::chainstate::StacksPrivateKey; - use wsts::common::Signature; - - use super::{StacksMessageCodecExtensions, *}; - - #[test] - #[should_panic] - // V1 signer slots do not have enough slots in Epoch 2.5. Something will need to be updated! - fn signer_slots_count_is_sane() { - let slot_identifiers_len = MessageSlotID::ALL.len(); - assert!( - SIGNER_SLOTS_PER_USER as usize >= slot_identifiers_len, - "stacks_common::SIGNER_SLOTS_PER_USER ({}) must be >= slot identifiers ({})", - SIGNER_SLOTS_PER_USER, - slot_identifiers_len, - ); - } - - #[test] - fn serde_reject_code() { - let code = RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::SignedRejection(ThresholdSignature::empty()); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::InsufficientSigners(vec![0, 1, 2]); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::NonceTimeout(vec![0, 1, 2]); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::AggregatorError("Test Error".into()); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: CHAIN_ID_TESTNET, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - let code = RejectCode::MissingTransactions(vec![tx]); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::ConnectivityIssues; - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - } - - #[test] - fn serde_block_rejection() { - let rejection = BlockRejection::new( - Sha512Trunc256Sum([0u8; 32]), - RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - - let rejection = BlockRejection::new( - Sha512Trunc256Sum([1u8; 32]), - RejectCode::SignedRejection(ThresholdSignature::empty()), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - - let rejection = BlockRejection::new( - Sha512Trunc256Sum([2u8; 32]), - RejectCode::InsufficientSigners(vec![0, 1, 2]), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - - let rejection = BlockRejection::new( - Sha512Trunc256Sum([2u8; 32]), - RejectCode::NonceTimeout(vec![0, 1, 2]), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - - let rejection = BlockRejection::new( - Sha512Trunc256Sum([2u8; 32]), - RejectCode::AggregatorError("Test Error".into()), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - } - - #[test] - fn serde_block_response() { - let response = - BlockResponse::Accepted((Sha512Trunc256Sum([0u8; 32]), ThresholdSignature::empty())); - let serialized_response = response.serialize_to_vec(); - let deserialized_response = read_next::(&mut &serialized_response[..]) - .expect("Failed to deserialize BlockResponse"); - assert_eq!(response, deserialized_response); - - let response = BlockResponse::Rejected(BlockRejection::new( - Sha512Trunc256Sum([1u8; 32]), - RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), - )); - let serialized_response = response.serialize_to_vec(); - let deserialized_response = read_next::(&mut &serialized_response[..]) - .expect("Failed to deserialize BlockResponse"); - assert_eq!(response, deserialized_response); - } - - #[test] - fn serde_point_scalar() { - let mut rng = OsRng; - let scalar = Scalar::random(&mut rng); - let mut serialized_scalar = vec![]; - scalar - .inner_consensus_serialize(&mut serialized_scalar) - .expect("serialization to buffer failed."); - let deserialized_scalar = Scalar::inner_consensus_deserialize(&mut &serialized_scalar[..]) - .expect("Failed to deserialize Scalar"); - assert_eq!(scalar, deserialized_scalar); - - let point = Point::from(scalar); - let mut serialized_point = vec![]; - point - .inner_consensus_serialize(&mut serialized_point) - .expect("serialization to buffer failed."); - let deserialized_point = Point::inner_consensus_deserialize(&mut &serialized_point[..]) - .expect("Failed to deserialize Point"); - assert_eq!(point, deserialized_point); - } - - fn test_fixture_packet(msg: Message) { - let packet = Packet { - msg, - sig: vec![1u8; 20], - }; - let mut serialized_packet = vec![]; - packet - .inner_consensus_serialize(&mut serialized_packet) - .expect("serialization to buffer failed."); - let deserialized_packet = Packet::inner_consensus_deserialize(&mut &serialized_packet[..]) - .expect("Failed to deserialize Packet"); - assert_eq!(packet, deserialized_packet); - } - - #[test] - fn serde_packet() { - // Test DKG begin Packet - test_fixture_packet(Message::DkgBegin(DkgBegin { dkg_id: 0 })); - - let dkg_id = rand::thread_rng().gen(); - let signer_id = rand::thread_rng().gen(); - let sign_id = rand::thread_rng().gen(); - let sign_iter_id = rand::thread_rng().gen(); - let mut signer_ids = [0u32; 100]; - rand::thread_rng().fill(&mut signer_ids[..]); - - let mut key_ids = [0u32; 100]; - rand::thread_rng().fill(&mut key_ids[..]); - let nmb_items = rand::thread_rng().gen_range(1..100); - - // Test DKG private begin Packet - test_fixture_packet(Message::DkgPrivateBegin(DkgPrivateBegin { - dkg_id, - signer_ids: signer_ids.to_vec(), - key_ids: key_ids.to_vec(), - })); - - // Test DKG end begin Packet - test_fixture_packet(Message::DkgEndBegin(DkgEndBegin { - dkg_id, - signer_ids: signer_ids.to_vec(), - key_ids: key_ids.to_vec(), - })); - - // Test DKG end Packet Success - test_fixture_packet(Message::DkgEnd(DkgEnd { - dkg_id, - signer_id, - status: DkgStatus::Success, - })); - - // Test DKG end Packet Failure - test_fixture_packet(Message::DkgEnd(DkgEnd { - dkg_id, - signer_id, - status: DkgStatus::Failure(DkgFailure::BadState), - })); - - // Test DKG public shares Packet - let rng = &mut OsRng; - let comms = (0..nmb_items) - .map(|i| { - ( - i, - PolyCommitment { - id: ID { - id: Scalar::random(rng), - kG: Point::from(Scalar::random(rng)), - kca: Scalar::random(rng), - }, - poly: vec![ - Point::from(Scalar::random(rng)), - Point::from(Scalar::random(rng)), - ], - }, - ) - }) - .collect(); - test_fixture_packet(Message::DkgPublicShares(DkgPublicShares { - dkg_id, - signer_id, - comms, - })); - - // Test DKG private shares Packet - let mut shares = vec![]; - for i in 0..nmb_items { - let mut shares_map = HashMap::new(); - for i in 0..nmb_items { - let mut bytes = [0u8; 20]; - rng.fill(&mut bytes[..]); - shares_map.insert(i, bytes.to_vec()); - } - shares.push((i, shares_map)); - } - test_fixture_packet(Message::DkgPrivateShares(DkgPrivateShares { - dkg_id, - signer_id, - shares, - })); - - // Test Nonce request Packet with merkle root - let mut message = [0u8; 40]; - rng.fill(&mut message[..]); - let mut merkle_root_bytes = [0u8; 32]; - rng.fill(&mut merkle_root_bytes[..]); - let merkle_root = Some(merkle_root_bytes); - - test_fixture_packet(Message::NonceRequest(NonceRequest { - dkg_id, - sign_id, - sign_iter_id, - message: message.to_vec(), - is_taproot: true, - merkle_root, - })); - - // Test Nonce request Packet with no merkle root - test_fixture_packet(Message::NonceRequest(NonceRequest { - dkg_id, - sign_id, - sign_iter_id, - message: message.to_vec(), - is_taproot: false, - merkle_root: None, - })); - - // Test Nonce response Packet - let mut nonces = vec![]; - for _ in 0..nmb_items { - nonces.push(PublicNonce { - D: Point::from(Scalar::random(rng)), - E: Point::from(Scalar::random(rng)), - }); - } - let nonce_response = NonceResponse { - dkg_id, - sign_id, - sign_iter_id, - signer_id, - key_ids: key_ids.to_vec(), - nonces, - message: message.to_vec(), - }; - test_fixture_packet(Message::NonceResponse(nonce_response.clone())); - - // Test Signature share request Packet with merkle root and nonce response - test_fixture_packet(Message::SignatureShareRequest(SignatureShareRequest { - dkg_id, - sign_id, - sign_iter_id, - nonce_responses: vec![nonce_response], - message: message.to_vec(), - is_taproot: true, - merkle_root, - })); - - // Test Signature share request Packet with no merkle root and nonce response - test_fixture_packet(Message::SignatureShareRequest(SignatureShareRequest { - dkg_id, - sign_id, - sign_iter_id, - nonce_responses: vec![], - message: message.to_vec(), - is_taproot: false, - merkle_root: None, - })); - - // Test Signature share response Packet - let mut signature_shares = vec![]; - for i in 0..nmb_items { - let mut key_ids = vec![]; - for i in 0..nmb_items { - key_ids.push(i); - } - signature_shares.push(SignatureShare { - id: i, - z_i: Scalar::random(rng), - key_ids, - }); - } - test_fixture_packet(Message::SignatureShareResponse(SignatureShareResponse { - dkg_id, - sign_id, - sign_iter_id, - signer_id, - signature_shares, - })); - } - - #[test] - fn serde_signer_message() { - let rng = &mut OsRng; - let signer_message = SignerMessage::Packet(Packet { - msg: Message::DkgBegin(DkgBegin { dkg_id: 0 }), - sig: vec![1u8; 20], - }); - - let serialized_signer_message = signer_message.serialize_to_vec(); - let deserialized_signer_message = - read_next::(&mut &serialized_signer_message[..]) - .expect("Failed to deserialize SignerMessage"); - assert_eq!(signer_message, deserialized_signer_message); - - let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(( - Sha512Trunc256Sum([2u8; 32]), - ThresholdSignature(Signature { - R: Point::from(Scalar::random(rng)), - z: Scalar::random(rng), - }), - ))); - let serialized_signer_message = signer_message.serialize_to_vec(); - let deserialized_signer_message = - read_next::(&mut &serialized_signer_message[..]) - .expect("Failed to deserialize SignerMessage"); - assert_eq!(signer_message, deserialized_signer_message); - - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: CHAIN_ID_TESTNET, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - let signer_message = SignerMessage::Transactions(vec![tx]); - let serialized_signer_message = signer_message.serialize_to_vec(); - let deserialized_signer_message = - read_next::(&mut &serialized_signer_message[..]) - .expect("Failed to deserialize SignerMessage"); - assert_eq!(signer_message, deserialized_signer_message); - } -} diff --git a/libsigner/src/v1/mod.rs b/libsigner/src/v1/mod.rs deleted file mode 100644 index e5a691efb2..0000000000 --- a/libsigner/src/v1/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -/// Messages for the v1 signer -pub mod messages; diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 64e3cd5ca9..da94cc10de 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -43,7 +43,6 @@ tiny_http = { version = "0.12", optional = true } toml = "0.5.6" tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } -wsts = { workspace = true } rand = { workspace = true } url = "2.1.0" rusqlite = { workspace = true } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index c36f73a3f9..081b5c07ab 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -148,7 +148,6 @@ pub(crate) mod tests { }; use stacks_common::types::{StacksEpochId, StacksPublicKeyBuffer}; use stacks_common::util::hash::{Hash160, Sha256Sum}; - use wsts::curve::point::Point; use super::*; use crate::config::{GlobalConfig, SignerConfig}; @@ -325,41 +324,6 @@ pub(crate) mod tests { (format!("HTTP/1.1 200 Ok\n\n{pox_info_json}"), pox_info) } - /// Build a response for the get_approved_aggregate_key request - pub fn build_get_approved_aggregate_key_response(point: Option) -> String { - let clarity_value = if let Some(point) = point { - ClarityValue::some( - ClarityValue::buff_from(point.compress().as_bytes().to_vec()) - .expect("BUG: Failed to create clarity value from point"), - ) - .expect("BUG: Failed to create clarity value from point") - } else { - ClarityValue::none() - }; - build_read_only_response(&clarity_value) - } - - /// Build a response for the get_approved_aggregate_key request - pub fn build_get_vote_for_aggregate_key_response(point: Option) -> String { - let clarity_value = if let Some(point) = point { - ClarityValue::some(ClarityValue::Tuple( - TupleData::from_data(vec![ - ( - "aggregate-public-key".into(), - ClarityValue::buff_from(point.compress().as_bytes().to_vec()) - .expect("BUG: Failed to create clarity value from point"), - ), - ("signer-weight".into(), ClarityValue::UInt(1)), // fixed for testing purposes - ]) - .expect("BUG: Failed to create clarity value from tuple data"), - )) - .expect("BUG: Failed to create clarity value from tuple data") - } else { - ClarityValue::none() - }; - build_read_only_response(&clarity_value) - } - /// Build a response for the get_peer_info_with_retry request with a specific stacks tip height and consensus hash pub fn build_get_peer_info_response( burn_block_height: Option, diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 7b490144fc..dbe4f9094d 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -18,7 +18,7 @@ use std::collections::{HashMap, VecDeque}; use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ - NakamotoSignerEntry, SIGNERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, + NakamotoSignerEntry, SIGNERS_NAME, SIGNERS_VOTING_NAME, }; use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::chainstate::stacks::{ @@ -54,7 +54,6 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::StacksEpochId; use stacks_common::{debug, warn}; -use wsts::curve::point::{Compressed, Point}; use super::SignerSlotID; use crate::client::{retry_with_exponential_backoff, ClientError}; @@ -273,40 +272,6 @@ impl StacksClient { .collect()) } - /// Get the vote for a given round, reward cycle, and signer address - pub fn get_vote_for_aggregate_public_key( - &self, - round: u64, - reward_cycle: u64, - signer: StacksAddress, - ) -> Result, ClientError> { - debug!("Getting vote for aggregate public key..."); - let function_name = ClarityName::from("get-vote"); - let function_args = &[ - ClarityValue::UInt(reward_cycle as u128), - ClarityValue::UInt(round as u128), - ClarityValue::Principal(signer.into()), - ]; - let value = self.read_only_contract_call( - &boot_code_addr(self.mainnet), - &ContractName::from(SIGNERS_VOTING_NAME), - &function_name, - function_args, - )?; - // Return value is of type: - // ```clarity - // (option { aggregate-public-key: (buff 33), signer-weight: uint }) - // ``` - let inner_data = value.expect_optional()?; - if let Some(inner_data) = inner_data { - let tuple = inner_data.expect_tuple()?; - let key_value = tuple.get_owned("aggregate-public-key")?; - self.parse_aggregate_public_key(key_value) - } else { - Ok(None) - } - } - /// Retrieve the medium estimated transaction fee in uSTX from the stacks node for the given transaction pub fn get_medium_estimated_fee_ustx( &self, @@ -406,27 +371,6 @@ impl StacksClient { Ok(()) } - /// Retrieve the approved DKG aggregate public key for the given reward cycle - pub fn get_approved_aggregate_key( - &self, - reward_cycle: u64, - ) -> Result, ClientError> { - let function_name = ClarityName::from("get-approved-aggregate-key"); - let voting_contract_id = boot_code_id(SIGNERS_VOTING_NAME, self.mainnet); - let function_args = &[ClarityValue::UInt(reward_cycle as u128)]; - let value = self.read_only_contract_call( - &voting_contract_id.issuer.into(), - &voting_contract_id.name, - &function_name, - function_args, - )?; - let inner_data = value.expect_optional()?; - inner_data.map_or_else( - || Ok(None), - |key_value| self.parse_aggregate_public_key(key_value), - ) - } - /// Retrieve the current consumed weight for the given reward cycle and DKG round pub fn get_round_vote_weight( &self, @@ -736,61 +680,6 @@ impl StacksClient { Ok(account_entry) } - /// Helper function that attempts to deserialize a clarity hex string as the aggregate public key - fn parse_aggregate_public_key( - &self, - value: ClarityValue, - ) -> Result, ClientError> { - debug!("Parsing aggregate public key..."); - let data = value.expect_buff(33)?; - // It is possible that the point was invalid though when voted upon and this cannot be prevented by pox 4 definitions... - // Pass up this error if the conversions fail. - let compressed_data = Compressed::try_from(data.as_slice()).map_err(|e| { - ClientError::MalformedClarityValue(format!( - "Failed to convert aggregate public key to compressed data: {e}" - )) - })?; - let dkg_public_key = Point::try_from(&compressed_data).map_err(|e| { - ClientError::MalformedClarityValue(format!( - "Failed to convert aggregate public key to a point: {e}" - )) - })?; - Ok(Some(dkg_public_key)) - } - - /// Helper function to create a stacks transaction for a modifying contract call - pub fn build_unsigned_vote_for_aggregate_public_key( - &self, - signer_index: u32, - round: u64, - dkg_public_key: Point, - reward_cycle: u64, - nonce: u64, - ) -> Result { - debug!("Building {SIGNERS_VOTING_FUNCTION_NAME} transaction..."); - let contract_address = boot_code_addr(self.mainnet); - let contract_name = ContractName::from(SIGNERS_VOTING_NAME); - let function_name = ClarityName::from(SIGNERS_VOTING_FUNCTION_NAME); - let function_args = vec![ - ClarityValue::UInt(signer_index as u128), - ClarityValue::buff_from(dkg_public_key.compress().data.to_vec())?, - ClarityValue::UInt(round as u128), - ClarityValue::UInt(reward_cycle as u128), - ]; - - let unsigned_tx = Self::build_unsigned_contract_call_transaction( - &contract_address, - contract_name, - function_name, - &function_args, - &self.stacks_private_key, - self.tx_version, - self.chain_id, - nonce, - )?; - Ok(unsigned_tx) - } - /// Try to post a completed nakamoto block to our connected stacks-node /// Returns `true` if the block was accepted or `false` if the block /// was rejected. @@ -1036,15 +925,13 @@ mod tests { use rand_core::RngCore; use stacks_common::bitvec::BitVec; use stacks_common::consts::{CHAIN_ID_TESTNET, SIGNER_SLOTS_PER_USER}; - use wsts::curve::scalar::Scalar; use super::*; use crate::client::tests::{ - build_account_nonce_response, build_get_approved_aggregate_key_response, - build_get_last_round_response, build_get_last_set_cycle_response, - build_get_medium_estimated_fee_ustx_response, build_get_peer_info_response, - build_get_pox_data_response, build_get_round_info_response, build_get_tenure_tip_response, - build_get_vote_for_aggregate_key_response, build_get_weight_threshold_response, + build_account_nonce_response, build_get_last_round_response, + build_get_last_set_cycle_response, build_get_medium_estimated_fee_ustx_response, + build_get_peer_info_response, build_get_pox_data_response, build_get_round_info_response, + build_get_tenure_tip_response, build_get_weight_threshold_response, build_read_only_response, write_response, MockServerClient, }; @@ -1174,45 +1061,6 @@ mod tests { assert!(matches!(res, Err(ClientError::ReqwestError(_)))); } - #[test] - fn get_aggregate_public_key_should_succeed() { - let orig_point = Point::from(Scalar::random(&mut rand::thread_rng())); - let response = build_get_approved_aggregate_key_response(Some(orig_point)); - let mock = MockServerClient::new(); - let h = spawn(move || mock.client.get_approved_aggregate_key(0)); - write_response(mock.server, response.as_bytes()); - let res = h.join().unwrap().unwrap(); - assert_eq!(res, Some(orig_point)); - - let response = build_get_approved_aggregate_key_response(None); - let mock = MockServerClient::new(); - let h = spawn(move || mock.client.get_approved_aggregate_key(0)); - write_response(mock.server, response.as_bytes()); - let res = h.join().unwrap().unwrap(); - assert!(res.is_none()); - } - - #[test] - fn parse_valid_aggregate_public_key_should_succeed() { - let mock = MockServerClient::new(); - let orig_point = Point::from(Scalar::random(&mut rand::thread_rng())); - let clarity_value = ClarityValue::buff_from(orig_point.compress().as_bytes().to_vec()) - .expect("BUG: Failed to create clarity value from point"); - let result = mock - .client - .parse_aggregate_public_key(clarity_value) - .unwrap(); - assert_eq!(result, Some(orig_point)); - } - - #[test] - fn parse_invalid_aggregate_public_key_should_fail() { - let mock = MockServerClient::new(); - let value = ClarityValue::UInt(10_u128); - let result = mock.client.parse_aggregate_public_key(value); - assert!(result.is_err()) - } - #[test] fn transaction_contract_call_should_send_bytes_to_node() { let mock = MockServerClient::new(); @@ -1265,58 +1113,6 @@ mod tests { ); } - #[test] - fn build_vote_for_aggregate_public_key_should_succeed() { - let mock = MockServerClient::new(); - let point = Point::from(Scalar::random(&mut rand::thread_rng())); - let nonce = thread_rng().next_u64(); - let signer_index = thread_rng().next_u32(); - let round = thread_rng().next_u64(); - let reward_cycle = thread_rng().next_u64(); - - let h = spawn(move || { - mock.client.build_unsigned_vote_for_aggregate_public_key( - signer_index, - round, - point, - reward_cycle, - nonce, - ) - }); - assert!(h.join().unwrap().is_ok()); - } - - #[test] - fn broadcast_vote_for_aggregate_public_key_should_succeed() { - let mock = MockServerClient::new(); - let point = Point::from(Scalar::random(&mut rand::thread_rng())); - let nonce = thread_rng().next_u64(); - let signer_index = thread_rng().next_u32(); - let round = thread_rng().next_u64(); - let reward_cycle = thread_rng().next_u64(); - let unsigned_tx = mock - .client - .build_unsigned_vote_for_aggregate_public_key( - signer_index, - round, - point, - reward_cycle, - nonce, - ) - .unwrap(); - let tx = mock.client.sign_transaction(unsigned_tx).unwrap(); - let tx_clone = tx.clone(); - let h = spawn(move || mock.client.submit_transaction(&tx_clone)); - - write_response( - mock.server, - format!("HTTP/1.1 200 OK\n\n{}", tx.txid()).as_bytes(), - ); - let returned_txid = h.join().unwrap().unwrap(); - - assert_eq!(returned_txid, tx.txid()); - } - #[test] fn core_info_call_for_burn_block_height_should_succeed() { let mock = MockServerClient::new(); @@ -1579,9 +1375,10 @@ mod tests { #[test] fn get_reward_set_should_succeed() { let mock = MockServerClient::new(); - let point = Point::from(Scalar::random(&mut rand::thread_rng())).compress(); + let private_key = StacksPrivateKey::new(); + let public_key = StacksPublicKey::from_private(&private_key); let mut bytes = [0u8; 33]; - bytes.copy_from_slice(point.as_bytes()); + bytes.copy_from_slice(&public_key.to_bytes_compressed()); let stacker_set = RewardSet { rewarded_addresses: vec![PoxAddress::standard_burn_address(false)], start_cycle_state: PoxStartCycleInfo { @@ -1606,30 +1403,6 @@ mod tests { assert_eq!(h.join().unwrap().unwrap(), stacker_set.signers); } - #[test] - fn get_vote_for_aggregate_public_key_should_succeed() { - let mock = MockServerClient::new(); - let point = Point::from(Scalar::random(&mut rand::thread_rng())); - let stacks_address = mock.client.stacks_address; - let key_response = build_get_vote_for_aggregate_key_response(Some(point)); - let h = spawn(move || { - mock.client - .get_vote_for_aggregate_public_key(0, 0, stacks_address) - }); - write_response(mock.server, key_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), Some(point)); - - let mock = MockServerClient::new(); - let stacks_address = mock.client.stacks_address; - let key_response = build_get_vote_for_aggregate_key_response(None); - let h = spawn(move || { - mock.client - .get_vote_for_aggregate_public_key(0, 0, stacks_address) - }); - write_response(mock.server, key_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), None); - } - #[test] fn get_round_vote_weight_should_succeed() { let mock = MockServerClient::new(); diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 8bac540e7a..20c2bc2ca8 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -57,7 +57,7 @@ use stacks_common::{info, warn}; use crate::client::StacksClient; use crate::config::SignerConfig; -use crate::runloop::{RunLoop, RunLoopCommand}; +use crate::runloop::RunLoop; /// A trait which provides a common `Signer` interface for `v0` and `v1` pub trait Signer: Debug + Display { @@ -74,13 +74,6 @@ pub trait Signer: Debug + Display { res: &Sender>, current_reward_cycle: u64, ); - /// Process a command - fn process_command( - &mut self, - stacks_client: &StacksClient, - current_reward_cycle: u64, - command: Option, - ); /// Check if the signer is in the middle of processing blocks fn has_unprocessed_blocks(&self) -> bool; } @@ -90,14 +83,12 @@ pub type RunningSigner = libsigner::RunningSigner, Vec /// The wrapper for the runloop signer type type RunLoopSigner = - libsigner::Signer, RunLoop, SignerEventReceiver, T>; + libsigner::Signer, RunLoop, SignerEventReceiver, T>; /// The spawned signer pub struct SpawnedSigner + Send, T: SignerEventTrait> { /// The underlying running signer thread handle running_signer: RunningSigner, - /// The command sender for interacting with the running signer - pub cmd_send: Sender, /// The result receiver for interacting with the running signer pub res_recv: Receiver>, /// The spawned signer's config @@ -131,7 +122,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner For more information, check the documentation at \ https://docs.stacks.co/nakamoto-upgrade/signing-and-stacking/faq#what-should-the-networking-setup-for-my-signer-look-like." ); - let (cmd_send, cmd_recv) = channel(); let (res_send, res_recv) = channel(); let ev = SignerEventReceiver::new(config.network.is_mainnet()); #[cfg(feature = "monitoring_prom")] @@ -139,12 +129,10 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner crate::monitoring::start_serving_monitoring_metrics(config.clone()).ok(); } let runloop = RunLoop::new(config.clone()); - let mut signer: RunLoopSigner = - libsigner::Signer::new(runloop, ev, cmd_recv, res_send); + let mut signer: RunLoopSigner = libsigner::Signer::new(runloop, ev, res_send); let running_signer = signer.spawn(endpoint).expect("Failed to spawn signer"); SpawnedSigner { running_signer, - cmd_send, res_recv, _phantom: std::marker::PhantomData, config, diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index d8d159a086..855957a70a 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -1,5 +1,3 @@ -use std::collections::VecDeque; -use std::fmt::Debug; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -15,16 +13,15 @@ use std::fmt::Debug; // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::fmt::Debug; use std::sync::mpsc::Sender; use std::time::Duration; use clarity::codec::StacksMessageCodec; use hashbrown::HashMap; -use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerRunLoop}; +use libsigner::{SignerEntries, SignerEvent, SignerRunLoop}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::{debug, error, info, warn}; -use wsts::common::MerkleRoot; -use wsts::state_machine::OperationResult; use crate::chainstate::SortitionsView; use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient}; @@ -55,14 +52,6 @@ pub struct StateInfo { pub enum SignerResult { /// The signer has received a status check StatusCheck(StateInfo), - /// The signer has completed an operation - OperationResult(OperationResult), -} - -impl From for SignerResult { - fn from(result: OperationResult) -> Self { - SignerResult::OperationResult(result) - } } impl From for SignerResult { @@ -71,31 +60,6 @@ impl From for SignerResult { } } -/// Which signer operation to perform -#[derive(PartialEq, Clone, Debug)] -pub enum SignerCommand { - /// Generate a DKG aggregate public key - Dkg, - /// Sign a message - Sign { - /// The block to sign over - block_proposal: BlockProposal, - /// Whether to make a taproot signature - is_taproot: bool, - /// Taproot merkle root - merkle_root: Option, - }, -} - -/// Which operation to perform -#[derive(PartialEq, Clone, Debug)] -pub struct RunLoopCommand { - /// Which signer operation to perform - pub command: SignerCommand, - /// The reward cycle we are performing the operation for - pub reward_cycle: u64, -} - /// The runloop state #[derive(PartialEq, Eq, Debug, Clone, Copy)] pub enum State { @@ -213,8 +177,6 @@ where pub stacks_signers: HashMap>, /// The state of the runloop pub state: State, - /// The commands received thus far - pub commands: VecDeque, /// The current reward cycle info. Only None if the runloop is uninitialized pub current_reward_cycle_info: Option, /// Cache sortitin data from `stacks-node` @@ -230,7 +192,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo stacks_client, stacks_signers: HashMap::with_capacity(2), state: State::Uninitialized, - commands: VecDeque::new(), current_reward_cycle_info: None, sortition_state: None, } @@ -492,7 +453,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo } impl, T: StacksMessageCodec + Clone + Send + Debug> - SignerRunLoop, RunLoopCommand, T> for RunLoop + SignerRunLoop, T> for RunLoop { fn set_event_timeout(&mut self, timeout: Duration) { self.config.event_timeout = timeout; @@ -505,11 +466,10 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> fn run_one_pass( &mut self, event: Option>, - cmd: Option, res: &Sender>, ) -> Option> { debug!( - "Running one pass for the signer. state={:?}, cmd={cmd:?}, event={event:?}", + "Running one pass for the signer. state={:?}, event={event:?}", self.state ); // This is the only event that we respond to from the outer signer runloop @@ -525,9 +485,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> } } - if let Some(cmd) = cmd { - self.commands.push_back(cmd); - } if self.state == State::Uninitialized { if let Err(e) = self.initialize_runloop() { error!("Failed to initialize signer runloop: {e}."); @@ -560,12 +517,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> res, current_reward_cycle, ); - // After processing event, run the next command for each signer - signer.process_command( - &self.stacks_client, - current_reward_cycle, - self.commands.pop_front(), - ); } if self.state == State::NoRegisteredSigners && event.is_some() { let next_reward_cycle = current_reward_cycle.saturating_add(1); diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 1b8a57abbb..06b9d703c3 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -36,7 +36,6 @@ use stacks_common::types::chainstate::ConsensusHash; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, define_u8_enum, error}; -use wsts::net::NonceRequest; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] /// A vote across the signer set for a block @@ -67,21 +66,6 @@ impl StacksMessageCodec for NakamotoBlockVote { } } -#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] -/// Information specific to Signer V1 -pub struct BlockInfoV1 { - /// The associated packet nonce request if we have one - pub nonce_request: Option, -} - -impl From for BlockInfoV1 { - fn from(value: NonceRequest) -> Self { - Self { - nonce_request: Some(value), - } - } -} - #[derive(Serialize, Deserialize, Debug, PartialEq, Default)] /// Store extra version-specific info in `BlockInfo` pub enum ExtraBlockInfo { @@ -90,28 +74,6 @@ pub enum ExtraBlockInfo { None, /// Extra data for Signer V0 V0, - /// Extra data for Signer V1 - V1(BlockInfoV1), -} - -impl ExtraBlockInfo { - /// Take `nonce_request` if it exists - pub fn take_nonce_request(&mut self) -> Option { - match self { - ExtraBlockInfo::None | ExtraBlockInfo::V0 => None, - ExtraBlockInfo::V1(v1) => v1.nonce_request.take(), - } - } - /// Set `nonce_request` if it exists - pub fn set_nonce_request(&mut self, value: NonceRequest) -> Result<(), &str> { - match self { - ExtraBlockInfo::None | ExtraBlockInfo::V0 => Err("Field doesn't exist"), - ExtraBlockInfo::V1(v1) => { - v1.nonce_request = Some(value); - Ok(()) - } - } - } } define_u8_enum!( @@ -217,14 +179,6 @@ impl From for BlockInfo { } } impl BlockInfo { - /// Create a new BlockInfo with an associated nonce request packet - pub fn new_v1_with_request(block_proposal: BlockProposal, nonce_request: NonceRequest) -> Self { - let mut block_info = BlockInfo::from(block_proposal); - block_info.ext = ExtraBlockInfo::V1(BlockInfoV1::from(nonce_request)); - block_info.signed_over = true; - block_info - } - /// Mark this block as locally accepted, valid, signed over, and records either the self or group signed timestamp in the block info if it wasn't /// already set. pub fn mark_locally_accepted(&mut self, group_signed: bool) -> Result<(), String> { @@ -283,7 +237,10 @@ impl BlockInfo { ) } BlockState::LocallyRejected => { - matches!(prev_state, BlockState::Unprocessed) + matches!( + prev_state, + BlockState::Unprocessed | BlockState::LocallyRejected + ) } BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 7c94ec908c..d6eaa37af8 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -39,7 +39,7 @@ use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; -use crate::runloop::{RunLoopCommand, SignerResult}; +use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; @@ -259,17 +259,6 @@ impl SignerTrait for Signer { } } - fn process_command( - &mut self, - _stacks_client: &StacksClient, - _current_reward_cycle: u64, - command: Option, - ) { - if let Some(command) = command { - warn!("{self}: Received a command: {command:?}. V0 Signers do not support commands. Ignoring...") - } - } - fn has_unprocessed_blocks(&self) -> bool { self.signer_db .has_unprocessed_blocks(self.reward_cycle) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 1d267b047f..4d99b53821 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -487,7 +487,6 @@ impl NakamotoBlockBuilder { tenure_info: NakamotoTenureInfo, settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, - signer_transactions: Vec, signer_bitvec_len: u16, ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec), Error> { let (tip_consensus_hash, tip_block_hash, tip_height) = ( @@ -522,14 +521,13 @@ impl NakamotoBlockBuilder { .block_limit() .expect("Failed to obtain block limit from miner's block connection"); - let mut initial_txs: Vec<_> = [ + let initial_txs: Vec<_> = [ tenure_info.tenure_change_tx.clone(), tenure_info.coinbase_tx.clone(), ] .into_iter() .filter_map(|x| x) .collect(); - initial_txs.extend(signer_transactions); // TODO: update this mempool check to prioritize signer vote transactions over other transactions let (blocked, tx_events) = match StacksBlockBuilder::select_and_apply_transactions( diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a2f949a8cc..4d9d4f968e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -13,17 +13,13 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashMap; use std::thread; use std::thread::JoinHandle; use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; -use clarity::vm::clarity::ClarityConnection; -use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; -use hashbrown::HashSet; -use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; -use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; +use clarity::vm::types::PrincipalData; +use libsigner::v0::messages::{MinerSlotID, SignerMessage}; use libsigner::StackerDBSession; use rand::{thread_rng, Rng}; use stacks::burnchains::Burnchain; @@ -32,7 +28,6 @@ use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::coordinator::OnChainRewardSetProvider; use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; -use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use stacks::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; @@ -46,7 +41,6 @@ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; use stacks::util::secp256k1::MessageSignature; -use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::vrf::VRFProof; @@ -331,7 +325,7 @@ impl BlockMinerThread { } } - match self.mine_block(&stackerdbs) { + match self.mine_block() { Ok(x) => { if !self.validate_timestamp(&x)? { info!("Block mined too quickly. Will try again."; @@ -591,125 +585,6 @@ impl BlockMinerThread { return Ok((reward_set, signature)); } - fn get_stackerdb_contract_and_slots( - &self, - stackerdbs: &StackerDBs, - msg_id: &MessageSlotID, - reward_cycle: u64, - ) -> Result<(QualifiedContractIdentifier, HashMap), NakamotoNodeError> { - let stackerdb_contracts = stackerdbs - .get_stackerdb_contract_ids() - .expect("FATAL: could not get the stacker DB contract ids"); - - let signers_contract_id = - msg_id.stacker_db_contract(self.config.is_mainnet(), reward_cycle); - if !stackerdb_contracts.contains(&signers_contract_id) { - return Err(NakamotoNodeError::SignerSignatureError( - "No signers contract found, cannot wait for signers".into(), - )); - }; - // Get the slots for every signer - let signers = stackerdbs - .get_signers(&signers_contract_id) - .expect("FATAL: could not get signers from stacker DB"); - let mut slot_ids_addresses = HashMap::with_capacity(signers.len()); - for (slot_id, address) in stackerdbs - .get_signers(&signers_contract_id) - .expect("FATAL: could not get signers from stacker DB") - .into_iter() - .enumerate() - { - slot_ids_addresses.insert( - u32::try_from(slot_id).expect("FATAL: too many signers to fit into u32 range"), - address, - ); - } - Ok((signers_contract_id, slot_ids_addresses)) - } - - fn get_signer_transactions( - &self, - chainstate: &mut StacksChainState, - sortdb: &SortitionDB, - stackerdbs: &StackerDBs, - ) -> Result, NakamotoNodeError> { - let next_reward_cycle = self - .burnchain - .block_height_to_reward_cycle(self.burn_block.block_height) - .expect("FATAL: no reward cycle for burn block") - .wrapping_add(1); - let (signers_contract_id, slot_ids_addresses) = self.get_stackerdb_contract_and_slots( - stackerdbs, - &MessageSlotID::Transactions, - next_reward_cycle, - )?; - let slot_ids = slot_ids_addresses.keys().cloned().collect::>(); - let addresses = slot_ids_addresses.values().cloned().collect::>(); - // Get the transactions from the signers for the next block - let signer_chunks = stackerdbs - .get_latest_chunks(&signers_contract_id, &slot_ids) - .expect("FATAL: could not get latest chunks from stacker DB"); - let signer_messages: Vec<(u32, SignerMessageV1)> = slot_ids - .iter() - .zip(signer_chunks.into_iter()) - .filter_map(|(slot_id, chunk)| { - chunk.and_then(|chunk| { - read_next::(&mut &chunk[..]) - .ok() - .map(|msg| (*slot_id, msg)) - }) - }) - .collect(); - - if signer_messages.is_empty() { - return Ok(vec![]); - } - - let (consensus_hash, block_bhh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); - let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); - - // Get all nonces for the signers from clarity DB to use to validate transactions - let account_nonces = chainstate - .with_read_only_clarity_tx( - &sortdb - .index_handle_at_block(chainstate, &stacks_block_id) - .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, - &stacks_block_id, - |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - addresses - .iter() - .map(|address| { - ( - address.clone(), - clarity_db - .get_account_nonce(&address.clone().into()) - .unwrap_or(0), - ) - }) - .collect::>() - }) - }, - ) - .unwrap_or_default(); - let mut filtered_transactions: HashMap = HashMap::new(); - for (_slot, signer_message) in signer_messages { - match signer_message { - SignerMessageV1::Transactions(transactions) => { - NakamotoSigners::update_filtered_transactions( - &mut filtered_transactions, - &account_nonces, - self.config.is_mainnet(), - transactions, - ) - } - _ => {} // Any other message is ignored - } - } - Ok(filtered_transactions.into_values().collect()) - } - /// Fault injection -- possibly fail to broadcast /// Return true to drop the block fn fault_injection_broadcast_fail(&self) -> bool { @@ -834,7 +709,7 @@ impl BlockMinerThread { &sort_db, &self.burn_block, &stackerdbs, - SignerMessageV0::BlockPushed(block), + SignerMessage::BlockPushed(block), MinerSlotID::BlockPushed, chain_state.mainnet, &mut miners_session, @@ -1117,7 +992,7 @@ impl BlockMinerThread { #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a /// burnchain block-commit transaction. If we succeed, then return the assembled block. - fn mine_block(&mut self, stackerdbs: &StackerDBs) -> Result { + fn mine_block(&mut self) -> Result { debug!("block miner thread ID is {:?}", thread::current().id()); let burn_db_path = self.config.get_burn_db_file_path(); @@ -1165,9 +1040,6 @@ impl BlockMinerThread { parent_block_info.stacks_parent_header.microblock_tail = None; - let signer_transactions = - self.get_signer_transactions(&mut chain_state, &burn_db, &stackerdbs)?; - let signer_bitvec_len = reward_set.rewarded_addresses.len().try_into().ok(); // build the block itself @@ -1186,7 +1058,6 @@ impl BlockMinerThread { // we'll invoke the event dispatcher ourselves so that it calculates the // correct signer_sighash for `process_mined_nakamoto_block_event` Some(&self.event_dispatcher), - signer_transactions, signer_bitvec_len.unwrap_or(0), ) .map_err(|e| { diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 91f9bc3282..4248e72145 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -302,19 +302,12 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { - panic!("Recieved an operation result."); - } - SignerResult::StatusCheck(state_info) => { - output.push(Some(state_info)); - } - } + output.push(Some(state_info)); } output } From 6f60813c91468dce9971a4a859c98f86dc31a1e0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 25 Sep 2024 12:31:14 -0500 Subject: [PATCH 1221/1400] use better sqlite column affinity --- stackslib/src/core/mempool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 61306a9764..bf2b5aff57 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -806,7 +806,7 @@ const MEMPOOL_SCHEMA_6_NONCES: &'static [&'static str] = &[ const MEMPOOL_SCHEMA_7_TIME_ESTIMATES: &'static [&'static str] = &[ r#" -- ALLOW NULL - ALTER TABLE mempool ADD COLUMN time_estimate_ms NUMBER; + ALTER TABLE mempool ADD COLUMN time_estimate_ms INTEGER; "#, r#" INSERT INTO schema_version (version) VALUES (7) From 77fc9d8aaebf86b566eb4aa2724e95674191c9e2 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 11:32:05 -0700 Subject: [PATCH 1222/1400] Remove wsts from stackslib Signed-off-by: Jacinta Ferrant --- Cargo.lock | 1 - libsigner/src/events.rs | 2 +- libsigner/src/v0/messages.rs | 4 +- stackslib/Cargo.toml | 1 - .../burn/operations/vote_for_aggregate_key.rs | 11 +- .../chainstate/nakamoto/coordinator/tests.rs | 1 - stackslib/src/chainstate/nakamoto/mod.rs | 7 +- .../src/chainstate/nakamoto/signer_set.rs | 13 +- stackslib/src/chainstate/nakamoto/tenure.rs | 5 +- .../src/chainstate/nakamoto/test_signers.rs | 177 +++--------------- .../src/chainstate/nakamoto/tests/mod.rs | 80 ++++---- .../src/chainstate/nakamoto/tests/node.rs | 2 - stackslib/src/chainstate/stacks/boot/mod.rs | 25 +-- .../src/chainstate/stacks/boot/pox_4_tests.rs | 42 +++-- stackslib/src/chainstate/stacks/mod.rs | 43 ----- .../src/chainstate/stacks/transaction.rs | 43 ----- .../nakamoto/download_state_machine.rs | 1 - stackslib/src/net/download/nakamoto/mod.rs | 1 - stackslib/src/net/download/nakamoto/tenure.rs | 1 - stackslib/src/net/mod.rs | 10 +- stackslib/src/net/tests/download/nakamoto.rs | 5 +- stackslib/src/net/tests/mod.rs | 1 - .../src/tests/nakamoto_integrations.rs | 15 +- 23 files changed, 115 insertions(+), 376 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d16284fa5c..7e0f1ab61d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3523,7 +3523,6 @@ dependencies = [ "time 0.2.27", "url", "winapi 0.3.9", - "wsts", ] [[package]] diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 4fb6d7a507..70e7853d65 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -25,7 +25,7 @@ use std::time::SystemTime; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{MINERS_NAME, SIGNERS_NAME}; use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; -use blockstack_lib::chainstate::stacks::{StacksTransaction, ThresholdSignature}; +use blockstack_lib::chainstate::stacks::StacksTransaction; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, ValidateRejectCode, }; diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 7209398c1c..47d317992d 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -887,8 +887,8 @@ impl From for SignerMessage { mod test { use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; use blockstack_lib::chainstate::stacks::{ - ThresholdSignature, TransactionAnchorMode, TransactionAuth, TransactionPayload, - TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, + TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, + TransactionSmartContract, TransactionVersion, }; use blockstack_lib::util_lib::strings::StacksString; use clarity::consts::CHAIN_ID_MAINNET; diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index d04fc3b1af..edd58c6161 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -56,7 +56,6 @@ stacks-common = { path = "../stacks-common" } pox-locking = { path = "../pox-locking" } libstackerdb = { path = "../libstackerdb" } siphasher = "0.3.7" -wsts = { workspace = true } hashbrown = { workspace = true } rusqlite = { workspace = true } diff --git a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs index 3933eacaa6..648859abc6 100644 --- a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs +++ b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs @@ -21,7 +21,6 @@ use stacks_common::deps_common::bitcoin::blockdata::script::Builder; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksAddress}; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::secp256k1::Secp256k1PublicKey; -use wsts::curve::point::{Compressed, Point}; use crate::burnchains::bitcoin::bits::parse_script; use crate::burnchains::bitcoin::{BitcoinTxInput, BitcoinTxInputStructured}; @@ -175,26 +174,18 @@ impl VoteForAggregateKeyOp { /// Check the payload of a vote-for-aggregate-key burn op. /// Both `signer_key` and `aggregate_key` are checked for validity against - /// `Secp256k1PublicKey` from `stacks_common` as well as `Point` from wsts. + /// `Secp256k1PublicKey` from `stacks_common` pub fn check(&self) -> Result<(), op_error> { // Check to see if the aggregate key is valid let aggregate_key_bytes = self.aggregate_key.as_bytes(); Secp256k1PublicKey::from_slice(aggregate_key_bytes) .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - let compressed = Compressed::try_from(aggregate_key_bytes.clone()) - .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - Point::try_from(&compressed).map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - // Check to see if the signer key is valid let signer_key_bytes = self.signer_key.as_bytes(); Secp256k1PublicKey::from_slice(signer_key_bytes) .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - let compressed = Compressed::try_from(signer_key_bytes.clone()) - .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - Point::try_from(&compressed).map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - Ok(()) } } diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index cf016adb7d..0a59c1a67b 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -34,7 +34,6 @@ use stacks_common::types::{Address, StacksEpoch, StacksEpochId, StacksPublicKeyB use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::vrf::VRFProof; -use wsts::curve::point::Point; use crate::burnchains::{PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e97fefafff..6a850e6d35 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -53,7 +53,6 @@ use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; -use wsts::curve::point::Point; use self::signer_set::SignerCalculation; use super::burn::db::sortdb::{ @@ -74,7 +73,7 @@ use super::stacks::db::{ use super::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use super::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeError, TenureChangePayload, ThresholdSignature, TransactionPayload, + TenureChangeError, TenureChangePayload, TransactionPayload, }; use crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; @@ -4499,8 +4498,8 @@ impl NakamotoChainState { /// Boot code instantiation for the aggregate public key. /// TODO: This should be removed once it's possible for stackers to vote on the aggregate /// public key - pub fn aggregate_public_key_bootcode(clarity_tx: &mut ClarityTx, apk: &Point) { - let agg_pub_key = to_hex(&apk.compress().data); + pub fn aggregate_public_key_bootcode(clarity_tx: &mut ClarityTx, apk: Vec) { + let agg_pub_key = to_hex(&apk); let contract_content = format!( "(define-read-only ({}) 0x{})", BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index d7eaad51b5..38e76f7e51 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -47,7 +47,6 @@ use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; -use wsts::curve::point::{Compressed, Point}; use crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{ @@ -73,8 +72,8 @@ use crate::chainstate::stacks::db::{ use crate::chainstate::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use crate::chainstate::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeCause, TenureChangeError, TenureChangePayload, ThresholdSignature, - TransactionPayload, MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, + TenureChangeCause, TenureChangeError, TenureChangePayload, TransactionPayload, + MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, }; use crate::clarity::vm::clarity::{ClarityConnection, TransactionConnection}; use crate::clarity_vm::clarity::{ @@ -101,7 +100,7 @@ pub struct SignerCalculation { pub struct AggregateKeyVoteParams { pub signer_index: u64, - pub aggregate_key: Point, + pub aggregate_key: Vec, pub voting_round: u64, pub reward_cycle: u64, } @@ -547,10 +546,8 @@ impl NakamotoSigners { } let signer_index_value = payload.function_args.first()?; let signer_index = u64::try_from(signer_index_value.clone().expect_u128().ok()?).ok()?; - let point_value = payload.function_args.get(1)?; - let point_bytes = point_value.clone().expect_buff(33).ok()?; - let compressed_data = Compressed::try_from(point_bytes.as_slice()).ok()?; - let aggregate_key = Point::try_from(&compressed_data).ok()?; + let aggregate_key_value = payload.function_args.get(1)?; + let aggregate_key = aggregate_key_value.clone().expect_buff(33).ok()?; let round_value = payload.function_args.get(2)?; let voting_round = u64::try_from(round_value.clone().expect_u128().ok()?).ok()?; let reward_cycle = diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 059da96b7a..4b7734653c 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -87,7 +87,6 @@ use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; -use wsts::curve::point::Point; use crate::burnchains::{PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{ @@ -108,8 +107,8 @@ use crate::chainstate::stacks::db::{ use crate::chainstate::stacks::events::StacksTransactionReceipt; use crate::chainstate::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeCause, TenureChangeError, TenureChangePayload, ThresholdSignature, - TransactionPayload, MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, + TenureChangeCause, TenureChangeError, TenureChangePayload, TransactionPayload, + MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, }; use crate::clarity_vm::clarity::{ClarityInstance, PreCommitClarityBlock}; use crate::clarity_vm::database::SortitionDBRef; diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 4ab7613751..6fd559da69 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -25,8 +25,9 @@ use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::types::*; use hashbrown::HashMap; +use rand::distributions::Standard; use rand::seq::SliceRandom; -use rand::{CryptoRng, RngCore, SeedableRng}; +use rand::{CryptoRng, Rng, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; @@ -36,8 +37,6 @@ use stacks_common::types::chainstate::{ use stacks_common::util::hash::Hash160; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; -use wsts::curve::point::Point; -use wsts::traits::Aggregator; use self::boot::RewardSet; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; @@ -68,75 +67,32 @@ use crate::util_lib::db::Error as db_error; #[derive(Debug, Clone, PartialEq)] pub struct TestSigners { - /// The parties that will sign the blocks - pub signer_parties: Vec, - /// The commitments to the polynomials for the aggregate public key - pub poly_commitments: HashMap, - /// The aggregate public key - pub aggregate_public_key: Point, - /// The total number of key ids distributed among signer_parties - pub num_keys: u32, - /// The number of vote shares required to sign a block + /// The number of signatures required to validate a block pub threshold: u32, - /// The key ids distributed among signer_parties - pub party_key_ids: Vec>, - /// The cycle for which the signers are valid - pub cycle: u64, /// The signer's private keys pub signer_keys: Vec, + /// The aggregate public key + pub aggregate_public_key: Vec, + /// The cycle for which the aggregate public key was generated + pub cycle: u64, } impl Default for TestSigners { fn default() -> Self { - let mut rng = rand_core::OsRng::default(); - let num_keys = 10; - let threshold = 7; - let party_key_ids: Vec> = - vec![vec![1, 2, 3], vec![4, 5], vec![6, 7, 8], vec![9, 10]]; - let num_parties = party_key_ids.len().try_into().unwrap(); - - // Create the parties - let mut signer_parties: Vec = party_key_ids - .iter() - .enumerate() - .map(|(pid, pkids)| { - wsts::v2::Party::new( - pid.try_into().unwrap(), - pkids, - num_parties, - num_keys, - threshold, - &mut rng, - ) - }) - .collect(); + let aggregate_public_key: Vec = + rand::thread_rng().sample_iter(Standard).take(33).collect(); + let num_signers = 5; + let threshold = 5 * 7 / 10; let mut signer_keys = Vec::::new(); - for _ in 0..num_keys { + for _ in 0..num_signers { signer_keys.push(Secp256k1PrivateKey::default()); } - - // Generate an aggregate public key - let poly_commitments = match wsts::v2::test_helpers::dkg(&mut signer_parties, &mut rng) { - Ok(poly_commitments) => poly_commitments, - Err(secret_errors) => { - panic!("Got secret errors from DKG: {:?}", secret_errors); - } - }; - let mut sig_aggregator = wsts::v2::Aggregator::new(num_keys, threshold); - sig_aggregator - .init(&poly_commitments) - .expect("aggregator init failed"); - let aggregate_public_key = sig_aggregator.poly[0]; Self { - signer_parties, - aggregate_public_key, - poly_commitments, - num_keys, threshold, - party_key_ids, - cycle: 0, signer_keys, + aggregate_public_key, + cycle: 0, } } } @@ -149,50 +105,15 @@ impl TestSigners { /// Internal function to generate aggregate key information fn default_with_signers(signer_keys: Vec) -> Self { - let mut rng = rand_core::OsRng::default(); - let num_keys = 10; - let threshold = 7; - let party_key_ids: Vec> = - vec![vec![1, 2, 3], vec![4, 5], vec![6, 7, 8], vec![9, 10]]; - let num_parties = party_key_ids.len().try_into().unwrap(); - - // Create the parties - let mut signer_parties: Vec = party_key_ids - .iter() - .enumerate() - .map(|(pid, pkids)| { - wsts::v2::Party::new( - pid.try_into().unwrap(), - pkids, - num_parties, - num_keys, - threshold, - &mut rng, - ) - }) - .collect(); - - // Generate an aggregate public key - let poly_commitments = match wsts::v2::test_helpers::dkg(&mut signer_parties, &mut rng) { - Ok(poly_commitments) => poly_commitments, - Err(secret_errors) => { - panic!("Got secret errors from DKG: {:?}", secret_errors); - } - }; - let mut sig_aggregator = wsts::v2::Aggregator::new(num_keys, threshold); - sig_aggregator - .init(&poly_commitments) - .expect("aggregator init failed"); - let aggregate_public_key = sig_aggregator.poly[0]; + let aggregate_public_key: Vec = + rand::thread_rng().sample_iter(Standard).take(33).collect(); + let num_signers = signer_keys.len(); + let threshold = u32::try_from(num_signers * 7 / 10).unwrap(); Self { - signer_parties, - aggregate_public_key, - poly_commitments, - num_keys, threshold, - party_key_ids, - cycle: 0, signer_keys, + aggregate_public_key, + cycle: 0, } } @@ -278,25 +199,6 @@ impl TestSigners { keys.iter().map(|key| key.sign(&msg).unwrap()).collect() } - /// Sign a Nakamoto block using the aggregate key. - /// NB: this function is current unused. - #[allow(dead_code)] - fn sign_block_with_aggregate_key(&mut self, block: &NakamotoBlock) -> ThresholdSignature { - let mut rng = rand_core::OsRng::default(); - let msg = block.header.signer_signature_hash().0; - let (nonces, sig_shares, key_ids) = - wsts::v2::test_helpers::sign(msg.as_slice(), &mut self.signer_parties, &mut rng); - - let mut sig_aggregator = wsts::v2::Aggregator::new(self.num_keys, self.threshold); - sig_aggregator - .init(&self.poly_commitments) - .expect("aggregator init failed"); - let signature = sig_aggregator - .sign(msg.as_slice(), &nonces, &sig_shares, &key_ids) - .expect("aggregator sig failed"); - ThresholdSignature(signature) - } - /// Generate an list of signatures for a block. Only /// signers in the reward set will be included. pub fn generate_ordered_signatures( @@ -353,45 +255,16 @@ impl TestSigners { } // Generate and assign a new aggregate public key - pub fn generate_aggregate_key(&mut self, cycle: u64) -> Point { + pub fn generate_aggregate_key(&mut self, cycle: u64) -> Vec { // If the key is already generated for this cycle, return it if cycle == self.cycle { debug!("Returning cached aggregate key for cycle {}", cycle); return self.aggregate_public_key.clone(); } - debug!("Generating aggregate key for cycle {}", cycle); - let mut rng = ChaCha20Rng::seed_from_u64(cycle); - let num_parties = self.party_key_ids.len().try_into().unwrap(); - // Create the parties - self.signer_parties = self - .party_key_ids - .iter() - .enumerate() - .map(|(pid, pkids)| { - wsts::v2::Party::new( - pid.try_into().unwrap(), - pkids, - num_parties, - self.num_keys, - self.threshold, - &mut rng, - ) - }) - .collect(); - self.poly_commitments = - match wsts::v2::test_helpers::dkg(&mut self.signer_parties, &mut rng) { - Ok(poly_commitments) => poly_commitments, - Err(secret_errors) => { - panic!("Got secret errors from DKG: {:?}", secret_errors); - } - }; - let mut sig_aggregator = wsts::v2::Aggregator::new(self.num_keys, self.threshold); - sig_aggregator - .init(&self.poly_commitments) - .expect("aggregator init failed"); - self.aggregate_public_key = sig_aggregator.poly[0]; - self.cycle = cycle; - self.aggregate_public_key.clone() + let aggregate_public_key: Vec = + rand::thread_rng().sample_iter(Standard).take(33).collect(); + self.aggregate_public_key = aggregate_public_key.clone(); + aggregate_public_key } } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 722cfa541a..ea163730ec 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -25,7 +25,8 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::types::StacksAddressExtensions; use clarity::vm::Value; use libstackerdb::StackerDBChunkData; -use rand::{thread_rng, RngCore}; +use rand::distributions::Standard; +use rand::{thread_rng, Rng, RngCore}; use rusqlite::types::ToSql; use rusqlite::{params, Connection}; use stacks_common::address::AddressHashMode; @@ -45,8 +46,6 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; use stdext::prelude::Integer; use stx_genesis::GenesisData; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; use crate::burnchains::{BurnchainSigner, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::tests::make_fork_run; @@ -83,9 +82,9 @@ use crate::chainstate::stacks::db::{ }; use crate::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksTransaction, - StacksTransactionSigner, TenureChangeCause, TenureChangePayload, ThresholdSignature, - TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionContractCall, - TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, + StacksTransactionSigner, TenureChangeCause, TenureChangePayload, TokenTransferMemo, + TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, + TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, }; use crate::core; use crate::core::{StacksEpochExtension, STACKS_EPOCH_3_0_MARKER}; @@ -2170,9 +2169,8 @@ fn parse_vote_for_aggregate_public_key_valid() { let signer_index = thread_rng().next_u64(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key.clone()).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2181,7 +2179,7 @@ fn parse_vote_for_aggregate_public_key_valid() { let valid_function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2201,7 +2199,7 @@ fn parse_vote_for_aggregate_public_key_valid() { }; let params = NakamotoSigners::parse_vote_for_aggregate_public_key(&valid_tx).unwrap(); assert_eq!(params.signer_index, signer_index); - assert_eq!(params.aggregate_key, point); + assert_eq!(params.aggregate_key, aggregate_key); assert_eq!(params.voting_round, round); assert_eq!(params.reward_cycle, reward_cycle); } @@ -2217,10 +2215,8 @@ fn parse_vote_for_aggregate_public_key_invalid() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2229,7 +2225,7 @@ fn parse_vote_for_aggregate_public_key_invalid() { let valid_function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2297,8 +2293,8 @@ fn parse_vote_for_aggregate_public_key_invalid() { contract_name: contract_name.clone(), function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ - point_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ], @@ -2340,8 +2336,8 @@ fn parse_vote_for_aggregate_public_key_invalid() { function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ signer_index_arg.clone(), - point_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), + aggregate_key_arg.clone(), reward_cycle_arg.clone(), ], }), @@ -2361,9 +2357,9 @@ fn parse_vote_for_aggregate_public_key_invalid() { function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), ], }), }; @@ -2403,9 +2399,8 @@ fn valid_vote_transaction() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2414,7 +2409,7 @@ fn valid_vote_transaction() { let valid_function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2454,9 +2449,8 @@ fn valid_vote_transaction_malformed_transactions() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2465,7 +2459,7 @@ fn valid_vote_transaction_malformed_transactions() { let valid_function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2566,8 +2560,8 @@ fn valid_vote_transaction_malformed_transactions() { contract_name: contract_name.clone(), function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ - point_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ], @@ -2609,8 +2603,8 @@ fn valid_vote_transaction_malformed_transactions() { function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ signer_index_arg.clone(), - point_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), + aggregate_key_arg.clone(), reward_cycle_arg.clone(), ], }), @@ -2630,9 +2624,9 @@ fn valid_vote_transaction_malformed_transactions() { function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), ], }), }; @@ -2689,9 +2683,8 @@ fn filter_one_transaction_per_signer_multiple_addresses() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2700,7 +2693,7 @@ fn filter_one_transaction_per_signer_multiple_addresses() { let function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2818,9 +2811,8 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2829,7 +2821,7 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { let function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index d23d608ec7..e7d6fef03f 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -35,8 +35,6 @@ use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; -use wsts::curve::point::Point; -use wsts::traits::Aggregator; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::burnchains::tests::*; diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 88ecc8887e..8562449dd3 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -44,8 +44,6 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, StacksAddress, StacksBlockId, StacksPublicKey, }; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; -use wsts::curve::point::{Compressed, Point}; -use wsts::curve::scalar::Scalar; use crate::burnchains::bitcoin::address::BitcoinAddress; use crate::burnchains::{Address, Burnchain, PoxConstants}; @@ -1350,7 +1348,7 @@ impl StacksChainState { sortdb: &SortitionDB, block_id: &StacksBlockId, reward_cycle: u64, - ) -> Result, Error> { + ) -> Result>, Error> { let aggregate_public_key_opt = self .eval_boot_code_read_only( sortdb, @@ -1367,11 +1365,7 @@ impl StacksChainState { let aggregate_public_key = match aggregate_public_key_opt { Some(value) => { // A point should have 33 bytes exactly. - let data = value.expect_buff(33)?; - let msg = - "Pox-4 signers-voting get-approved-aggregate-key returned a corrupted value."; - let compressed_data = Compressed::try_from(data.as_slice()).expect(msg); - Some(Point::try_from(&compressed_data).expect(msg)) + Some(value.expect_buff(33)?) } None => None, }; @@ -2038,13 +2032,12 @@ pub mod test { key: &StacksPrivateKey, nonce: u64, signer_index: u128, - aggregate_public_key: &Point, + aggregate_public_key: Vec, round: u128, cycle: u128, ) -> StacksTransaction { - let aggregate_public_key_val = - Value::buff_from(aggregate_public_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); + let aggregate_public_key_val = Value::buff_from(aggregate_public_key) + .expect("Failed to serialize aggregate public key"); make_signers_vote_for_aggregate_public_key_value( key, nonce, @@ -2085,7 +2078,7 @@ pub mod test { peer: &mut TestPeer<'_>, latest_block_id: StacksBlockId, reward_cycle: u128, - ) -> Option { + ) -> Option> { let key_opt = readonly_call( peer, &latest_block_id, @@ -2095,11 +2088,7 @@ pub mod test { ) .expect_optional() .unwrap(); - key_opt.map(|key_value| { - let data = key_value.expect_buff(33).unwrap(); - let compressed_data = Compressed::try_from(data.as_slice()).unwrap(); - Point::try_from(&compressed_data).unwrap() - }) + key_opt.map(|key_value| key_value.expect_buff(33).unwrap()) } pub fn make_pox_2_increase( diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 8fee5bd5b3..d1cceae7cf 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -44,7 +44,6 @@ use stacks_common::types::{Address, PrivateKey}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha256Sum, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stdext::num::integer::Integer; -use wsts::curve::point::{Compressed, Point}; use super::test::*; use super::RawRewardSetEntry; @@ -7154,7 +7153,7 @@ fn test_scenario_one(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -7164,7 +7163,7 @@ fn test_scenario_one(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -7185,7 +7184,7 @@ fn test_scenario_one(use_nakamoto: bool) { &tester_key, 1, // only tx is a stack-stx tester_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.unwrap(), 1, next_reward_cycle, ); @@ -7542,7 +7541,7 @@ fn test_scenario_two(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -7552,7 +7551,7 @@ fn test_scenario_two(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -7562,7 +7561,7 @@ fn test_scenario_two(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 3, next_reward_cycle, ); @@ -7572,7 +7571,7 @@ fn test_scenario_two(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.unwrap(), 1, next_reward_cycle, ); @@ -8289,7 +8288,7 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -8299,7 +8298,7 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -8309,7 +8308,7 @@ fn test_scenario_four(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -8334,7 +8333,7 @@ fn test_scenario_four(use_nakamoto: bool) { &tester_key, 1, // only tx is a stack-stx tester_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -8388,7 +8387,10 @@ fn test_scenario_four(use_nakamoto: bool) { let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) .expect("No approved key found"); - assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); + assert_eq!( + approved_key, + peer_config.aggregate_public_key.clone().unwrap() + ); // Alice stack-extend err tx let alice_extend_err = make_pox_4_extend( @@ -8422,7 +8424,7 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, 7, ); @@ -9714,7 +9716,7 @@ fn test_scenario_five(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9722,7 +9724,7 @@ fn test_scenario_five(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9730,7 +9732,7 @@ fn test_scenario_five(use_nakamoto: bool) { &carl.private_key, carl.nonce, carl_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9922,7 +9924,7 @@ fn test_scenario_five(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9930,7 +9932,7 @@ fn test_scenario_five(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9938,7 +9940,7 @@ fn test_scenario_five(use_nakamoto: bool) { &carl.private_key, carl.nonce, carl_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 35c82f9b94..127751abbb 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -734,49 +734,6 @@ pub enum TenureChangeError { NotNakamoto, } -/// Schnorr threshold signature using types from `wsts` -#[derive(Debug, Clone, PartialEq)] -pub struct ThresholdSignature(pub wsts::common::Signature); -impl FromSql for ThresholdSignature { - fn column_result(value: ValueRef) -> FromSqlResult { - let hex_str = value.as_str()?; - let bytes = hex_bytes(&hex_str).map_err(|_| FromSqlError::InvalidType)?; - let ts = ThresholdSignature::consensus_deserialize(&mut &bytes[..]) - .map_err(|_| FromSqlError::InvalidType)?; - Ok(ts) - } -} - -impl fmt::Display for ThresholdSignature { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - to_hex(&self.serialize_to_vec()).fmt(f) - } -} - -impl ToSql for ThresholdSignature { - fn to_sql(&self) -> rusqlite::Result { - let bytes = self.serialize_to_vec(); - let hex_str = to_hex(&bytes); - Ok(hex_str.into()) - } -} - -impl serde::Serialize for ThresholdSignature { - fn serialize(&self, s: S) -> Result { - let bytes = self.serialize_to_vec(); - s.serialize_str(&to_hex(&bytes)) - } -} - -impl<'de> serde::Deserialize<'de> for ThresholdSignature { - fn deserialize>(d: D) -> Result { - let hex_str = String::deserialize(d)?; - let bytes = hex_bytes(&hex_str).map_err(serde::de::Error::custom)?; - ThresholdSignature::consensus_deserialize(&mut bytes.as_slice()) - .map_err(serde::de::Error::custom) - } -} - /// A transaction from Stackers to signal new mining tenure #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TenureChangePayload { diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index 2204f57a25..c45b212b68 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -28,9 +28,6 @@ use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; -use wsts::common::Signature as Secp256k1Signature; -use wsts::curve::point::{Compressed as Secp256k1Compressed, Point as Secp256k1Point}; -use wsts::curve::scalar::Scalar as Secp256k1Scalar; use crate::burnchains::Txid; use crate::chainstate::stacks::{TransactionPayloadID, *}; @@ -154,46 +151,6 @@ impl StacksMessageCodec for TenureChangeCause { } } -impl StacksMessageCodec for ThresholdSignature { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - let compressed = self.0.R.compress(); - let bytes = compressed.as_bytes(); - fd.write_all(bytes).map_err(CodecError::WriteError)?; - write_next(fd, &self.0.z.to_bytes())?; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - // Read curve point - let mut buf = [0u8; 33]; - fd.read_exact(&mut buf).map_err(CodecError::ReadError)?; - let R = Secp256k1Point::try_from(&Secp256k1Compressed::from(buf)) - .map_err(|_| CodecError::DeserializeError("Failed to read curve point".into()))?; - - // Read scalar - let mut buf = [0u8; 32]; - fd.read_exact(&mut buf).map_err(CodecError::ReadError)?; - let z = Secp256k1Scalar::from(buf); - - Ok(Self(Secp256k1Signature { R, z })) - } -} - -impl ThresholdSignature { - pub fn verify(&self, public_key: &Secp256k1Point, msg: &[u8]) -> bool { - self.0.verify(public_key, msg) - } - - /// Create an empty/null signature. This is not valid data, but it is used - /// as a placeholder in the header during mining. - pub fn empty() -> Self { - Self(Secp256k1Signature { - R: Secp256k1Point::G(), - z: Secp256k1Scalar::new(), - }) - } -} - impl StacksMessageCodec for TenureChangePayload { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &self.tenure_consensus_hash)?; diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index a2f4fe5dc5..132a03f34d 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -31,7 +31,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index 5f03c3811a..eb43d8aecd 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -127,7 +127,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 80065dc0c6..98f102969a 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -31,7 +31,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 54661a2f09..628243d53e 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1749,7 +1749,6 @@ pub mod test { use stacks_common::util::secp256k1::*; use stacks_common::util::uint::*; use stacks_common::util::vrf::*; - use wsts::curve::point::Point; use {mio, rand}; use self::nakamoto::test_signers::TestSigners; @@ -2099,7 +2098,7 @@ pub mod test { pub services: u16, /// aggregate public key to use /// (NOTE: will be used post-Nakamoto) - pub aggregate_public_key: Option, + pub aggregate_public_key: Option>, pub test_stackers: Option>, pub test_signers: Option, } @@ -2457,11 +2456,8 @@ pub mod test { let mut receipts = vec![]; if let Some(agg_pub_key) = agg_pub_key_opt { - debug!( - "Setting aggregate public key to {}", - &to_hex(&agg_pub_key.compress().data) - ); - NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, &agg_pub_key); + debug!("Setting aggregate public key to {}", &to_hex(&agg_pub_key)); + NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, agg_pub_key); } else { debug!("Not setting aggregate public key"); } diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index a6307b324b..cc90d90011 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -36,9 +36,8 @@ use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::{ - CoinbasePayload, StacksTransaction, TenureChangeCause, TenureChangePayload, ThresholdSignature, - TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionPayload, - TransactionVersion, + CoinbasePayload, StacksTransaction, TenureChangeCause, TenureChangePayload, TokenTransferMemo, + TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; use crate::net::api::gettenureinfo::RPCGetTenureInfo; diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index a74cb0fd2c..07227c930e 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -35,7 +35,6 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::{Address, StacksEpochId}; use stacks_common::util::vrf::VRFProof; -use wsts::curve::point::Point; use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c975bfebf9..3c238153ac 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -871,9 +871,8 @@ pub fn boot_to_epoch_3( if let Some(signers) = self_signing { // Get the aggregate key let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); - let aggregate_public_key = - clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); + let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) + .expect("Failed to serialize aggregate public key"); let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); // Vote on the aggregate public key @@ -1026,9 +1025,8 @@ pub fn boot_to_pre_epoch_3_boundary( if let Some(signers) = self_signing { // Get the aggregate key let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); - let aggregate_public_key = - clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); + let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) + .expect("Failed to serialize aggregate public key"); let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); // Vote on the aggregate public key @@ -1183,9 +1181,8 @@ fn signer_vote_if_needed( // Get the aggregate key let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); - let aggregate_public_key = - clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); + let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) + .expect("Failed to serialize aggregate public key"); for (i, signer_sk) in signer_sks.iter().enumerate() { let signer_nonce = get_account(&http_origin, &to_addr(signer_sk)).nonce; From f909a4559918fe568bf3299d484fa1114a331971 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 11:36:03 -0700 Subject: [PATCH 1223/1400] Remove wsts from libsigner Signed-off-by: Jacinta Ferrant --- Cargo.lock | 1 - libsigner/Cargo.toml | 1 - libsigner/src/events.rs | 6 ------ libsigner/src/tests/mod.rs | 1 - 4 files changed, 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e0f1ab61d..fdac038acd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1926,7 +1926,6 @@ dependencies = [ "stackslib", "thiserror", "tiny_http", - "wsts", ] [[package]] diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index 7da9801674..63241d3256 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -32,7 +32,6 @@ stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib"} thiserror = "1.0" tiny_http = "0.12" -wsts = { workspace = true } [dev-dependencies] mutants = "0.0.3" diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 70e7853d65..1de0e34f09 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -48,12 +48,6 @@ use stacks_common::util::HexError; use tiny_http::{ Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, }; -use wsts::common::Signature; -use wsts::net::{ - DkgBegin, DkgEnd, DkgEndBegin, DkgPrivateBegin, DkgPrivateShares, DkgPublicShares, DkgStatus, - Message, NonceRequest, NonceResponse, Packet, SignatureShareRequest, SignatureShareResponse, -}; -use wsts::state_machine::signer; use crate::http::{decode_http_body, decode_http_request}; use crate::EventError; diff --git a/libsigner/src/tests/mod.rs b/libsigner/src/tests/mod.rs index f0361592ba..8ef6d38eee 100644 --- a/libsigner/src/tests/mod.rs +++ b/libsigner/src/tests/mod.rs @@ -40,7 +40,6 @@ use stacks_common::codec::{ }; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::sleep_ms; -use wsts::net::{DkgBegin, Packet}; use crate::events::{SignerEvent, SignerEventTrait}; use crate::v0::messages::{BlockRejection, SignerMessage}; From 27034e3d2ff6bf805a5ea042a7035cc0dd1865eb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 11:38:24 -0700 Subject: [PATCH 1224/1400] Remove wsts from stacks-common and stacks-core Signed-off-by: Jacinta Ferrant --- Cargo.lock | 470 +--------------------------- Cargo.toml | 1 - stacks-common/Cargo.toml | 1 - stacks-common/src/util/secp256k1.rs | 82 ----- 4 files changed, 14 insertions(+), 540 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fdac038acd..850de29902 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -32,16 +32,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - "crypto-common", - "generic-array 0.14.7", -] - [[package]] name = "aes" version = "0.6.0" @@ -50,18 +40,7 @@ checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" dependencies = [ "aes-soft", "aesni", - "cipher 0.2.5", -] - -[[package]] -name = "aes" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" -dependencies = [ - "cfg-if 1.0.0", - "cipher 0.4.4", - "cpufeatures", + "cipher", ] [[package]] @@ -70,25 +49,11 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da" dependencies = [ - "aead 0.3.2", - "aes 0.6.0", - "cipher 0.2.5", - "ctr 0.6.0", - "ghash 0.3.1", - "subtle", -] - -[[package]] -name = "aes-gcm" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" -dependencies = [ - "aead 0.5.2", - "aes 0.8.4", - "cipher 0.4.4", - "ctr 0.9.2", - "ghash 0.5.0", + "aead", + "aes", + "cipher", + "ctr", + "ghash", "subtle", ] @@ -98,7 +63,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" dependencies = [ - "cipher 0.2.5", + "cipher", "opaque-debug", ] @@ -108,7 +73,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" dependencies = [ - "cipher 0.2.5", + "cipher", "opaque-debug", ] @@ -208,12 +173,6 @@ version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" -[[package]] -name = "arrayvec" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" - [[package]] name = "ascii" version = "1.1.0" @@ -504,18 +463,6 @@ version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" -[[package]] -name = "bitvec" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - [[package]] name = "block-buffer" version = "0.9.0" @@ -550,33 +497,12 @@ dependencies = [ "tracing", ] -[[package]] -name = "bs58" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" - -[[package]] -name = "bs58" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" -dependencies = [ - "tinyvec", -] - [[package]] name = "bumpalo" version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" -[[package]] -name = "byte-slice-cast" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" - [[package]] name = "byteorder" version = "1.5.0" @@ -645,16 +571,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", -] - [[package]] name = "clap" version = "2.34.0" @@ -763,7 +679,7 @@ version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03a5d7b21829bc7b4bf4754a978a241ae54ea55a40f92bb20216e54096f4b951" dependencies = [ - "aes-gcm 0.8.0", + "aes-gcm", "base64 0.13.1", "hkdf", "hmac", @@ -875,12 +791,6 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - [[package]] name = "crypto-common" version = "0.1.6" @@ -888,7 +798,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array 0.14.7", - "rand_core 0.6.4", "typenum", ] @@ -929,16 +838,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" dependencies = [ - "cipher 0.2.5", -] - -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] @@ -1213,18 +1113,6 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" -[[package]] -name = "fixed-hash" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -] - [[package]] name = "fnv" version = "1.0.7" @@ -1256,12 +1144,6 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - [[package]] name = "futures" version = "0.3.30" @@ -1433,17 +1315,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" dependencies = [ "opaque-debug", - "polyval 0.4.5", -] - -[[package]] -name = "ghash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" -dependencies = [ - "opaque-debug", - "polyval 0.6.1", + "polyval", ] [[package]] @@ -1497,7 +1369,6 @@ checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", "allocator-api2", - "serde", ] [[package]] @@ -1554,12 +1425,6 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - [[package]] name = "hkdf" version = "0.10.0" @@ -1580,15 +1445,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "home" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" -dependencies = [ - "windows-sys 0.52.0", -] - [[package]] name = "http" version = "0.2.11" @@ -1716,26 +1572,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "impl-codec" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "indexmap" version = "2.2.3" @@ -1752,15 +1588,6 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" -[[package]] -name = "inout" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "instant" version = "0.1.12" @@ -2189,54 +2016,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "p256k1" -version = "7.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40a031a559eb38c35a14096f21c366254501a06d41c4b327d2a7515d713a5b7" -dependencies = [ - "bitvec", - "bs58 0.4.0", - "cc", - "hex", - "itertools", - "num-traits", - "primitive-types", - "proc-macro2", - "quote", - "rand_core 0.6.4", - "rustfmt-wrapper", - "serde", - "sha2 0.10.8", - "syn 2.0.48", -] - -[[package]] -name = "parity-scale-codec" -version = "3.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" -dependencies = [ - "arrayvec", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "parking" version = "2.2.0" @@ -2396,7 +2175,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27abb6e4638dcecc65a92b50d7f1d87dd6dea987ba71db987b6bf881f4877e9d" dependencies = [ "num-traits", - "serde", ] [[package]] @@ -2407,19 +2185,7 @@ checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" dependencies = [ "cpuid-bool", "opaque-debug", - "universal-hash 0.4.0", -] - -[[package]] -name = "polyval" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "opaque-debug", - "universal-hash 0.5.1", + "universal-hash", ] [[package]] @@ -2444,26 +2210,6 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" -[[package]] -name = "primitive-types" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" -dependencies = [ - "fixed-hash", - "impl-codec", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" -dependencies = [ - "toml_edit 0.20.7", -] - [[package]] name = "proc-macro-error" version = "1.0.4" @@ -2541,12 +2287,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - [[package]] name = "rand" version = "0.7.3" @@ -2886,12 +2626,6 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - [[package]] name = "rustc_version" version = "0.2.3" @@ -2919,19 +2653,6 @@ dependencies = [ "semver 1.0.21", ] -[[package]] -name = "rustfmt-wrapper" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1adc9dfed5cc999077978cc7163b9282c5751c8d39827c4ea8c8c220ca5a440" -dependencies = [ - "serde", - "tempfile", - "thiserror", - "toml 0.8.10", - "toolchain_find", -] - [[package]] name = "rustix" version = "0.37.27" @@ -3137,15 +2858,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "serde_spanned" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" -dependencies = [ - "serde", -] - [[package]] name = "serde_stacker" version = "0.1.11" @@ -3394,7 +3106,6 @@ dependencies = [ "slog-term", "time 0.2.27", "winapi 0.3.9", - "wsts", ] [[package]] @@ -3431,7 +3142,7 @@ dependencies = [ "tikv-jemallocator", "tiny_http", "tokio", - "toml 0.5.11", + "toml", "tracing", "tracing-subscriber", "url", @@ -3468,7 +3179,7 @@ dependencies = [ "stackslib", "thiserror", "tiny_http", - "toml 0.5.11", + "toml", "tracing", "tracing-subscriber", "url", @@ -3533,12 +3244,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - [[package]] name = "stdext" version = "0.3.2" @@ -3663,24 +3368,6 @@ dependencies = [ "libc", ] -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tempfile" -version = "3.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" -dependencies = [ - "cfg-if 1.0.0", - "fastrand 2.0.1", - "rustix 0.38.31", - "windows-sys 0.52.0", -] - [[package]] name = "term" version = "0.7.0" @@ -3931,64 +3618,6 @@ dependencies = [ "serde", ] -[[package]] -name = "toml" -version = "0.8.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit 0.22.5", -] - -[[package]] -name = "toml_datetime" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_edit" -version = "0.20.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" -dependencies = [ - "indexmap", - "toml_datetime", - "winnow 0.5.40", -] - -[[package]] -name = "toml_edit" -version = "0.22.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e68c159e8f5ba8a28c4eb7b0c0c190d77bb479047ca713270048145a9ad28a" -dependencies = [ - "indexmap", - "serde", - "serde_spanned", - "toml_datetime", - "winnow 0.6.1", -] - -[[package]] -name = "toolchain_find" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc8c9a7f0a2966e1acdaf0461023d0b01471eeead645370cf4c3f5cff153f2a" -dependencies = [ - "home", - "once_cell", - "regex", - "semver 1.0.21", - "walkdir", -] - [[package]] name = "tower-service" version = "0.3.2" @@ -4094,18 +3723,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" -[[package]] -name = "uint" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - [[package]] name = "unicase" version = "2.7.0" @@ -4152,16 +3769,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "universal-hash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] - [[package]] name = "untrusted" version = "0.7.1" @@ -4556,24 +4163,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" -[[package]] -name = "winnow" -version = "0.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] - -[[package]] -name = "winnow" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d90f4e0f530c4c69f62b80d839e9ef3855edc9cba471a160c4d692deed62b401" -dependencies = [ - "memchr", -] - [[package]] name = "winreg" version = "0.50.0" @@ -4594,37 +4183,6 @@ dependencies = [ "winapi-build", ] -[[package]] -name = "wsts" -version = "9.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c80d57a61294350ed91e91eb20a6c34da084ec8f15d039bab79ce3efabbd1a4" -dependencies = [ - "aes-gcm 0.10.3", - "bs58 0.5.0", - "hashbrown", - "hex", - "num-traits", - "p256k1", - "polynomial", - "primitive-types", - "rand_core 0.6.4", - "serde", - "sha2 0.10.8", - "thiserror", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "wyz" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] - [[package]] name = "zerocopy" version = "0.7.32" diff --git a/Cargo.toml b/Cargo.toml index 8ac168f1f7..2114e23dfc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,6 @@ rand_core = "0.6" rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" -wsts = { version = "9.0.0", default-features = false } rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } # Use a bit more than default optimization for diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 75692d83c6..81b4326d4c 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -31,7 +31,6 @@ slog-term = "2.6.0" slog-json = { version = "2.3.0", optional = true } chrono = "0.4.19" libc = "0.2.82" -wsts = { workspace = true } hashbrown = { workspace = true } rusqlite = { workspace = true, optional = true } diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1.rs index 034a5a4941..c3b80acac5 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1.rs @@ -26,9 +26,6 @@ use secp256k1::{ use serde::de::{Deserialize, Error as de_Error}; use serde::ser::Error as ser_Error; use serde::Serialize; -use wsts::common::Signature as WSTSSignature; -use wsts::curve::point::{Compressed, Point}; -use wsts::curve::scalar::Scalar; use super::hash::Sha256Sum; use crate::impl_byte_array_message_codec; @@ -713,83 +710,4 @@ mod tests { runtime_verify - runtime_recover ); } - - /* - #[test] - fn test_schnorr_signature_serde() { - use wsts::traits::Aggregator; - - // Test that an empty conversion fails. - let empty_signature = SchnorrSignature::default(); - assert!(empty_signature.to_wsts_signature().is_none()); - - // Generate a random Signature and ensure it successfully converts - let mut rng = rand_core::OsRng::default(); - let msg = - "You Idiots! These Are Not Them! You\'ve Captured Their Stunt Doubles!".as_bytes(); - - let num_keys = 10; - let threshold = 7; - let party_key_ids: Vec> = - vec![vec![0, 1, 2], vec![3, 4], vec![5, 6, 7], vec![8, 9]]; - let num_parties = party_key_ids.len().try_into().unwrap(); - - // Create the parties - let mut signers: Vec = party_key_ids - .iter() - .enumerate() - .map(|(pid, pkids)| { - wsts::v2::Party::new( - pid.try_into().unwrap(), - pkids, - num_parties, - num_keys, - threshold, - &mut rng, - ) - }) - .collect(); - - // Generate an aggregate public key - let comms = match wsts::v2::test_helpers::dkg(&mut signers, &mut rng) { - Ok(comms) => comms, - Err(secret_errors) => { - panic!("Got secret errors from DKG: {:?}", secret_errors); - } - }; - let aggregate_public_key = comms - .iter() - .fold(Point::default(), |s, comm| s + comm.poly[0]); - - // signers [0,1,3] have "threshold" keys - { - let mut signers = [signers[0].clone(), signers[1].clone(), signers[3].clone()].to_vec(); - let mut sig_agg = wsts::v2::Aggregator::new(num_keys, threshold); - - sig_agg.init(comms.clone()).expect("aggregator init failed"); - - let (nonces, sig_shares, key_ids) = - wsts::v2::test_helpers::sign(msg, &mut signers, &mut rng); - let original_signature = sig_agg - .sign(msg, &nonces, &sig_shares, &key_ids) - .expect("aggregator sig failed"); - // Serialize the signature and verify the results - let schnorr_signature = SchnorrSignature::from(&original_signature); - assert_eq!( - schnorr_signature[..33], - original_signature.R.compress().data[..] - ); - assert_eq!(schnorr_signature[33..], original_signature.z.to_bytes()); - - // Deserialize the signature and verify the results - let reverted_signature = schnorr_signature - .to_wsts_signature() - .expect("Failed to convert schnorr signature to wsts signature"); - assert_eq!(reverted_signature.R, original_signature.R); - assert_eq!(reverted_signature.z, original_signature.z); - assert!(original_signature.verify(&aggregate_public_key, msg)); - assert!(reverted_signature.verify(&aggregate_public_key, msg)); - } - } - */ } From 40043eb11366a52086f39e5ca9c5bf462c44574b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 11:39:30 -0700 Subject: [PATCH 1225/1400] Fix comment on SignCoordinator in stacks-node Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index ee01298422..f570009be5 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -54,11 +54,8 @@ pub static TEST_IGNORE_SIGNERS: std::sync::Mutex> = std::sync::Mute /// waking up to check timeouts? static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); -/// The `SignCoordinator` struct represents a WSTS FIRE coordinator whose -/// sole function is to serve as the coordinator for Nakamoto block signing. -/// This coordinator does not operate as a DKG coordinator. Rather, this struct -/// is used by Nakamoto miners to act as the coordinator for the blocks they -/// produce. +/// The `SignCoordinator` struct sole function is to serve as the coordinator for Nakamoto block signing. +/// This struct is used by Nakamoto miners to act as the coordinator for the blocks they produce. pub struct SignCoordinator { receiver: Option>, message_key: StacksPrivateKey, From 1f53fac8051d3ec77ec065dedb930d2cef427c11 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 11:57:44 -0700 Subject: [PATCH 1226/1400] Do not blanket set pox_sync_sample_secs to a postiive integer Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 3 ++- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a567507235..d144fbf17d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -585,7 +585,7 @@ pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress conf.burnchain.magic_bytes = MagicBytes::from(['T' as u8, '3' as u8].as_ref()); conf.burnchain.poll_time_secs = 1; - conf.node.pox_sync_sample_secs = 5; + conf.node.pox_sync_sample_secs = 0; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -2159,6 +2159,7 @@ fn multiple_miners() { let node_2_p2p = 51025; let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.node.pox_sync_sample_secs = 1; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b70d9c39dc..e7985bc257 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3347,6 +3347,7 @@ fn multiple_miners_with_nakamoto_blocks() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 1; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); From 81a09c2b5be565e7e369200a461534e2e5104640 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 25 Sep 2024 12:22:11 -0700 Subject: [PATCH 1227/1400] Add serde to hashrbown Signed-off-by: Jacinta Ferrant --- Cargo.lock | 1 + Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 850de29902..dc27c931cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1369,6 +1369,7 @@ checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", "allocator-api2", + "serde", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 2114e23dfc..10dc427e2e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,7 @@ members = [ # Dependencies we want to keep the same between workspace members [workspace.dependencies] ed25519-dalek = { version = "2.1.1", features = ["serde", "rand_core"] } -hashbrown = "0.14.3" +hashbrown = { version = "0.14.3", features = ["serde"] } rand_core = "0.6" rand = "0.8" rand_chacha = "0.3.1" From 677accded78509d0c72675ac13d203cf38ed54f1 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 26 Sep 2024 08:15:55 -0500 Subject: [PATCH 1228/1400] more test fixes --- .../src/tests/nakamoto_integrations.rs | 56 +++++++++++++++++-- testnet/stacks-node/src/tests/signer/mod.rs | 2 +- 2 files changed, 51 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3261490247..6b072a1f4d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1188,6 +1188,10 @@ fn signer_vote_if_needed( btc_regtest_controller.get_burnchain().first_block_height, reward_cycle, ); + let epochs = btc_regtest_controller.get_stacks_epochs(); + let is_naka_epoch = epochs[StacksEpoch::find_epoch(&epochs, block_height).unwrap()] + .epoch_id + .uses_nakamoto_blocks(); if block_height >= prepare_phase_start { // If the key is already set, do nothing. @@ -1210,6 +1214,7 @@ fn signer_vote_if_needed( clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) .expect("Failed to serialize aggregate public key"); + let mut expected_nonces = vec![]; for (i, signer_sk) in signer_sks.iter().enumerate() { let signer_nonce = get_account(&http_origin, &to_addr(signer_sk)).nonce; @@ -1228,8 +1233,19 @@ fn signer_vote_if_needed( clarity::vm::Value::UInt(reward_cycle as u128 + 1), ], ); + expected_nonces.push((to_addr(signer_sk), signer_nonce + 1)); submit_tx(&http_origin, &voting_tx); } + + if is_naka_epoch { + wait_for(30, || { + let all_bumped = expected_nonces.iter().all(|(addr, expected_nonce)| { + get_account(&http_origin, addr).nonce >= *expected_nonce + }); + Ok(all_bumped) + }) + .expect("Timed out waiting for an interim nakamoto block to process our transactions"); + } } } @@ -1465,7 +1481,7 @@ fn simple_neon_integration() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let prom_bind = format!("{}:{}", "127.0.0.1", 6000); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(5); let sender_sk = Secp256k1PrivateKey::new(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); @@ -1601,6 +1617,19 @@ fn simple_neon_integration() { ) .unwrap(); + wait_for(30, || { + let transfer_tx_included = test_observer::get_blocks().into_iter().any(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + .is_some() + }); + Ok(transfer_tx_included) + }) + .expect("Timed out waiting for submitted transaction to be included in a block"); + // Mine 15 more nakamoto tenures for _i in 0..15 { next_block_and_mine_commit( @@ -2416,7 +2445,7 @@ fn correct_burn_outs() { epochs[epoch_30_ix].start_height = 225; } - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.initial_balances.clear(); let accounts: Vec<_> = (0..8) .map(|ix| { @@ -5183,6 +5212,15 @@ fn check_block_heights() { next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); + // in the first tenure, make sure that the contracts are published + if tenure_ix == 0 { + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Timed out waiting for contracts to publish"); + } + let heights1_value = call_read_only( &naka_conf, &sender_addr, @@ -5247,9 +5285,15 @@ fn check_block_heights() { .clone() .expect_u128() .unwrap(); + let expected_height = if tenure_ix == 0 { + // tenure 0 will include an interim block at this point because of the contract publish + // txs + last_stacks_block_height + 2 + } else { + last_stacks_block_height + 1 + }; assert_eq!( - sbh, - last_stacks_block_height + 1, + sbh, expected_height, "Stacks block heights should have incremented" ); last_stacks_block_height = sbh; @@ -5373,8 +5417,8 @@ fn check_block_heights() { assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); assert_eq!( tip.stacks_block_height, - block_height_pre_3_0 + ((inter_blocks_per_tenure + 1) * tenure_count), - "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" + block_height_pre_3_0 + 1 + ((inter_blocks_per_tenure + 1) * tenure_count), + "Should have mined 1 + (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" ); coord_channel diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a25a010465..9bf79f9ce7 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -266,7 +266,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>()); Ok(finished_signers.len() == self.spawned_signers.len()) - }).unwrap(); + }).expect("Timed out while waiting for the signers to be registered"); } pub fn wait_for_cycle(&mut self, timeout_secs: u64, reward_cycle: u64) { From 419ce4311f8e6d3705aaec86b35814c008ea064b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 26 Sep 2024 11:04:58 -0500 Subject: [PATCH 1229/1400] more test fixes --- .../src/tests/nakamoto_integrations.rs | 75 ++++++++++++------- .../src/tests/neon_integrations.rs | 2 + 2 files changed, 50 insertions(+), 27 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6b072a1f4d..2bc0082fed 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5909,6 +5909,15 @@ fn clarity_burn_state() { }) .unwrap(); + // in the first tenure, make sure that the contracts are published + if tenure_ix == 0 { + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Timed out waiting for contracts to publish"); + } + let info = get_chain_info(&naka_conf); burn_block_height = info.burn_block_height as u128; info!("Expecting burn block height to be {}", burn_block_height); @@ -7100,14 +7109,24 @@ fn check_block_times() { contract3_name, contract_clarity3, ); - sender_nonce += 1; submit_tx(&http_origin, &contract_tx3); + sender_nonce += 1; + + // sleep to ensure seconds have changed + thread::sleep(Duration::from_secs(3)); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); + // make sure that the contracts are published + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Timed out waiting for contracts to publish"); + let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {:?}", info.stacks_tip_height); let last_stacks_block_height = info.stacks_tip_height as u128; let last_tenure_height = last_stacks_block_height as u128; @@ -7116,7 +7135,7 @@ fn check_block_times() { &sender_addr, contract0_name, "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let time0 = time0_value .expect_optional() @@ -7130,7 +7149,7 @@ fn check_block_times() { &sender_addr, contract1_name, "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let time1 = time1_value .expect_optional() @@ -7148,7 +7167,7 @@ fn check_block_times() { &sender_addr, contract3_name, "get-tenure-time", - vec![&clarity::vm::Value::UInt(last_tenure_height - 1)], + vec![&clarity::vm::Value::UInt(last_tenure_height - 2)], ); let time3_tenure = time3_tenure_value .expect_optional() @@ -7166,7 +7185,7 @@ fn check_block_times() { &sender_addr, contract3_name, "get-block-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let time3_block = time3_block_value .expect_optional() @@ -7176,14 +7195,10 @@ fn check_block_times() { .unwrap(); // Sleep to ensure the seconds have changed - thread::sleep(Duration::from_secs(1)); + thread::sleep(Duration::from_secs(2)); // Mine a Nakamoto block info!("Mining Nakamoto block"); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block let transfer_tx = @@ -7191,19 +7206,15 @@ fn check_block_times() { sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); - loop { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } + // make sure that the contracts are published + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Timed out waiting for transfer to complete"); let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {:?}", info.stacks_tip_height); let last_stacks_block_height = info.stacks_tip_height as u128; let time0a_value = call_read_only( @@ -7221,7 +7232,7 @@ fn check_block_times() { .unwrap(); assert!( time0a - time0 >= 1, - "get-block-info? time should have changed" + "get-block-info? time should have changed. time_0 = {time0}. time_0_a = {time0a}" ); let time1a_value = call_read_only( @@ -7598,9 +7609,19 @@ fn check_block_info() { sender_nonce += 1; submit_tx(&http_origin, &contract_tx3); + // sleep to ensure seconds have changed + thread::sleep(Duration::from_secs(3)); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); + // make sure that the contracts are published + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Timed out waiting for contracts to publish"); + let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; @@ -7610,7 +7631,7 @@ fn check_block_info() { &sender_addr, contract0_name, "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let tuple0 = result0.expect_tuple().unwrap().data_map; assert_block_info(&tuple0, &miner, &miner_spend); @@ -7620,7 +7641,7 @@ fn check_block_info() { &sender_addr, contract1_name, "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let tuple1 = result1.expect_tuple().unwrap().data_map; assert_eq!(tuple0, tuple1); @@ -7630,7 +7651,7 @@ fn check_block_info() { &sender_addr, contract3_name, "get-tenure-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let tuple3_tenure0 = result3_tenure.expect_tuple().unwrap().data_map; assert_eq!( @@ -7661,7 +7682,7 @@ fn check_block_info() { &sender_addr, contract3_name, "get-block-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], ); let tuple3_block1 = result3_block.expect_tuple().unwrap().data_map; assert_eq!( diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 3dd299c861..4ec3b311d4 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -929,6 +929,8 @@ pub fn call_read_only( let http_origin = format!("http://{}", &conf.node.rpc_bind); let client = reqwest::blocking::Client::new(); + info!("Call read only: {contract}.{function}({args:?})"); + let path = format!( "{http_origin}/v2/contracts/call-read/{}/{}/{}", principal, contract, function From 6a3746c9f6ca4edec8e9ce66efdbf074e87f5615 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 26 Sep 2024 15:13:22 -0500 Subject: [PATCH 1230/1400] fix test assertion --- testnet/stacks-node/src/tests/signer/mod.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 9bf79f9ce7..551fc3faa0 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -727,7 +727,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Result<(), String> { wait_for(timeout_secs, || { let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections = stackerdb_events + let block_rejections: HashSet<_> = stackerdb_events .into_iter() .flat_map(|chunk| chunk.modified_slots) .filter_map(|chunk| { @@ -739,7 +739,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest None, } }) - .collect::>(); - Ok(block_rejections.len() == expected_signers.len()) + .collect(); + info!("Checking block rejections"; "rejected_len" => block_rejections.len(), "expected_len" => expected_signers.len()); + Ok(block_rejections.len() >= expected_signers.len()) }) } } From cd49977a7383b85c45a317cb174eb1c2231ecbb4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 26 Sep 2024 14:39:50 -0700 Subject: [PATCH 1231/1400] Do not advance unless the bootstrapped or follower node also hits epoch 3 and shutdown the runloop Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 89 ++++++++++++++++++---- 1 file changed, 73 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e7985bc257..2cbbf67a51 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1393,8 +1393,8 @@ fn multiple_miners() { config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); - config.node.pox_sync_sample_secs = 5; config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 1; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -1446,17 +1446,30 @@ fn multiple_miners() { ); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); let rl2_coord_channels = run_loop_2.coordinator_channels(); let Counters { naka_submitted_commits: rl2_commits, .. } = run_loop_2.counters(); - let _run_loop_2_thread = thread::Builder::new() + let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for boostrapped node to catch up to the miner"); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; info!("------------------------- Reached Epoch 3.0 -------------------------"); @@ -1553,6 +1566,12 @@ fn multiple_miners() { u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() ); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } @@ -1728,6 +1747,18 @@ fn miner_forking() { .unwrap(); signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for boostrapped node to catch up to the miner"); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; naka_skip_commit_op.0.lock().unwrap().replace(false); @@ -3403,18 +3434,31 @@ fn multiple_miners_with_nakamoto_blocks() { let http_origin = format!("http://{}", &conf.node.rpc_bind); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); let rl2_coord_channels = run_loop_2.coordinator_channels(); let Counters { naka_submitted_commits: rl2_commits, naka_mined_blocks: blocks_mined2, .. } = run_loop_2.counters(); - let _run_loop_2_thread = thread::Builder::new() + let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; info!("------------------------- Reached Epoch 3.0 -------------------------"); @@ -3536,7 +3580,12 @@ fn multiple_miners_with_nakamoto_blocks() { btc_blocks_mined, u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() ); - + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } @@ -3575,6 +3624,7 @@ fn partial_tenure_fork() { let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let localhost = "127.0.0.1"; // All signers are listening to node 1 let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, @@ -3586,11 +3636,12 @@ fn partial_tenure_fork() { signer_config.node_host = node_1_rpc_bind.clone(); }, |config| { - let localhost = "127.0.0.1"; - config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); - config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); - config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); - config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 1; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -3620,11 +3671,10 @@ fn partial_tenure_fork() { let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); - let localhost = "127.0.0.1"; - conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); conf_node_2.node.seed = btc_miner_2_seed.clone(); conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); @@ -3646,6 +3696,8 @@ fn partial_tenure_fork() { let http_origin = format!("http://{}", &conf.node.rpc_bind); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); let Counters { naka_mined_blocks: blocks_mined2, naka_proposed_blocks: blocks_proposed2, @@ -3653,7 +3705,7 @@ fn partial_tenure_fork() { } = run_loop_2.counters(); signer_test.boot_to_epoch_3(); - let _run_loop_2_thread = thread::Builder::new() + let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); @@ -3918,7 +3970,12 @@ fn partial_tenure_fork() { .unwrap() .unwrap(); assert_eq!(tip.stacks_block_height, ignore_block - 1); - + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } From 04c6c8aeacdddaa8bdeafdac3008188a88cff7fd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 27 Sep 2024 07:46:19 -0700 Subject: [PATCH 1232/1400] Remove stale functions and config options left over from v1 signer and update the README file Signed-off-by: Jacinta Ferrant --- stacks-signer/README.md | 136 +++++---- stacks-signer/src/client/mod.rs | 94 ------- stacks-signer/src/client/stacks_client.rs | 303 +-------------------- stacks-signer/src/config.rs | 177 +----------- stacks-signer/src/main.rs | 2 +- stacks-signer/src/monitoring/mod.rs | 46 ---- stacks-signer/src/monitoring/prometheus.rs | 32 --- stacks-signer/src/runloop.rs | 7 - 8 files changed, 85 insertions(+), 712 deletions(-) diff --git a/stacks-signer/README.md b/stacks-signer/README.md index b3c287d9e3..6e9e0be760 100644 --- a/stacks-signer/README.md +++ b/stacks-signer/README.md @@ -1,6 +1,6 @@ # stacks-signer: Stacks Signer CLI -stacks-signer is a command-line interface (CLI) for executing DKG (Distributed Key Generation) rounds, signing transactions and blocks, and more within the Stacks blockchain ecosystem. This tool provides various subcommands to interact with the StackerDB contract, perform cryptographic operations, and run a Stacks compliant signer. +stacks-signer is a command-line interface (CLI) for operating a Stacks compliant signer. This tool provides various subcommands to interact with the StackerDB contract, generate SIP voting and stacking signatures, and monitoring the Signer network for expected behaviour. ## Installation @@ -25,18 +25,92 @@ To use stacks-signer, you need to build and install the Rust program. You can do ./target/release/stacks-signer --help ``` +4. **Build with Prometheus Metrics Enabled**: You can optionally build and run the stacks-signer with monitoring metrics enabled. + + ```bash + cd stacks-signer + cargo build --release --features "monitoring_prom" + cargo run --features "monitoring_prom" -p stacks-signer run --config + ``` + +You must specify the "metrics_endpoint" option in the config file to serve these metrics. +See [metrics documentation](TODO) for a complete breakdown of the available metrics. + ## Usage The stacks-signer CLI provides the following subcommands: +### `run` + +Start the signer and handle requests to sign Stacks block proposals. + +```bash +./stacks-signer run --config + +``` + +### `monitor-signers` + +Periodically query the current reward cycle's signers' StackerDB slots to verify their operation. + +```bash +./stacks-signer monitor-signers --host --interval --max-age + +``` +- `--host`: The Stacks node to connect to. +- `--interval`: The polling interval in seconds for querying stackerDB. +- `--max-age`: The max age in seconds before a signer message is considered stale. + +### `generate-stacking-signature` + +Generate a signature for stacking. + +```bash +./stacks-signer generate-stacking-signature --config --pox-address

--reward-cycle --period --max-amount --auth-id + +``` +- `--config`: The path to the signer configuration file. +- `--pox-address`: The BTC address used to receive rewards +- `--reward-cycle`: The reward cycle during which this signature is used +- `--method`: Stacking metod that can be used +- `--period`: Number of cycles used as a lock period. Use `1` for stack-aggregation-commit method +- `--max-amount`: The max amount of uSTX that can be used in this unique transaction +- `--auth-id`: A unique identifier to prevent re-using this authorization +- `--json`: Output information in JSON format + +### `generate-vote` + +Generate a vote signature for a specific SIP + +```bash +./stacks-signer generate-vote --config --vote --sip + +``` +- `--config`: The path to the signer configuration file. +- `--vote`: The vote (YES or NO) +- `--sip`: the number of the SIP being voted on + +### `verify-vote` + +Verify the validity of a vote signature for a specific SIP. + +```bash +./stacks-signer verify-vote --public-key --signature --vote --sip + +``` +- `--public-key`: The stacks public key to verify against in hexadecimal format +- `--signature`: The message signature in hexadecimal format +- `--vote`: The vote (YES or NO) +- `--sip`: the number of the SIP being voted on + ### `get-chunk` Retrieve a chunk from the StackerDB instance. ```bash ./stacks-signer get-chunk --host --contract --slot_id --slot_version -``` +``` - `--host`: The stacks node host to connect to. - `--contract`: The contract ID of the StackerDB instance. - `--slot-id`: The slot ID to get. @@ -49,7 +123,6 @@ Retrieve the latest chunk from the StackerDB instance. ```bash ./stacks-signer get-latest-chunk --host --contract --slot-id ``` - - `--host`: The stacks node host to connect to. - `--contract`: The contract ID of the StackerDB instance. - `--slot-id`: The slot ID to get. @@ -71,7 +144,6 @@ Upload a chunk to the StackerDB instance. ```bash ./stacks-signer put-chunk --host --contract --private_key --slot-id --slot-version [--data ] ``` - - `--host`: The stacks node host to connect to. - `--contract`: The contract ID of the StackerDB instance. - `--private_key`: The Stacks private key to use in hexademical format. @@ -79,64 +151,10 @@ Upload a chunk to the StackerDB instance. - `--slot-version`: The slot version to get. - `--data`: The data to upload. If you wish to pipe data using STDIN, use with '-'. -### `dkg` - -Run a distributed key generation round through stacker-db. - -```bash -./stacks-signer dkg --config -``` - -- `--config`: The path to the signer configuration file. - -### `dkg-sign` - -Run a distributed key generation round and sign a given message through stacker-db. - -```bash -./stacks-signer dkg-sign --config [--data ] -``` -- `--config`: The path to the signer configuration file. -- `--data`: The data to sign. If you wish to pipe data using STDIN, use with '-'. - - -### `dkg-sign` - -Sign a given message through stacker-db. - -```bash -./stacks-signer sign --config [--data ] -``` -- `--config`: The path to the signer configuration file. -- `--data`: The data to sign. If you wish to pipe data using STDIN, use with '-'. - -### `run` - -Start the signer and handle requests to sign messages and participate in DKG rounds via stacker-db. -```bash -./stacks-signer run --config -``` -- `--config`: The path to the signer configuration file. - -### `generate-files` - -Generate the necessary files to run a collection of signers to communicate via stacker-db. - -```bash -./stacks-signer generate-files --host --contract --num-signers --num-keys --network --dir -``` -- `--host`: The stacks node host to connect to. -- `--contract`: The contract ID of the StackerDB signer contract. -- `--num-signers`: The number of signers to generate configuration files for. -- `--num-keys`: The total number of key ids to distribute among the signers. -- `--private-keys:` A path to a file containing a list of hexadecimal representations of Stacks private keys. Required if `--num-keys` is not set. -- `--network`: The network to use. One of "mainnet" or "testnet". -- `--dir`: The directory to write files to. Defaults to the current directory. -- `--timeout`: Optional timeout in milliseconds to use when polling for updates in the StackerDB runloop. - ## Contributing To contribute to the stacks-signer project, please read the [Contributing Guidelines](../CONTRIBUTING.md). + ## License This program is open-source software released under the terms of the GNU General Public License (GPL). You should have received a copy of the GNU General Public License along with this program. \ No newline at end of file diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 081b5c07ab..5e957a2166 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -129,15 +129,12 @@ pub(crate) mod tests { use blockstack_lib::chainstate::stacks::boot::POX_4_NAME; use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; - use blockstack_lib::net::api::getaccount::AccountEntryResponse; use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::{ RPCPoxCurrentCycleInfo, RPCPoxEpoch, RPCPoxInfoData, RPCPoxNextCycleInfo, }; - use blockstack_lib::net::api::postfeerate::{RPCFeeEstimate, RPCFeeEstimateResponse}; use blockstack_lib::util_lib::boot::boot_code_id; use clarity::vm::costs::ExecutionCost; - use clarity::vm::types::TupleData; use clarity::vm::Value as ClarityValue; use libsigner::SignerEntries; use rand::distributions::Standard; @@ -221,28 +218,6 @@ pub(crate) mod tests { ConsensusHash(hash) } - /// Build a response for the get_last_round request - pub fn build_get_last_round_response(round: u64) -> String { - let value = ClarityValue::some(ClarityValue::UInt(round as u128)) - .expect("Failed to create response"); - build_read_only_response(&value) - } - - /// Build a response for the get_account_nonce request - pub fn build_account_nonce_response(nonce: u64) -> String { - let account_nonce_entry = AccountEntryResponse { - nonce, - balance: "0x00000000000000000000000000000000".to_string(), - locked: "0x00000000000000000000000000000000".to_string(), - unlock_height: thread_rng().next_u64(), - balance_proof: None, - nonce_proof: None, - }; - let account_nonce_entry_json = serde_json::to_string(&account_nonce_entry) - .expect("Failed to serialize account nonce entry"); - format!("HTTP/1.1 200 OK\n\n{account_nonce_entry_json}") - } - /// Build a response to get_pox_data_with_retry where it returns a specific reward cycle id and block height pub fn build_get_pox_data_response( reward_cycle: Option, @@ -377,44 +352,6 @@ pub(crate) mod tests { format!("HTTP/1.1 200 OK\n\n{{\"okay\":true,\"result\":\"{hex}\"}}") } - /// Build a response for the get_medium_estimated_fee_ustx_response request with a specific medium estimate - pub fn build_get_medium_estimated_fee_ustx_response( - medium_estimate: u64, - ) -> (String, RPCFeeEstimateResponse) { - // Generate some random info - let fee_response = RPCFeeEstimateResponse { - estimated_cost: ExecutionCost { - write_length: thread_rng().next_u64(), - write_count: thread_rng().next_u64(), - read_length: thread_rng().next_u64(), - read_count: thread_rng().next_u64(), - runtime: thread_rng().next_u64(), - }, - estimated_cost_scalar: thread_rng().next_u64(), - cost_scalar_change_by_byte: thread_rng().next_u32() as f64, - estimations: vec![ - RPCFeeEstimate { - fee_rate: thread_rng().next_u32() as f64, - fee: thread_rng().next_u64(), - }, - RPCFeeEstimate { - fee_rate: thread_rng().next_u32() as f64, - fee: medium_estimate, - }, - RPCFeeEstimate { - fee_rate: thread_rng().next_u32() as f64, - fee: thread_rng().next_u64(), - }, - ], - }; - let fee_response_json = serde_json::to_string(&fee_response) - .expect("Failed to serialize fee estimate response"); - ( - format!("HTTP/1.1 200 OK\n\n{fee_response_json}"), - fee_response, - ) - } - /// Generate a signer config with the given number of signers and keys where the first signer is /// obtained from the provided global config pub fn generate_signer_config(config: &GlobalConfig, num_signers: u32) -> SignerConfig { @@ -473,43 +410,12 @@ pub(crate) mod tests { stacks_private_key: config.stacks_private_key, node_host: config.node_host.to_string(), mainnet: config.network.is_mainnet(), - dkg_end_timeout: config.dkg_end_timeout, - dkg_private_timeout: config.dkg_private_timeout, - dkg_public_timeout: config.dkg_public_timeout, - nonce_timeout: config.nonce_timeout, - sign_timeout: config.sign_timeout, - tx_fee_ustx: config.tx_fee_ustx, - max_tx_fee_ustx: config.max_tx_fee_ustx, db_path: config.db_path.clone(), first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, block_proposal_timeout: config.block_proposal_timeout, } } - pub fn build_get_round_info_response(info: Option<(u64, u64)>) -> String { - let clarity_value = if let Some((vote_count, vote_weight)) = info { - ClarityValue::some(ClarityValue::Tuple( - TupleData::from_data(vec![ - ("votes-count".into(), ClarityValue::UInt(vote_count as u128)), - ( - "votes-weight".into(), - ClarityValue::UInt(vote_weight as u128), - ), - ]) - .expect("BUG: Failed to create clarity value from tuple data"), - )) - .expect("BUG: Failed to create clarity value from tuple data") - } else { - ClarityValue::none() - }; - build_read_only_response(&clarity_value) - } - - pub fn build_get_weight_threshold_response(threshold: u64) -> String { - let clarity_value = ClarityValue::UInt(threshold as u128); - build_read_only_response(&clarity_value) - } - pub fn build_get_tenure_tip_response(header_types: &StacksBlockHeaderTypes) -> String { let response_json = serde_json::to_string(header_types).expect("Failed to serialize tenure tip info"); diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index dbe4f9094d..f415896e86 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -15,11 +15,8 @@ // along with this program. If not, see . use std::collections::{HashMap, VecDeque}; -use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; -use blockstack_lib::chainstate::stacks::boot::{ - NakamotoSignerEntry, SIGNERS_NAME, SIGNERS_VOTING_NAME, -}; +use blockstack_lib::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::chainstate::stacks::{ StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, @@ -37,9 +34,7 @@ use blockstack_lib::net::api::getstackers::{GetStackersErrors, GetStackersRespon use blockstack_lib::net::api::postblock::StacksBlockAcceptedData; use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; use blockstack_lib::net::api::postblock_v3; -use blockstack_lib::net::api::postfeerate::{FeeRateEstimateRequestBody, RPCFeeEstimateResponse}; -use blockstack_lib::util_lib::boot::{boot_code_addr, boot_code_id}; -use clarity::util::hash::to_hex; +use blockstack_lib::util_lib::boot::boot_code_id; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use libsigner::v0::messages::PeerInfo; @@ -272,44 +267,6 @@ impl StacksClient { .collect()) } - /// Retrieve the medium estimated transaction fee in uSTX from the stacks node for the given transaction - pub fn get_medium_estimated_fee_ustx( - &self, - tx: &StacksTransaction, - ) -> Result { - debug!("stacks_node_client: Getting estimated fee..."); - let request = FeeRateEstimateRequestBody { - estimated_len: Some(tx.tx_len()), - transaction_payload: to_hex(&tx.payload.serialize_to_vec()), - }; - let timer = - crate::monitoring::new_rpc_call_timer(&self.fees_transaction_path(), &self.http_origin); - let send_request = || { - self.stacks_node_client - .post(self.fees_transaction_path()) - .header("Content-Type", "application/json") - .json(&request) - .send() - .map_err(backoff::Error::transient) - }; - let response = retry_with_exponential_backoff(send_request)?; - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - timer.stop_and_record(); - let fee_estimate_response = response.json::()?; - let fee = fee_estimate_response - .estimations - .get(1) - .map(|estimate| estimate.fee) - .ok_or_else(|| { - ClientError::UnexpectedResponseFormat( - "RPCFeeEstimateResponse missing medium fee estimate".into(), - ) - })?; - Ok(fee) - } - /// Determine the stacks node current epoch pub fn get_node_epoch(&self) -> Result { let pox_info = self.get_pox_data()?; @@ -371,52 +328,6 @@ impl StacksClient { Ok(()) } - /// Retrieve the current consumed weight for the given reward cycle and DKG round - pub fn get_round_vote_weight( - &self, - reward_cycle: u64, - round_id: u64, - ) -> Result, ClientError> { - let function_name = ClarityName::from("get-round-info"); - let pox_contract_id = boot_code_id(SIGNERS_VOTING_NAME, self.mainnet); - let function_args = &[ - ClarityValue::UInt(reward_cycle as u128), - ClarityValue::UInt(round_id as u128), - ]; - let value = self.read_only_contract_call( - &pox_contract_id.issuer.into(), - &pox_contract_id.name, - &function_name, - function_args, - )?; - let inner_data = value.expect_optional()?; - let Some(inner_data) = inner_data else { - return Ok(None); - }; - let round_info = inner_data.expect_tuple()?; - let votes_weight = round_info.get("votes-weight")?.to_owned().expect_u128()?; - Ok(Some(votes_weight)) - } - - /// Retrieve the weight threshold required to approve a DKG vote - pub fn get_vote_threshold_weight(&self, reward_cycle: u64) -> Result { - let function_name = ClarityName::from("get-threshold-weight"); - let pox_contract_id = boot_code_id(SIGNERS_VOTING_NAME, self.mainnet); - let function_args = &[ClarityValue::UInt(reward_cycle as u128)]; - let value = self.read_only_contract_call( - &pox_contract_id.issuer.into(), - &pox_contract_id.name, - &function_name, - function_args, - )?; - Ok(value.expect_u128()?) - } - - /// Retrieve the current account nonce for the provided address - pub fn get_account_nonce(&self, address: &StacksAddress) -> Result { - self.get_account_entry(address).map(|entry| entry.nonce) - } - /// Get information about the tenures between `chosen_parent` and `last_sortition` pub fn get_tenure_forking_info( &self, @@ -545,33 +456,6 @@ impl StacksClient { Ok(peer_info_data) } - /// Retrieve the last DKG vote round number for the current reward cycle - pub fn get_last_round(&self, reward_cycle: u64) -> Result, ClientError> { - debug!("Getting the last DKG vote round of reward cycle {reward_cycle}..."); - let contract_addr = boot_code_addr(self.mainnet); - let contract_name = ContractName::from(SIGNERS_VOTING_NAME); - let function_name = ClarityName::from("get-last-round"); - let function_args = &[ClarityValue::UInt(reward_cycle as u128)]; - let opt_value = self - .read_only_contract_call( - &contract_addr, - &contract_name, - &function_name, - function_args, - )? - .expect_optional()?; - let round = if let Some(value) = opt_value { - Some(u64::try_from(value.expect_u128()?).map_err(|e| { - ClientError::MalformedContractData(format!( - "Failed to convert vote round to u64: {e}" - )) - })?) - } else { - None - }; - Ok(round) - } - /// Get the reward set signers from the stacks node for the given reward cycle pub fn get_reward_set_signers( &self, @@ -711,34 +595,6 @@ impl StacksClient { Ok(post_block_resp.accepted) } - /// Helper function to submit a transaction to the Stacks mempool - pub fn submit_transaction(&self, tx: &StacksTransaction) -> Result { - let txid = tx.txid(); - let tx = tx.serialize_to_vec(); - debug!("stacks_node_client: Submitting transaction to the stacks node..."; - "txid" => %txid, - ); - let timer = - crate::monitoring::new_rpc_call_timer(&self.transaction_path(), &self.http_origin); - let send_request = || { - self.stacks_node_client - .post(self.transaction_path()) - .header("Content-Type", "application/octet-stream") - .body(tx.clone()) - .send() - .map_err(|e| { - debug!("Failed to submit transaction to the Stacks node: {e:?}"); - backoff::Error::transient(e) - }) - }; - let response = retry_with_exponential_backoff(send_request)?; - timer.stop_and_record(); - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - Ok(txid) - } - /// Makes a read only contract call to a stacks contract pub fn read_only_contract_call( &self, @@ -794,10 +650,6 @@ impl StacksClient { format!("{}/v2/pox", self.http_origin) } - fn transaction_path(&self) -> String { - format!("{}/v2/transactions", self.http_origin) - } - fn read_only_path( &self, contract_addr: &StacksAddress, @@ -839,10 +691,6 @@ impl StacksClient { format!("{}/v3/stacker_set/{reward_cycle}", self.http_origin) } - fn fees_transaction_path(&self) -> String { - format!("{}/v2/fees/transaction", self.http_origin) - } - fn tenure_tip_path(&self, consensus_hash: &ConsensusHash) -> String { format!("{}/v3/tenures/tip/{}", self.http_origin, consensus_hash) } @@ -905,7 +753,6 @@ impl StacksClient { #[cfg(test)] mod tests { use std::collections::BTreeMap; - use std::io::{BufWriter, Write}; use std::thread::spawn; use blockstack_lib::burnchains::Address; @@ -924,15 +771,13 @@ mod tests { use rand::thread_rng; use rand_core::RngCore; use stacks_common::bitvec::BitVec; - use stacks_common::consts::{CHAIN_ID_TESTNET, SIGNER_SLOTS_PER_USER}; + use stacks_common::consts::SIGNER_SLOTS_PER_USER; use super::*; use crate::client::tests::{ - build_account_nonce_response, build_get_last_round_response, - build_get_last_set_cycle_response, build_get_medium_estimated_fee_ustx_response, - build_get_peer_info_response, build_get_pox_data_response, build_get_round_info_response, - build_get_tenure_tip_response, build_get_weight_threshold_response, - build_read_only_response, write_response, MockServerClient, + build_get_last_set_cycle_response, build_get_peer_info_response, + build_get_pox_data_response, build_get_tenure_tip_response, build_read_only_response, + write_response, MockServerClient, }; #[test] @@ -1061,58 +906,6 @@ mod tests { assert!(matches!(res, Err(ClientError::ReqwestError(_)))); } - #[test] - fn transaction_contract_call_should_send_bytes_to_node() { - let mock = MockServerClient::new(); - let private_key = StacksPrivateKey::new(); - let unsigned_tx = StacksClient::build_unsigned_contract_call_transaction( - &mock.client.stacks_address, - ContractName::from("contract-name"), - ClarityName::from("function-name"), - &[], - &private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 0, - ) - .unwrap(); - - let tx = mock.client.sign_transaction(unsigned_tx).unwrap(); - - let mut tx_bytes = [0u8; 1024]; - { - let mut tx_bytes_writer = BufWriter::new(&mut tx_bytes[..]); - tx.consensus_serialize(&mut tx_bytes_writer).unwrap(); - tx_bytes_writer.flush().unwrap(); - } - - let bytes_len = tx_bytes - .iter() - .enumerate() - .rev() - .find(|(_, &x)| x != 0) - .unwrap() - .0 - + 1; - - let tx_clone = tx.clone(); - let h = spawn(move || mock.client.submit_transaction(&tx_clone)); - - let request_bytes = write_response( - mock.server, - format!("HTTP/1.1 200 OK\n\n{}", tx.txid()).as_bytes(), - ); - let returned_txid = h.join().unwrap().unwrap(); - - assert_eq!(returned_txid, tx.txid()); - assert!( - request_bytes - .windows(bytes_len) - .any(|window| window == &tx_bytes[..bytes_len]), - "Request bytes did not contain the transaction bytes" - ); - } - #[test] fn core_info_call_for_burn_block_height_should_succeed() { let mock = MockServerClient::new(); @@ -1134,29 +927,6 @@ mod tests { assert!(h.join().unwrap().is_err()); } - #[test] - fn get_account_nonce_should_succeed() { - let mock = MockServerClient::new(); - let address = mock.client.stacks_address; - let h = spawn(move || mock.client.get_account_nonce(&address)); - let nonce = thread_rng().next_u64(); - write_response(mock.server, build_account_nonce_response(nonce).as_bytes()); - let returned_nonce = h.join().unwrap().expect("Failed to deserialize response"); - assert_eq!(returned_nonce, nonce); - } - - #[test] - fn get_account_nonce_should_fail() { - let mock = MockServerClient::new(); - let address = mock.client.stacks_address; - let h = spawn(move || mock.client.get_account_nonce(&address)); - write_response( - mock.server, - b"HTTP/1.1 200 OK\n\n{\"nonce\":\"invalid nonce\",\"balance\":\"0x00000000000000000000000000000000\",\"locked\":\"0x00000000000000000000000000000000\",\"unlock_height\":0}" - ); - assert!(h.join().unwrap().is_err()); - } - #[test] fn parse_valid_signer_slots_should_succeed() { let mock = MockServerClient::new(); @@ -1361,17 +1131,6 @@ mod tests { assert_eq!(reduced_peer_info.server_version, peer_info.server_version); } - #[test] - fn get_last_round_should_succeed() { - let mock = MockServerClient::new(); - let round = rand::thread_rng().next_u64(); - let response = build_get_last_round_response(round); - let h = spawn(move || mock.client.get_last_round(0)); - - write_response(mock.server, response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap().unwrap(), round); - } - #[test] fn get_reward_set_should_succeed() { let mock = MockServerClient::new(); @@ -1403,56 +1162,6 @@ mod tests { assert_eq!(h.join().unwrap().unwrap(), stacker_set.signers); } - #[test] - fn get_round_vote_weight_should_succeed() { - let mock = MockServerClient::new(); - let vote_count = rand::thread_rng().next_u64(); - let weight = rand::thread_rng().next_u64(); - let round_response = build_get_round_info_response(Some((vote_count, weight))); - let h = spawn(move || mock.client.get_round_vote_weight(0, 0)); - write_response(mock.server, round_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), Some(weight as u128)); - - let mock = MockServerClient::new(); - let round_response = build_get_round_info_response(None); - let h = spawn(move || mock.client.get_round_vote_weight(0, 0)); - write_response(mock.server, round_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), None); - } - - #[test] - fn get_vote_threshold_weight_should_succeed() { - let mock = MockServerClient::new(); - let weight = rand::thread_rng().next_u64(); - let round_response = build_get_weight_threshold_response(weight); - let h = spawn(move || mock.client.get_vote_threshold_weight(0)); - write_response(mock.server, round_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), weight as u128); - } - - #[test] - fn get_medium_estimated_fee_ustx_should_succeed() { - let mock = MockServerClient::new(); - let private_key = StacksPrivateKey::new(); - let unsigned_tx = StacksClient::build_unsigned_contract_call_transaction( - &mock.client.stacks_address, - ContractName::from("contract-name"), - ClarityName::from("function-name"), - &[], - &private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 0, - ) - .unwrap(); - - let estimate = thread_rng().next_u64(); - let response = build_get_medium_estimated_fee_ustx_response(estimate).0; - let h = spawn(move || mock.client.get_medium_estimated_fee_ustx(&unsigned_tx)); - write_response(mock.server, response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), estimate); - } - #[test] fn get_tenure_tip_should_succeed() { let mock = MockServerClient::new(); diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 802c362b86..843645945b 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -35,8 +35,6 @@ use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 45_000; -// Default transaction fee to use in microstacks (if unspecificed in the config file) -const TX_FEE_USTX: u64 = 10_000; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -116,7 +114,7 @@ impl Network { pub struct SignerConfig { /// The reward cycle of the configuration pub reward_cycle: u64, - /// The signer ID assigned to this signer to be used in DKG and Sign rounds + /// The signer ID assigned to this signer (may be different from signer_slot_id) pub signer_id: u32, /// The signer stackerdb slot id (may be different from signer_id) pub signer_slot_id: SignerSlotID, @@ -130,20 +128,6 @@ pub struct SignerConfig { pub node_host: String, /// Whether this signer is running on mainnet or not pub mainnet: bool, - /// timeout to gather DkgPublicShares messages - pub dkg_public_timeout: Option, - /// timeout to gather DkgPrivateShares messages - pub dkg_private_timeout: Option, - /// timeout to gather DkgEnd messages - pub dkg_end_timeout: Option, - /// timeout to gather nonces - pub nonce_timeout: Option, - /// timeout to gather signature shares - pub sign_timeout: Option, - /// the STX tx fee to use in uSTX. - pub tx_fee_ustx: u64, - /// If set, will use the estimated fee up to this amount. - pub max_tx_fee_ustx: Option, /// The path to the signer's database file pub db_path: PathBuf, /// How much time must pass between the first block proposal in a tenure and the next bitcoin block @@ -168,20 +152,6 @@ pub struct GlobalConfig { pub network: Network, /// The time to wait for a response from the stacker-db instance pub event_timeout: Duration, - /// timeout to gather DkgPublicShares messages - pub dkg_public_timeout: Option, - /// timeout to gather DkgPrivateShares messages - pub dkg_private_timeout: Option, - /// timeout to gather DkgEnd messages - pub dkg_end_timeout: Option, - /// timeout to gather nonces - pub nonce_timeout: Option, - /// timeout to gather signature shares - pub sign_timeout: Option, - /// the STX tx fee to use in uSTX. - pub tx_fee_ustx: u64, - /// the max STX tx fee to use in uSTX when estimating fees - pub max_tx_fee_ustx: Option, /// the authorization password for the block proposal endpoint pub auth_password: String, /// The path to the signer's database file @@ -209,21 +179,6 @@ struct RawConfigFile { pub network: Network, /// The time to wait (in millisecs) for a response from the stacker-db instance pub event_timeout_ms: Option, - /// timeout in (millisecs) to gather DkgPublicShares messages - pub dkg_public_timeout_ms: Option, - /// timeout in (millisecs) to gather DkgPrivateShares messages - pub dkg_private_timeout_ms: Option, - /// timeout in (millisecs) to gather DkgEnd messages - pub dkg_end_timeout_ms: Option, - /// timeout in (millisecs) to gather nonces - pub nonce_timeout_ms: Option, - /// timeout in (millisecs) to gather signature shares - pub sign_timeout_ms: Option, - /// the STX tx fee to use in uSTX. If not set, will default to TX_FEE_USTX - pub tx_fee_ustx: Option, - /// the max STX tx fee to use in uSTX when estimating fees. - /// If not set, will use tx_fee_ustx. - pub max_tx_fee_ustx: Option, /// The authorization password for the block proposal endpoint pub auth_password: String, /// The path to the signer's database file or :memory: for an in-memory database @@ -293,11 +248,6 @@ impl TryFrom for GlobalConfig { StacksAddress::p2pkh_from_hash(raw_data.network.is_mainnet(), signer_hash); let event_timeout = Duration::from_millis(raw_data.event_timeout_ms.unwrap_or(EVENT_TIMEOUT_MS)); - let dkg_end_timeout = raw_data.dkg_end_timeout_ms.map(Duration::from_millis); - let dkg_public_timeout = raw_data.dkg_public_timeout_ms.map(Duration::from_millis); - let dkg_private_timeout = raw_data.dkg_private_timeout_ms.map(Duration::from_millis); - let nonce_timeout = raw_data.nonce_timeout_ms.map(Duration::from_millis); - let sign_timeout = raw_data.sign_timeout_ms.map(Duration::from_millis); let first_proposal_burn_block_timing = Duration::from_secs(raw_data.first_proposal_burn_block_timing_secs.unwrap_or(30)); let db_path = raw_data.db_path.into(); @@ -328,13 +278,6 @@ impl TryFrom for GlobalConfig { stacks_address, network: raw_data.network, event_timeout, - dkg_end_timeout, - dkg_public_timeout, - dkg_private_timeout, - nonce_timeout, - sign_timeout, - tx_fee_ustx: raw_data.tx_fee_ustx.unwrap_or(TX_FEE_USTX), - max_tx_fee_ustx: raw_data.max_tx_fee_ustx, auth_password: raw_data.auth_password, db_path, metrics_endpoint, @@ -366,10 +309,6 @@ impl GlobalConfig { /// Return a string with non-sensitive configuration /// information for logging purposes pub fn config_to_log_string(&self) -> String { - let tx_fee = match self.tx_fee_ustx { - 0 => "default".to_string(), - _ => (self.tx_fee_ustx as f64 / 1_000_000.0).to_string(), - }; let metrics_endpoint = match &self.metrics_endpoint { Some(endpoint) => endpoint.to_string(), None => "None".to_string(), @@ -382,7 +321,6 @@ Stacks address: {stacks_address} Public key: {public_key} Network: {network} Database path: {db_path} -DKG transaction fee: {tx_fee} uSTX Metrics endpoint: {metrics_endpoint} "#, node_host = self.node_host, @@ -393,7 +331,6 @@ Metrics endpoint: {metrics_endpoint} ), network = self.network, db_path = self.db_path.to_str().unwrap_or_default(), - tx_fee = tx_fee, metrics_endpoint = metrics_endpoint, ) } @@ -527,119 +464,9 @@ mod tests { RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); assert_eq!(config.auth_password, "melon"); - assert!(config.max_tx_fee_ustx.is_none()); - assert!(config.tx_fee_ustx.is_none()); assert_eq!(config.metrics_endpoint, Some("localhost:4000".to_string())); } - #[test] - fn fee_options_should_deserialize_correctly() { - let pk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let node_host = "localhost"; - let network = Network::Testnet; - let password = "melon"; - - // Test both max_tx_fee_ustx and tx_fee_ustx are unspecified - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - None, - None, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert!(config.max_tx_fee_ustx.is_none()); - assert!(config.tx_fee_ustx.is_none()); - - let config = GlobalConfig::try_from(config).expect("Failed to parse config"); - assert!(config.max_tx_fee_ustx.is_none()); - assert_eq!(config.tx_fee_ustx, TX_FEE_USTX); - - // Test both max_tx_fee_ustx and tx_fee_ustx are specified - let max_tx_fee_ustx = Some(1000); - let tx_fee_ustx = Some(2000); - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - max_tx_fee_ustx, - tx_fee_ustx, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); - assert_eq!(config.tx_fee_ustx, tx_fee_ustx); - - // Test only max_tx_fee_ustx is specified - let max_tx_fee_ustx = Some(1000); - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - max_tx_fee_ustx, - None, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); - assert!(config.tx_fee_ustx.is_none()); - - let config = GlobalConfig::try_from(config).expect("Failed to parse config"); - assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); - assert_eq!(config.tx_fee_ustx, TX_FEE_USTX); - - // Test only tx_fee_ustx is specified - let tx_fee_ustx = Some(1000); - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - None, - tx_fee_ustx, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert!(config.max_tx_fee_ustx.is_none()); - assert_eq!(config.tx_fee_ustx, tx_fee_ustx); - - let config = GlobalConfig::try_from(config).expect("Failed to parse config"); - assert!(config.max_tx_fee_ustx.is_none()); - assert_eq!(Some(config.tx_fee_ustx), tx_fee_ustx); - } - #[test] fn test_config_to_string() { let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); @@ -652,7 +479,6 @@ Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet Database path: :memory: -DKG transaction fee: 0.01 uSTX Metrics endpoint: 0.0.0.0:9090 "#; @@ -663,7 +489,6 @@ Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet Database path: :memory: -DKG transaction fee: 0.01 uSTX Metrics endpoint: 0.0.0.0:9090 "#; diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 5b118db646..520d455258 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -1,4 +1,4 @@ -//! # stacks-signer: Stacks signer binary for executing DKG rounds, signing transactions and blocks, and more. +//! # stacks-signer: Stacks signer binary for signing block proposals, interacting with stackerdb, and more. //! //! Usage documentation can be found in the [README]("https://github.com/blockstack/stacks-blockchain/stacks-signer/README.md). //! diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs index 0ecc99b5f8..e03b03d47a 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -71,52 +71,6 @@ pub fn increment_block_responses_sent(accepted: bool) { } } -/// Increment the signer inbound messages counter -#[allow(unused_variables)] -pub fn increment_signer_inbound_messages(amount: i64) { - #[cfg(feature = "monitoring_prom")] - prometheus::SIGNER_INBOUND_MESSAGES.inc_by(amount); -} - -/// Increment the coordinator inbound messages counter -#[allow(unused_variables)] -pub fn increment_coordinator_inbound_messages(amount: i64) { - #[cfg(feature = "monitoring_prom")] - prometheus::COORDINATOR_INBOUND_MESSAGES.inc_by(amount); -} - -/// Increment the number of inbound packets received -#[allow(unused_variables)] -pub fn increment_inbound_packets(amount: i64) { - #[cfg(feature = "monitoring_prom")] - prometheus::INBOUND_PACKETS_RECEIVED.inc_by(amount); -} - -/// Increment the number of commands processed -#[allow(unused_variables)] -pub fn increment_commands_processed(command_type: &str) { - #[cfg(feature = "monitoring_prom")] - prometheus::COMMANDS_PROCESSED - .with_label_values(&[command_type]) - .inc(); -} - -/// Increment the number of DKG votes submitted -#[allow(unused_variables)] -pub fn increment_dkg_votes_submitted() { - #[cfg(feature = "monitoring_prom")] - prometheus::DGK_VOTES_SUBMITTED.inc(); -} - -/// Increment the number of commands processed -#[allow(unused_variables)] -pub fn increment_operation_results(operation_type: &str) { - #[cfg(feature = "monitoring_prom")] - prometheus::OPERATION_RESULTS - .with_label_values(&[operation_type]) - .inc(); -} - /// Increment the number of block proposals received #[allow(unused_variables)] pub fn increment_block_proposals_received() { diff --git a/stacks-signer/src/monitoring/prometheus.rs b/stacks-signer/src/monitoring/prometheus.rs index c78db1299d..247a9f00f5 100644 --- a/stacks-signer/src/monitoring/prometheus.rs +++ b/stacks-signer/src/monitoring/prometheus.rs @@ -39,38 +39,6 @@ lazy_static! { &["response_type"] ) .unwrap(); - pub static ref SIGNER_INBOUND_MESSAGES: IntCounter = register_int_counter!(opts!( - "stacks_signer_inbound_messages", - "The number of inbound messages received by the signer" - )) - .unwrap(); - pub static ref COORDINATOR_INBOUND_MESSAGES: IntCounter = register_int_counter!(opts!( - "stacks_signer_coordinator_inbound_messages", - "The number of inbound messages received as a coordinator" - )) - .unwrap(); - pub static ref INBOUND_PACKETS_RECEIVED: IntCounter = register_int_counter!(opts!( - "stacks_signer_inbound_packets_received", - "The number of inbound packets received by the signer" - )) - .unwrap(); - pub static ref COMMANDS_PROCESSED: IntCounterVec = register_int_counter_vec!( - "stacks_signer_commands_processed", - "The number of commands processed by the signer", - &["command_type"] - ) - .unwrap(); - pub static ref DGK_VOTES_SUBMITTED: IntCounter = register_int_counter!(opts!( - "stacks_signer_dgk_votes_submitted", - "The number of DGK votes submitted by the signer" - )) - .unwrap(); - pub static ref OPERATION_RESULTS: IntCounterVec = register_int_counter_vec!( - "stacks_signer_operation_results_dkg", - "The number of DKG operation results", - &["operation_type"] - ) - .unwrap(); pub static ref BLOCK_PROPOSALS_RECEIVED: IntCounter = register_int_counter!(opts!( "stacks_signer_block_proposals_received", "The number of block proposals received by the signer" diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 855957a70a..a0e2b739e9 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -281,13 +281,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo stacks_private_key: self.config.stacks_private_key, node_host: self.config.node_host.to_string(), mainnet: self.config.network.is_mainnet(), - dkg_end_timeout: self.config.dkg_end_timeout, - dkg_private_timeout: self.config.dkg_private_timeout, - dkg_public_timeout: self.config.dkg_public_timeout, - nonce_timeout: self.config.nonce_timeout, - sign_timeout: self.config.sign_timeout, - tx_fee_ustx: self.config.tx_fee_ustx, - max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, })) From ff580187afde2ade7c4d15b523ddb68fc1bf0e7c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 27 Sep 2024 09:42:40 -0700 Subject: [PATCH 1233/1400] Do not leak a private key that has a small typo Signed-off-by: Jacinta Ferrant --- stacks-signer/src/config.rs | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 66cf5a5f7d..0ae5be2a22 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -288,21 +288,11 @@ impl TryFrom for GlobalConfig { ConfigError::BadField("endpoint".to_string(), raw_data.endpoint.clone()) })?; - let stacks_private_key = - StacksPrivateKey::from_hex(&raw_data.stacks_private_key).map_err(|_| { - ConfigError::BadField( - "stacks_private_key".to_string(), - raw_data.stacks_private_key.clone(), - ) - })?; + let stacks_private_key = StacksPrivateKey::from_hex(&raw_data.stacks_private_key) + .map_err(|e| ConfigError::BadField("stacks_private_key".to_string(), e.into()))?; - let ecdsa_private_key = - Scalar::try_from(&stacks_private_key.to_bytes()[..32]).map_err(|_| { - ConfigError::BadField( - "stacks_private_key".to_string(), - raw_data.stacks_private_key.clone(), - ) - })?; + let ecdsa_private_key = Scalar::try_from(&stacks_private_key.to_bytes()[..32]) + .map_err(|e| ConfigError::BadField("stacks_private_key".to_string(), e.to_string()))?; let stacks_public_key = StacksPublicKey::from_private(&stacks_private_key); let signer_hash = Hash160::from_data(stacks_public_key.to_bytes_compressed().as_slice()); let stacks_address = From c60e4a63c83c95646b1a7aa6a8d4191a62b8e821 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 27 Sep 2024 10:22:05 -0700 Subject: [PATCH 1234/1400] Test: try increasing a timeout to see what CI does Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d77ea1ef11..4d55c4d0ce 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3693,7 +3693,7 @@ fn partial_tenure_fork() { let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; - wait_for(120, || { + wait_for(200, || { let Some(node_1_info) = get_chain_info_opt(&conf) else { return Ok(false); }; From 0feddea3c7085f4479eac77c4afb77b99adcd3a6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 27 Sep 2024 13:29:27 -0500 Subject: [PATCH 1235/1400] fix: /v3/sortitions over optimistic in finding last sortition --- stackslib/src/net/api/getsortition.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 7b594530c2..ccfa4efec9 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -182,7 +182,11 @@ impl GetSortitionHandler { // try to figure out what the last snapshot in this fork was with a successful // sortition. // optimization heuristic: short-circuit the load if its just `stacks_parent_sn` - let last_sortition_ch = if stacks_parent_sn.sortition { + // if the sortition count incremented by exactly 1 between us and our **stacks** parent, + // then the stacks parent's sortition *must* be the last one with a winner. + let sortitions_incremented_by_1 = + sortition_sn.num_sortitions == stacks_parent_sn.num_sortitions + 1; + let last_sortition_ch = if sortitions_incremented_by_1 { stacks_parent_sn.consensus_hash.clone() } else { // we actually need to perform the marf lookup From 8809f919f2de4cc5cc9285b380fd61320bca03d2 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 27 Sep 2024 13:32:29 -0500 Subject: [PATCH 1236/1400] fix: tenure extend logic. only include tx in first block after extension --- testnet/stacks-node/src/nakamoto_node/miner.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ecc30a9c19..5e3f72ee20 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1260,6 +1260,12 @@ impl BlockMinerThread { tenure_change_tx: None, }); }; + if self.last_block_mined.is_some() { + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let mut payload = TenureChangePayload { @@ -1289,10 +1295,10 @@ impl BlockMinerThread { &parent_block_id, ) .map_err(NakamotoNodeError::MiningFailure)?; - debug!("Miner: Extending tenure"; - "burn_view_consensus_hash" => %burn_view_consensus_hash, - "parent_block_id" => %parent_block_id, - "num_blocks_so_far" => num_blocks_so_far, + info!("Miner: Extending tenure"; + "burn_view_consensus_hash" => %burn_view_consensus_hash, + "parent_block_id" => %parent_block_id, + "num_blocks_so_far" => num_blocks_so_far, ); payload = payload.extend( *burn_view_consensus_hash, From a5b5f29fa9147aa23226b4169973690129a8c2b6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 27 Sep 2024 15:23:02 -0400 Subject: [PATCH 1237/1400] feat: helper method to get the tenure ID for a block --- stackslib/src/chainstate/nakamoto/mod.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e97fefafff..b7e95ff14b 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2534,6 +2534,18 @@ impl NakamotoChainState { Ok(result) } + /// Load a consensus hash for a Nakamoto header + pub fn get_block_header_nakamoto_tenure_id( + chainstate_conn: &Connection, + index_block_hash: &StacksBlockId, + ) -> Result, ChainstateError> { + let sql = "SELECT consensus_hash FROM nakamoto_block_headers WHERE index_block_hash = ?1"; + let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { + "FATAL: multiple rows for the same block hash".to_string() + })?; + Ok(result) + } + /// Load an epoch2 header pub fn get_block_header_epoch2( chainstate_conn: &Connection, From 475ed44047f94cc7bcef898598bd0907ba4af7c8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 27 Sep 2024 15:23:31 -0400 Subject: [PATCH 1238/1400] feat: tool for generating invs for nakamoto testnet --- stackslib/src/main.rs | 58 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 029ee16c6b..52d481affb 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -63,9 +63,10 @@ use blockstack_lib::core::{MemPoolDB, *}; use blockstack_lib::cost_estimates::metrics::UnitMetric; use blockstack_lib::cost_estimates::UnitEstimator; use blockstack_lib::net::db::LocalPeer; +use blockstack_lib::net::inv::nakamoto::InvGenerator; use blockstack_lib::net::p2p::PeerNetwork; use blockstack_lib::net::relay::Relayer; -use blockstack_lib::net::StacksMessage; +use blockstack_lib::net::{NakamotoInvData, StacksMessage}; use blockstack_lib::util_lib::db::sqlite_open; use blockstack_lib::util_lib::strings::UrlString; use blockstack_lib::{clarity_cli, cli}; @@ -974,6 +975,61 @@ simulating a miner. process::exit(1); } + if argv[1] == "get-tenure-inv" { + let chainstate_root_path = &argv[2]; + let tip_block_ids = &argv[3..]; + let chainstate_path = format!("{}/chainstate", &chainstate_root_path); + let sortition_path = format!("{}/burnchain/sortition", &chainstate_root_path); + + let (chainstate, _) = + StacksChainState::open(false, 0x80000000, &chainstate_path, None).unwrap(); + let pox_consts = + PoxConstants::new(900, 100, 80, 0, 0, u64::MAX, u64::MAX, 240, 241, 242, 242); + let sortition_db = SortitionDB::open(&sortition_path, true, pox_consts).unwrap(); + + let mut invgen = InvGenerator::new(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortition_db.conn()).unwrap(); + + for tip_block_id in tip_block_ids.iter() { + let tip_block_id = StacksBlockId::from_hex(tip_block_id).unwrap(); + let header = + NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &tip_block_id) + .unwrap() + .unwrap(); + let sn = SortitionDB::get_block_snapshot_consensus( + sortition_db.conn(), + &header.consensus_hash, + ) + .unwrap() + .unwrap(); + + let reward_cycle = sortition_db + .pox_constants + .block_height_to_reward_cycle(230, sn.block_height) + .unwrap(); + + let bitvec_bools = invgen + .make_tenure_bitvector( + &tip, + &sortition_db, + &chainstate, + &header.consensus_hash, + &header.anchored_header.block_hash(), + reward_cycle, + ) + .unwrap(); + let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools) + .map_err(|e| { + warn!("Failed to create a NakamotoInv response: {:?}", &e); + e + }) + .unwrap(); + + println!("{}: {:?}", tip_block_id, &nakamoto_inv); + } + process::exit(0); + } + if argv[1] == "replay-chainstate" { if argv.len() < 7 { eprintln!("Usage: {} OLD_CHAINSTATE_PATH OLD_SORTITION_DB_PATH OLD_BURNCHAIN_DB_PATH NEW_CHAINSTATE_PATH NEW_BURNCHAIN_DB_PATH", &argv[0]); From d70c223eff2632cc50e3a52e1935935d030717f3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 27 Sep 2024 15:24:17 -0400 Subject: [PATCH 1239/1400] fix: when advancing the tip height, invalidate all cached inventory state between (and including) the new and old tips, since cached data from the old tip will have stored a negative cache result that ought now to be positive in the context of the new tip. --- stackslib/src/net/inv/nakamoto.rs | 66 +++++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 16 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 3b7a5050ba..08cd795ad2 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -17,7 +17,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use stacks_common::bitvec::BitVec; -use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; use stacks_common::types::StacksEpochId; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; @@ -155,16 +155,34 @@ impl InvGenerator { &self, chainstate: &StacksChainState, tip_block_id: &StacksBlockId, - ) -> Result, NetError> { + ) -> Result)>, NetError> { let mut cursor = tip_block_id.clone(); + let mut chs = vec![]; + let Some(ch) = + NakamotoChainState::get_block_header_nakamoto_tenure_id(chainstate.db(), &cursor)? + else { + return Ok(None); + }; + chs.push(ch); for _ in 0..self.tip_ancestor_search_depth { let parent_id_opt = NakamotoChainState::get_nakamoto_parent_block_id(chainstate.db(), &cursor)?; + let Some(parent_id) = parent_id_opt else { return Ok(None); }; + + let Some(parent_ch) = NakamotoChainState::get_block_header_nakamoto_tenure_id( + chainstate.db(), + &parent_id, + )? + else { + return Ok(None); + }; + chs.push(parent_ch); + if self.processed_tenures.contains_key(&parent_id) { - return Ok(Some(parent_id)); + return Ok(Some((parent_id, chs))); } cursor = parent_id; } @@ -188,32 +206,40 @@ impl InvGenerator { pub(crate) fn get_processed_tenure( &mut self, chainstate: &StacksChainState, - tip_block_id: &StacksBlockId, + tip_block_ch: &ConsensusHash, + tip_block_bh: &BlockHeaderHash, tenure_id_consensus_hash: &ConsensusHash, ) -> Result, NetError> { - if self.processed_tenures.get(tip_block_id).is_none() { + let tip_block_id = StacksBlockId::new(tip_block_ch, tip_block_bh); + if self.processed_tenures.get(&tip_block_id).is_none() { // this tip has no known table. // does it have an ancestor with a table? If so, then move its ancestor's table to this // tip. Otherwise, make a new table. - if let Some(ancestor_tip_id) = - self.find_ancestor_processed_tenures(chainstate, tip_block_id)? + if let Some((ancestor_tip_id, intermediate_tenures)) = + self.find_ancestor_processed_tenures(chainstate, &tip_block_id)? { - let ancestor_tenures = self + let mut ancestor_tenures = self .processed_tenures .remove(&ancestor_tip_id) .unwrap_or_else(|| { panic!("FATAL: did not have ancestor tip reported by search"); }); + for ch in intermediate_tenures.into_iter() { + ancestor_tenures.remove(&ch); + } + ancestor_tenures.remove(tip_block_ch); + self.processed_tenures .insert(tip_block_id.clone(), ancestor_tenures); } else { self.processed_tenures .insert(tip_block_id.clone(), HashMap::new()); } + } else { } - let Some(tenure_infos) = self.processed_tenures.get_mut(tip_block_id) else { + let Some(tenure_infos) = self.processed_tenures.get_mut(&tip_block_id) else { unreachable!("FATAL: inserted table for chain tip, but didn't get it back"); }; @@ -224,9 +250,9 @@ impl InvGenerator { } else { // we have not loaded the tenure info for this tip, so go get it let loaded_info_opt = - InvTenureInfo::load(chainstate, tip_block_id, &tenure_id_consensus_hash)?; - tenure_infos.insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); + InvTenureInfo::load(chainstate, &tip_block_id, &tenure_id_consensus_hash)?; + tenure_infos.insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); self.cache_misses = self.cache_misses.saturating_add(1); return Ok(loaded_info_opt); } @@ -269,9 +295,11 @@ impl InvGenerator { tip: &BlockSnapshot, sortdb: &SortitionDB, chainstate: &StacksChainState, - nakamoto_tip: &StacksBlockId, + nakamoto_tip_ch: &ConsensusHash, + nakamoto_tip_bh: &BlockHeaderHash, reward_cycle: u64, ) -> Result, NetError> { + let nakamoto_tip = StacksBlockId::new(nakamoto_tip_ch, nakamoto_tip_bh); let ih = sortdb.index_handle(&tip.sortition_id); // N.B. reward_cycle_to_block_height starts at reward index 1 @@ -290,8 +318,12 @@ impl InvGenerator { let mut cur_height = reward_cycle_end_tip.block_height; let mut cur_consensus_hash = reward_cycle_end_tip.consensus_hash; - let mut cur_tenure_opt = - self.get_processed_tenure(chainstate, &nakamoto_tip, &cur_consensus_hash)?; + let mut cur_tenure_opt = self.get_processed_tenure( + chainstate, + nakamoto_tip_ch, + nakamoto_tip_bh, + &cur_consensus_hash, + )?; // loop variables and invariants: // @@ -342,7 +374,8 @@ impl InvGenerator { tenure_status.push(true); cur_tenure_opt = self.get_processed_tenure( chainstate, - &nakamoto_tip, + nakamoto_tip_ch, + nakamoto_tip_bh, &cur_tenure_info.parent_tenure_id_consensus_hash, )?; } else { @@ -363,7 +396,8 @@ impl InvGenerator { tenure_status.push(false); cur_tenure_opt = self.get_processed_tenure( chainstate, - &nakamoto_tip, + nakamoto_tip_ch, + nakamoto_tip_bh, &parent_sortition_consensus_hash, )?; } From 61713ca932aa44bba35c81d72ad7255bf2763372 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 27 Sep 2024 15:26:59 -0400 Subject: [PATCH 1240/1400] chore: API sync --- stackslib/src/net/chat.rs | 3 +- stackslib/src/net/tests/inv/nakamoto.rs | 260 +++++++++++++++++++++--- 2 files changed, 230 insertions(+), 33 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index ba0b70b1a5..1cb9c76dbe 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -1744,7 +1744,8 @@ impl ConversationP2P { &tip, sortdb, chainstate, - &network.stacks_tip.block_id(), + &network.stacks_tip.consensus_hash, + &network.stacks_tip.block_hash, reward_cycle, )?; let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools).map_err(|e| { diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index 2f60027207..951ddefe72 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -172,7 +172,8 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); - let stacks_tip = peer.network.stacks_tip.block_id(); + let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); let mut inv_generator = InvGenerator::new(); @@ -183,7 +184,14 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { // check the reward cycles for (rc, inv) in reward_cycle_invs.into_iter().enumerate() { let bitvec = inv_generator - .make_tenure_bitvector(&tip, sort_db, chainstate, &stacks_tip, rc as u64) + .make_tenure_bitvector( + &tip, + sort_db, + chainstate, + &stacks_tip_ch, + &stacks_tip_bh, + rc as u64, + ) .unwrap(); debug!( "At reward cycle {}: {:?}, mesasge = {:?}", @@ -234,7 +242,8 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); - let stacks_tip = peer.network.stacks_tip.block_id(); + let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); let mut inv_generator = InvGenerator::new(); @@ -244,7 +253,14 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { for (rc, inv) in reward_cycle_invs.into_iter().enumerate() { let bitvec = inv_generator - .make_tenure_bitvector(&tip, sort_db, chainstate, &stacks_tip, rc as u64) + .make_tenure_bitvector( + &tip, + sort_db, + chainstate, + &stacks_tip_ch, + &stacks_tip_bh, + rc as u64, + ) .unwrap(); debug!( "At reward cycle {}: {:?}, mesasge = {:?}", @@ -287,7 +303,8 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_mut().unwrap(); - let stacks_tip = peer.network.stacks_tip.block_id(); + let stacks_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); let mut inv_generator = InvGenerator::new(); @@ -297,7 +314,14 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { for (rc, inv) in reward_cycle_invs.into_iter().enumerate() { let bitvec = inv_generator - .make_tenure_bitvector(&tip, sort_db, chainstate, &stacks_tip, rc as u64) + .make_tenure_bitvector( + &tip, + sort_db, + chainstate, + &stacks_tip_ch, + &stacks_tip_bh, + rc as u64, + ) .unwrap(); debug!("At reward cycle {}: {:?}", rc, &bitvec); @@ -1119,9 +1143,13 @@ fn test_nakamoto_make_tenure_inv_in_forks() { .block_height_to_reward_cycle(first_burn_block_height, sort_tip.block_height) .unwrap(); + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); let naka_tip = peer.network.stacks_tip.block_id(); let first_naka_tip = naka_tip.clone(); let first_sort_tip = sort_tip.clone(); + let first_naka_tip_ch = naka_tip_ch.clone(); + let first_naka_tip_bh = naka_tip_bh.clone(); // find the first block in this tenure let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) @@ -1143,7 +1171,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { assert_eq!(invgen.cache_misses(), 0); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1152,7 +1187,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { assert_eq!(invgen.cache_misses(), 3); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1161,7 +1203,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { assert_eq!(invgen.cache_misses(), 3); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1173,7 +1222,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { assert_eq!(invgen.cache_misses(), 13); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1202,6 +1258,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { peer.refresh_burnchain_view(); let naka_tip = peer.network.stacks_tip.block_id(); + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let tip_rc = sortdb .pox_constants @@ -1209,7 +1267,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { .unwrap(); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1228,6 +1293,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { peer.refresh_burnchain_view(); let naka_tip = peer.network.stacks_tip.block_id(); + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); // // ---------------------- the inv generator can track multiple forks at once ---------------------- @@ -1255,7 +1322,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // load inv off of the canonical tip. // It should show a missed sortition. let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!( "test: Bits in fork on {} at rc {}: {:?}", @@ -1276,7 +1350,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { &sort_tip, &sortdb, &chainstate, - &fork_naka_block.block_id(), + &fork_naka_block.header.consensus_hash, + &fork_naka_block.header.block_hash(), tip_rc, ) .unwrap(); @@ -1306,6 +1381,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { peer.refresh_burnchain_view(); let new_naka_tip = peer.network.stacks_tip.block_id(); + let new_naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let new_naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let tip_rc = sortdb .pox_constants @@ -1319,7 +1396,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // It should show two missed sortitions, for each fork. // only one additional cache miss let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!( "test: Bits in fork on {} at rc {}: {:?}", @@ -1341,7 +1425,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { &sort_tip, &sortdb, &chainstate, - &fork_naka_block.block_id(), + &fork_naka_block.header.consensus_hash, + &fork_naka_block.header.block_hash(), tip_rc, ) .unwrap(); @@ -1363,7 +1448,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // load inv off of the canonical tip again. // It should show two missed sortitions. let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!( "test: Bits in fork on {} at rc {}: {:?}", @@ -1397,6 +1489,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { peer.mine_nakamoto_on(vec![naka_block.clone()]); } let naka_tip = peer.network.stacks_tip.block_id(); + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); // new inv generator with a search depth of 3 @@ -1408,7 +1502,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { &first_sort_tip, &sortdb, &chainstate, - &first_naka_tip, + &first_naka_tip_ch, + &first_naka_tip_bh, tip_rc, ) .unwrap(); @@ -1417,7 +1512,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // load a descendant that is 6 blocks higher let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); assert_eq!( bits, @@ -1436,7 +1538,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { &first_sort_tip, &sortdb, &chainstate, - &first_naka_tip, + &first_naka_tip_ch, + &first_naka_tip_bh, tip_rc, ) .unwrap(); @@ -1445,7 +1548,14 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // load a descendant that is 6 blocks higher let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); assert_eq!( bits, @@ -1508,6 +1618,8 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { .unwrap(); let naka_tip = peer.network.stacks_tip.block_id(); + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); let first_naka_tip = naka_tip.clone(); let first_sort_tip = sort_tip.clone(); @@ -1531,7 +1643,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 0); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1540,7 +1659,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 3); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 1, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1552,7 +1678,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 13); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 2) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 2, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 2, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1564,7 +1697,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 17); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 3) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 3, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 3, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1576,7 +1716,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 23); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 4) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 4, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 4, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1588,7 +1735,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 27); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 5) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 5, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 5, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1601,7 +1755,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { // load them all again. cache misses should remain the same. let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1610,7 +1771,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 37); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 1) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 1, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1622,7 +1790,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 37); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 2) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 2, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 2, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1634,7 +1809,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 37); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 3) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 3, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 3, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1646,7 +1828,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 37); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 4) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 4, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 4, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1658,7 +1847,14 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { assert_eq!(invgen.cache_misses(), 37); let bits = invgen - .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &naka_tip, tip_rc - 5) + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 5, + ) .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 5, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); From 95985651f33c3996c3be337570d1b20749ae5772 Mon Sep 17 00:00:00 2001 From: ASuciuX Date: Mon, 30 Sep 2024 15:30:30 +0300 Subject: [PATCH 1241/1400] add Nakamoto block heights to info logs for easier integration with log analysis tools --- stacks-signer/src/chainstate.rs | 5 +++++ stacks-signer/src/v0/signer.rs | 2 ++ 2 files changed, 7 insertions(+) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4bbb9741a5..2f9249ae89 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -194,6 +194,7 @@ impl SortitionsView { { info!( "Current miner timed out, marking as invalid."; + "block_height" => block.header.chain_length, "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, ); self.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; @@ -202,6 +203,7 @@ impl SortitionsView { if last_sortition.is_timed_out(self.config.block_proposal_timeout, signer_db)? { info!( "Last miner timed out, marking as invalid."; + "block_height" => block.header.chain_length, "last_sortition_consensus_hash" => ?last_sortition.consensus_hash, ); last_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; @@ -347,6 +349,7 @@ impl SortitionsView { "sortition_state.consensus_hash" => %sortition_state.consensus_hash, "sortition_state.prior_sortition" => %sortition_state.prior_sortition, "sortition_state.parent_tenure_id" => %sortition_state.parent_tenure_id, + "block_height" => block.header.chain_length, ); let tenures_reorged = client.get_tenure_forking_info( @@ -406,6 +409,7 @@ impl SortitionsView { "Miner is not building off of most recent tenure. A tenure they reorg has already mined blocks, but the block was poorly timed, allowing the reorg."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_height" => block.header.chain_length, "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, "violating_tenure_id" => %tenure.consensus_hash, @@ -578,6 +582,7 @@ impl SortitionsView { "Have no accepted blocks in the tenure, assuming block confirmation is correct"; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_height" => block.header.chain_length, ); return Ok(true); }; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index fa34cc4b42..510ae26e39 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -197,6 +197,7 @@ impl SignerTrait for Signer { info!( "{self}: Got block pushed message"; "block_id" => %b.block_id(), + "block_height" => b.header.chain_length, "signer_sighash" => %b.header.signer_signature_hash(), ); loop { @@ -408,6 +409,7 @@ impl Signer { "{self}: received a block proposal for a new block. Submit block for validation. "; "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), + "block_height" => block_proposal.block.header.chain_length, "burn_height" => block_proposal.burn_height, ); crate::monitoring::increment_block_proposals_received(); From 5327a59431359294fdbe142a330f9324c2fca893 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 30 Sep 2024 09:38:59 -0500 Subject: [PATCH 1242/1400] chore: fix merge artifacts --- stackslib/src/net/relay.rs | 2 +- stackslib/src/net/stackerdb/sync.rs | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 9f8bbdb011..575e96138e 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -931,7 +931,7 @@ impl Relayer { &block.header.consensus_hash, &block.header.block_hash(), &obtained_method; - "block_id" => &block.header.block_id(), + "block_id" => %block.header.block_id(), ); if fault_injection::ignore_block(block.header.chain_length, &burnchain.working_dir) { diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 6a46c19656..08e6e978ea 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -568,7 +568,6 @@ impl StackerDBSync { old_slot_id, old_version, new_inv.slot_versions[old_slot_id], - &self.smart_contract_id, ); resync = true; break; @@ -913,7 +912,6 @@ impl StackerDBSync { network.get_local_peer(), &self.smart_contract_id, &naddr, - &self.smart_contract_id, data.error_code ); if data.error_code == NackErrorCodes::StaleView @@ -1075,7 +1073,6 @@ impl StackerDBSync { network.get_local_peer(), &self.smart_contract_id, &naddr, - &self.smart_contract_id, data.error_code ); if data.error_code == NackErrorCodes::StaleView From cc326c4b4f5df74a23c2883bf7f759f4d9a1084a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 30 Sep 2024 11:10:30 -0400 Subject: [PATCH 1243/1400] chore: expand test coverage to verify that caching behavior is consistent with no-caching behavior --- stackslib/src/net/inv/nakamoto.rs | 25 +- stackslib/src/net/tests/inv/nakamoto.rs | 539 +++++++++++++++++++++++- 2 files changed, 549 insertions(+), 15 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 08cd795ad2..5a1fdc410d 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -113,6 +113,8 @@ pub struct InvGenerator { tip_ancestor_search_depth: u64, /// count cache misses for `processed_tenures` cache_misses: u128, + /// Disable caching (test only) + no_cache: bool, } impl InvGenerator { @@ -122,6 +124,18 @@ impl InvGenerator { sortitions: HashMap::new(), tip_ancestor_search_depth: TIP_ANCESTOR_SEARCH_DEPTH, cache_misses: 0, + no_cache: false, + } + } + + #[cfg(test)] + pub fn new_no_cache() -> Self { + Self { + processed_tenures: HashMap::new(), + sortitions: HashMap::new(), + tip_ancestor_search_depth: TIP_ANCESTOR_SEARCH_DEPTH, + cache_misses: 0, + no_cache: true, } } @@ -236,7 +250,6 @@ impl InvGenerator { self.processed_tenures .insert(tip_block_id.clone(), HashMap::new()); } - } else { } let Some(tenure_infos) = self.processed_tenures.get_mut(&tip_block_id) else { @@ -244,9 +257,9 @@ impl InvGenerator { }; // this tip has a known table - if let Some(loaded_tenure_info) = tenure_infos.get(tenure_id_consensus_hash) { + let ret = if let Some(loaded_tenure_info) = tenure_infos.get(tenure_id_consensus_hash) { // we've loaded this tenure info before for this tip - return Ok(loaded_tenure_info.clone()); + Ok(loaded_tenure_info.clone()) } else { // we have not loaded the tenure info for this tip, so go get it let loaded_info_opt = @@ -254,8 +267,12 @@ impl InvGenerator { tenure_infos.insert(tenure_id_consensus_hash.clone(), loaded_info_opt.clone()); self.cache_misses = self.cache_misses.saturating_add(1); - return Ok(loaded_info_opt); + Ok(loaded_info_opt) + }; + if self.no_cache { + self.processed_tenures.clear(); } + ret } /// Get sortition info, loading it from our cache if needed diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index 951ddefe72..fac9623d3f 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -176,6 +176,7 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); let mut inv_generator = InvGenerator::new(); + let mut inv_generator_no_cache = InvGenerator::new_no_cache(); // processed 10 tenures let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); @@ -193,6 +194,20 @@ fn test_nakamoto_inv_10_tenures_10_sortitions() { rc as u64, ) .unwrap(); + + let bitvec_no_cache = inv_generator_no_cache + .make_tenure_bitvector( + &tip, + sort_db, + chainstate, + &stacks_tip_ch, + &stacks_tip_bh, + rc as u64, + ) + .unwrap(); + + assert_eq!(bitvec, bitvec_no_cache); + debug!( "At reward cycle {}: {:?}, mesasge = {:?}", rc, &bitvec, &inv @@ -246,6 +261,7 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); let mut inv_generator = InvGenerator::new(); + let mut inv_generator_no_cache = InvGenerator::new_no_cache(); // processed 3 sortitions let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); @@ -262,6 +278,19 @@ fn test_nakamoto_inv_2_tenures_3_sortitions() { rc as u64, ) .unwrap(); + + let bitvec_no_cache = inv_generator_no_cache + .make_tenure_bitvector( + &tip, + sort_db, + chainstate, + &stacks_tip_ch, + &stacks_tip_bh, + rc as u64, + ) + .unwrap(); + assert_eq!(bitvec, bitvec_no_cache); + debug!( "At reward cycle {}: {:?}, mesasge = {:?}", rc, &bitvec, &inv @@ -307,6 +336,7 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { let stacks_tip_bh = peer.network.stacks_tip.block_hash.clone(); let mut inv_generator = InvGenerator::new(); + let mut inv_generator_no_cache = InvGenerator::new_no_cache(); // processed 10 tenures let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); @@ -323,6 +353,18 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { rc as u64, ) .unwrap(); + let bitvec_no_cache = inv_generator_no_cache + .make_tenure_bitvector( + &tip, + sort_db, + chainstate, + &stacks_tip_ch, + &stacks_tip_bh, + rc as u64, + ) + .unwrap(); + assert_eq!(bitvec, bitvec_no_cache); + debug!("At reward cycle {}: {:?}", rc, &bitvec); if rc <= 6 { @@ -1127,6 +1169,7 @@ fn test_nakamoto_make_tenure_inv_in_forks() { peer.mine_malleablized_blocks = false; let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); + let mut invgen_no_cache = InvGenerator::new_no_cache().with_tip_ancestor_search_depth(5); // // ---------------------- basic operations ---------------------- @@ -1180,6 +1223,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1196,6 +1251,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1231,6 +1298,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc - 1, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); @@ -1279,9 +1358,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); - // only one additional cache miss expected_bits.push(true); - expected_cache_misses += 1; + expected_cache_misses += 2; assert_eq!(bits, expected_bits); assert_eq!(invgen.cache_misses(), expected_cache_misses); @@ -1331,6 +1409,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!( "test: Bits in fork on {} at rc {}: {:?}", &naka_tip, tip_rc, &bits @@ -1341,7 +1431,7 @@ fn test_nakamoto_make_tenure_inv_in_forks() { ); assert_eq!(bits, [true, true, true, true, true, false]); - assert_eq!(invgen.cache_misses(), 17); + assert_eq!(invgen.cache_misses(), 20); // load inv off of the non-canonical tip. // it should show the last 3 canonical tenures as missing, and this forked block as present @@ -1355,6 +1445,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &fork_naka_block.header.consensus_hash, + &fork_naka_block.header.block_hash(), + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!( "test: Bits in fork on {} at rc {}: {:?}", &fork_naka_block.block_id(), @@ -1367,7 +1469,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { ); assert_eq!(bits, [true, true, false, false, false, true]); - assert_eq!(invgen.cache_misses(), 21); + debug!("cache misses = {}", invgen.cache_misses()); + assert_eq!(invgen.cache_misses(), 24); // add more to the fork peer.mine_nakamoto_on(vec![fork_naka_block.clone()]); @@ -1405,6 +1508,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!( "test: Bits in fork on {} at rc {}: {:?}", &naka_tip, tip_rc, &bits @@ -1415,11 +1530,13 @@ fn test_nakamoto_make_tenure_inv_in_forks() { ); assert_eq!(bits, [true, true, true, true, true, false, false]); - assert_eq!(invgen.cache_misses(), 22); + debug!("cache misses = {}", invgen.cache_misses()); + assert_eq!(invgen.cache_misses(), 25); // load inv off of the non-canonical tip again. // it should show the last 3 last canonical tenures as missing, and this forked block as - // present. Only one additional cache miss should manifest. + // present. Two additional cache misses should manifest, since we invalidate the common + // parent's tenure data. let bits = invgen .make_tenure_bitvector( &sort_tip, @@ -1430,6 +1547,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &fork_naka_block.header.consensus_hash, + &fork_naka_block.header.block_hash(), + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!( "test: Bits in fork on {} at rc {}: {:?}", &fork_naka_block.block_id(), @@ -1443,7 +1572,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // only one more cache miss assert_eq!(bits, [true, true, false, false, false, true, true]); - assert_eq!(invgen.cache_misses(), 23); + debug!("cache misses = {}", invgen.cache_misses()); + assert_eq!(invgen.cache_misses(), 27); // load inv off of the canonical tip again. // It should show two missed sortitions. @@ -1457,6 +1587,18 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + debug!( "test: Bits in fork on {} at rc {}: {:?}", &naka_tip, tip_rc, &bits @@ -1468,7 +1610,8 @@ fn test_nakamoto_make_tenure_inv_in_forks() { // no new cache misses assert_eq!(bits, [true, true, true, true, true, false, false]); - assert_eq!(invgen.cache_misses(), 23); + debug!("cache misses = {}", invgen.cache_misses()); + assert_eq!(invgen.cache_misses(), 27); // // ---------------------- the inv generator will search only a maximum depth before giving up ---------------------- @@ -1507,7 +1650,20 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &first_sort_tip, + &sortdb, + &chainstate, + &first_naka_tip_ch, + &first_naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + assert_eq!(bits, [true, true]); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 3); // load a descendant that is 6 blocks higher @@ -1521,12 +1677,25 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + assert_eq!( bits, [true, true, true, true, true, false, false, true, true, true] ); // all 10 tenures were loaded, because we had to search more than 5 blocks back + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 12); // new inv generator with a search depth of 10 @@ -1543,7 +1712,20 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &first_sort_tip, + &sortdb, + &chainstate, + &first_naka_tip_ch, + &first_naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + assert_eq!(bits, [true, true]); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 3); // load a descendant that is 6 blocks higher @@ -1557,13 +1739,26 @@ fn test_nakamoto_make_tenure_inv_in_forks() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); + assert_eq!(bits, bits_no_cache); + assert_eq!( bits, [true, true, true, true, true, false, false, true, true, true] ); - // reused old canonical tip information - assert_eq!(invgen.cache_misses(), 9); + // reused old canonical tip information, but still had an additional cache miss from the parent + debug!("cache misses = {}", invgen.cache_misses()); + assert_eq!(invgen.cache_misses(), 10); } #[test] @@ -1576,7 +1771,7 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { let bitvecs = vec![ // full rc vec![true, true, true, true, true, true, true, true, true, true], - // sparce rc + // sparse rc vec![ true, false, false, false, false, false, false, true, true, true, ], @@ -1605,6 +1800,7 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { peer.mine_malleablized_blocks = false; let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); + let mut invgen_no_cache = InvGenerator::new_no_cache().with_tip_ancestor_search_depth(5); let sortdb = peer.sortdb_ref().reopen().unwrap(); let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); @@ -1652,10 +1848,22 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!(bits, [true, true]); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 3); let bits = invgen @@ -1668,13 +1876,25 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 1, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 1, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, true, true, true, true, true, true, true, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 13); let bits = invgen @@ -1687,13 +1907,26 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 2, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 2, + ) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 2, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, false, false, false, false, false, false, true, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 17); let bits = invgen @@ -1706,9 +1939,21 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 3, ) .unwrap(); + + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 3, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 3, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, false, true, false, true, false, true, false, true] @@ -1725,13 +1970,27 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 4, ) .unwrap(); + + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 4, + ) + .unwrap(); + debug!("test: Bits at rc {}: {:?}", tip_rc - 4, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, true, false, false, false, false, false, false, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 27); let bits = invgen @@ -1744,13 +2003,25 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 5, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 5, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 5, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [false, false, true, true, true, true, true, true, true, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); // load them all again. cache misses should remain the same. @@ -1764,10 +2035,22 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!(bits, [true, true]); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); let bits = invgen @@ -1780,13 +2063,25 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 1, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 1, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 1, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, true, true, true, true, true, true, true, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); let bits = invgen @@ -1799,13 +2094,25 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 2, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 2, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 2, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, false, false, false, false, false, false, true, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); let bits = invgen @@ -1818,13 +2125,25 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 3, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 3, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 3, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, false, true, false, true, false, true, false, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); let bits = invgen @@ -1837,13 +2156,25 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 4, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 4, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 4, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [true, true, true, false, false, false, false, false, false, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); let bits = invgen @@ -1856,12 +2187,198 @@ fn test_nakamoto_make_tenure_inv_in_many_reward_cycles() { tip_rc - 5, ) .unwrap(); + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector( + &sort_tip, + &sortdb, + &chainstate, + &naka_tip_ch, + &naka_tip_bh, + tip_rc - 5, + ) + .unwrap(); debug!("test: Bits at rc {}: {:?}", tip_rc - 5, &bits); debug!("test: invgen.cache_misses() = {}", invgen.cache_misses()); + assert_eq!(bits, bits_no_cache); assert_eq!( bits, [false, false, true, true, true, true, true, true, true, true] ); + debug!("cache misses = {}", invgen.cache_misses()); assert_eq!(invgen.cache_misses(), 37); } + +#[test] +fn test_nakamoto_make_tenure_inv_from_old_tips() { + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full rc + // item 0 is sortition 42 + vec![true, true, true, true, true, true, true, true, true, true], + // sparse rc + // item 0 is sortition 52 + vec![ + true, false, false, false, false, false, false, true, true, true, + ], + // alternating rc + // item 0 is sortition 62 + vec![ + false, true, false, true, false, true, false, true, true, true, + ], + // sparse rc + // item 0 is sortition 72 + vec![ + false, false, false, false, false, false, true, true, true, true, + ], + // full rc + // item 0 is sortition 82 + vec![true, true, true, true, true, true, true, true, true, true], + ]; + + // compute the rc-aligned bitvecs. + // bitvecs[i][0] starts at reward cycle index 2. + // aligned_bitvecs[i][0] starts at reward cycle index 0. + let mut aligned_bitvecs = vec![vec![false, false]]; + let mut i = 2; + loop { + let bitvec_idx = (i - 2) / 10; + let bitvec_bit = (i - 2) % 10; + if bitvec_idx >= bitvecs.len() { + if let Some(ref mut last_bitvec) = aligned_bitvecs.last_mut() { + // last aligned bitvec has all `false`s + while last_bitvec.len() < 10 { + last_bitvec.push(false); + } + } + break; + } + + let aligned_bitvec_idx = i / 10; + let aligned_bitvec_bit = i % 10; + if aligned_bitvec_bit == 0 { + aligned_bitvecs.push(vec![]); + } + + let bit = bitvecs[bitvec_idx][bitvec_bit]; + aligned_bitvecs[aligned_bitvec_idx].push(bit); + + i += 1; + } + + assert_eq!( + aligned_bitvecs[0], + vec![false, false, true, true, true, true, true, true, true, true] + ); + assert_eq!( + aligned_bitvecs[1], + vec![true, true, true, false, false, false, false, false, false, true] + ); + assert_eq!( + aligned_bitvecs[2], + vec![true, true, false, true, false, true, false, true, false, true] + ); + assert_eq!( + aligned_bitvecs[3], + vec![true, true, false, false, false, false, false, false, true, true] + ); + assert_eq!( + aligned_bitvecs[4], + vec![true, true, true, true, true, true, true, true, true, true] + ); + assert_eq!( + aligned_bitvecs[5], + vec![true, true, false, false, false, false, false, false, false, false] + ); + + let (mut peer, _) = make_nakamoto_peers_from_invs_and_balances( + function_name!(), + &observer, + 10, + 3, + bitvecs.clone(), + 0, + initial_balances, + ); + peer.refresh_burnchain_view(); + peer.mine_malleablized_blocks = false; + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let mut invgen = InvGenerator::new().with_tip_ancestor_search_depth(5); + let mut invgen_no_cache = InvGenerator::new_no_cache().with_tip_ancestor_search_depth(5); + + // + // ---------------------- querying each tip will report the successive inv bits ---------------------- + // + let naka_tip = peer.network.stacks_tip.block_id(); + let mut ancestor_tips = vec![]; + let mut cursor = naka_tip.clone(); + loop { + ancestor_tips.push(cursor.clone()); + let Some(parent) = + NakamotoChainState::get_nakamoto_parent_block_id(chainstate.db(), &cursor).unwrap() + else { + break; + }; + cursor = parent; + } + // last item is an epoch2 block, which we don't care about + ancestor_tips.pop(); + ancestor_tips.reverse(); + + for tip in ancestor_tips.into_iter() { + debug!("load tip {}", &tip); + let hdr = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &tip) + .unwrap() + .unwrap(); + let tip_ch = hdr.consensus_hash; + let tip_bh = hdr.anchored_header.block_hash(); + let sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tip_ch) + .unwrap() + .unwrap(); + let rc = sortdb + .pox_constants + .block_height_to_reward_cycle(sortdb.first_block_height, sn.block_height) + .unwrap(); + let rc_start_height = sortdb + .pox_constants + .reward_cycle_to_block_height(sortdb.first_block_height, rc) + - 1; + let bits = invgen + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &tip_ch, &tip_bh, rc) + .unwrap(); + + let bits_no_cache = invgen_no_cache + .make_tenure_bitvector(&sort_tip, &sortdb, &chainstate, &tip_ch, &tip_bh, rc) + .unwrap(); + + debug!("tip {}: consensus_hash={}, burn_height={}, reward_cycle={}, bits={:?}, bits_no_cache={:?}", &tip, &tip_ch, sn.block_height, rc, &bits, &bits_no_cache); + assert_eq!(bits, bits_no_cache); + + // nakamoto starts at burn height 42, and has a reward cycle length of 10, so compute the range of bitvecs we need + assert_eq!(sortdb.pox_constants.reward_cycle_length, 10); + assert!(rc >= 4); + + let mut expected_bits = aligned_bitvecs[(rc - 4) as usize].clone(); + let from_bits = expected_bits.clone(); + + for i in (sn.block_height + 1 - rc_start_height)..10 { + expected_bits[i as usize] = false; + } + + let bit_len = bits.len(); + debug!( + "tip {}: from_bits={:?}, expected_bits={:?}, inv_bits={:?}, rc={}, block_height={}", + &tip, &from_bits, &expected_bits, &bits, rc, sn.block_height + ); + + assert_eq!(bits, expected_bits[0..bit_len]); + } +} From d805368126160a92a4f4fd0ca8046c963fd093d7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 30 Sep 2024 10:27:40 -0500 Subject: [PATCH 1244/1400] ci: "at least" means >=, not > in tests --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 17b829557f..3312b45b3a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -8486,8 +8486,10 @@ fn mock_mining() { let mock_mining_blocks_end = follower_naka_mined_blocks.load(Ordering::SeqCst); let blocks_mock_mined = mock_mining_blocks_end - mock_mining_blocks_start; assert!( - blocks_mock_mined > tenure_count, - "Should have mock mined at least `tenure_count` nakamoto blocks" + blocks_mock_mined >= tenure_count, + "Should have mock mined at least `tenure_count` nakamoto blocks. Mined = {}. Expected = {}", + blocks_mock_mined, + tenure_count, ); // wait for follower to reach the chain tip From 8b36b8852010e450b0e173bb8891a36f5c6ab6c1 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 30 Sep 2024 13:00:41 -0500 Subject: [PATCH 1245/1400] fix: need interim blocks in the nakamoto integration tests --- .github/workflows/bitcoin-tests.yml | 2 +- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 0eb676781e..cd867340ec 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -81,7 +81,7 @@ jobs: - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration - - tests::nakamoto_integrations::simple_neon_integration_with_flash_blocks_on_epoch_3 + - tests::nakamoto_integrations::flash_blocks_on_epoch_3 - tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 2bc0082fed..a7e7e8cbfe 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1718,7 +1718,7 @@ fn simple_neon_integration() { /// * 30 blocks are mined after 3.0 starts. This is enough to mine across 2 reward cycles /// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 /// * The final chain tip is a nakamoto block -fn simple_neon_integration_with_flash_blocks_on_epoch_3() { +fn flash_blocks_on_epoch_3() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -1726,7 +1726,7 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let prom_bind = format!("{}:{}", "127.0.0.1", 6000); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); From 9c89e46d39b3bf657888e713b81c5a91da7e3e5f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 30 Sep 2024 11:49:33 -0700 Subject: [PATCH 1246/1400] Do not wait for an exact number of block rejections Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/mod.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index b7f39feba6..603525a323 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -744,7 +744,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>(); - Ok(block_rejections.len() == expected_signers.len()) + Ok(block_rejections.len() >= expected_signers.len()) }) } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4d55c4d0ce..50e942a81a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4080,6 +4080,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .lock() .unwrap() .replace(Vec::new()); + wait_for(short_timeout_secs, || { Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before && signer_test From 8a8c5b09408056b533649a558c9b52fb8d44dfce Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 30 Sep 2024 12:59:30 -0700 Subject: [PATCH 1247/1400] Addressing some PR comments/other improvements to language --- CONTRIBUTING.md | 4 +- docs/branching.md | 4 +- docs/ci-workflow.md | 66 ++----------------- docs/mining.md | 2 - docs/profiling.md | 4 +- docs/release-process.md | 7 +- .../conf/mainnet-follower-conf.toml | 1 - .../stacks-node/conf/mainnet-miner-conf.toml | 3 - .../conf/mainnet-mockminer-conf.toml | 1 - 9 files changed, 16 insertions(+), 76 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 53fcf8a168..8d6c3aabba 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -50,8 +50,8 @@ For an example of this process, see PRs - Any major changes should be added to the [CHANGELOG](CHANGELOG.md). - Mention any required documentation changes in the description of your pull request. -- If adding an RPC endpoint, add an entry for the new endpoint to the - OpenAPI spec `./docs/rpc/openapi.yaml`. +- If adding or updating an RPC endpoint, ensure the change is documented in the + OpenAPI spec: [`./docs/rpc/openapi.yaml`](./docs/rpc/openapi.yaml). - If your code adds or modifies any major features (struct, trait, test, module, function, etc.), each should be documented according to our [coding guidelines](#Coding-Guidelines). diff --git a/docs/branching.md b/docs/branching.md index 5b9a96b12a..04c1e6fd3d 100644 --- a/docs/branching.md +++ b/docs/branching.md @@ -1,12 +1,12 @@ # Git Branching -The following is a modified version of the gitflow branching strategy described in +The following is a slightly modified version of the gitflow branching strategy described in ## Main Branches - **master** - `master` is the main branch where the source code of HEAD always reflects a production-ready state. - **develop** - `develop` is the branch where the source code of HEAD always reflects a state with the latest delivered development changes for the next release. -- **next** - `next` may contain consensus-breaking changes. +- **next** - `next` may contain consensus-breaking changes for a future release. - **release/X.Y.Z.A.n** is the release branch. When the source code in the develop branch reaches a stable point and is ready to be released, a release branch is created as `release/X.Y.Z.A.n` (see [release-process.md](./release-process.md)). diff --git a/docs/ci-workflow.md b/docs/ci-workflow.md index 16d020985d..0b1ed2b170 100644 --- a/docs/ci-workflow.md +++ b/docs/ci-workflow.md @@ -4,12 +4,13 @@ All releases are built via a Github Actions workflow named [`CI`](../.github/wor - Verifying code is formatted correctly - Integration tests +- Unit tests - [Mutation tests](https://en.wikipedia.org/wiki/Mutation_testing) - Creating releases - Building binary archives and calculating checksums - Publishing Docker images -1. Releases are only created when the [CI workflow](../.github/workflows/ci.yml) is triggered against a release branch (ex: `release/X.Y.Z.A.n`). +1. Releases are only created when the [CI workflow](../.github/workflows/ci.yml) is triggered against a release branch (ex: `release/X.Y.Z.A.n`, or `release/signer-X.Y.Z.A.n.x`). 2. [Caching](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows) is used to speed up testing - a cache is created based on the type of data (i.e. cargo) and the commit sha. Tests can be retried quickly since the cache will persist until the cleanup job is run or the cache is evicted. 3. [Nextest](https://nexte.st/) is used to run the tests from a cached build archive file (using commit sha as the cache key). @@ -20,7 +21,7 @@ All releases are built via a Github Actions workflow named [`CI`](../.github/wor ## TL;DR - Pushing a new branch will not trigger a workflow -- An open/re-opened/synchronized PR will produce a docker image built from source on Debian with glibc with the following tags: +- A PR that is opened/re-opened/synchronized will produce an amd64 docker image built from source on Debian with glibc with the following tags: - `stacks-core:` - `stacks-core:` - An untagged build of any branch will produce a single image built from source on Debian with glibc: @@ -29,7 +30,7 @@ All releases are built via a Github Actions workflow named [`CI`](../.github/wor - Github Release of the branch with: - Binary archives for several architectures - Checksum file containing hashes for each archive - - Tag of the `release/X.Y.Z.A.n` version, in the format of: `X.Y.Z.A.n` + - Git tag of the `release/X.Y.Z.A.n` version, in the format of: `X.Y.Z.A.n` - Docker Debian images for several architectures tagged with: - `stacks-core:latest` - `stacks-core:X.Y.Z.A.n` @@ -93,6 +94,7 @@ ex: - `Atlas Tests`: Tests related to Atlas - `Bitcoin Tests`: Tests relating to burnchain operations - `Epoch Tests`: Tests related to epoch changes +- `P2P Tests`: Tests P2P operations - `Slow Tests`: These tests have been identified as taking longer than others. The threshold used is if a test takes longer than `10 minutes` to complete successfully (or even times out intermittently), it should be added here. - `Stacks Core Tests`: - `full-genesis`: Tests related to full genesis @@ -100,7 +102,7 @@ ex: ### Checking the result of multiple tests at once -You can use the [check-jobs-status](https://github.com/stacks-network/actions/tree/main/check-jobs-status) composite action in order to check that multiple tests are successful in a workflow job. +The [check-jobs-status](https://github.com/stacks-network/actions/tree/main/check-jobs-status) composite action may be used in order to check that multiple tests are successful in a workflow job. If any of the tests given to the action (JSON string of `needs` field) fails, the step that calls the action will also fail. If you have to mark more than 1 job from the same workflow required in a ruleset, you can use this action in a separate job and only add that job as required. @@ -127,62 +129,6 @@ check-tests: summary_print: "true" ``` -## Triggering a workflow - -### Opening/Updating a PR - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Docker Image (Source)](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name and PR number as tags - - Creates the following images (where branch is named `feat/fix-something` and the PR is numbered `5446`): - - `stacks-core:feat-fix-something` - - `stacks-core:pr-5446` - ---- - -### Merging a branch to develop - -Once a PR is added to the merge queue, the target branch is merged into the source branch. -Then, the same workflows are triggered as in the [previous step](#openingupdating-a-pr). - ---- - -### Manually triggering CI workflow (any branch not named `release/X.Y.Z.A.n`) - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Docker Image (Source)](../.github/workflows/image-build-source.yml) is built from source on a debian distribution and pushed with the branch name as a tag - - Creates the following images: - - `stacks-core:` - ---- - -### Manually triggering CI workflow with tag on a release branch - -ex: running the [`CI`](../.github/workflows/ci.yml) on a branch named `release/X.Y.Z.A.n` - -- [Rust format](../.github/workflows/ci.yml) -- [Create Test Cache](../.github/workflows/create-cache.yml) -- [Stacks Core Tests](../.github/workflows/stacks-core-tests.yml) -- [Bitcoin Tests](../.github/workflows/bitcoin-tests.yml) -- [Atlas Tests](../.github/workflows/atlas-tests.yml) -- [Epoch Tests](../.github/workflows/epoch-tests.yml) -- [Slow Tests](../.github/workflows/slow-tests.yml) -- [Github release](../.github/workflows/github-release.yml) (with artifacts/checksum) is created using the manually input tag -- [Binaries built for specified architectures](../.github/workflows/create-source-binary.yml) - - Archive and checksum files will be uploaded to the versioned github release. -- [Docker Image (Binary)](../.github/workflows/image-build-binary.yml) - - Built from binaries on debian/alpine distributions and pushed with a verrsion and `latest` tags. - - Creates the following images: - - `stacks-core:X.Y.Z.A.n` - - `stacks-core:X.Y.Z.A.n-alpine` - - `stacks-core:latest` - - `stacks-core:latest-alpine` - ## Mutation Testing When a new Pull Request (PR) is submitted, this feature evaluates the quality of the tests added or modified in the PR. diff --git a/docs/mining.md b/docs/mining.md index 8b40eb8cc8..a2a914c998 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -9,8 +9,6 @@ you should make sure to add the following config fields to your [config file](.. miner = True # Bitcoin private key to spend seed = "YOUR PRIVATE KEY" -# Disable microblocks (ref: https://github.com/stacks-network/stacks-core/pull/4561 ) -mine_microblocks = false # Run as a mock-miner, to test mining without spending BTC. Needs miner=True. #mock_mining = True diff --git a/docs/profiling.md b/docs/profiling.md index 26d1c119ae..4b8343aae9 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -17,7 +17,7 @@ Validating the config file using `stacks-node check-config`: ``` $ cargo run -r -p stacks-node --bin stacks-node check-config --config testnet/stacks-node/conf/mainnet-mockminer-conf.toml -INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64]) +INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (:, release build, linux [x86_64]) INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid config! ``` @@ -28,7 +28,7 @@ Enabling debug logging using environment variable `STACKS_LOG_DEBUG=1`: $ STACKS_LOG_DEBUG=1 cargo run -r -p stacks-node --bin stacks-node check-config --config testnet/stacks-node/conf/mainnet-mockminer-conf.toml INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64]) INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml -DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("bitcoin.hiro.so"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, username: Some("blockstack"), password: Some("blockstacksystem"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } +DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("localhost"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, username: Some("btcuser"), password: Some("btcpass"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid config! ``` diff --git a/docs/release-process.md b/docs/release-process.md index 46b4bae621..b96d3d2beb 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -33,7 +33,7 @@ This repository uses a 5 part version number: ``` X.Y.Z.A.n -X major version - does not change in practice unless there’s another Stacks 2.0 type event +X major version - in practice, this does not change unless there’s another significant network update (e.g. a Stacks 3.0 type of event) Y increments on consensus-breaking changes Z increments on non-consensus-breaking changes that require a fresh chainstate (akin to semantic MAJOR) A increments on non-consensus-breaking changes that do not require a fresh chainstate, but introduce new features (akin to semantic MINOR) @@ -45,14 +45,14 @@ Optionally, an extra pre-release field may be appended to the version to specify ## Non-Consensus Breaking Release Process The release must be timed so that it does not interfere with a _prepare phase_. -The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2); to avoid interfering with the prepare phase, releases should happen at least 24 hours before the start of a new cycle. +The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/tools?tool=2); to avoid interfering with the prepare phase, all releases should happen at least 24 hours before the start of a new cycle. 1. Before creating the release, the _version number_ must be determined, where the factors that determine the version number are discussed in [Versioning](#versioning). - First determine whether there are any "non-consensus-breaking changes that require a fresh chainstate". - In other words, the database schema has changed, but an automatic migration was not implemented. - Determine whether this a feature release, as opposed to a hotfix or a patch. - - A new branch in the format `release/X.Y.Z.A.n` is created from the base branch `develop`. + - A new branch in the format `release/X.Y.Z.A.n(-rc[0-9])` is created from the base branch `develop`. 2. Enumerate PRs and/or issues that would _block_ the release. @@ -68,6 +68,7 @@ The timing of the next Stacking cycle can be found [here](https://stx.eco/dao/to - Create a chore branch from `release/X.Y.Z.A.n`, ex: `chore/X.Y.Z.A.n-changelog`. - Add summaries of all Pull Requests to the `Added`, `Changed` and `Fixed` sections. + - Pull requests merged into `develop` can be found [here](https://github.com/stacks-network/stacks-core/pulls?q=is%3Apr+is%3Aclosed+base%3Adevelop+sort%3Aupdated-desc). **Note**: GitHub does not allow sorting by _merge time_, so, when sorting by some proxy criterion, some care should be used to understand which PR's were _merged_ after the last release. diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index c309463389..291f333523 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -2,7 +2,6 @@ # working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" -bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" prometheus_bind = "0.0.0.0:9153" [burnchain] diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/testnet/stacks-node/conf/mainnet-miner-conf.toml index 4d258b33f0..1ecfbc3508 100644 --- a/testnet/stacks-node/conf/mainnet-miner-conf.toml +++ b/testnet/stacks-node/conf/mainnet-miner-conf.toml @@ -2,7 +2,6 @@ # working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* rpc_bind = "127.0.0.1:20443" p2p_bind = "127.0.0.1:20444" -bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" prometheus_bind = "127.0.0.1:9153" seed = "" local_peer_seed = "" @@ -14,8 +13,6 @@ mode = "mainnet" peer_host = "127.0.0.1" username = "" password = "" -rpc_port = -peer_port = # Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election burn_fee_cap = 20000 # Amount (in sats) per byte - Used to calculate the transaction fees diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index 8e966a8a0f..9d583d218b 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -4,7 +4,6 @@ rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" miner = true mock_mining = true -bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444,02539449ad94e6e6392d8c1deb2b4e61f80ae2a18964349bc14336d8b903c46a8c@cet.stacksnodes.org:20444,02ececc8ce79b8adf813f13a0255f8ae58d4357309ba0cedd523d9f1a306fcfb79@sgt.stacksnodes.org:20444,0303144ba518fe7a0fb56a8a7d488f950307a4330f146e1e1458fc63fb33defe96@est.stacksnodes.org:20444" prometheus_bind = "0.0.0.0:9153" [burnchain] From b461394c91b69c002d21924aa3b6ca320c87fd43 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 30 Sep 2024 16:18:39 -0400 Subject: [PATCH 1248/1400] chore: document cache maintenance --- stackslib/src/net/inv/nakamoto.rs | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 5a1fdc410d..d771848fec 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -163,7 +163,10 @@ impl InvGenerator { /// the maximum expected number of blocks to be processed in-between handling `GetNakamotoInv` /// messages. /// - /// If found, then return the ancestor block ID represented in `self.processed_tenures`. + /// If found, then return the ancestor block ID represented in `self.processed_tenures`, as + /// well as the list of any intermediate tenures between (and including) that of `tip_block_id` + /// and that of (and including) the highest-found ancestor. + /// /// If not, then return None. pub(crate) fn find_ancestor_processed_tenures( &self, @@ -232,6 +235,18 @@ impl InvGenerator { if let Some((ancestor_tip_id, intermediate_tenures)) = self.find_ancestor_processed_tenures(chainstate, &tip_block_id)? { + // The table removals here are for cache maintenance. + // + // Between successive calls to this function, the Stacks tip (identified by + // `tip_block_ch` and `tip_block_bh) can advance as more blocks are discovered. + // This means that tenures that had previously been treated as absent could now be + // present. By evicting cached data for all tenures between (and including) the + // highest ancestor of the current Stacks tip, and the current Stacks tip, we force + // this code to re-evaluate the presence or absence of each potentially-affected + // tenure. + // + // First, remove the highest ancestor's table, so we can re-assign it to the new + // tip. let mut ancestor_tenures = self .processed_tenures .remove(&ancestor_tip_id) @@ -239,11 +254,14 @@ impl InvGenerator { panic!("FATAL: did not have ancestor tip reported by search"); }); + // Clear out any intermediate cached results for tenure presence/absence, including + // both that of the highest ancestor and the current tip. for ch in intermediate_tenures.into_iter() { ancestor_tenures.remove(&ch); } ancestor_tenures.remove(tip_block_ch); + // Update the table so it is pointed to by the new tip. self.processed_tenures .insert(tip_block_id.clone(), ancestor_tenures); } else { @@ -256,12 +274,12 @@ impl InvGenerator { unreachable!("FATAL: inserted table for chain tip, but didn't get it back"); }; - // this tip has a known table let ret = if let Some(loaded_tenure_info) = tenure_infos.get(tenure_id_consensus_hash) { // we've loaded this tenure info before for this tip Ok(loaded_tenure_info.clone()) } else { - // we have not loaded the tenure info for this tip, so go get it + // we have not loaded the tenure info for this tip, or it was cleared via cache + // maintenance. Either way, got get it from disk. let loaded_info_opt = InvTenureInfo::load(chainstate, &tip_block_id, &tenure_id_consensus_hash)?; From fdcfcdf16d52fbc8dbd75282352b4b755719f9fe Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 30 Sep 2024 13:20:08 -0700 Subject: [PATCH 1249/1400] Add some logging to bitcoind test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 23 ++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 57222be981..28ddad97cf 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1227,8 +1227,9 @@ fn bitcoind_forking_test() { let pre_epoch_3_nonce = get_account(&http_origin, &miner_address).nonce; let pre_fork_tenures = 10; - for _i in 0..pre_fork_tenures { - let _mined_block = signer_test.mine_nakamoto_block(Duration::from_secs(30)); + for i in 0..pre_fork_tenures { + debug!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); + signer_test.mine_nakamoto_block(Duration::from_secs(30)); } let pre_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; @@ -1255,7 +1256,11 @@ fn bitcoind_forking_test() { thread::sleep(Duration::from_secs(15)); // we need to mine some blocks to get back to being considered a frequent miner - for _i in 0..3 { + for i in 0..3 { + debug!( + "Mining block {} of 3 to be considered a frequent miner", + i + 1 + ); let commits_count = signer_test .running_nodes .commits_submitted @@ -1278,7 +1283,8 @@ fn bitcoind_forking_test() { assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 1 * 2); - for _i in 0..5 { + for i in 0..5 { + debug!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); } @@ -1310,7 +1316,11 @@ fn bitcoind_forking_test() { thread::sleep(Duration::from_secs(15)); // we need to mine some blocks to get back to being considered a frequent miner - for _i in 0..3 { + for i in 0..3 { + debug!( + "Mining block {} of 3 to be considered a frequent miner", + i + 1 + ); let commits_count = signer_test .running_nodes .commits_submitted @@ -1333,7 +1343,8 @@ fn bitcoind_forking_test() { assert_eq!(post_fork_2_nonce, pre_fork_2_nonce - 4 * 2); - for _i in 0..5 { + for i in 0..5 { + debug!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); } From 51d600031ec4980286762b3873e87fc5f12270e7 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 30 Sep 2024 14:04:52 -0700 Subject: [PATCH 1250/1400] Fix microblocks disabled test to allow at least one rather than strictly one microblocks Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/epoch_25.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 42369b800a..2b2a9a640f 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -211,11 +211,12 @@ fn microblocks_disabled() { ); assert_eq!(account.nonce, 1); - info!( - "Microblocks assembled: {}", - test_observer::get_microblocks().len() + let microblocks_assembled = test_observer::get_microblocks().len(); + info!("Microblocks assembled: {microblocks_assembled}",); + assert!( + microblocks_assembled > 0, + "There should be at least 1 microblock assembled" ); - assert_eq!(test_observer::get_microblocks().len(), 1); let miner_nonce_before_microblock_assembly = get_account(&http_origin, &miner_account).nonce; @@ -244,8 +245,8 @@ fn microblocks_disabled() { ); assert_eq!(account.nonce, 1); - // but we should have assembled and announced at least 1 to the observer - assert!(test_observer::get_microblocks().len() >= 2); + // but we should have assembled and announced at least 1 more block to the observer + assert!(test_observer::get_microblocks().len() > microblocks_assembled); info!( "Microblocks assembled: {}", test_observer::get_microblocks().len() From 4c311bb2c9e1b5dde1e6f60534d2180a82ff14ff Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 30 Sep 2024 14:09:18 -0700 Subject: [PATCH 1251/1400] Convert logs to info in test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 28ddad97cf..f6edcc572a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1228,7 +1228,7 @@ fn bitcoind_forking_test() { let pre_fork_tenures = 10; for i in 0..pre_fork_tenures { - debug!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); + info!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); } @@ -1257,7 +1257,7 @@ fn bitcoind_forking_test() { // we need to mine some blocks to get back to being considered a frequent miner for i in 0..3 { - debug!( + info!( "Mining block {} of 3 to be considered a frequent miner", i + 1 ); @@ -1284,7 +1284,7 @@ fn bitcoind_forking_test() { assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 1 * 2); for i in 0..5 { - debug!("Mining post-fork tenure {} of 5", i + 1); + info!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); } @@ -1317,7 +1317,7 @@ fn bitcoind_forking_test() { // we need to mine some blocks to get back to being considered a frequent miner for i in 0..3 { - debug!( + info!( "Mining block {} of 3 to be considered a frequent miner", i + 1 ); @@ -1344,7 +1344,7 @@ fn bitcoind_forking_test() { assert_eq!(post_fork_2_nonce, pre_fork_2_nonce - 4 * 2); for i in 0..5 { - debug!("Mining post-fork tenure {} of 5", i + 1); + info!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); } From b1f813b31cd041dd02a21aaf9568a09090098371 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 30 Sep 2024 20:54:54 -0700 Subject: [PATCH 1252/1400] Add language around stacks 3.0 type event --- stacks-signer/release-process.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/release-process.md b/stacks-signer/release-process.md index 9d3f2cb5e1..71d47a3e26 100644 --- a/stacks-signer/release-process.md +++ b/stacks-signer/release-process.md @@ -33,7 +33,7 @@ When there are changes in-between `stacks-core` releases, the `stacks-signer` bi ``` X.Y.Z.A.n.x -X major version - does not change in practice unless there’s another Stacks 2.0 type event +X major version - in practice, this does not change unless there’s another significant network update (e.g. a Stacks 3.0 type of event) Y increments on consensus-breaking changes Z increments on non-consensus-breaking changes that require a fresh chainstate (akin to semantic MAJOR) A increments on non-consensus-breaking changes that do not require a fresh chainstate, but introduce new features (akin to semantic MINOR) From c5880c651cb793151a5ddd05f21e4d3ee13757d6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 1 Oct 2024 09:10:10 -0500 Subject: [PATCH 1253/1400] chore: remove infinite loop in signer during tests --- stacks-signer/src/v0/signer.rs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index d6eaa37af8..3fbeb9b809 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,6 +15,7 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; +use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::net::api::postblock_proposal::{ @@ -185,20 +186,13 @@ impl SignerTrait for Signer { ); } SignerMessage::BlockPushed(b) => { - let block_push_result = stacks_client.post_block(b); - if let Err(ref e) = &block_push_result { - warn!( - "{self}: Failed to post block {} (id {}): {e:?}", - &b.header.signer_signature_hash(), - &b.block_id() - ); - }; // This will infinitely loop until the block is acknowledged by the node info!( "{self}: Got block pushed message"; "block_id" => %b.block_id(), "signer_sighash" => %b.header.signer_signature_hash(), ); + let start_time = Instant::now(); loop { match stacks_client.post_block(b) { Ok(block_push_result) => { @@ -206,6 +200,11 @@ impl SignerTrait for Signer { break; } Err(e) => { + if cfg!(test) + && start_time.elapsed() > Duration::from_secs(30) + { + panic!("{self}: Timed out in test while pushing block to stacks node: {e}"); + } warn!("{self}: Failed to push block to stacks node: {e}. Retrying..."); } }; From c60f91c20f358c3593290b8719b07f22b1e62e1c Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 1 Oct 2024 10:51:16 -0500 Subject: [PATCH 1254/1400] ci: wait for block commits pointed at the correct burn block --- .../burn/operations/leader_block_commit.rs | 6 +- stackslib/src/chainstate/stacks/mod.rs | 9 ++ .../burnchains/bitcoin_regtest_controller.rs | 2 +- .../src/tests/nakamoto_integrations.rs | 13 ++- testnet/stacks-node/src/tests/signer/v0.rs | 109 +++++++++++++++--- 5 files changed, 115 insertions(+), 24 deletions(-) diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index cea03d4435..910315f082 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -48,14 +48,14 @@ use crate::net::Error as net_error; // return type from parse_data below #[derive(Debug)] -struct ParsedData { +pub struct ParsedData { block_header_hash: BlockHeaderHash, new_seed: VRFSeed, parent_block_ptr: u32, parent_vtxindex: u16, key_block_ptr: u32, key_vtxindex: u16, - burn_parent_modulus: u8, + pub burn_parent_modulus: u8, memo: u8, } @@ -201,7 +201,7 @@ impl LeaderBlockCommitOp { StacksBlockId(self.block_header_hash.0.clone()) } - fn parse_data(data: &Vec) -> Option { + pub fn parse_data(data: &[u8]) -> Option { /* Wire format: 0 2 3 35 67 71 73 77 79 80 diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 127751abbb..2ce250d991 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -36,6 +36,7 @@ use stacks_common::address::AddressHashMode; use stacks_common::codec::{ read_next, write_next, Error as codec_error, StacksMessageCodec, MAX_MESSAGE_LEN, }; +use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, StacksWorkScore, TrieHash, TRIEHASH_ENCODED_SIZE, @@ -385,6 +386,14 @@ impl Txid { pub fn from_sighash_bytes(txdata: &[u8]) -> Txid { Txid::from_stacks_tx(txdata) } + + /// Create a Txid from the tx hash bytes used in bitcoin. + /// This just reverses the inner bytes of the input. + pub fn from_bitcoin_tx_hash(tx_hash: &Sha256dHash) -> Txid { + let mut txid_bytes = tx_hash.0.clone(); + txid_bytes.reverse(); + Self(txid_bytes) + } } /// How a transaction may be appended to the Stacks blockchain diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 92390095a2..82282926d3 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -106,7 +106,7 @@ pub struct BitcoinRegtestController { #[derive(Clone)] pub struct OngoingBlockCommit { - payload: LeaderBlockCommitOp, + pub payload: LeaderBlockCommitOp, utxos: UTXOSet, fees: LeaderBlockCommitFees, txids: Vec, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 717e33c9c9..1f5d4491cc 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -592,11 +592,22 @@ pub fn next_block_and( ) -> Result<(), String> where F: FnMut() -> Result, +{ + next_block_and_controller(btc_controller, timeout_secs, |_| check()) +} + +pub fn next_block_and_controller( + btc_controller: &mut BitcoinRegtestController, + timeout_secs: u64, + mut check: F, +) -> Result<(), String> +where + F: FnMut(&mut BitcoinRegtestController) -> Result, { eprintln!("Issuing bitcoin block"); btc_controller.build_next_block(1); let start = Instant::now(); - while !check()? { + while !check(btc_controller)? { if start.elapsed() > Duration::from_secs(timeout_secs) { error!("Timed out waiting for block to process, trying to continue test"); return Err("Timed out".into()); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3f0813b024..0bcabcc658 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -27,7 +27,9 @@ use libsigner::v0::messages::{ }; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; use stacks::address::AddressHashMode; +use stacks::burnchains::Txid; use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::operations::LeaderBlockCommitOp; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; @@ -68,15 +70,16 @@ use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, setup_epoch_3_reward_set, - wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_controller, + setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, + POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, }; use crate::tests::{self, make_stacks_transfer}; -use crate::{nakamoto_node, BurnchainController, Config, Keychain}; +use crate::{nakamoto_node, BitcoinRegtestController, BurnchainController, Config, Keychain}; impl SignerTest { /// Run the test until the first epoch 2.5 reward cycle. @@ -1221,6 +1224,33 @@ fn bitcoind_forking_test() { let miner_address = Keychain::default(conf.node.seed.clone()) .origin_address(conf.is_mainnet()) .unwrap(); + let miner_pk = signer_test + .running_nodes + .btc_regtest_controller + .get_mining_pubkey() + .as_deref() + .map(Secp256k1PublicKey::from_hex) + .unwrap() + .unwrap(); + + let get_unconfirmed_commit_data = |btc_controller: &mut BitcoinRegtestController| { + let unconfirmed_utxo = btc_controller + .get_all_utxos(&miner_pk) + .into_iter() + .find(|utxo| utxo.confirmations == 0)?; + let unconfirmed_txid = Txid::from_bitcoin_tx_hash(&unconfirmed_utxo.txid); + let unconfirmed_tx = btc_controller.get_raw_transaction(&unconfirmed_txid); + let unconfirmed_tx_opreturn_bytes = unconfirmed_tx.output[0].script_pubkey.as_bytes(); + info!( + "Unconfirmed tx bytes: {}", + stacks::util::hash::to_hex(unconfirmed_tx_opreturn_bytes) + ); + let data = LeaderBlockCommitOp::parse_data( + &unconfirmed_tx_opreturn_bytes[unconfirmed_tx_opreturn_bytes.len() - 77..], + ) + .unwrap(); + Some(data) + }; signer_test.boot_to_epoch_3(); info!("------------------------- Reached Epoch 3.0 -------------------------"); @@ -1252,23 +1282,43 @@ fn bitcoind_forking_test() { .build_next_block(1); info!("Wait for block off of shallow fork"); - thread::sleep(Duration::from_secs(15)); // we need to mine some blocks to get back to being considered a frequent miner - for _i in 0..3 { + for i in 0..3 { + let current_burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + info!( + "Mining block #{i} to be considered a frequent miner"; + "current_burn_height" => current_burn_height, + ); let commits_count = signer_test .running_nodes .commits_submitted .load(Ordering::SeqCst); - next_block_and( + next_block_and_controller( &mut signer_test.running_nodes.btc_regtest_controller, 60, - || { - Ok(signer_test + |btc_controller| { + let commits_submitted = signer_test .running_nodes .commits_submitted - .load(Ordering::SeqCst) - > commits_count) + .load(Ordering::SeqCst); + if commits_submitted <= commits_count { + // wait until a commit was submitted + return Ok(false) + } + let Some(payload) = get_unconfirmed_commit_data(btc_controller) else { + warn!("Commit submitted, but bitcoin doesn't see it in the unconfirmed UTXO set, will try to wait."); + return Ok(false) + }; + let burn_parent_modulus = payload.burn_parent_modulus; + let current_modulus = u8::try_from((current_burn_height + 1) % 5).unwrap(); + info!( + "Ongoing Commit Operation check"; + "burn_parent_modulus" => burn_parent_modulus, + "current_modulus" => current_modulus, + "payload" => ?payload, + ); + Ok(burn_parent_modulus == current_modulus) }, ) .unwrap(); @@ -1306,24 +1356,44 @@ fn bitcoind_forking_test() { .btc_regtest_controller .build_next_block(4); - info!("Wait for block off of shallow fork"); - thread::sleep(Duration::from_secs(15)); + info!("Wait for block off of deep fork"); // we need to mine some blocks to get back to being considered a frequent miner - for _i in 0..3 { + for i in 0..3 { + let current_burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + info!( + "Mining block #{i} to be considered a frequent miner"; + "current_burn_height" => current_burn_height, + ); let commits_count = signer_test .running_nodes .commits_submitted .load(Ordering::SeqCst); - next_block_and( + next_block_and_controller( &mut signer_test.running_nodes.btc_regtest_controller, 60, - || { - Ok(signer_test + |btc_controller| { + let commits_submitted = signer_test .running_nodes .commits_submitted - .load(Ordering::SeqCst) - > commits_count) + .load(Ordering::SeqCst); + if commits_submitted <= commits_count { + // wait until a commit was submitted + return Ok(false) + } + let Some(payload) = get_unconfirmed_commit_data(btc_controller) else { + warn!("Commit submitted, but bitcoin doesn't see it in the unconfirmed UTXO set, will try to wait."); + return Ok(false) + }; + let burn_parent_modulus = payload.burn_parent_modulus; + let current_modulus = u8::try_from((current_burn_height + 1) % 5).unwrap(); + info!( + "Ongoing Commit Operation check"; + "burn_parent_modulus" => burn_parent_modulus, + "current_modulus" => current_modulus, + "payload" => ?payload, + ); + Ok(burn_parent_modulus == current_modulus) }, ) .unwrap(); @@ -1333,7 +1403,8 @@ fn bitcoind_forking_test() { assert_eq!(post_fork_2_nonce, pre_fork_2_nonce - 4 * 2); - for _i in 0..5 { + for i in 0..5 { + info!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30)); } From 836a97a3c176846a6a248363e6d614724ae8d0e4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 1 Oct 2024 09:28:55 -0700 Subject: [PATCH 1255/1400] CRC: remove dead code Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/stacks_client.rs | 2 -- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 0b43bef749..89a3eda2d5 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -78,8 +78,6 @@ pub struct StacksClient { #[derive(Deserialize)] struct GetStackersErrorResp { - #[allow(dead_code)] - err_type: String, err_msg: String, } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f6edcc572a..6484882dc2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1312,7 +1312,7 @@ fn bitcoind_forking_test() { .btc_regtest_controller .build_next_block(4); - info!("Wait for block off of shallow fork"); + info!("Wait for block off of deeper fork"); thread::sleep(Duration::from_secs(15)); // we need to mine some blocks to get back to being considered a frequent miner From 61eab903dfd8718998e54c4dc6a129004a80e808 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 1 Oct 2024 11:52:56 -0700 Subject: [PATCH 1256/1400] Change vec to hashset in wait_for_block_rejections Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 97e7941580..5961298f2e 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -617,7 +617,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest None, } }) - .collect::>(); - Ok(block_rejections.len() >= expected_signers.len()) + .collect::>(); + Ok(block_rejections.len() == expected_signers.len()) }) } } From 4752a906b9d2fe0af36a448fb8cdecd6dcb78b5d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 1 Oct 2024 12:08:53 -0700 Subject: [PATCH 1257/1400] Do not attempt to process a block validation response for an already globally processed block Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 11 ++++++++++- testnet/stacks-node/src/tests/signer/v0.rs | 14 ++++++++++++-- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index d6eaa37af8..3b6a2a9180 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -530,7 +530,16 @@ impl Signer { .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) { - Ok(Some(block_info)) => block_info, + Ok(Some(block_info)) => { + if block_info.state == BlockState::GloballyRejected + || block_info.state == BlockState::GloballyAccepted + { + debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); + return None; + } else { + block_info + } + } Ok(None) => { // We have not seen this block before. Why are we getting a response for it? debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6484882dc2..658d480ac6 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4001,7 +4001,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let nmb_txs = 2; + let nmb_txs = 3; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let short_timeout_secs = 20; let mut signer_test: SignerTest = SignerTest::new( @@ -4057,7 +4057,11 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); // Make half of the signers reject the block proposal by the miner to ensure its marked globally rejected - let rejecting_signers: Vec<_> = all_signers.iter().cloned().take(num_signers / 2).collect(); + let rejecting_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers / 2 + num_signers % 2) + .collect(); TEST_REJECT_ALL_BLOCK_PROPOSAL .lock() .unwrap() @@ -4066,6 +4070,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let transfer_tx = make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} to mine block N+1"); let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -4090,6 +4095,11 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .unwrap() .replace(Vec::new()); + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} to mine block N+1'"); + wait_for(short_timeout_secs, || { Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before && signer_test From 4a8e81983861cf1ffd84370a96f3d19d120f9a09 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 1 Oct 2024 14:21:30 -0500 Subject: [PATCH 1258/1400] ci: test my actions feature branch --- .github/workflows/bitcoin-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 0eb676781e..98dd19d3aa 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -154,7 +154,7 @@ jobs: - name: Run Tests id: run_tests timeout-minutes: ${{ fromJSON(env.TEST_TIMEOUT) }} - uses: stacks-network/actions/stacks-core/run-tests@main + uses: stacks-network/actions/stacks-core/run-tests@feat/no-capture-2 with: test-name: ${{ matrix.test-name }} threads: 1 From 9eb4e05c79192d9f6f11495674fd0421a8754be5 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 1 Oct 2024 17:05:49 -0400 Subject: [PATCH 1259/1400] test: remove `signer_vote_if_needed` This is not used in signer v0. Fixes `tests::nakamoto_integrations::continue_tenure_extend` --- .../src/tests/nakamoto_integrations.rs | 154 ------------------ 1 file changed, 154 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b884edc66d..fcfdc012cb 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1155,69 +1155,6 @@ pub fn is_key_set_for_cycle( Ok(key.is_some()) } -fn signer_vote_if_needed( - btc_regtest_controller: &BitcoinRegtestController, - naka_conf: &Config, - signer_sks: &[StacksPrivateKey], // TODO: Is there some way to get this from the TestSigners? - signers: &TestSigners, -) { - // When we reach the next prepare phase, submit new voting transactions - let block_height = btc_regtest_controller.get_headers_height(); - let reward_cycle = btc_regtest_controller - .get_burnchain() - .block_height_to_reward_cycle(block_height) - .unwrap(); - let prepare_phase_start = btc_regtest_controller - .get_burnchain() - .pox_constants - .prepare_phase_start( - btc_regtest_controller.get_burnchain().first_block_height, - reward_cycle, - ); - - if block_height >= prepare_phase_start { - // If the key is already set, do nothing. - if is_key_set_for_cycle( - reward_cycle + 1, - naka_conf.is_mainnet(), - &naka_conf.node.rpc_bind, - ) - .unwrap_or(false) - { - return; - } - - // If we are self-signing, then we need to vote on the aggregate public key - let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - - // Get the aggregate key - let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); - let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) - .expect("Failed to serialize aggregate public key"); - - for (i, signer_sk) in signer_sks.iter().enumerate() { - let signer_nonce = get_account(&http_origin, &to_addr(signer_sk)).nonce; - - // Vote on the aggregate public key - let voting_tx = tests::make_contract_call( - &signer_sk, - signer_nonce, - 300, - &StacksAddress::burn_address(false), - SIGNERS_VOTING_NAME, - "vote-for-aggregate-public-key", - &[ - clarity::vm::Value::UInt(i as u128), - aggregate_public_key.clone(), - clarity::vm::Value::UInt(0), - clarity::vm::Value::UInt(reward_cycle as u128 + 1), - ], - ); - submit_tx(&http_origin, &voting_tx); - } - } -} - pub fn setup_epoch_3_reward_set( naka_conf: &Config, blocks_processed: &Arc, @@ -1553,13 +1490,6 @@ fn simple_neon_integration() { &commits_submitted, ) .unwrap(); - - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); } // Submit a TX @@ -1595,13 +1525,6 @@ fn simple_neon_integration() { &commits_submitted, ) .unwrap(); - - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -1805,13 +1728,6 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { &commits_submitted, ) .unwrap(); - - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); } // Submit a TX @@ -1847,13 +1763,6 @@ fn simple_neon_integration_with_flash_blocks_on_epoch_3() { &commits_submitted, ) .unwrap(); - - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -2580,13 +2489,6 @@ fn correct_burn_outs() { &naka_conf, ); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - run_until_burnchain_height( &mut btc_regtest_controller, &blocks_processed, @@ -2646,13 +2548,6 @@ fn correct_burn_outs() { tip_sn.block_height > prior_tip, "The new burnchain tip must have been processed" ); - - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); } coord_channel @@ -4752,13 +4647,6 @@ fn forked_tenure_is_ignored() { }) .unwrap(); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - info!("Commit op is submitted; unpause Tenure B's block"); // Unpause the broadcast of Tenure B's block, do not submit commits, and do not allow blocks to @@ -6199,13 +6087,6 @@ fn signer_chainstate() { make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); submit_tx(&http_origin, &transfer_tx); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - let timer = Instant::now(); while proposals_submitted.load(Ordering::SeqCst) <= before { thread::sleep(Duration::from_millis(5)); @@ -6682,13 +6563,6 @@ fn continue_tenure_extend() { ) .unwrap(); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - wait_for(5, || { let blocks_processed = coord_channel .lock() @@ -6708,13 +6582,6 @@ fn continue_tenure_extend() { next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - wait_for(5, || { let blocks_processed = coord_channel .lock() @@ -6756,13 +6623,6 @@ fn continue_tenure_extend() { next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - wait_for(5, || { let blocks_processed = coord_channel .lock() @@ -6779,13 +6639,6 @@ fn continue_tenure_extend() { next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - wait_for(5, || { let blocks_processed = coord_channel .lock() @@ -6811,13 +6664,6 @@ fn continue_tenure_extend() { }) .unwrap(); - signer_vote_if_needed( - &btc_regtest_controller, - &naka_conf, - &[sender_signer_sk], - &signers, - ); - wait_for(5, || { let blocks_processed = coord_channel .lock() From 70b082a1a9c7b0b75feaee97c4d6e7c9ca146eac Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 1 Oct 2024 16:12:05 -0500 Subject: [PATCH 1260/1400] Revert "ci: test my actions feature branch" This reverts commit 4a8e81983861cf1ffd84370a96f3d19d120f9a09. --- .github/workflows/bitcoin-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 98dd19d3aa..0eb676781e 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -154,7 +154,7 @@ jobs: - name: Run Tests id: run_tests timeout-minutes: ${{ fromJSON(env.TEST_TIMEOUT) }} - uses: stacks-network/actions/stacks-core/run-tests@feat/no-capture-2 + uses: stacks-network/actions/stacks-core/run-tests@main with: test-name: ${{ matrix.test-name }} threads: 1 From 41048b160fe5c7e922ace69c718af29febfaf5d1 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 1 Oct 2024 19:57:23 -0500 Subject: [PATCH 1261/1400] loop forever in both post_block uses --- stacks-signer/src/client/stacks_client.rs | 25 +++++++++++++++++++ stacks-signer/src/v0/signer.rs | 29 ++--------------------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index f415896e86..8cddee08dc 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::collections::{HashMap, VecDeque}; +use std::fmt::Display; +use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; @@ -564,6 +566,29 @@ impl StacksClient { Ok(account_entry) } + /// Post a block to the stacks-node, retry forever on errors. + /// + /// In tests, this panics if the retry takes longer than 30 seconds. + pub fn post_block_until_ok(&self, log_fmt: &F, block: &NakamotoBlock) -> bool { + let start_time = Instant::now(); + loop { + match self.post_block(block) { + Ok(block_push_result) => { + debug!("{log_fmt}: Block pushed to stacks node: {block_push_result:?}"); + return block_push_result; + } + Err(e) => { + if cfg!(test) && start_time.elapsed() > Duration::from_secs(30) { + panic!( + "{log_fmt}: Timed out in test while pushing block to stacks node: {e}" + ); + } + warn!("{log_fmt}: Failed to push block to stacks node: {e}. Retrying..."); + } + }; + } + } + /// Try to post a completed nakamoto block to our connected stacks-node /// Returns `true` if the block was accepted or `false` if the block /// was rejected. diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 3fbeb9b809..29c39797cf 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,7 +15,6 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; -use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::net::api::postblock_proposal::{ @@ -192,23 +191,7 @@ impl SignerTrait for Signer { "block_id" => %b.block_id(), "signer_sighash" => %b.header.signer_signature_hash(), ); - let start_time = Instant::now(); - loop { - match stacks_client.post_block(b) { - Ok(block_push_result) => { - debug!("{self}: Block pushed to stacks node: {block_push_result:?}"); - break; - } - Err(e) => { - if cfg!(test) - && start_time.elapsed() > Duration::from_secs(30) - { - panic!("{self}: Timed out in test while pushing block to stacks node: {e}"); - } - warn!("{self}: Failed to push block to stacks node: {e}. Retrying..."); - } - }; - } + stacks_client.post_block_until_ok(self, &b); } SignerMessage::MockProposal(mock_proposal) => { let epoch = match stacks_client.get_node_epoch() { @@ -907,15 +890,7 @@ impl Signer { "{self}: Broadcasting Stacks block {} to node", &block.block_id() ); - if let Err(e) = stacks_client.post_block(&block) { - warn!( - "{self}: Failed to post block {block_hash}: {e:?}"; - "stacks_block_id" => %block.block_id(), - "parent_block_id" => %block.header.parent_block_id, - "burnchain_consensus_hash" => %block.header.consensus_hash - ); - return; - } + stacks_client.post_block_until_ok(self, &block); if let Err(e) = self.signer_db.set_block_broadcasted( self.reward_cycle, From d611aa16ee11be78ef1bf41acd8b2222c5828683 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 1 Oct 2024 23:52:05 -0400 Subject: [PATCH 1262/1400] feat: load needful data for downloading a staging block, even if it isn't processed --- .../src/chainstate/nakamoto/staging_blocks.rs | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs index 91aad5a325..382c708850 100644 --- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs +++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs @@ -271,7 +271,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { .optional()?) } - /// Get the rowid of a Nakamoto block + /// Get the rowid of a staging Nakamoto block pub fn get_nakamoto_block_rowid( &self, index_block_hash: &StacksBlockId, @@ -282,6 +282,26 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { Ok(res) } + /// Get the tenure and parent block ID of a staging block. + /// Used for downloads + pub fn get_tenure_and_parent_block_id( + &self, + index_block_hash: &StacksBlockId, + ) -> Result, ChainstateError> { + let sql = "SELECT consensus_hash,parent_block_id FROM nakamoto_staging_blocks WHERE index_block_hash = ?1"; + let args = params![index_block_hash]; + + let mut stmt = self.deref().prepare(sql)?; + Ok(stmt + .query_row(args, |row| { + let ch: ConsensusHash = row.get(0)?; + let parent_id: StacksBlockId = row.get(1)?; + + Ok((ch, parent_id)) + }) + .optional()?) + } + /// Get a Nakamoto block by index block hash, as well as its size. /// Verifies its integrity. /// Returns Ok(Some(block, size)) if the block was present From bfb8667f1e4f186bb3506138a2a50d7a52dce2c3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 1 Oct 2024 23:52:28 -0400 Subject: [PATCH 1263/1400] fix: /v3/blocks/:block_id should load staging unprocessed blocks --- stackslib/src/net/api/getblock_v3.rs | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/api/getblock_v3.rs b/stackslib/src/net/api/getblock_v3.rs index 0279d9dc0c..56e8063dda 100644 --- a/stackslib/src/net/api/getblock_v3.rs +++ b/stackslib/src/net/api/getblock_v3.rs @@ -181,20 +181,13 @@ impl RPCRequestHandler for RPCNakamotoBlockRequestHandler { let stream_res = node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { - let Some(header) = - NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &block_id)? + let Some((tenure_id, parent_block_id)) = chainstate + .nakamoto_blocks_db() + .get_tenure_and_parent_block_id(&block_id)? else { return Err(ChainError::NoSuchBlockError); }; - let Some(nakamoto_header) = header.anchored_header.as_stacks_nakamoto() else { - return Err(ChainError::NoSuchBlockError); - }; - NakamotoBlockStream::new( - chainstate, - block_id.clone(), - nakamoto_header.consensus_hash.clone(), - nakamoto_header.parent_block_id.clone(), - ) + NakamotoBlockStream::new(chainstate, block_id.clone(), tenure_id, parent_block_id) }); // start loading up the block From 5b12c2a23b3d8f8f3beabec2bd0fac646b9d71a8 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 1 Oct 2024 23:53:07 -0400 Subject: [PATCH 1264/1400] fix: if the remote peer returns blocks from an unexpected tenure, then terminate the downloader (so we can try again later) --- .../src/net/download/nakamoto/tenure_downloader.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 92e032fa38..66504e01e6 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -376,8 +376,20 @@ impl NakamotoTenureDownloader { let mut expected_block_id = block_cursor; let mut count = 0; for block in tenure_blocks.iter() { - if &block.header.block_id() != expected_block_id { + // must be from this tenure + // This may not always be the case, since a remote peer could have processed a + // different Stacks micro-fork. The consequence of erroring here (or below) is that we + // disconnect from the peer that served this to us. + if block.header.consensus_hash != self.tenure_id_consensus_hash { warn!("Unexpected Nakamoto block -- not part of tenure"; + "block.header.consensus_hash" => %block.header.consensus_hash, + "self.tenure_id_consensus_hash" => %self.tenure_id_consensus_hash, + "state" => %self.state); + return Err(NetError::InvalidMessage); + } + + if &block.header.block_id() != expected_block_id { + warn!("Unexpected Nakamoto block -- does not match cursor"; "expected_block_id" => %expected_block_id, "block_id" => %block.header.block_id(), "state" => %self.state); From 90b98f05f56caa5693dfb388eb8a092758e3b54c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 1 Oct 2024 23:53:27 -0400 Subject: [PATCH 1265/1400] chore: fix failing (broken) unit test --- stackslib/src/net/tests/download/nakamoto.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index a6307b324b..60f7aeb7fc 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -230,7 +230,7 @@ fn test_nakamoto_tenure_downloader() { }; let mut td = NakamotoTenureDownloader::new( - ch, + tenure_start_block.header.consensus_hash.clone(), tenure_start_block.header.block_id(), next_tenure_start_block.header.block_id(), naddr.clone(), @@ -293,6 +293,7 @@ fn test_nakamoto_tenure_downloader() { .try_accept_tenure_blocks(vec![next_tenure_start_block.clone()]) .is_err()); + debug!("Try accept {:?}", &block); let res = td.try_accept_tenure_blocks(vec![block.clone()]); assert!(res.is_ok()); assert!(res.unwrap().is_none()); From db105e039449a130e52be1c2a599aa6b8e039c2e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 2 Oct 2024 07:54:56 -0700 Subject: [PATCH 1266/1400] Increase pox_sync_sample_secs to 5 to be on the safe side when waiting for anchor blocks to arrive Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index fcfdc012cb..0be76c5362 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2053,7 +2053,7 @@ fn multiple_miners() { let node_2_p2p = 51025; let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.node.pox_sync_sample_secs = 1; + naka_conf.node.pox_sync_sample_secs = 5; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c0f4b2f9cf..c76d988105 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1468,7 +1468,7 @@ fn multiple_miners() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 1; + config.node.pox_sync_sample_secs = 5; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -3430,7 +3430,7 @@ fn multiple_miners_with_nakamoto_blocks() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 1; + config.node.pox_sync_sample_secs = 5; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -3693,7 +3693,7 @@ fn partial_tenure_fork() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 1; + config.node.pox_sync_sample_secs = 5; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); From 90c2596fa98a0aec2d2e761216fafa896434919e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 2 Oct 2024 10:34:32 -0500 Subject: [PATCH 1267/1400] test: add assertions for empty block heuristics --- .../src/tests/nakamoto_integrations.rs | 35 +++++++++++++++++++ .../src/tests/neon_integrations.rs | 27 ++++++++++++++ testnet/stacks-node/src/tests/signer/mod.rs | 4 ++- 3 files changed, 65 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5893d81231..e5a6c87af0 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -230,6 +230,32 @@ impl TestSigningChannel { } } +/// Assert that the block events captured by the test observer +/// all match the miner heuristic of *exclusively* including the +/// tenure change transaction in tenure changing blocks. +pub fn check_nakamoto_empty_block_heuristics() { + let blocks = test_observer::get_blocks(); + for block in blocks.iter() { + // if its not a nakamoto block, don't check anything + if block.get("miner_signature").is_none() { + continue; + } + let txs = test_observer::parse_transactions(block); + let has_tenure_change = txs + .iter() + .any(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))); + if has_tenure_change { + let only_coinbase_and_tenure_change = txs.iter().all(|tx| { + matches!( + tx.payload, + TransactionPayload::TenureChange(_) | TransactionPayload::Coinbase(..) + ) + }); + assert!(only_coinbase_and_tenure_change, "Nakamoto blocks with a tenure change in them should only have coinbase or tenure changes"); + } + } +} + pub fn get_stacker_set(http_origin: &str, cycle: u64) -> GetStackersResponse { let client = reqwest::blocking::Client::new(); let path = format!("{http_origin}/v3/stacker_set/{cycle}"); @@ -1683,6 +1709,8 @@ fn simple_neon_integration() { assert!(res.contains(&expected_result)); } + check_nakamoto_empty_block_heuristics(); + coord_channel .lock() .expect("Mutex poisoned") @@ -1960,6 +1988,7 @@ fn flash_blocks_on_epoch_3() { // Verify blocks before and after the gap test_observer::contains_burn_block_range(220..=(gap_start - 1)).unwrap(); test_observer::contains_burn_block_range((gap_end + 1)..=bhh).unwrap(); + check_nakamoto_empty_block_heuristics(); info!("Verified burn block ranges, including expected gap for flash blocks"); info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {}", epoch_3_start_height); @@ -2141,6 +2170,8 @@ fn mine_multiple_per_tenure_integration() { "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" ); + check_nakamoto_empty_block_heuristics(); + coord_channel .lock() .expect("Mutex poisoned") @@ -2394,6 +2425,8 @@ fn multiple_miners() { "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" ); + check_nakamoto_empty_block_heuristics(); + coord_channel .lock() .expect("Mutex poisoned") @@ -2761,6 +2794,8 @@ fn correct_burn_outs() { assert_eq!(signer_weight, 1, "The signer should have a weight of 1, indicating they stacked the minimum stacking amount"); } + check_nakamoto_empty_block_heuristics(); + run_loop_thread.join().unwrap(); } diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 4ec3b311d4..2c2055bad9 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -197,7 +197,10 @@ pub mod test_observer { use stacks::chainstate::stacks::boot::RewardSet; use stacks::chainstate::stacks::events::StackerDBChunksEvent; + use stacks::chainstate::stacks::StacksTransaction; + use stacks::codec::StacksMessageCodec; use stacks::net::api::postblock_proposal::BlockValidateResponse; + use stacks::util::hash::hex_bytes; use stacks_common::types::chainstate::StacksBlockId; use warp::Filter; use {tokio, warp}; @@ -572,6 +575,30 @@ pub mod test_observer { PROPOSAL_RESPONSES.lock().unwrap().clear(); } + /// Parse the StacksTransactions from a block (does not include burn ops) + /// panics on any failures to parse + pub fn parse_transactions(block: &serde_json::Value) -> Vec { + block + .get("transactions") + .unwrap() + .as_array() + .unwrap() + .iter() + .filter_map(|tx_json| { + if let Some(burnchain_op_val) = tx_json.get("burnchain_op") { + if !burnchain_op_val.is_null() { + return None; + } + } + let tx_hex = tx_json.get("raw_tx").unwrap().as_str().unwrap(); + let tx_bytes = hex_bytes(tx_hex).unwrap(); + let tx = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + Some(tx) + }) + .collect() + } + pub fn contains_burn_block_range(range: impl RangeBounds) -> Result<(), String> { // Get set of all burn block heights let burn_block_heights = get_blocks() diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 47cfa9ed8a..671aae0778 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -59,7 +59,7 @@ use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerCon use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; -use super::nakamoto_integrations::wait_for; +use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::neon::{Counters, TestFlag}; @@ -549,6 +549,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Date: Wed, 2 Oct 2024 09:50:19 -0700 Subject: [PATCH 1268/1400] Add pox_sync_sample_secs as a small positive integer to ensure we don't continue without an anchor block Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c76d988105..667d91730a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1763,6 +1763,7 @@ fn miner_forking() { config.node.local_peer_seed = btc_miner_1_seed.clone(); config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + config.node.pox_sync_sample_secs = 5; config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { From 9b2f5154d94114b84e66c6a5b9ac5e314b05d058 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 2 Oct 2024 11:10:07 -0700 Subject: [PATCH 1269/1400] Fix flakiness in problematic microblocks tests Signed-off-by: Jacinta Ferrant --- .../src/tests/neon_integrations.rs | 184 +++++++++--------- 1 file changed, 87 insertions(+), 97 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 3dd299c861..ab892f25b7 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -89,7 +89,7 @@ use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; use crate::stacks_common::types::PrivateKey; use crate::syncctl::PoxSyncWatchdogComms; -use crate::tests::nakamoto_integrations::get_key_for_cycle; +use crate::tests::nakamoto_integrations::{get_key_for_cycle, wait_for}; use crate::util::hash::{MerkleTree, Sha512Trunc256Sum}; use crate::util::secp256k1::MessageSignature; use crate::{neon, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; @@ -9909,15 +9909,15 @@ fn test_problematic_blocks_are_not_mined() { cur_files = cur_files_new; } - let tip_info = get_chain_info(&conf); + // all blocks were processed + wait_for(30, || { + let tip_info = get_chain_info(&conf); + Ok(tip_info.stacks_tip_height == old_tip_info.stacks_tip_height + 5) + }) + .expect("Failed waiting for blocks to be processed"); - // blocks were all processed - assert_eq!( - tip_info.stacks_tip_height, - old_tip_info.stacks_tip_height + 5 - ); // no blocks considered problematic - assert_eq!(all_new_files.len(), 0); + assert!(all_new_files.is_empty()); // one block contained tx_exceeds let blocks = test_observer::get_blocks(); @@ -9968,14 +9968,12 @@ fn test_problematic_blocks_are_not_mined() { btc_regtest_controller.build_next_block(1); // wait for runloop to advance - loop { - sleep_ms(1_000); + wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - if new_tip.block_height > tip.block_height { - break; - } - } + Ok(new_tip.block_height > tip.block_height) + }) + .expect("Failed waiting for blocks to be processed"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); @@ -10003,12 +10001,15 @@ fn test_problematic_blocks_are_not_mined() { cur_files = cur_files_new; } - let tip_info = get_chain_info(&conf); - // all blocks were processed - assert!(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5); + wait_for(30, || { + let tip_info = get_chain_info(&conf); + Ok(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5) + }) + .expect("Failed waiting for blocks to be processed"); + // none were problematic - assert_eq!(all_new_files.len(), 0); + assert!(all_new_files.is_empty()); // recently-submitted problematic transactions are not in the mempool // (but old ones that were already mined, and thus never considered, could still be present) @@ -10047,18 +10048,15 @@ fn test_problematic_blocks_are_not_mined() { follower_conf.node.p2p_bind, follower_conf.node.rpc_bind ); - let deadline = get_epoch_time_secs() + 300; - while get_epoch_time_secs() < deadline { + // Do not unwrap in case we were just slow + let _ = wait_for(300, || { let follower_tip_info = get_chain_info(&follower_conf); - if follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height { - break; - } eprintln!( "\nFollower is at burn block {} stacks block {}\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); - sleep_ms(1000); - } + Ok(follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height) + }); // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); @@ -10068,14 +10066,15 @@ fn test_problematic_blocks_are_not_mined() { num_download_passes + 5 ); - while num_download_passes + 5 > pox_sync_comms.get_download_passes() { - sleep_ms(1000); + wait_for(30, || { + let download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - pox_sync_comms.get_download_passes(), + "\nFollower has performed {download_passes} download passes; wait for {}\n", num_download_passes + 5 ); - } + Ok(download_passes >= num_download_passes + 5) + }) + .expect("Failed waiting for follower to perform enough download passes"); eprintln!( "\nFollower has performed {} download passes\n", @@ -10674,15 +10673,15 @@ fn test_problematic_microblocks_are_not_mined() { sleep_ms(5_000); } - let tip_info = get_chain_info(&conf); - // microblocks and blocks were all processed - assert_eq!( - tip_info.stacks_tip_height, - old_tip_info.stacks_tip_height + 5 - ); + wait_for(30, || { + let tip_info = get_chain_info(&conf); + Ok(tip_info.stacks_tip_height == old_tip_info.stacks_tip_height + 5) + }) + .expect("Failed waiting for microblocks to be processed"); + // no microblocks considered problematic - assert_eq!(all_new_files.len(), 0); + assert!(all_new_files.is_empty()); // one microblock contained tx_exceeds let microblocks = test_observer::get_microblocks(); @@ -10741,14 +10740,13 @@ fn test_problematic_microblocks_are_not_mined() { ); // wait for runloop to advance - loop { - sleep_ms(1_000); + wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - if new_tip.block_height > tip.block_height { - break; - } - } + Ok(new_tip.block_height > tip.block_height) + }) + .expect("Failed waiting for runloop to advance"); + let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); @@ -10779,13 +10777,14 @@ fn test_problematic_microblocks_are_not_mined() { } // sleep a little longer before checking tip info; this should help with test flakiness - sleep_ms(10_000); - let tip_info = get_chain_info(&conf); + wait_for(30, || { + let tip_info = get_chain_info(&conf); + Ok(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5) + }) + .expect("Failed waiting for microblocks to be processed"); - // all microblocks were processed - assert!(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5); // none were problematic - assert_eq!(all_new_files.len(), 0); + assert!(all_new_files.is_empty()); // recently-submitted problematic transactions are not in the mempool // (but old ones that were already mined, and thus never considered, could still be present) @@ -10824,18 +10823,15 @@ fn test_problematic_microblocks_are_not_mined() { follower_conf.node.p2p_bind, follower_conf.node.rpc_bind ); - let deadline = get_epoch_time_secs() + 300; - while get_epoch_time_secs() < deadline { + // Do not unwrap as we may just be slow + let _ = wait_for(300, || { let follower_tip_info = get_chain_info(&follower_conf); - if follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height { - break; - } eprintln!( "\nFollower is at burn block {} stacks block {}\n", follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, ); - sleep_ms(1000); - } + Ok(follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height) + }); // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); @@ -10845,14 +10841,15 @@ fn test_problematic_microblocks_are_not_mined() { num_download_passes + 5 ); - while num_download_passes + 5 > pox_sync_comms.get_download_passes() { - sleep_ms(1000); + wait_for(30, || { + let download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - pox_sync_comms.get_download_passes(), + "\nFollower has performed {download_passes} download passes; wait for {}\n", num_download_passes + 5 ); - } + Ok(download_passes >= num_download_passes + 5) + }) + .expect("Failed waiting for follower to perform enough download passes"); eprintln!( "\nFollower has performed {} download passes\n", @@ -11056,15 +11053,15 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { sleep_ms(5_000); } - let tip_info = get_chain_info(&conf); + // microblocks and blocks were all processed + wait_for(30, || { + let tip_info = get_chain_info(&conf); + Ok(tip_info.stacks_tip_height == old_tip_info.stacks_tip_height + 5) + }) + .expect("Failed waiting for microblocks to be processed"); - // microblocks were all processed - assert_eq!( - tip_info.stacks_tip_height, - old_tip_info.stacks_tip_height + 5 - ); // no microblocks considered problematic - assert_eq!(all_new_files.len(), 0); + assert!(all_new_files.is_empty()); // one microblock contained tx_exceeds let microblocks = test_observer::get_microblocks(); @@ -11102,14 +11099,13 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { btc_regtest_controller.build_next_block(1); // wait for runloop to advance - loop { - sleep_ms(1_000); + wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - if new_tip.block_height > tip.block_height { - break; - } - } + Ok(new_tip.block_height > tip.block_height) + }) + .expect("Failed waiting for runloop to advance"); + let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); @@ -11185,11 +11181,12 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { } // sleep a little longer before checking tip info; this should help with test flakiness - sleep_ms(10_000); - let tip_info = get_chain_info(&conf); + wait_for(30, || { + let tip_info = get_chain_info(&conf); + Ok(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5) + }) + .expect("Failed waiting for microblocks to be processed"); - // all microblocks were processed - assert!(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5); // at least one was problematic. // the miner might make multiple microblocks (only some of which are confirmed), so also check // the event observer to see that we actually picked up tx_high @@ -11244,22 +11241,15 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { follower_conf.node.p2p_bind, follower_conf.node.rpc_bind ); - let deadline = get_epoch_time_secs() + 300; - while get_epoch_time_secs() < deadline { + // Do not unwrap as we may just be slow + let _ = wait_for(300, || { let follower_tip_info = get_chain_info(&follower_conf); - if follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height - || follower_tip_info.stacks_tip_height == bad_block_height - { - break; - } eprintln!( - "\nFollower is at burn block {} stacks block {} (bad_block is {})\n", - follower_tip_info.burn_block_height, - follower_tip_info.stacks_tip_height, - bad_block_height + "\nFollower is at burn block {} stacks block {}\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, ); - sleep_ms(1000); - } + Ok(follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height) + }); // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); @@ -11269,15 +11259,15 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { num_download_passes + 5 ); - while num_download_passes + 5 > pox_sync_comms.get_download_passes() { - sleep_ms(1000); + wait_for(30, || { + let download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - pox_sync_comms.get_download_passes(), + "\nFollower has performed {download_passes} download passes; wait for {}\n", num_download_passes + 5 ); - } - + Ok(download_passes >= num_download_passes + 5) + }) + .expect("Failed waiting for follower to perform enough download passes"); eprintln!( "\nFollower has performed {} download passes\n", pox_sync_comms.get_download_passes() From d617fb3164b1550922fe33890543036d3d542abe Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 2 Oct 2024 11:21:43 -0700 Subject: [PATCH 1270/1400] Exit post_block_until_ok when testing feature is set Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/stacks_client.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 8febf4b948..c144d0401a 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -574,7 +574,9 @@ impl StacksClient { return block_push_result; } Err(e) => { - if cfg!(test) && start_time.elapsed() > Duration::from_secs(30) { + if cfg!(any(test, feature = "testing")) + && start_time.elapsed() > Duration::from_secs(30) + { panic!( "{log_fmt}: Timed out in test while pushing block to stacks node: {e}" ); From 2efa5a8075f3a442a7f540ab7f2bfe544c99f6a8 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 2 Oct 2024 11:36:39 -0700 Subject: [PATCH 1271/1400] Remove potential flaky points in microblocks_disabled Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/epoch_25.rs | 62 +++++++++++++++-------- 1 file changed, 40 insertions(+), 22 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 2b2a9a640f..4c418939a6 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -23,6 +23,7 @@ use stacks_common::types::chainstate::StacksPrivateKey; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; use crate::tests::bitcoin_regtest::BitcoinCoreController; +use crate::tests::nakamoto_integrations::wait_for; use crate::tests::neon_integrations::{ get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, @@ -169,17 +170,25 @@ fn microblocks_disabled() { submit_tx(&http_origin, &tx); // wait until just before epoch 2.5 - loop { + wait_for(30, || { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_5 - 2 { - break; + if tip_info.burn_block_height >= epoch_2_1 - 2 { + return Ok(true); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - } + Ok(false) + }) + .expect("Failed to wait until just before epoch 2.5"); + let old_tip_info = get_chain_info(&conf); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + wait_for(30, || { + let tip_info = get_chain_info(&conf); + Ok(tip_info.burn_block_height >= old_tip_info.burn_block_height + 3) + }) + .expect("Failed to process block"); info!("Test passed processing 2.5"); let account = get_account(&http_origin, &spender_1_addr); @@ -195,12 +204,15 @@ fn microblocks_disabled() { let mut last_block_height = get_chain_info(&conf).burn_block_height; for _i in 0..5 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - } else { - panic!("FATAL: failed to mine"); - } + wait_for(30, || { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + return Ok(true); + } + Ok(false) + }) + .expect("Failed to mine"); } // second transaction should not have been processed! @@ -226,12 +238,15 @@ fn microblocks_disabled() { let mut last_block_height = get_chain_info(&conf).burn_block_height; for _i in 0..2 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - } else { - panic!("FATAL: failed to mine"); - } + wait_for(30, || { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + return Ok(true); + } + Ok(false) + }) + .expect("Failed to mine"); } let miner_nonce_after_microblock_assembly = get_account(&http_origin, &miner_account).nonce; @@ -265,12 +280,15 @@ fn microblocks_disabled() { let mut last_block_height = get_chain_info(&conf).burn_block_height; for _i in 0..2 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - } else { - panic!("FATAL: failed to mine"); - } + wait_for(30, || { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + return Ok(true); + } + Ok(false) + }) + .expect("Failed to mine"); } let miner_nonce_after_microblock_confirmation = get_account(&http_origin, &miner_account).nonce; From 0b785f54315b8f8baafa90cf6e9cf391e0ad0538 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 2 Oct 2024 11:45:17 -0700 Subject: [PATCH 1272/1400] feat: refresh sortition view when proposed block has mismatch --- stacks-signer/src/chainstate.rs | 37 +++++++ stacks-signer/src/tests/chainstate.rs | 98 ++++++++++++++++--- stacks-signer/src/v0/signer.rs | 1 + .../src/tests/nakamoto_integrations.rs | 17 +++- 4 files changed, 134 insertions(+), 19 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4bbb9741a5..1ec1d2f8ef 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -187,6 +187,7 @@ impl SortitionsView { block: &NakamotoBlock, block_pk: &StacksPublicKey, reward_cycle: u64, + reset_view_if_wrong_consensus_hash: bool, ) -> Result { if self .cur_sortition @@ -236,6 +237,23 @@ impl SortitionsView { }) }) else { + if reset_view_if_wrong_consensus_hash { + info!( + "Miner block proposal has consensus hash that is neither the current or last sortition. Resetting view."; + "proposed_block_consensus_hash" => %block.header.consensus_hash, + "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, + "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), + ); + self.reset_view(client)?; + return self.check_proposal( + client, + signer_db, + block, + block_pk, + reward_cycle, + false, + ); + } warn!( "Miner block proposal has consensus hash that is neither the current or last sortition. Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, @@ -624,4 +642,23 @@ impl SortitionsView { config, }) } + + /// Reset the view to the current sortition and last sortition + pub fn reset_view(&mut self, client: &StacksClient) -> Result<(), ClientError> { + let CurrentAndLastSortition { + current_sortition, + last_sortition, + } = client.get_current_and_last_sortition()?; + + let cur_sortition = SortitionState::try_from(current_sortition)?; + let last_sortition = last_sortition + .map(SortitionState::try_from) + .transpose() + .ok() + .flatten(); + + self.cur_sortition = cur_sortition; + self.last_sortition = last_sortition; + Ok(()) + } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index a390c27edc..432325daf2 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -26,6 +26,7 @@ use blockstack_lib::chainstate::stacks::{ TransactionSpendingCondition, TransactionVersion, }; use blockstack_lib::net::api::get_tenures_fork_info::TenureForkingInfo; +use blockstack_lib::net::api::getsortition::SortitionInfo; use clarity::types::chainstate::{BurnchainHeaderHash, SortitionId}; use clarity::util::vrf::VRFProof; use libsigner::BlockProposal; @@ -128,13 +129,13 @@ fn check_proposal_units() { setup_test_environment("check_proposal_units"); assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); view.last_sortition = None; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); } @@ -150,7 +151,8 @@ fn check_proposal_miner_pkh_mismatch() { &mut signer_db, &block, &different_block_pk, - 1 + 1, + false, ) .unwrap()); @@ -161,7 +163,8 @@ fn check_proposal_miner_pkh_mismatch() { &mut signer_db, &block, &different_block_pk, - 1 + 1, + false, ) .unwrap()); } @@ -257,7 +260,7 @@ fn reorg_timing_testing( config, } = MockServerClient::new(); let h = std::thread::spawn(move || { - view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1) + view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1, false) }); header_clone.chain_length -= 1; let response = crate::client::tests::build_get_tenure_tip_response( @@ -294,16 +297,16 @@ fn check_proposal_invalid_status() { setup_test_environment("invalid_status"); block.header.consensus_hash = view.cur_sortition.consensus_hash; assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; @@ -314,7 +317,7 @@ fn check_proposal_invalid_status() { // parent blocks have been seen before, while the signer state checks are only reasoning about // stacks blocks seen by the signer, which may be a subset) assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); } @@ -363,7 +366,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); let mut extend_payload = make_tenure_change_payload(); @@ -373,7 +376,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) .unwrap()); } @@ -400,7 +403,8 @@ fn check_block_proposal_timeout() { &mut signer_db, &curr_sortition_block, &block_pk, - 1 + 1, + false, ) .unwrap()); @@ -410,7 +414,8 @@ fn check_block_proposal_timeout() { &mut signer_db, &last_sortition_block, &block_pk, - 1 + 1, + false, ) .unwrap()); @@ -422,7 +427,8 @@ fn check_block_proposal_timeout() { &mut signer_db, &curr_sortition_block, &block_pk, - 1 + 1, + false, ) .unwrap()); @@ -432,7 +438,8 @@ fn check_block_proposal_timeout() { &mut signer_db, &last_sortition_block, &block_pk, - 1 + 1, + false, ) .unwrap()); } @@ -513,3 +520,64 @@ fn check_sortition_timeout() { .is_timed_out(Duration::from_secs(1), &signer_db) .unwrap()); } + +/// Test that the sortition info is refreshed once +/// when `check_proposal` is called with a sortition view +/// that doesn't match the block proposal +#[test] +fn check_proposal_refresh() { + let (stacks_client, mut signer_db, block_pk, mut view, mut block) = + setup_test_environment("check_proposal_refresh"); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + assert!(view + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .unwrap()); + + let MockServerClient { + server, + client, + config: _, + } = MockServerClient::new(); + + let last_sortition = view.last_sortition.as_ref().unwrap(); + + let expected_result = vec![ + SortitionInfo { + burn_block_hash: last_sortition.burn_block_hash, + burn_block_height: 2, + sortition_id: SortitionId([2; 32]), + parent_sortition_id: SortitionId([1; 32]), + consensus_hash: block.header.consensus_hash, + was_sortition: true, + burn_header_timestamp: 2, + miner_pk_hash160: Some(view.cur_sortition.miner_pkh), + stacks_parent_ch: Some(view.cur_sortition.parent_tenure_id), + last_sortition_ch: Some(view.cur_sortition.parent_tenure_id), + committed_block_hash: None, + }, + SortitionInfo { + burn_block_hash: BurnchainHeaderHash([128; 32]), + burn_block_height: 1, + sortition_id: SortitionId([1; 32]), + parent_sortition_id: SortitionId([0; 32]), + consensus_hash: view.cur_sortition.parent_tenure_id, + was_sortition: true, + burn_header_timestamp: 1, + miner_pk_hash160: Some(view.cur_sortition.miner_pkh), + stacks_parent_ch: Some(view.cur_sortition.parent_tenure_id), + last_sortition_ch: Some(view.cur_sortition.parent_tenure_id), + committed_block_hash: None, + }, + ]; + + view.cur_sortition.consensus_hash = ConsensusHash([128; 20]); + let h = std::thread::spawn(move || { + view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1, true) + }); + crate::client::tests::write_response( + server, + format!("HTTP/1.1 200 Ok\n\n{}", serde_json::json!(expected_result)).as_bytes(), + ); + let result = h.join().unwrap(); + assert!(result.unwrap()); +} diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 064ccc3cd2..e68c3ca1dd 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -379,6 +379,7 @@ impl Signer { &block_proposal.block, miner_pubkey, self.reward_cycle, + true, ) { // Error validating block Err(e) => { diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 0be76c5362..722528e0ab 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6006,6 +6006,7 @@ fn signer_chainstate() { prior_tenure_first, miner_pk, reward_cycle, + true, ) .unwrap(); assert!( @@ -6020,6 +6021,7 @@ fn signer_chainstate() { block, miner_pk, reward_cycle, + true, ) .unwrap(); assert!( @@ -6056,6 +6058,7 @@ fn signer_chainstate() { &proposal.0, &proposal.1, reward_cycle, + true, ) .unwrap(); @@ -6105,6 +6108,7 @@ fn signer_chainstate() { &proposal_interim.0, &proposal_interim.1, reward_cycle, + true, ) .unwrap(); @@ -6134,6 +6138,7 @@ fn signer_chainstate() { &proposal_interim.0, &proposal_interim.1, reward_cycle, + true, ) .unwrap(); @@ -6209,7 +6214,8 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle + reward_cycle, + false, ) .unwrap(), "A sibling of a previously approved block must be rejected." @@ -6266,7 +6272,8 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle + reward_cycle, + false, ) .unwrap(), "A sibling of a previously approved block must be rejected." @@ -6329,7 +6336,8 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle + reward_cycle, + false, ) .unwrap(), "A sibling of a previously approved block must be rejected." @@ -6394,7 +6402,8 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle + reward_cycle, + false, ) .unwrap(), "A sibling of a previously approved block must be rejected." From e204f1109813a903d5d592939487eef192e54028 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 2 Oct 2024 23:49:35 -0400 Subject: [PATCH 1273/1400] feat: add a p2p session struct and use it to query nakamoto inventory vectors --- stackslib/src/main.rs | 238 +++++++++++++++++++++++++++++++++--------- 1 file changed, 186 insertions(+), 52 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 52d481affb..f45eba79f0 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -36,6 +36,8 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::File; use std::io::prelude::*; use std::io::BufReader; +use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6, TcpStream, ToSocketAddrs}; +use std::time::Duration; use std::{env, fs, io, process, thread}; use blockstack_lib::burnchains::bitcoin::{spv, BitcoinNetworkType}; @@ -62,11 +64,12 @@ use blockstack_lib::clarity::vm::ClarityVersion; use blockstack_lib::core::{MemPoolDB, *}; use blockstack_lib::cost_estimates::metrics::UnitMetric; use blockstack_lib::cost_estimates::UnitEstimator; +use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::db::LocalPeer; -use blockstack_lib::net::inv::nakamoto::InvGenerator; +use blockstack_lib::net::httpcore::{send_http_request, StacksHttpRequest}; use blockstack_lib::net::p2p::PeerNetwork; use blockstack_lib::net::relay::Relayer; -use blockstack_lib::net::{NakamotoInvData, StacksMessage}; +use blockstack_lib::net::{GetNakamotoInvData, HandshakeData, StacksMessage, StacksMessageType}; use blockstack_lib::util_lib::db::sqlite_open; use blockstack_lib::util_lib::strings::UrlString; use blockstack_lib::{clarity_cli, cli}; @@ -77,7 +80,7 @@ use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, }; -use stacks_common::types::net::PeerAddress; +use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::types::sqlite::NO_PARAMS; use stacks_common::types::MempoolCollectionBehavior; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; @@ -86,6 +89,170 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::vrf::VRFProof; use stacks_common::util::{get_epoch_time_ms, sleep_ms}; +struct P2PSession { + pub local_peer: LocalPeer, + peer_info: RPCPeerInfoData, + burn_block_hash: BurnchainHeaderHash, + stable_burn_block_hash: BurnchainHeaderHash, + tcp_socket: TcpStream, + seq: u32, +} + +impl P2PSession { + /// Make a StacksMessage. Sign it and set a sequence number. + fn make_peer_message(&mut self, payload: StacksMessageType) -> Result { + let mut msg = StacksMessage::new( + self.peer_info.peer_version, + self.peer_info.network_id, + self.peer_info.burn_block_height, + &self.burn_block_hash, + self.peer_info.stable_burn_block_height, + &self.stable_burn_block_hash, + payload, + ); + + msg.sign(self.seq, &self.local_peer.private_key) + .map_err(|e| format!("Failed to sign message {:?}: {:?}", &msg, &e))?; + self.seq = self.seq.wrapping_add(1); + + Ok(msg) + } + + /// Send a p2p message. + /// Returns error text on failure. + fn send_peer_message(&mut self, msg: StacksMessage) -> Result<(), String> { + msg.consensus_serialize(&mut self.tcp_socket) + .map_err(|e| format!("Failed to send message {:?}: {:?}", &msg, &e)) + } + + /// Receive a p2p message. + /// Returns error text on failure. + fn recv_peer_message(&mut self) -> Result { + let msg: StacksMessage = read_next(&mut self.tcp_socket) + .map_err(|e| format!("Failed to receive message: {:?}", &e))?; + Ok(msg) + } + + /// Begin a p2p session. + /// Synthesizes a LocalPeer from the remote peer's responses to /v2/info and /v2/pox. + /// Performs the initial handshake for you. + /// + /// Returns the session handle on success. + /// Returns error text on failure. + pub fn begin(peer_addr: SocketAddr, data_port: u16) -> Result { + let data_addr = match peer_addr { + SocketAddr::V4(v4addr) => { + SocketAddr::V4(SocketAddrV4::new(v4addr.ip().clone(), data_port)) + } + SocketAddr::V6(v6addr) => { + SocketAddr::V6(SocketAddrV6::new(v6addr.ip().clone(), data_port, 0, 0)) + } + }; + + // get /v2/info + let peer_info = send_http_request( + &format!("{}", data_addr.ip()), + data_addr.port(), + StacksHttpRequest::new_getinfo(PeerHost::from(data_addr.clone()), None) + .with_header("Connection".to_string(), "close".to_string()), + Duration::from_secs(60), + ) + .map_err(|e| format!("Failed to query /v2/info: {:?}", &e))? + .decode_peer_info() + .map_err(|e| format!("Failed to decode response from /v2/info: {:?}", &e))?; + + // convert `pox_consensus` and `stable_pox_consensus` into their respective burn block + // hashes + let sort_info = send_http_request( + &format!("{}", data_addr.ip()), + data_addr.port(), + StacksHttpRequest::new_get_sortition_consensus( + PeerHost::from(data_addr.clone()), + &peer_info.pox_consensus, + ) + .with_header("Connection".to_string(), "close".to_string()), + Duration::from_secs(60), + ) + .map_err(|e| format!("Failed to query /v3/sortitions: {:?}", &e))? + .decode_sortition_info() + .map_err(|e| format!("Failed to decode response from /v3/sortitions: {:?}", &e))? + .pop() + .ok_or_else(|| format!("No sortition returned for {}", &peer_info.pox_consensus))?; + + let stable_sort_info = send_http_request( + &format!("{}", data_addr.ip()), + data_addr.port(), + StacksHttpRequest::new_get_sortition_consensus( + PeerHost::from(data_addr.clone()), + &peer_info.stable_pox_consensus, + ) + .with_header("Connection".to_string(), "close".to_string()), + Duration::from_secs(60), + ) + .map_err(|e| format!("Failed to query stable /v3/sortitions: {:?}", &e))? + .decode_sortition_info() + .map_err(|e| { + format!( + "Failed to decode response from stable /v3/sortitions: {:?}", + &e + ) + })? + .pop() + .ok_or_else(|| { + format!( + "No sortition returned for {}", + &peer_info.stable_pox_consensus + ) + })?; + + let burn_block_hash = sort_info.burn_block_hash; + let stable_burn_block_hash = stable_sort_info.burn_block_hash; + + let local_peer = LocalPeer::new( + peer_info.network_id, + peer_info.parent_network_id, + PeerAddress::from_socketaddr(&peer_addr), + peer_addr.port(), + Some(StacksPrivateKey::new()), + u64::MAX, + UrlString::try_from(format!("http://127.0.0.1:{}", data_port).as_str()).unwrap(), + vec![], + ); + + let tcp_socket = TcpStream::connect(&peer_addr) + .map_err(|e| format!("Failed to open {:?}: {:?}", &peer_addr, &e))?; + + let mut session = Self { + local_peer, + peer_info, + burn_block_hash, + stable_burn_block_hash, + tcp_socket, + seq: 0, + }; + + // perform the handshake + let handshake_data = + StacksMessageType::Handshake(HandshakeData::from_local_peer(&session.local_peer)); + let handshake = session.make_peer_message(handshake_data)?; + session.send_peer_message(handshake)?; + + let resp = session.recv_peer_message()?; + match resp.payload { + StacksMessageType::HandshakeAccept(..) + | StacksMessageType::StackerDBHandshakeAccept(..) => {} + x => { + return Err(format!( + "Peer returned unexpected message (expected HandshakeAccept variant): {:?}", + &x + )); + } + } + + Ok(session) + } +} + #[cfg_attr(test, mutants::skip)] fn main() { let mut argv: Vec = env::args().collect(); @@ -975,59 +1142,26 @@ simulating a miner. process::exit(1); } - if argv[1] == "get-tenure-inv" { - let chainstate_root_path = &argv[2]; - let tip_block_ids = &argv[3..]; - let chainstate_path = format!("{}/chainstate", &chainstate_root_path); - let sortition_path = format!("{}/burnchain/sortition", &chainstate_root_path); + if argv[1] == "getnakamotoinv" { + let peer_addr: SocketAddr = argv[2].to_socket_addrs().unwrap().next().unwrap(); + let data_port: u16 = argv[3].parse().unwrap(); + let ch = ConsensusHash::from_hex(&argv[4]).unwrap(); - let (chainstate, _) = - StacksChainState::open(false, 0x80000000, &chainstate_path, None).unwrap(); - let pox_consts = - PoxConstants::new(900, 100, 80, 0, 0, u64::MAX, u64::MAX, 240, 241, 242, 242); - let sortition_db = SortitionDB::open(&sortition_path, true, pox_consts).unwrap(); - - let mut invgen = InvGenerator::new(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortition_db.conn()).unwrap(); - - for tip_block_id in tip_block_ids.iter() { - let tip_block_id = StacksBlockId::from_hex(tip_block_id).unwrap(); - let header = - NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &tip_block_id) - .unwrap() - .unwrap(); - let sn = SortitionDB::get_block_snapshot_consensus( - sortition_db.conn(), - &header.consensus_hash, - ) - .unwrap() - .unwrap(); + let mut session = P2PSession::begin(peer_addr, data_port).unwrap(); - let reward_cycle = sortition_db - .pox_constants - .block_height_to_reward_cycle(230, sn.block_height) - .unwrap(); + // send getnakamotoinv + let get_nakamoto_inv = + StacksMessageType::GetNakamotoInv(GetNakamotoInvData { consensus_hash: ch }); - let bitvec_bools = invgen - .make_tenure_bitvector( - &tip, - &sortition_db, - &chainstate, - &header.consensus_hash, - &header.anchored_header.block_hash(), - reward_cycle, - ) - .unwrap(); - let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools) - .map_err(|e| { - warn!("Failed to create a NakamotoInv response: {:?}", &e); - e - }) - .unwrap(); + let msg = session.make_peer_message(get_nakamoto_inv).unwrap(); + session.send_peer_message(msg).unwrap(); + let resp = session.recv_peer_message().unwrap(); - println!("{}: {:?}", tip_block_id, &nakamoto_inv); - } - process::exit(0); + let StacksMessageType::NakamotoInv(inv) = &resp.payload else { + panic!("Got spurious message: {:?}", &resp); + }; + + println!("{:?}", inv); } if argv[1] == "replay-chainstate" { From 3c31ca1dbfb40f44b32cdb016bd52044729635f2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 2 Oct 2024 23:49:55 -0400 Subject: [PATCH 1274/1400] chore: add helpers to make sortition requests and decode them --- stackslib/src/net/api/getsortition.rs | 31 +++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 7b594530c2..7074e70792 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -377,3 +377,34 @@ impl HttpResponse for GetSortitionHandler { Ok(HttpResponsePayload::try_from_json(sortition_info)?) } } + +impl StacksHttpRequest { + /// Make a new getinfo request to this endpoint + pub fn new_get_sortition( + host: PeerHost, + sort_key: &str, + sort_value: &str, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v3/sortitions/{}/{}", sort_key, sort_value), + HttpRequestContents::new(), + ) + .expect("FATAL: failed to construct request from infallible data") + } + + pub fn new_get_sortition_consensus(host: PeerHost, ch: &ConsensusHash) -> StacksHttpRequest { + Self::new_get_sortition(host, "consensus", &format!("{}", ch)) + } +} + +impl StacksHttpResponse { + pub fn decode_sortition_info(self) -> Result, NetError> { + let contents = self.get_http_payload_ok()?; + let response_json: serde_json::Value = contents.try_into()?; + let response: Vec = serde_json::from_value(response_json) + .map_err(|_e| Error::DecodeError(format!("Failed to decode JSON: {:?}", &_e)))?; + Ok(response) + } +} From d01baf5bb1bfd64a1f4be6f37a63b62d325d4de3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 2 Oct 2024 23:50:10 -0400 Subject: [PATCH 1275/1400] feat: with_header() constructor function for a StacksHttpRequest --- stackslib/src/net/httpcore.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 3b4bf8c9b9..c58355a6a9 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -532,6 +532,12 @@ impl StacksHttpRequest { self.preamble.add_header(hdr, value); } + /// Constructor to add headers + pub fn with_header(mut self, hdr: String, value: String) -> Self { + self.add_header(hdr, value); + self + } + /// Get a ref to all request headers pub fn get_headers(&self) -> &BTreeMap { &self.preamble.headers From 0bef8a7c0910bbfa00f1e77622473b9fe34c1a73 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 2 Oct 2024 23:50:24 -0400 Subject: [PATCH 1276/1400] fix: make no_cache a test-only variable --- stackslib/src/net/inv/nakamoto.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index d771848fec..acacc74153 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -114,6 +114,7 @@ pub struct InvGenerator { /// count cache misses for `processed_tenures` cache_misses: u128, /// Disable caching (test only) + #[cfg(test)] no_cache: bool, } @@ -124,6 +125,7 @@ impl InvGenerator { sortitions: HashMap::new(), tip_ancestor_search_depth: TIP_ANCESTOR_SEARCH_DEPTH, cache_misses: 0, + #[cfg(test)] no_cache: false, } } @@ -206,6 +208,17 @@ impl InvGenerator { Ok(None) } + #[cfg(not(test))] + fn test_clear_cache(&mut self) {} + + /// Clear the cache (test only) + #[cfg(test)] + fn test_clear_cache(&mut self) { + if self.no_cache { + self.processed_tenures.clear(); + } + } + /// Get a processed tenure. If it's not cached, then load it from disk. /// /// Loading it is expensive, so once loaded, store it with the cached processed tenure map @@ -287,9 +300,7 @@ impl InvGenerator { self.cache_misses = self.cache_misses.saturating_add(1); Ok(loaded_info_opt) }; - if self.no_cache { - self.processed_tenures.clear(); - } + self.test_clear_cache(); ret } From d76133053edaaa0fe5429e6b9618ef220cd12cbd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 2 Oct 2024 23:50:35 -0400 Subject: [PATCH 1277/1400] chroe: don't broadcast to unauthenticated peers --- stackslib/src/net/p2p.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 4338671641..d7ea9684f3 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -1050,6 +1050,9 @@ impl PeerNetwork { if let Some(event_id) = self.events.get(&nk) { let event_id = *event_id; if let Some(convo) = self.peers.get_mut(&event_id) { + if !convo.is_authenticated() { + continue; + } // safety check -- don't send to someone who has already been a relayer let mut do_relay = true; if let Some(pubkey) = convo.ref_public_key() { From 0c101e312a29da333c838e83c051b5dfb44ce303 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 3 Oct 2024 07:27:31 -0700 Subject: [PATCH 1278/1400] CRC: fix typo of 2_1 to 2_5 Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/epoch_25.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 4c418939a6..94a3edaadb 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -172,7 +172,7 @@ fn microblocks_disabled() { // wait until just before epoch 2.5 wait_for(30, || { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_1 - 2 { + if tip_info.burn_block_height >= epoch_2_5 - 2 { return Ok(true); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); From 064a223d2190a31c44cea86981d9f4068c624af0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 3 Oct 2024 14:25:13 -0400 Subject: [PATCH 1279/1400] fix: fix 5267 by making it so that any peer can be re-assigned to an idle downloader --- .../nakamoto/download_state_machine.rs | 10 ++-- .../download/nakamoto/tenure_downloader.rs | 59 ++++++++++--------- .../nakamoto/tenure_downloader_set.rs | 13 ++-- 3 files changed, 42 insertions(+), 40 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 132a03f34d..02ed8b9419 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -384,7 +384,7 @@ impl NakamotoDownloadStateMachine { &new_wanted_tenures ); self.wanted_tenures.append(&mut new_wanted_tenures); - debug!("extended wanted_tenures is now {:?}", &self.wanted_tenures); + test_debug!("extended wanted_tenures is now {:?}", &self.wanted_tenures); Ok(()) } @@ -983,9 +983,9 @@ impl NakamotoDownloadStateMachine { prev_schedule }; - debug!("new schedule: {:?}", schedule); - debug!("new available: {:?}", &available); - debug!("new tenure_block_ids: {:?}", &tenure_block_ids); + test_debug!("new schedule: {:?}", schedule); + test_debug!("new available: {:?}", &available); + test_debug!("new tenure_block_ids: {:?}", &tenure_block_ids); self.tenure_download_schedule = schedule; self.tenure_block_ids = tenure_block_ids; @@ -1023,7 +1023,7 @@ impl NakamotoDownloadStateMachine { .map(|wt| (wt.burn_height, &wt.tenure_id_consensus_hash)) .collect(); - debug!("Check availability {:?}", available); + test_debug!("Check availability {:?}", available); let mut highest_available = Vec::with_capacity(2); for (_, ch) in tenure_block_heights.iter().rev() { let available_count = available diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 92e032fa38..e309072f84 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -66,16 +66,18 @@ use crate::util_lib::db::{DBConn, Error as DBError}; /// start and end block. This includes all tenures except for the two most recent ones. #[derive(Debug, Clone, PartialEq)] pub enum NakamotoTenureDownloadState { - /// Getting the tenure-start block (the given StacksBlockId is it's block ID). - GetTenureStartBlock(StacksBlockId), + /// Getting the tenure-start block (the given StacksBlockId is it's block ID), as well as the + /// millisecond epoch timestamp at which the request began + GetTenureStartBlock(StacksBlockId, u128), /// Getting the tenure-end block. - /// - /// The field here is the block ID of the tenure end block. - GetTenureEndBlock(StacksBlockId), + /// The fields here are the block ID of the tenure end block, as well as the millisecond epoch + /// timestamp at which the request begahn + GetTenureEndBlock(StacksBlockId, u128), /// Receiving tenure blocks. - /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This - /// is because a tenure is fetched in order from highest block to lowest block. - GetTenureBlocks(StacksBlockId), + /// The fields here are the hash of the _last_ block in the tenure that must be downloaded, as well + /// as the millisecond epoch timestamp at which the request began. The first field is needed + /// because a tenure is fetched in order from highest block to lowest block. + GetTenureBlocks(StacksBlockId, u128), /// We have gotten all the blocks for this tenure Done, } @@ -166,7 +168,7 @@ impl NakamotoTenureDownloader { start_signer_keys, end_signer_keys, idle: false, - state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), + state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone(), get_epoch_time_ms()), tenure_start_block: None, tenure_end_block: None, tenure_blocks: None, @@ -187,7 +189,7 @@ impl NakamotoTenureDownloader { &mut self, tenure_start_block: NakamotoBlock, ) -> Result<(), NetError> { - let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { + let NakamotoTenureDownloadState::GetTenureStartBlock(..) = &self.state else { // not the right state for this warn!("Invalid state for this method"; "state" => %self.state); @@ -235,7 +237,7 @@ impl NakamotoTenureDownloader { } else { // need to get tenure_end_block. self.state = - NakamotoTenureDownloadState::GetTenureEndBlock(self.tenure_end_block_id.clone()); + NakamotoTenureDownloadState::GetTenureEndBlock(self.tenure_end_block_id.clone(), get_epoch_time_ms()); } Ok(()) } @@ -252,7 +254,7 @@ impl NakamotoTenureDownloader { ) -> Result<(), NetError> { if !matches!( &self.state, - NakamotoTenureDownloadState::GetTenureEndBlock(_) + NakamotoTenureDownloadState::GetTenureEndBlock(..) ) { warn!("Invalid state for this method"; "state" => %self.state); @@ -326,6 +328,7 @@ impl NakamotoTenureDownloader { self.tenure_end_block = Some(tenure_end_block.clone()); self.state = NakamotoTenureDownloadState::GetTenureBlocks( tenure_end_block.header.parent_block_id.clone(), + get_epoch_time_ms() ); Ok(()) } @@ -361,7 +364,7 @@ impl NakamotoTenureDownloader { &mut self, mut tenure_blocks: Vec, ) -> Result>, NetError> { - let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { + let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor, start_request_time) = &self.state else { warn!("Invalid state for this method"; "state" => %self.state); return Err(NetError::InvalidState); @@ -461,7 +464,7 @@ impl NakamotoTenureDownloader { &earliest_block.block_id(), &next_block_id ); - self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); + self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id, *start_request_time); return Ok(None); } @@ -486,16 +489,16 @@ impl NakamotoTenureDownloader { peerhost: PeerHost, ) -> Result, ()> { let request = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { - debug!("Request tenure-start block {}", &start_block_id); + NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id, start_request_time) => { + debug!("Request tenure-start block {} at {}", &start_block_id, start_request_time); StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) } - NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { - debug!("Request tenure-end block {}", &end_block_id); + NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id, start_request_time) => { + debug!("Request tenure-end block {} at {}", &end_block_id, start_request_time); StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) } - NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { - debug!("Downloading tenure ending at {}", &end_block_id); + NakamotoTenureDownloadState::GetTenureBlocks(end_block_id, start_request_time) => { + debug!("Downloading tenure ending at {} at {}", &end_block_id, start_request_time); StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) } NakamotoTenureDownloadState::Done => { @@ -558,10 +561,10 @@ impl NakamotoTenureDownloader { response: StacksHttpResponse, ) -> Result>, NetError> { let handle_result = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { + NakamotoTenureDownloadState::GetTenureStartBlock(_block_id, _start_request_time) => { debug!( - "Got download response for tenure-start block {}", - &_block_id + "Got download response for tenure-start block {} in {}ms", + &_block_id, get_epoch_time_ms().saturating_sub(_start_request_time) ); let block = response.decode_nakamoto_block().map_err(|e| { warn!("Failed to decode response for a Nakamoto block: {:?}", &e); @@ -570,8 +573,8 @@ impl NakamotoTenureDownloader { self.try_accept_tenure_start_block(block)?; Ok(None) } - NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { - debug!("Got download response to tenure-end block {}", &_block_id); + NakamotoTenureDownloadState::GetTenureEndBlock(_block_id, _start_request_time) => { + debug!("Got download response to tenure-end block {} in {}ms", &_block_id, get_epoch_time_ms().saturating_sub(_start_request_time)); let block = response.decode_nakamoto_block().map_err(|e| { warn!("Failed to decode response for a Nakamoto block: {:?}", &e); e @@ -579,10 +582,10 @@ impl NakamotoTenureDownloader { self.try_accept_tenure_end_block(&block)?; Ok(None) } - NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { + NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id, _start_request_time) => { debug!( - "Got download response for tenure blocks ending at {}", - &_end_block_id + "Got download response for tenure blocks ending at {} in {}ms", + &_end_block_id, get_epoch_time_ms().saturating_sub(_start_request_time) ); let blocks = response.decode_nakamoto_tenure().map_err(|e| { warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 74ff83460d..32d45667cc 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -230,13 +230,11 @@ impl NakamotoTenureDownloaderSet { if !downloader.idle { continue; } - if downloader.naddr != naddr { - continue; - } debug!( "Assign peer {} to work on downloader for {} in state {}", &naddr, &downloader.tenure_id_consensus_hash, &downloader.state ); + downloader.naddr = naddr.clone(); self.peers.insert(naddr, i); return true; } @@ -308,8 +306,8 @@ impl NakamotoTenureDownloaderSet { }; if &downloader.tenure_id_consensus_hash == tenure_id { debug!( - "Have downloader for tenure {} already (idle={}, state={})", - tenure_id, downloader.idle, &downloader.state + "Have downloader for tenure {} already (idle={}, state={}, naddr={})", + tenure_id, downloader.idle, &downloader.state, &downloader.naddr ); return true; } @@ -328,7 +326,7 @@ impl NakamotoTenureDownloaderSet { count: usize, current_reward_cycles: &BTreeMap, ) { - debug!("make_tenure_downloaders"; + test_debug!("make_tenure_downloaders"; "schedule" => ?schedule, "available" => ?available, "tenure_block_ids" => ?tenure_block_ids, @@ -463,7 +461,7 @@ impl NakamotoTenureDownloaderSet { continue; }; if downloader.is_done() { - debug!("Downloader for {} is done", &naddr); + debug!("Downloader for {} on tenure {} is finished", &naddr, &downloader.tenure_id_consensus_hash); finished.push(naddr.clone()); finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); continue; @@ -534,6 +532,7 @@ impl NakamotoTenureDownloaderSet { ); new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); if downloader.is_done() { + debug!("Downloader for {} on tenure {} is finished", &naddr, &downloader.tenure_id_consensus_hash); finished.push(naddr.clone()); finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); continue; From 1947fc62ce5c7e445ab25d4a96fb1ebb8390a23f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 3 Oct 2024 15:01:28 -0400 Subject: [PATCH 1280/1400] chore: cargo fmt --- .../download/nakamoto/tenure_downloader.rs | 52 ++++++++++++++----- .../nakamoto/tenure_downloader_set.rs | 10 +++- 2 files changed, 47 insertions(+), 15 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index e309072f84..c11e9d42dd 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -168,7 +168,10 @@ impl NakamotoTenureDownloader { start_signer_keys, end_signer_keys, idle: false, - state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone(), get_epoch_time_ms()), + state: NakamotoTenureDownloadState::GetTenureStartBlock( + tenure_start_block_id.clone(), + get_epoch_time_ms(), + ), tenure_start_block: None, tenure_end_block: None, tenure_blocks: None, @@ -236,8 +239,10 @@ impl NakamotoTenureDownloader { self.try_accept_tenure_end_block(&tenure_end_block)?; } else { // need to get tenure_end_block. - self.state = - NakamotoTenureDownloadState::GetTenureEndBlock(self.tenure_end_block_id.clone(), get_epoch_time_ms()); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock( + self.tenure_end_block_id.clone(), + get_epoch_time_ms(), + ); } Ok(()) } @@ -328,7 +333,7 @@ impl NakamotoTenureDownloader { self.tenure_end_block = Some(tenure_end_block.clone()); self.state = NakamotoTenureDownloadState::GetTenureBlocks( tenure_end_block.header.parent_block_id.clone(), - get_epoch_time_ms() + get_epoch_time_ms(), ); Ok(()) } @@ -364,7 +369,9 @@ impl NakamotoTenureDownloader { &mut self, mut tenure_blocks: Vec, ) -> Result>, NetError> { - let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor, start_request_time) = &self.state else { + let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor, start_request_time) = + &self.state + else { warn!("Invalid state for this method"; "state" => %self.state); return Err(NetError::InvalidState); @@ -464,7 +471,8 @@ impl NakamotoTenureDownloader { &earliest_block.block_id(), &next_block_id ); - self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id, *start_request_time); + self.state = + NakamotoTenureDownloadState::GetTenureBlocks(next_block_id, *start_request_time); return Ok(None); } @@ -489,16 +497,28 @@ impl NakamotoTenureDownloader { peerhost: PeerHost, ) -> Result, ()> { let request = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id, start_request_time) => { - debug!("Request tenure-start block {} at {}", &start_block_id, start_request_time); + NakamotoTenureDownloadState::GetTenureStartBlock( + start_block_id, + start_request_time, + ) => { + debug!( + "Request tenure-start block {} at {}", + &start_block_id, start_request_time + ); StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) } NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id, start_request_time) => { - debug!("Request tenure-end block {} at {}", &end_block_id, start_request_time); + debug!( + "Request tenure-end block {} at {}", + &end_block_id, start_request_time + ); StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) } NakamotoTenureDownloadState::GetTenureBlocks(end_block_id, start_request_time) => { - debug!("Downloading tenure ending at {} at {}", &end_block_id, start_request_time); + debug!( + "Downloading tenure ending at {} at {}", + &end_block_id, start_request_time + ); StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) } NakamotoTenureDownloadState::Done => { @@ -564,7 +584,8 @@ impl NakamotoTenureDownloader { NakamotoTenureDownloadState::GetTenureStartBlock(_block_id, _start_request_time) => { debug!( "Got download response for tenure-start block {} in {}ms", - &_block_id, get_epoch_time_ms().saturating_sub(_start_request_time) + &_block_id, + get_epoch_time_ms().saturating_sub(_start_request_time) ); let block = response.decode_nakamoto_block().map_err(|e| { warn!("Failed to decode response for a Nakamoto block: {:?}", &e); @@ -574,7 +595,11 @@ impl NakamotoTenureDownloader { Ok(None) } NakamotoTenureDownloadState::GetTenureEndBlock(_block_id, _start_request_time) => { - debug!("Got download response to tenure-end block {} in {}ms", &_block_id, get_epoch_time_ms().saturating_sub(_start_request_time)); + debug!( + "Got download response to tenure-end block {} in {}ms", + &_block_id, + get_epoch_time_ms().saturating_sub(_start_request_time) + ); let block = response.decode_nakamoto_block().map_err(|e| { warn!("Failed to decode response for a Nakamoto block: {:?}", &e); e @@ -585,7 +610,8 @@ impl NakamotoTenureDownloader { NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id, _start_request_time) => { debug!( "Got download response for tenure blocks ending at {} in {}ms", - &_end_block_id, get_epoch_time_ms().saturating_sub(_start_request_time) + &_end_block_id, + get_epoch_time_ms().saturating_sub(_start_request_time) ); let blocks = response.decode_nakamoto_tenure().map_err(|e| { warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 32d45667cc..160bad309e 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -461,7 +461,10 @@ impl NakamotoTenureDownloaderSet { continue; }; if downloader.is_done() { - debug!("Downloader for {} on tenure {} is finished", &naddr, &downloader.tenure_id_consensus_hash); + debug!( + "Downloader for {} on tenure {} is finished", + &naddr, &downloader.tenure_id_consensus_hash + ); finished.push(naddr.clone()); finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); continue; @@ -532,7 +535,10 @@ impl NakamotoTenureDownloaderSet { ); new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); if downloader.is_done() { - debug!("Downloader for {} on tenure {} is finished", &naddr, &downloader.tenure_id_consensus_hash); + debug!( + "Downloader for {} on tenure {} is finished", + &naddr, &downloader.tenure_id_consensus_hash + ); finished.push(naddr.clone()); finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); continue; From dca3d5a7c30f4b7540831d4163ed3229e9f146ce Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 3 Oct 2024 15:11:26 -0400 Subject: [PATCH 1281/1400] chore: address PR feedback --- stackslib/src/main.rs | 20 +++++++++++--------- stackslib/src/net/api/getsortition.rs | 4 ++-- stackslib/src/net/inv/nakamoto.rs | 2 +- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index f45eba79f0..98315cffa8 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -36,7 +36,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::File; use std::io::prelude::*; use std::io::BufReader; -use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6, TcpStream, ToSocketAddrs}; +use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; use std::time::Duration; use std::{env, fs, io, process, thread}; @@ -140,14 +140,8 @@ impl P2PSession { /// Returns the session handle on success. /// Returns error text on failure. pub fn begin(peer_addr: SocketAddr, data_port: u16) -> Result { - let data_addr = match peer_addr { - SocketAddr::V4(v4addr) => { - SocketAddr::V4(SocketAddrV4::new(v4addr.ip().clone(), data_port)) - } - SocketAddr::V6(v6addr) => { - SocketAddr::V6(SocketAddrV6::new(v6addr.ip().clone(), data_port, 0, 0)) - } - }; + let mut data_addr = peer_addr.clone(); + data_addr.set_port(data_port); // get /v2/info let peer_info = send_http_request( @@ -1143,6 +1137,14 @@ simulating a miner. } if argv[1] == "getnakamotoinv" { + if argv.len() < 5 { + eprintln!( + "Usage: {} getnakamotoinv HOST:PORT DATA_PORT CONSENSUS_HASH", + &argv[0] + ); + process::exit(1); + } + let peer_addr: SocketAddr = argv[2].to_socket_addrs().unwrap().next().unwrap(); let data_port: u16 = argv[3].parse().unwrap(); let ch = ConsensusHash::from_hex(&argv[4]).unwrap(); diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 36e00c4ec5..28298eab42 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -392,14 +392,14 @@ impl StacksHttpRequest { StacksHttpRequest::new_for_peer( host, "GET".into(), - format!("/v3/sortitions/{}/{}", sort_key, sort_value), + format!("{}/{}/{}", RPC_SORTITION_INFO_PATH, sort_key, sort_value), HttpRequestContents::new(), ) .expect("FATAL: failed to construct request from infallible data") } pub fn new_get_sortition_consensus(host: PeerHost, ch: &ConsensusHash) -> StacksHttpRequest { - Self::new_get_sortition(host, "consensus", &format!("{}", ch)) + Self::new_get_sortition(host, "consensus", &ch.to_string()) } } diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index acacc74153..8971a8230f 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -251,7 +251,7 @@ impl InvGenerator { // The table removals here are for cache maintenance. // // Between successive calls to this function, the Stacks tip (identified by - // `tip_block_ch` and `tip_block_bh) can advance as more blocks are discovered. + // `tip_block_ch` and `tip_block_bh`) can advance as more blocks are discovered. // This means that tenures that had previously been treated as absent could now be // present. By evicting cached data for all tenures between (and including) the // highest ancestor of the current Stacks tip, and the current Stacks tip, we force From 6b459a15a8e237629170acdaf4e834f056fac035 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 3 Oct 2024 15:41:17 -0400 Subject: [PATCH 1282/1400] fix: compile issues in tests, and don't use _ --- .../download/nakamoto/tenure_downloader.rs | 18 ++++---- stackslib/src/net/tests/download/nakamoto.rs | 41 +++++++++++++++---- 2 files changed, 41 insertions(+), 18 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index c11e9d42dd..63a622a424 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -581,11 +581,11 @@ impl NakamotoTenureDownloader { response: StacksHttpResponse, ) -> Result>, NetError> { let handle_result = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(_block_id, _start_request_time) => { + NakamotoTenureDownloadState::GetTenureStartBlock(block_id, start_request_time) => { debug!( "Got download response for tenure-start block {} in {}ms", - &_block_id, - get_epoch_time_ms().saturating_sub(_start_request_time) + &block_id, + get_epoch_time_ms().saturating_sub(start_request_time) ); let block = response.decode_nakamoto_block().map_err(|e| { warn!("Failed to decode response for a Nakamoto block: {:?}", &e); @@ -594,11 +594,11 @@ impl NakamotoTenureDownloader { self.try_accept_tenure_start_block(block)?; Ok(None) } - NakamotoTenureDownloadState::GetTenureEndBlock(_block_id, _start_request_time) => { + NakamotoTenureDownloadState::GetTenureEndBlock(block_id, start_request_time) => { debug!( "Got download response to tenure-end block {} in {}ms", - &_block_id, - get_epoch_time_ms().saturating_sub(_start_request_time) + &block_id, + get_epoch_time_ms().saturating_sub(start_request_time) ); let block = response.decode_nakamoto_block().map_err(|e| { warn!("Failed to decode response for a Nakamoto block: {:?}", &e); @@ -607,11 +607,11 @@ impl NakamotoTenureDownloader { self.try_accept_tenure_end_block(&block)?; Ok(None) } - NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id, _start_request_time) => { + NakamotoTenureDownloadState::GetTenureBlocks(end_block_id, start_request_time) => { debug!( "Got download response for tenure blocks ending at {} in {}ms", - &_end_block_id, - get_epoch_time_ms().saturating_sub(_start_request_time) + &end_block_id, + get_epoch_time_ms().saturating_sub(start_request_time) ); let blocks = response.decode_nakamoto_tenure().map_err(|e| { warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index cc90d90011..45fa04d8d6 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -50,6 +50,17 @@ use crate::net::{Error as NetError, Hash160, NeighborAddress, SortitionDB}; use crate::stacks_common::types::Address; use crate::util_lib::db::Error as DBError; +impl NakamotoTenureDownloadState { + pub fn request_time(&self) -> Option { + match self { + Self::GetTenureStartBlock(_, ts) => Some(*ts), + Self::GetTenureEndBlock(_, ts) => Some(*ts), + Self::GetTenureBlocks(_, ts) => Some(*ts), + Self::Done => None, + } + } +} + impl NakamotoDownloadStateMachine { /// Find the list of wanted tenures for the given reward cycle. The reward cycle must /// be complete already. Used for testing. @@ -240,7 +251,10 @@ fn test_nakamoto_tenure_downloader() { // must be first block assert_eq!( td.state, - NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block.header.block_id()) + NakamotoTenureDownloadState::GetTenureStartBlock( + tenure_start_block.header.block_id(), + td.state.request_time().unwrap() + ) ); assert!(td .try_accept_tenure_start_block(blocks.last().unwrap().clone()) @@ -254,7 +268,7 @@ fn test_nakamoto_tenure_downloader() { .try_accept_tenure_start_block(blocks.first().unwrap().clone()) .is_ok()); - let NakamotoTenureDownloadState::GetTenureEndBlock(block_id) = td.state else { + let NakamotoTenureDownloadState::GetTenureEndBlock(block_id, ..) = td.state else { panic!("wrong state"); }; assert_eq!(block_id, next_tenure_start_block.header.block_id()); @@ -274,7 +288,8 @@ fn test_nakamoto_tenure_downloader() { assert_eq!( td.state, NakamotoTenureDownloadState::GetTenureBlocks( - next_tenure_start_block.header.parent_block_id.clone() + next_tenure_start_block.header.parent_block_id.clone(), + td.state.request_time().unwrap(), ) ); assert_eq!(td.tenure_end_block, Some(next_tenure_start_block.clone())); @@ -299,7 +314,10 @@ fn test_nakamoto_tenure_downloader() { // tail pointer moved assert_eq!( td.state, - NakamotoTenureDownloadState::GetTenureBlocks(block.header.parent_block_id.clone()) + NakamotoTenureDownloadState::GetTenureBlocks( + block.header.parent_block_id.clone(), + td.state.request_time().unwrap() + ) ); } @@ -571,7 +589,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() + unconfirmed_wanted_tenure.winning_block_id.clone(), + ntd.state.request_time().unwrap() ) ); } @@ -669,7 +688,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() + unconfirmed_wanted_tenure.winning_block_id.clone(), + ntd.state.request_time().unwrap() ) ); } @@ -769,7 +789,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() + unconfirmed_wanted_tenure.winning_block_id.clone(), + ntd.state.request_time().unwrap() ) ); } @@ -846,7 +867,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() + unconfirmed_wanted_tenure.winning_block_id.clone(), + ntd.state.request_time().unwrap() ) ); } @@ -986,7 +1008,8 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { assert_eq!( ntd.state, NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() + unconfirmed_wanted_tenure.winning_block_id.clone(), + ntd.state.request_time().unwrap() ) ); } From 638b26a0d8419386300b9d08a07b4eee5c552369 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 3 Oct 2024 15:42:41 -0400 Subject: [PATCH 1283/1400] fix: typo --- stackslib/src/net/api/getsortition.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 28298eab42..9b22d8b82f 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -383,7 +383,7 @@ impl HttpResponse for GetSortitionHandler { } impl StacksHttpRequest { - /// Make a new getinfo request to this endpoint + /// Make a new getsortition request to this endpoint pub fn new_get_sortition( host: PeerHost, sort_key: &str, From 66904ccd77d3b599c949edc8e2c197983cbf5163 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 3 Oct 2024 12:51:23 -0700 Subject: [PATCH 1284/1400] CRC: increase the timeout to reach right before epoch 2.5 Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/epoch_25.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 94a3edaadb..5a45b35e86 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -170,7 +170,7 @@ fn microblocks_disabled() { submit_tx(&http_origin, &tx); // wait until just before epoch 2.5 - wait_for(30, || { + wait_for(120, || { let tip_info = get_chain_info(&conf); if tip_info.burn_block_height >= epoch_2_5 - 2 { return Ok(true); From 994ef3b5aae8a34e36e6e087e0b5178000e27946 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 3 Oct 2024 15:02:03 -0500 Subject: [PATCH 1285/1400] test: add long tx test --- .../src/tests/nakamoto_integrations.rs | 200 +++++++++++++++++- .../src/tests/neon_integrations.rs | 2 +- 2 files changed, 200 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e5a6c87af0..e1880e6e36 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -94,7 +94,7 @@ use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; use crate::nakamoto_node::miner::{ - TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, + TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; @@ -8926,3 +8926,201 @@ fn v3_signer_api_endpoint() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up. +/// This test asserts that a long running transaction doesn't get mined, +/// but that the stacks-node continues to make progress +fn skip_mining_long_tx() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.miner.nakamoto_attempt_time_ms = 5_000; + let sender_1_sk = Secp256k1PrivateKey::from_seed(&[30]); + let sender_2_sk = Secp256k1PrivateKey::from_seed(&[31]); + // setup sender + recipient for a test stx transfer + let sender_1_addr = tests::to_addr(&sender_1_sk); + let sender_2_addr = tests::to_addr(&sender_2_sk); + let send_amt = 1000; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_1_addr.clone()).to_string(), + send_amt * 15 + send_fee * 15, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_2_addr.clone()).to_string(), + 10000, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + test_observer::spawn(); + let observer_port = test_observer::EVENT_OBSERVER_PORT; + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{observer_port}"), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + naka_mined_blocks: mined_naka_blocks, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // submit a long running TX and the transfer TX + let input_list: Vec<_> = (1..100u64).into_iter().map(|x| x.to_string()).collect(); + let input_list = input_list.join(" "); + + // Mine a few nakamoto tenures with some interim blocks in them + for i in 0..5 { + let mined_before = mined_naka_blocks.load(Ordering::SeqCst); + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + if i == 0 { + // we trigger the nakamoto miner to evaluate the long running transaction, + // but we disable the block broadcast, so the tx doesn't end up included in a + // confirmed block, even though its been evaluated. + // once we've seen the miner increment the mined counter, we allow it to start + // broadcasting (because at this point, any future blocks produced will skip the long + // running tx because they have an estimate). + wait_for(30, || { + Ok(mined_naka_blocks.load(Ordering::SeqCst) > mined_before) + }) + .unwrap(); + + TEST_SKIP_P2P_BROADCAST.lock().unwrap().replace(true); + let tx = make_contract_publish( + &sender_2_sk, + 0, + 9_000, + "large_contract", + &format!( + "(define-constant INP_LIST (list {input_list})) + (define-private (mapping-fn (input int)) + (begin (sha256 (sha256 (sha256 (sha256 (sha256 (sha256 (sha256 (sha256 (sha256 input))))))))) + 0)) + + (define-private (mapping-fn-2 (input int)) + (begin (map mapping-fn INP_LIST) (map mapping-fn INP_LIST) (map mapping-fn INP_LIST) (map mapping-fn INP_LIST) 0)) + + (begin + (map mapping-fn-2 INP_LIST))" + ), + ); + submit_tx(&http_origin, &tx); + + wait_for(90, || { + Ok(mined_naka_blocks.load(Ordering::SeqCst) > mined_before + 1) + }) + .unwrap(); + + TEST_SKIP_P2P_BROADCAST.lock().unwrap().replace(false); + } else { + let transfer_tx = + make_stacks_transfer(&sender_1_sk, i - 1, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &sender_1_addr).nonce; + Ok(cur_sender_nonce >= i) + }) + .unwrap(); + } + } + + let sender_1_nonce = get_account(&http_origin, &sender_1_addr).nonce; + let sender_2_nonce = get_account(&http_origin, &sender_2_addr).nonce; + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + "sender_1_nonce" => sender_1_nonce, + "sender_2_nonce" => sender_2_nonce, + ); + + assert_eq!(sender_2_nonce, 0); + assert_eq!(sender_1_nonce, 4); + + // Check that we aren't missing burn blocks + let bhh = u64::from(tip.burn_header_height); + test_observer::contains_burn_block_range(220..=bhh).unwrap(); + + check_nakamoto_empty_block_heuristics(); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 2c2055bad9..126f089c35 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -591,7 +591,7 @@ pub mod test_observer { } } let tx_hex = tx_json.get("raw_tx").unwrap().as_str().unwrap(); - let tx_bytes = hex_bytes(tx_hex).unwrap(); + let tx_bytes = hex_bytes(&tx_hex[2..]).unwrap(); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); Some(tx) From 56ae22d80b5925b80759cf35c449dddc27b0ed69 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 3 Oct 2024 13:44:18 -0700 Subject: [PATCH 1286/1400] Bind ports should not use the same port numbers Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 27 ++++----- .../src/tests/nakamoto_integrations.rs | 46 ++++++++------- .../src/tests/neon_integrations.rs | 58 ++++++++++++++----- 3 files changed, 81 insertions(+), 50 deletions(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index a7892b9a2d..8829c9782d 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -23,7 +23,7 @@ use clarity::vm::events::STXEventType; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; use lazy_static::lazy_static; -use rand::RngCore; +use rand::Rng; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::events::StacksTransactionEvent; @@ -295,13 +295,14 @@ pub fn new_test_conf() -> Config { // publicKey: "03e2ed46873d0db820e8c6001aabc082d72b5b900b53b7a1b9714fe7bde3037b81", // stacksAddress: "ST2VHM28V9E5QCRD6C73215KAPSBKQGPWTEE5CMQT" let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); + // Use a non-privileged port between 1024 and 65534 + let rpc_port: u16 = rng.gen_range(1024..65533); + let p2p_port = rpc_port + 1; let mut conf = Config::default(); conf.node.working_dir = format!( "/tmp/stacks-node-tests/integrations-neon/{}-{}", - to_hex(&buf), + to_hex(format!("{rpc_port}{p2p_port}").as_bytes()), get_epoch_time_secs() ); conf.node.seed = @@ -313,14 +314,11 @@ pub fn new_test_conf() -> Config { conf.burnchain.epochs = Some(StacksEpoch::all(0, 0, 0)); - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let localhost = "127.0.0.1"; - conf.node.rpc_bind = format!("{}:{}", localhost, rpc_port); - conf.node.p2p_bind = format!("{}:{}", localhost, p2p_port); - conf.node.data_url = format!("http://{}:{}", localhost, rpc_port); - conf.node.p2p_address = format!("{}:{}", localhost, p2p_port); + conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); + conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); + conf.node.data_url = format!("http://{localhost}:{rpc_port}"); + conf.node.p2p_address = format!("{localhost}:{p2p_port}"); conf } @@ -344,10 +342,9 @@ pub fn set_random_binds(config: &mut Config) { .unwrap(); let (rpc_port, p2p_port) = loop { let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + // Use a non-privileged port between 1024 and 65534 + let rpc_port: u16 = rng.gen_range(1024..65533); + let p2p_port = rpc_port + 1; if rpc_port != prior_rpc_port && p2p_port != prior_p2p_port { break (rpc_port, p2p_port); } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 722528e0ab..17840dcb55 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -29,7 +29,7 @@ use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::SignerMessage as SignerMessageV0; use libsigner::{SignerSession, StackerDBSession}; -use rand::RngCore; +use rand::Rng; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ @@ -3459,18 +3459,20 @@ fn follower_bootup() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; + let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); - - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + // Use a non-privileged port between 1024 and 65534 + let mut rpc_port: u16 = rng.gen_range(1024..65533); + while format!("{localhost}:{rpc_port}") == naka_conf.node.rpc_bind { + // We should NOT match the miner's rpc bind and subsequently p2p port + rpc_port = rng.gen_range(1024..65533); + } + let p2p_port = rpc_port + 1; - let localhost = "127.0.0.1"; - follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); - follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); + follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); + follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); + follower_conf.node.p2p_address = format!("{localhost}:{p2p_port}"); follower_conf.node.pox_sync_sample_secs = 30; let node_info = get_chain_info(&naka_conf); @@ -3813,18 +3815,20 @@ fn follower_bootup_across_multiple_cycles() { follower_conf.node.local_peer_seed = vec![0x02; 32]; follower_conf.node.miner = false; + let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); - - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + // Use a non-privileged port between 1024 and 65534 + let mut rpc_port: u16 = rng.gen_range(1024..65533); + while format!("{localhost}:{rpc_port}") == naka_conf.node.rpc_bind { + // We should NOT match the miner's rpc bind and subsequently p2p port + rpc_port = rng.gen_range(1024..65533); + } + let p2p_port = rpc_port + 1; - let localhost = "127.0.0.1"; - follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); - follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); + follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); + follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); + follower_conf.node.p2p_address = format!("{localhost}:{p2p_port}"); follower_conf.node.pox_sync_sample_secs = 30; let node_info = get_chain_info(&naka_conf); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index ab892f25b7..d34cbffe5b 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12,7 +12,7 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value, MAX_CALL_STACK_DEPTH}; -use rand::{Rng, RngCore}; +use rand::Rng; use rusqlite::params; use serde::Deserialize; use serde_json::json; @@ -986,7 +986,16 @@ fn bitcoind_integration_test() { } let (mut conf, miner_account) = neon_integration_test_conf(); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let localhost = "127.0.0.1"; + let mut rng = rand::thread_rng(); + // Use a non-privileged port between 1024 and 65534 + let mut prom_port = 6000; + let mut prom_bind = format!("{localhost}:{prom_port}"); + while prom_bind == conf.node.rpc_bind || prom_bind == conf.node.p2p_bind { + // We should NOT match the miner's rpc or p2p binds + prom_port = rng.gen_range(1024..65533); + prom_bind = format!("{localhost}:{prom_port}"); + } conf.node.prometheus_bind = Some(prom_bind.clone()); conf.burnchain.max_rbf = 1000000; @@ -12466,18 +12475,21 @@ fn bitcoin_reorg_flap_with_follower() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; + let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); - - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 + // Use a non-privileged port between 1024 and 65534 + let mut rpc_port: u16 = rng.gen_range(1024..65533); + while format!("{localhost}:{rpc_port}") == conf.node.rpc_bind { + // We should NOT match the miner's rpc bind and subsequently p2p port + rpc_port = rng.gen_range(1024..65533); + } + let p2p_port = rpc_port + 1; - let localhost = "127.0.0.1"; - follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); - follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); + follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); + follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); + follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); + follower_conf.node.p2p_address = format!("{localhost}:{p2p_port}"); + follower_conf.node.pox_sync_sample_secs = 30; let run_loop_thread = thread::spawn(move || miner_run_loop.start(None, 0)); wait_for_runloop(&miner_blocks_processed); @@ -12800,7 +12812,16 @@ fn listunspent_max_utxos() { } let (mut conf, _miner_account) = neon_integration_test_conf(); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let localhost = "127.0.0.1"; + let mut rng = rand::thread_rng(); + // Use a non-privileged port between 1024 and 65534 + let mut prom_port = 6000; + let mut prom_bind = format!("{localhost}:{prom_port}"); + while prom_bind == conf.node.rpc_bind || prom_bind == conf.node.p2p_bind { + // We should NOT match the miner's rpc or p2p binds + prom_port = rng.gen_range(1024..65533); + prom_bind = format!("{localhost}:{prom_port}"); + } conf.node.prometheus_bind = Some(prom_bind.clone()); conf.burnchain.max_rbf = 1000000; @@ -12846,7 +12867,16 @@ fn start_stop_bitcoind() { } let (mut conf, _miner_account) = neon_integration_test_conf(); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let localhost = "127.0.0.1"; + let mut rng = rand::thread_rng(); + // Use a non-privileged port between 1024 and 65534 + let mut prom_port = 6000; + let mut prom_bind = format!("{localhost}:{prom_port}"); + while prom_bind == conf.node.rpc_bind || prom_bind == conf.node.p2p_bind { + // We should NOT match the miner's rpc or p2p binds + prom_port = rng.gen_range(1024..65533); + prom_bind = format!("{localhost}:{prom_port}"); + } conf.node.prometheus_bind = Some(prom_bind.clone()); conf.burnchain.max_rbf = 1000000; From 331dc9436e8296513d543f348e9d9b1765fa570f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 4 Oct 2024 11:17:10 -0500 Subject: [PATCH 1287/1400] feat: wait to build block if min gap won't be met * ci: disable unneeded microblocks tests * test: remove blanket 2 sec wait for nakamoto btc blocks --- .github/workflows/bitcoin-tests.yml | 23 ++++---- .../stacks-node/src/nakamoto_node/miner.rs | 52 +++++++++++++------ .../src/tests/nakamoto_integrations.rs | 1 - 3 files changed, 49 insertions(+), 27 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index cd867340ec..4115118eaf 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -32,7 +32,6 @@ jobs: - tests::bitcoin_regtest::bitcoind_integration_test - tests::integrations::integration_test_get_info - tests::neon_integrations::antientropy_integration_test - - tests::neon_integrations::bad_microblock_pubkey - tests::neon_integrations::bitcoind_forking_test - tests::neon_integrations::bitcoind_integration_test - tests::neon_integrations::block_large_tx_integration_test @@ -43,21 +42,26 @@ jobs: - tests::neon_integrations::fuzzed_median_fee_rate_estimation_test_window10 - tests::neon_integrations::fuzzed_median_fee_rate_estimation_test_window5 - tests::neon_integrations::liquid_ustx_integration - - tests::neon_integrations::microblock_fork_poison_integration_test - - tests::neon_integrations::microblock_integration_test + # Microblock tests that are no longer needed on every CI run + # (microblocks are unsupported starting in Epoch 2.5) + # - tests::neon_integrations::bad_microblock_pubkey + # - tests::neon_integrations::microblock_fork_poison_integration_test + # - tests::neon_integrations::microblock_integration_test + # - tests::neon_integrations::microblock_limit_hit_integration_test + # - tests::neon_integrations::test_problematic_microblocks_are_not_mined + # - tests::neon_integrations::test_problematic_microblocks_are_not_relayed_or_stored + # - tests::neon_integrations::size_overflow_unconfirmed_invalid_stream_microblocks_integration_test + # - tests::neon_integrations::size_overflow_unconfirmed_microblocks_integration_test + # - tests::neon_integrations::size_overflow_unconfirmed_stream_microblocks_integration_test + # - tests::neon_integrations::runtime_overflow_unconfirmed_microblocks_integration_test # Disable this flaky test. Microblocks are no longer supported anyways. # - tests::neon_integrations::microblock_large_tx_integration_test_FLAKY - - tests::neon_integrations::microblock_limit_hit_integration_test - tests::neon_integrations::miner_submit_twice - tests::neon_integrations::mining_events_integration_test - tests::neon_integrations::pox_integration_test - tests::neon_integrations::push_boot_receipts - - tests::neon_integrations::runtime_overflow_unconfirmed_microblocks_integration_test - tests::neon_integrations::should_fix_2771 - tests::neon_integrations::size_check_integration_test - - tests::neon_integrations::size_overflow_unconfirmed_invalid_stream_microblocks_integration_test - - tests::neon_integrations::size_overflow_unconfirmed_microblocks_integration_test - - tests::neon_integrations::size_overflow_unconfirmed_stream_microblocks_integration_test - tests::neon_integrations::stx_delegate_btc_integration_test - tests::neon_integrations::stx_transfer_btc_integration_test - tests::neon_integrations::stack_stx_burn_op_test @@ -66,8 +70,6 @@ jobs: - tests::neon_integrations::test_flash_block_skip_tenure - tests::neon_integrations::test_problematic_blocks_are_not_mined - tests::neon_integrations::test_problematic_blocks_are_not_relayed_or_stored - - tests::neon_integrations::test_problematic_microblocks_are_not_mined - - tests::neon_integrations::test_problematic_microblocks_are_not_relayed_or_stored - tests::neon_integrations::test_problematic_txs_are_not_stored - tests::neon_integrations::use_latest_tip_integration_test - tests::neon_integrations::confirm_unparsed_ongoing_ops @@ -90,6 +92,7 @@ jobs: - tests::nakamoto_integrations::follower_bootup - tests::nakamoto_integrations::forked_tenure_is_ignored - tests::nakamoto_integrations::nakamoto_attempt_time + - tests::nakamoto_integrations::skip_mining_long_tx - tests::signer::v0::block_proposal_rejection - tests::signer::v0::miner_gather_signatures - tests::signer::v0::end_of_tenure diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 850fffaab6..af539db5b1 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -40,6 +40,7 @@ use stacks::chainstate::stacks::{ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; +use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; @@ -949,6 +950,29 @@ impl BlockMinerThread { Some(vrf_proof) } + fn validate_timestamp_info( + &self, + current_timestamp_secs: u64, + stacks_parent_header: &StacksHeaderInfo, + ) -> bool { + let parent_timestamp = match stacks_parent_header.anchored_header.as_stacks_nakamoto() { + Some(naka_header) => naka_header.timestamp, + None => stacks_parent_header.burn_header_timestamp, + }; + let time_since_parent_ms = current_timestamp_secs.saturating_sub(parent_timestamp) * 1000; + if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms { + debug!("Parent block mined {time_since_parent_ms} ms ago. Required minimum gap between blocks is {} ms", self.config.miner.min_time_between_blocks_ms; + "current_timestamp" => current_timestamp_secs, + "parent_block_id" => %stacks_parent_header.index_block_hash(), + "parent_block_height" => stacks_parent_header.stacks_block_height, + "parent_block_timestamp" => stacks_parent_header.burn_header_timestamp, + ); + false + } else { + true + } + } + /// Check that the provided block is not mined too quickly after the parent block. /// This is to ensure that the signers do not reject the block due to the block being mined within the same second as the parent block. fn validate_timestamp(&self, x: &NakamotoBlock) -> Result { @@ -970,22 +994,7 @@ impl BlockMinerThread { ); NakamotoNodeError::ParentNotFound })?; - let current_timestamp = x.header.timestamp; - let parent_timestamp = match stacks_parent_header.anchored_header.as_stacks_nakamoto() { - Some(naka_header) => naka_header.timestamp, - None => stacks_parent_header.burn_header_timestamp, - }; - let time_since_parent_ms = current_timestamp.saturating_sub(parent_timestamp) * 1000; - if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms { - debug!("Parent block mined {time_since_parent_ms} ms ago. Required minimum gap between blocks is {} ms", self.config.miner.min_time_between_blocks_ms; - "current_timestamp" => current_timestamp, - "parent_block_id" => %stacks_parent_header.index_block_hash(), - "parent_block_height" => stacks_parent_header.stacks_block_height, - "parent_block_timestamp" => stacks_parent_header.burn_header_timestamp, - ); - return Ok(false); - } - Ok(true) + Ok(self.validate_timestamp_info(x.header.timestamp, &stacks_parent_header)) } // TODO: add tests from mutation testing results #4869 @@ -1042,6 +1051,17 @@ impl BlockMinerThread { let signer_bitvec_len = reward_set.rewarded_addresses.len().try_into().ok(); + if !self.validate_timestamp_info( + get_epoch_time_secs(), + &parent_block_info.stacks_parent_header, + ) { + // treat a too-soon-to-mine block as an interrupt: this will let the caller sleep and then re-evaluate + // all the pre-mining checks (burnchain tip changes, signal interrupts, etc.) + return Err(NakamotoNodeError::MiningFailure( + ChainstateError::MinerAborted, + )); + } + // build the block itself let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3b8cdecb2e..79e7ed6424 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -728,7 +728,6 @@ pub fn next_block_and_wait_for_commits( (0..commits_before.len()).map(|_| None).collect(); let mut commit_sent_time: Vec> = (0..commits_before.len()).map(|_| None).collect(); - sleep_ms(2000); // Make sure that the proposed stacks block has a different timestamp than its parent next_block_and(btc_controller, timeout_secs, || { for i in 0..commits_submitted.len() { let commits_sent = commits_submitted[i].load(Ordering::SeqCst); From cf5fd4ceef081436d099793c558d85ae5d32e82b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 09:36:05 -0700 Subject: [PATCH 1288/1400] Cleanup bind ports Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 26 ++-- .../src/tests/nakamoto_integrations.rs | 74 +++++++-- .../src/tests/neon_integrations.rs | 142 ++++++++++++++---- 3 files changed, 189 insertions(+), 53 deletions(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 8829c9782d..e45c22c162 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -295,9 +295,13 @@ pub fn new_test_conf() -> Config { // publicKey: "03e2ed46873d0db820e8c6001aabc082d72b5b900b53b7a1b9714fe7bde3037b81", // stacksAddress: "ST2VHM28V9E5QCRD6C73215KAPSBKQGPWTEE5CMQT" let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 - let rpc_port: u16 = rng.gen_range(1024..65533); - let p2p_port = rpc_port + 1; + let (rpc_port, p2p_port) = loop { + let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if a != b { + break (a, b); + } + }; let mut conf = Config::default(); conf.node.working_dir = format!( @@ -324,6 +328,7 @@ pub fn new_test_conf() -> Config { /// Randomly change the config's network ports to new ports. pub fn set_random_binds(config: &mut Config) { + let mut rng = rand::thread_rng(); let prior_rpc_port: u16 = config .node .rpc_bind @@ -341,12 +346,15 @@ pub fn set_random_binds(config: &mut Config) { .parse() .unwrap(); let (rpc_port, p2p_port) = loop { - let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 - let rpc_port: u16 = rng.gen_range(1024..65533); - let p2p_port = rpc_port + 1; - if rpc_port != prior_rpc_port && p2p_port != prior_p2p_port { - break (rpc_port, p2p_port); + let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if a != b + && a != prior_rpc_port + && a != prior_p2p_port + && b != prior_rpc_port + && b != prior_p2p_port + { + break (a, b); } }; let localhost = "127.0.0.1"; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 17840dcb55..e6eabc99d3 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3459,16 +3459,37 @@ fn follower_bootup() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 - let mut rpc_port: u16 = rng.gen_range(1024..65533); - while format!("{localhost}:{rpc_port}") == naka_conf.node.rpc_bind { - // We should NOT match the miner's rpc bind and subsequently p2p port - rpc_port = rng.gen_range(1024..65533); - } - let p2p_port = rpc_port + 1; + let prior_rpc_port: u16 = naka_conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = naka_conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let (rpc_port, p2p_port) = loop { + let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if a != b + && a != prior_rpc_port + && a != prior_p2p_port + && b != prior_rpc_port + && b != prior_p2p_port + { + break (a, b); + } + }; + let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); @@ -3815,16 +3836,37 @@ fn follower_bootup_across_multiple_cycles() { follower_conf.node.local_peer_seed = vec![0x02; 32]; follower_conf.node.miner = false; - let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 - let mut rpc_port: u16 = rng.gen_range(1024..65533); - while format!("{localhost}:{rpc_port}") == naka_conf.node.rpc_bind { - // We should NOT match the miner's rpc bind and subsequently p2p port - rpc_port = rng.gen_range(1024..65533); - } - let p2p_port = rpc_port + 1; + let prior_rpc_port: u16 = naka_conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = naka_conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let (rpc_port, p2p_port) = loop { + let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if a != b + && a != prior_rpc_port + && a != prior_p2p_port + && b != prior_rpc_port + && b != prior_p2p_port + { + break (a, b); + } + }; + let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index d34cbffe5b..bef5786fb2 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -986,16 +986,31 @@ fn bitcoind_integration_test() { } let (mut conf, miner_account) = neon_integration_test_conf(); - let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 let mut prom_port = 6000; - let mut prom_bind = format!("{localhost}:{prom_port}"); - while prom_bind == conf.node.rpc_bind || prom_bind == conf.node.p2p_bind { + let prior_rpc_port: u16 = conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + // Use a non-privileged port between 1024 and 65534 + while prom_port == prior_rpc_port || prom_port == prior_p2p_port { // We should NOT match the miner's rpc or p2p binds - prom_port = rng.gen_range(1024..65533); - prom_bind = format!("{localhost}:{prom_port}"); + prom_port = rng.gen_range(1024..u16::MAX); } + let localhost = "127.0.0.1"; + let prom_bind = format!("{localhost}:{prom_port}"); conf.node.prometheus_bind = Some(prom_bind.clone()); conf.burnchain.max_rbf = 1000000; @@ -12475,16 +12490,37 @@ fn bitcoin_reorg_flap_with_follower() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 - let mut rpc_port: u16 = rng.gen_range(1024..65533); - while format!("{localhost}:{rpc_port}") == conf.node.rpc_bind { - // We should NOT match the miner's rpc bind and subsequently p2p port - rpc_port = rng.gen_range(1024..65533); - } - let p2p_port = rpc_port + 1; + let prior_rpc_port: u16 = conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let (rpc_port, p2p_port) = loop { + let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if a != b + && a != prior_rpc_port + && a != prior_p2p_port + && b != prior_rpc_port + && b != prior_p2p_port + { + break (a, b); + } + }; + let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); @@ -12670,11 +12706,31 @@ fn mock_miner_replay() { follower_conf.node.local_peer_seed = vec![0x02; 32]; let mut rng = rand::thread_rng(); - + let prior_rpc_port: u16 = conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); let (rpc_port, p2p_port) = loop { let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b { + if a != b + && a != prior_rpc_port + && a != prior_p2p_port + && b != prior_rpc_port + && b != prior_p2p_port + { break (a, b); } }; @@ -12812,16 +12868,31 @@ fn listunspent_max_utxos() { } let (mut conf, _miner_account) = neon_integration_test_conf(); - let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 let mut prom_port = 6000; - let mut prom_bind = format!("{localhost}:{prom_port}"); - while prom_bind == conf.node.rpc_bind || prom_bind == conf.node.p2p_bind { + let prior_rpc_port: u16 = conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + // Use a non-privileged port between 1024 and 65534 + while prom_port == prior_rpc_port || prom_port == prior_p2p_port { // We should NOT match the miner's rpc or p2p binds - prom_port = rng.gen_range(1024..65533); - prom_bind = format!("{localhost}:{prom_port}"); + prom_port = rng.gen_range(1024..u16::MAX); } + let localhost = "127.0.0.1"; + let prom_bind = format!("{localhost}:{prom_port}"); conf.node.prometheus_bind = Some(prom_bind.clone()); conf.burnchain.max_rbf = 1000000; @@ -12867,16 +12938,31 @@ fn start_stop_bitcoind() { } let (mut conf, _miner_account) = neon_integration_test_conf(); - let localhost = "127.0.0.1"; let mut rng = rand::thread_rng(); - // Use a non-privileged port between 1024 and 65534 let mut prom_port = 6000; - let mut prom_bind = format!("{localhost}:{prom_port}"); - while prom_bind == conf.node.rpc_bind || prom_bind == conf.node.p2p_bind { + let prior_rpc_port: u16 = conf + .node + .rpc_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + let prior_p2p_port: u16 = conf + .node + .p2p_bind + .split(":") + .last() + .unwrap() + .parse() + .unwrap(); + // Use a non-privileged port between 1024 and 65534 + while prom_port == prior_rpc_port || prom_port == prior_p2p_port { // We should NOT match the miner's rpc or p2p binds - prom_port = rng.gen_range(1024..65533); - prom_bind = format!("{localhost}:{prom_port}"); + prom_port = rng.gen_range(1024..u16::MAX); } + let localhost = "127.0.0.1"; + let prom_bind = format!("{localhost}:{prom_port}"); conf.node.prometheus_bind = Some(prom_bind.clone()); conf.burnchain.max_rbf = 1000000; From f7cdbaca4028c3773d931dd4a66d469c1631fc59 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 10:40:35 -0700 Subject: [PATCH 1289/1400] Incorrect path was passed to new_rpc_call_timer calling out of bounds array access Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/stacks_client.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index c144d0401a..6168e76106 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -376,10 +376,12 @@ impl StacksClient { "last_sortition" => %last_sortition, ); let path = self.tenure_forking_info_path(chosen_parent, last_sortition); - let timer = crate::monitoring::new_rpc_call_timer( - "/v3/tenures/fork_info/:start/:stop", - &self.http_origin, + // Use a seperate metrics path to allow the same metric for different start and stop hashes + let metrics_path = format!( + "{}{RPC_TENURE_FORKING_INFO_PATH}/:start/:stop", + self.http_origin ); + let timer = crate::monitoring::new_rpc_call_timer(&metrics_path, &self.http_origin); let send_request = || { self.stacks_node_client .get(&path) From 5b036df16e938987c58886ba1e0df682c7ee91a3 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 10:43:31 -0700 Subject: [PATCH 1290/1400] Typo for Brice Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/stacks_client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 6168e76106..5caf9d3f42 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -376,7 +376,7 @@ impl StacksClient { "last_sortition" => %last_sortition, ); let path = self.tenure_forking_info_path(chosen_parent, last_sortition); - // Use a seperate metrics path to allow the same metric for different start and stop hashes + // Use a separate metrics path to allow the same metric for different start and stop hashes let metrics_path = format!( "{}{RPC_TENURE_FORKING_INFO_PATH}/:start/:stop", self.http_origin From 381a8c789ce83ce0fdd6b5a1bf493d9cb4c908d6 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 4 Oct 2024 11:09:00 -0700 Subject: [PATCH 1291/1400] fix: better implementation of path label for prom metrics --- stacks-signer/src/monitoring/mod.rs | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs index e03b03d47a..621886b9c0 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -92,13 +92,22 @@ pub fn update_signer_nonce(nonce: u64) { prometheus::SIGNER_NONCE.set(nonce as i64); } +// Allow dead code because this is only used in the `monitoring_prom` feature +// but we want to run it in a test +#[allow(dead_code)] +/// Remove the origin from the full path to avoid duplicate metrics for different origins +fn remove_origin_from_path(full_path: &str, origin: &str) -> String { + let path = full_path.replace(origin, ""); + path +} + /// Start a new RPC call timer. /// The `origin` parameter is the base path of the RPC call, e.g. `http://node.com`. /// The `origin` parameter is removed from `full_path` when storing in prometheus. #[cfg(feature = "monitoring_prom")] pub fn new_rpc_call_timer(full_path: &str, origin: &str) -> HistogramTimer { - let path = &full_path[origin.len()..]; - let histogram = prometheus::SIGNER_RPC_CALL_LATENCIES_HISTOGRAM.with_label_values(&[path]); + let path = remove_origin_from_path(full_path, origin); + let histogram = prometheus::SIGNER_RPC_CALL_LATENCIES_HISTOGRAM.with_label_values(&[&path]); histogram.start_timer() } @@ -140,3 +149,16 @@ pub fn start_serving_monitoring_metrics(config: GlobalConfig) -> Result<(), Stri } Ok(()) } + +#[test] +fn test_remove_origin_from_path() { + let full_path = "http://localhost:20443/v2/info"; + let origin = "http://localhost:20443"; + let path = remove_origin_from_path(full_path, origin); + assert_eq!(path, "/v2/info"); + + let full_path = "/v2/info"; + let origin = "http://localhost:20443"; + let path = remove_origin_from_path(full_path, origin); + assert_eq!(path, "/v2/info"); +} From 30cd8ec965e1103c8618a523853b3aa8cdd7c326 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 12:52:13 -0700 Subject: [PATCH 1292/1400] Do not abort the runloop unless we were not told to exit Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/run_loop/neon.rs | 40 ++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index b1fa0ff53b..331e7e597c 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -1314,6 +1314,26 @@ impl RunLoop { // // _this will block if the relayer's buffer is full_ if !node.relayer_sortition_notify() { + // First check if we were supposed to cleanly exit + if !globals.keep_running() { + // The p2p thread relies on the same atomic_bool, it will + // discontinue its execution after completing its ongoing runloop epoch. + info!("Terminating p2p process"); + info!("Terminating relayer"); + info!("Terminating chains-coordinator"); + + globals.coord().stop_chains_coordinator(); + coordinator_thread_handle.join().unwrap(); + let peer_network = node.join(); + liveness_thread.join().unwrap(); + + // Data that will be passed to Nakamoto run loop + // Only gets transfered on clean shutdown of neon run loop + let data_to_naka = Neon2NakaData::new(globals, peer_network); + + info!("Exiting stacks-node"); + return Some(data_to_naka); + } // relayer hung up, exit. error!("Runloop: Block relayer and miner hung up, exiting."); return None; @@ -1388,6 +1408,26 @@ impl RunLoop { } if !node.relayer_issue_tenure(ibd) { + // First check if we were supposed to cleanly exit + if !globals.keep_running() { + // The p2p thread relies on the same atomic_bool, it will + // discontinue its execution after completing its ongoing runloop epoch. + info!("Terminating p2p process"); + info!("Terminating relayer"); + info!("Terminating chains-coordinator"); + + globals.coord().stop_chains_coordinator(); + coordinator_thread_handle.join().unwrap(); + let peer_network = node.join(); + liveness_thread.join().unwrap(); + + // Data that will be passed to Nakamoto run loop + // Only gets transfered on clean shutdown of neon run loop + let data_to_naka = Neon2NakaData::new(globals, peer_network); + + info!("Exiting stacks-node"); + return Some(data_to_naka); + } // relayer hung up, exit. error!("Runloop: Block relayer and miner hung up, exiting."); break None; From fc80812111252eddf611d017d5aa808d504bb285 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 13:48:23 -0700 Subject: [PATCH 1293/1400] CRC: make a global mutex of used ports in tests Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 60 +++++--- .../src/tests/nakamoto_integrations.rs | 66 +-------- .../src/tests/neon_integrations.rs | 135 ++---------------- 3 files changed, 51 insertions(+), 210 deletions(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index e45c22c162..883970ef09 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -13,9 +13,9 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::atomic::AtomicU64; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::BurnStateDB; @@ -99,6 +99,34 @@ lazy_static! { ); } +lazy_static! { + static ref USED_PORTS: Mutex> = Mutex::new(HashSet::new()); +} + +/// Generate a random port number between 1024 and 65534 (inclusive) and insert it into the USED_PORTS set. +/// Returns the generated port number. +pub fn gen_random_port() -> u16 { + let mut rng = rand::thread_rng(); + let range_len = (1024..u16::MAX).len(); + loop { + let port = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if insert_new_port(port) { + return port; + } + assert!( + USED_PORTS.lock().unwrap().len() < range_len, + "No more available ports" + ); + } +} + +// Add a port to the USED_PORTS set. This is used to ensure that we don't try to bind to the same port in tests +// Returns true if the port was inserted, false if it was already in the set. +pub fn insert_new_port(port: u16) -> bool { + let mut ports = USED_PORTS.lock().unwrap(); + ports.insert(port) +} + pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( payload: TransactionPayload, sender: &StacksPrivateKey, @@ -294,14 +322,8 @@ pub fn new_test_conf() -> Config { // secretKey: "b1cf9cee5083f421c84d7cb53be5edf2801c3c78d63d53917aee0bdc8bd160ee01", // publicKey: "03e2ed46873d0db820e8c6001aabc082d72b5b900b53b7a1b9714fe7bde3037b81", // stacksAddress: "ST2VHM28V9E5QCRD6C73215KAPSBKQGPWTEE5CMQT" - let mut rng = rand::thread_rng(); - let (rpc_port, p2p_port) = loop { - let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b { - break (a, b); - } - }; + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); let mut conf = Config::default(); conf.node.working_dir = format!( @@ -328,7 +350,7 @@ pub fn new_test_conf() -> Config { /// Randomly change the config's network ports to new ports. pub fn set_random_binds(config: &mut Config) { - let mut rng = rand::thread_rng(); + // Just in case prior config was not created with `new_test_conf`, we need to add the prior generated ports let prior_rpc_port: u16 = config .node .rpc_bind @@ -345,18 +367,10 @@ pub fn set_random_binds(config: &mut Config) { .unwrap() .parse() .unwrap(); - let (rpc_port, p2p_port) = loop { - let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b - && a != prior_rpc_port - && a != prior_p2p_port - && b != prior_rpc_port - && b != prior_p2p_port - { - break (a, b); - } - }; + insert_new_port(prior_rpc_port); + insert_new_port(prior_p2p_port); + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); let localhost = "127.0.0.1"; config.node.rpc_bind = format!("{}:{}", localhost, rpc_port); config.node.p2p_bind = format!("{}:{}", localhost, p2p_port); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e6eabc99d3..6577d5e985 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -105,8 +105,8 @@ use crate::tests::neon_integrations::{ test_observer, wait_for_runloop, }; use crate::tests::{ - get_chain_info, make_contract_publish, make_contract_publish_versioned, make_stacks_transfer, - to_addr, + gen_random_port, get_chain_info, make_contract_publish, make_contract_publish_versioned, + make_stacks_transfer, to_addr, }; use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; @@ -3459,35 +3459,8 @@ fn follower_bootup() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - let mut rng = rand::thread_rng(); - let prior_rpc_port: u16 = naka_conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = naka_conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let (rpc_port, p2p_port) = loop { - let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b - && a != prior_rpc_port - && a != prior_p2p_port - && b != prior_rpc_port - && b != prior_p2p_port - { - break (a, b); - } - }; + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); @@ -3836,35 +3809,8 @@ fn follower_bootup_across_multiple_cycles() { follower_conf.node.local_peer_seed = vec![0x02; 32]; follower_conf.node.miner = false; - let mut rng = rand::thread_rng(); - let prior_rpc_port: u16 = naka_conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = naka_conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let (rpc_port, p2p_port) = loop { - let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b - && a != prior_rpc_port - && a != prior_p2p_port - && b != prior_rpc_port - && b != prior_p2p_port - { - break (a, b); - } - }; + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index bef5786fb2..7c7cb97e34 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -89,6 +89,7 @@ use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; use crate::stacks_common::types::PrivateKey; use crate::syncctl::PoxSyncWatchdogComms; +use crate::tests::gen_random_port; use crate::tests::nakamoto_integrations::{get_key_for_cycle, wait_for}; use crate::util::hash::{MerkleTree, Sha512Trunc256Sum}; use crate::util::secp256k1::MessageSignature; @@ -986,29 +987,7 @@ fn bitcoind_integration_test() { } let (mut conf, miner_account) = neon_integration_test_conf(); - let mut rng = rand::thread_rng(); - let mut prom_port = 6000; - let prior_rpc_port: u16 = conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - // Use a non-privileged port between 1024 and 65534 - while prom_port == prior_rpc_port || prom_port == prior_p2p_port { - // We should NOT match the miner's rpc or p2p binds - prom_port = rng.gen_range(1024..u16::MAX); - } + let prom_port = gen_random_port(); let localhost = "127.0.0.1"; let prom_bind = format!("{localhost}:{prom_port}"); conf.node.prometheus_bind = Some(prom_bind.clone()); @@ -12490,35 +12469,8 @@ fn bitcoin_reorg_flap_with_follower() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - let mut rng = rand::thread_rng(); - let prior_rpc_port: u16 = conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let (rpc_port, p2p_port) = loop { - let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b - && a != prior_rpc_port - && a != prior_p2p_port - && b != prior_rpc_port - && b != prior_p2p_port - { - break (a, b); - } - }; + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); @@ -12705,35 +12657,8 @@ fn mock_miner_replay() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - let mut rng = rand::thread_rng(); - let prior_rpc_port: u16 = conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let (rpc_port, p2p_port) = loop { - let a = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - let b = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if a != b - && a != prior_rpc_port - && a != prior_p2p_port - && b != prior_rpc_port - && b != prior_p2p_port - { - break (a, b); - } - }; + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); let localhost = "127.0.0.1"; follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); @@ -12868,29 +12793,7 @@ fn listunspent_max_utxos() { } let (mut conf, _miner_account) = neon_integration_test_conf(); - let mut rng = rand::thread_rng(); - let mut prom_port = 6000; - let prior_rpc_port: u16 = conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - // Use a non-privileged port between 1024 and 65534 - while prom_port == prior_rpc_port || prom_port == prior_p2p_port { - // We should NOT match the miner's rpc or p2p binds - prom_port = rng.gen_range(1024..u16::MAX); - } + let prom_port = gen_random_port(); let localhost = "127.0.0.1"; let prom_bind = format!("{localhost}:{prom_port}"); conf.node.prometheus_bind = Some(prom_bind.clone()); @@ -12938,29 +12841,7 @@ fn start_stop_bitcoind() { } let (mut conf, _miner_account) = neon_integration_test_conf(); - let mut rng = rand::thread_rng(); - let mut prom_port = 6000; - let prior_rpc_port: u16 = conf - .node - .rpc_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - let prior_p2p_port: u16 = conf - .node - .p2p_bind - .split(":") - .last() - .unwrap() - .parse() - .unwrap(); - // Use a non-privileged port between 1024 and 65534 - while prom_port == prior_rpc_port || prom_port == prior_p2p_port { - // We should NOT match the miner's rpc or p2p binds - prom_port = rng.gen_range(1024..u16::MAX); - } + let prom_port = gen_random_port(); let localhost = "127.0.0.1"; let prom_bind = format!("{localhost}:{prom_port}"); conf.node.prometheus_bind = Some(prom_bind.clone()); From d67bdcd8679a149e6e0d785ac86ea13f58e6004a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 14:03:30 -0700 Subject: [PATCH 1294/1400] CRC: remove unused import Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6577d5e985..34af301ac8 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -29,7 +29,6 @@ use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::SignerMessage as SignerMessageV0; use libsigner::{SignerSession, StackerDBSession}; -use rand::Rng; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ From 3690ad98ace9a194b970bdc077bf6f4ce023af46 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 4 Oct 2024 14:13:00 -0700 Subject: [PATCH 1295/1400] Should check before attempting to insert and return a result Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 883970ef09..ba88584f39 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -109,14 +109,14 @@ pub fn gen_random_port() -> u16 { let mut rng = rand::thread_rng(); let range_len = (1024..u16::MAX).len(); loop { - let port = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if insert_new_port(port) { - return port; - } assert!( USED_PORTS.lock().unwrap().len() < range_len, "No more available ports" ); + let port = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if insert_new_port(port) { + return port; + } } } From 02fa642ed8c411a44ad668b2040f98683e4f7832 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 7 Oct 2024 15:31:10 -0700 Subject: [PATCH 1296/1400] Fix metric breakage Signed-off-by: Jacinta Ferrant --- .../src/tests/nakamoto_integrations.rs | 56 ++++++++++++------- testnet/stacks-node/src/tests/signer/v0.rs | 28 ++++++---- 2 files changed, 51 insertions(+), 33 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 049df9f83a..5868111047 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1488,16 +1488,19 @@ fn simple_neon_integration() { // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); - assert!(res.contains(&expected_result)); + wait_for(10, || { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); + Ok(res.contains(&expected_result)) + }) + .expect("Prometheus metrics did not update"); } info!("Nakamoto miner started..."); @@ -1599,19 +1602,30 @@ fn simple_neon_integration() { let bhh = u64::from(tip.burn_header_height); test_observer::contains_burn_block_range(220..=bhh).unwrap(); - // make sure prometheus returns an updated height + // make sure prometheus returns an updated number of processed blocks #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); - assert!(res.contains(&expected_result)); + wait_for(10, || { + let prom_http_origin = format!("http://{}", prom_bind); + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result_1 = format!( + "stacks_node_stx_blocks_processed_total {}", + tip.stacks_block_height + ); + + let expected_result_2 = format!( + "stacks_node_stacks_tip_height {}", + tip.stacks_block_height - 1 + ); + Ok(res.contains(&expected_result_1) && res.contains(&expected_result_2)) + }) + .expect("Prometheus metrics did not update"); } check_nakamoto_empty_block_heuristics(); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6bffea2749..2e1a448375 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -558,18 +558,22 @@ fn miner_gather_signatures() { // Test prometheus metrics response #[cfg(feature = "monitoring_prom")] { - let metrics_response = signer_test.get_signer_metrics(); - - // Because 5 signers are running in the same process, the prometheus metrics - // are incremented once for every signer. This is why we expect the metric to be - // `5`, even though there is only one block proposed. - let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); - assert!(metrics_response.contains(&expected_result)); - let expected_result = format!( - "stacks_signer_block_responses_sent{{response_type=\"accepted\"}} {}", - num_signers - ); - assert!(metrics_response.contains(&expected_result)); + wait_for(30, || { + let metrics_response = signer_test.get_signer_metrics(); + + // Because 5 signers are running in the same process, the prometheus metrics + // are incremented once for every signer. This is why we expect the metric to be + // `10`, even though there are only two blocks proposed. + let expected_result_1 = + format!("stacks_signer_block_proposals_received {}", num_signers * 2); + let expected_result_2 = format!( + "stacks_signer_block_responses_sent{{response_type=\"accepted\"}} {}", + num_signers * 2 + ); + Ok(metrics_response.contains(&expected_result_1) + && metrics_response.contains(&expected_result_2)) + }) + .expect("Failed to advance prometheus metrics"); } } From 807c4d4017de02797634c8df495a0d00f5d6b3fc Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 8 Oct 2024 12:54:02 -0400 Subject: [PATCH 1297/1400] feat: make the timeout for event observers configurable --- CHANGELOG.md | 1 + testnet/stacks-node/src/config.rs | 8 +- testnet/stacks-node/src/event_dispatcher.rs | 77 ++++++++++++++++++- testnet/stacks-node/src/tests/epoch_205.rs | 4 + testnet/stacks-node/src/tests/epoch_21.rs | 7 ++ testnet/stacks-node/src/tests/epoch_22.rs | 2 + testnet/stacks-node/src/tests/epoch_23.rs | 1 + testnet/stacks-node/src/tests/epoch_24.rs | 2 + testnet/stacks-node/src/tests/epoch_25.rs | 1 + .../src/tests/nakamoto_integrations.rs | 24 ++++++ .../src/tests/neon_integrations.rs | 39 ++++++++++ testnet/stacks-node/src/tests/signer/mod.rs | 2 + testnet/stacks-node/src/tests/signer/v0.rs | 1 + testnet/stacks-node/src/tests/stackerdb.rs | 2 + 14 files changed, 165 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f5c84db9a6..5ccb9b5cac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-tenure-info?` added - `get-block-info?` removed - Added `/v3/signer/{signer_pubkey}/{reward_cycle}` endpoint +- Added optional `timeout_ms` to `events_observer` configuration ## [2.5.0.0.7] diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 3852bf4224..e90d610040 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1128,11 +1128,10 @@ impl Config { .map(|e| EventKeyType::from_string(e).unwrap()) .collect(); - let endpoint = format!("{}", observer.endpoint); - observers.insert(EventObserverConfig { - endpoint, + endpoint: observer.endpoint, events_keys, + timeout_ms: observer.timeout_ms.unwrap_or(1_000), }); } observers @@ -1146,6 +1145,7 @@ impl Config { events_observers.insert(EventObserverConfig { endpoint: val, events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1_000, }); () } @@ -2921,12 +2921,14 @@ impl AtlasConfigFile { pub struct EventObserverConfigFile { pub endpoint: String, pub events_keys: Vec, + pub timeout_ms: Option, } #[derive(Clone, Default, Debug, Hash, PartialEq, Eq, PartialOrd)] pub struct EventObserverConfig { pub endpoint: String, pub events_keys: Vec, + pub timeout_ms: u64, } #[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd)] diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 864a964ee6..faf437c444 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -69,6 +69,7 @@ use super::config::{EventKeyType, EventObserverConfig}; #[derive(Debug, Clone)] struct EventObserver { endpoint: String, + timeout: Duration, } struct ReceiptPayloadInfo<'a> { @@ -335,8 +336,7 @@ impl EventObserver { .parse() .unwrap_or(PeerHost::DNS(host.to_string(), port)); - let backoff = Duration::from_millis(1000); // 1 second - + let mut backoff = Duration::from_millis(100); loop { let mut request = StacksHttpRequest::new_for_peer( peerhost.clone(), @@ -347,7 +347,7 @@ impl EventObserver { .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); request.add_header("Connection".into(), "close".into()); - match send_http_request(host, port, request, backoff) { + match send_http_request(host, port, request, self.timeout) { Ok(response) => { if response.preamble().status_code == 200 { debug!( @@ -368,6 +368,7 @@ impl EventObserver { } } sleep(backoff); + backoff *= 2; } } @@ -1406,6 +1407,7 @@ impl EventDispatcher { info!("Registering event observer at: {}", conf.endpoint); let event_observer = EventObserver { endpoint: conf.endpoint.clone(), + timeout: Duration::from_millis(conf.timeout_ms), }; let observer_index = self.registered_observers.len() as u16; @@ -1498,6 +1500,7 @@ mod test { fn build_block_processed_event() { let observer = EventObserver { endpoint: "nowhere".to_string(), + timeout: Duration::from_secs(3), }; let filtered_events = vec![]; @@ -1558,6 +1561,7 @@ mod test { fn test_block_processed_event_nakamoto() { let observer = EventObserver { endpoint: "nowhere".to_string(), + timeout: Duration::from_secs(3), }; let filtered_events = vec![]; @@ -1699,6 +1703,7 @@ mod test { let observer = EventObserver { endpoint: format!("127.0.0.1:{}", port), + timeout: Duration::from_secs(3), }; let payload = json!({"key": "value"}); @@ -1749,6 +1754,7 @@ mod test { let observer = EventObserver { endpoint: format!("127.0.0.1:{}", port), + timeout: Duration::from_secs(3), }; let payload = json!({"key": "value"}); @@ -1759,4 +1765,69 @@ mod test { rx.recv_timeout(Duration::from_secs(5)) .expect("Server did not receive request in time"); } + + #[test] + fn test_send_payload_timeout() { + let port = get_random_port(); + let timeout = Duration::from_secs(3); + + // Set up a channel to notify when the server has processed the request + let (tx, rx) = channel(); + + // Start a mock server in a separate thread + let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + thread::spawn(move || { + let mut attempt = 0; + let mut _request_holder = None; + while let Ok(request) = server.recv() { + attempt += 1; + if attempt == 1 { + debug!("Mock server received request attempt 1"); + // Do not reply, forcing the sender to timeout and retry, + // but don't drop the request or it will receive a 500 error, + _request_holder = Some(request); + } else { + debug!("Mock server received request attempt 2"); + // Simulate a successful response on the second attempt + let response = Response::from_string("HTTP/1.1 200 OK"); + request.respond(response).unwrap(); + + // Notify the test that the request was processed successfully + tx.send(()).unwrap(); + break; + } + } + }); + + let observer = EventObserver { + endpoint: format!("127.0.0.1:{}", port), + timeout, + }; + + let payload = json!({"key": "value"}); + + // Record the time before sending the payload + let start_time = Instant::now(); + + // Call the function being tested + observer.send_payload(&payload, "/test"); + + // Record the time after the function returns + let elapsed_time = start_time.elapsed(); + + println!("Elapsed time: {:?}", elapsed_time); + assert!( + elapsed_time >= timeout, + "Expected a timeout, but the function returned too quickly" + ); + + assert!( + elapsed_time < timeout + Duration::from_secs(1), + "Expected a timeout, but the function took too long" + ); + + // Wait for the server to process the request + rx.recv_timeout(Duration::from_secs(5)) + .expect("Server did not receive request in time"); + } } diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 2006abb05e..6ee9650987 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -112,6 +112,7 @@ fn test_exact_block_costs() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -338,6 +339,7 @@ fn test_dynamic_db_method_costs() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -775,6 +777,7 @@ fn test_cost_limit_switch_version205() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -1032,6 +1035,7 @@ fn bigger_microblock_streams_in_2_05() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 2f74ffa770..dd350026fe 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -75,6 +75,7 @@ fn advance_to_2_1( conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); @@ -579,6 +580,7 @@ fn transition_fixes_bitcoin_rigidity() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); @@ -1476,6 +1478,7 @@ fn transition_removes_pox_sunset() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.push(InitialBalance { @@ -1791,6 +1794,7 @@ fn transition_empty_blocks() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let keychain = Keychain::default(conf.node.seed.clone()); @@ -4740,6 +4744,7 @@ fn trait_invocation_cross_epoch() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); epochs[1].end_height = epoch_2_05; @@ -4986,6 +4991,7 @@ fn test_v1_unlock_height_with_current_stackers() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); @@ -5251,6 +5257,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 289d09be64..8b5df5ddaf 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -138,6 +138,7 @@ fn disable_pox() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); @@ -671,6 +672,7 @@ fn pox_2_unlock_all() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 0452be8476..e3fa85dfc0 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -104,6 +104,7 @@ fn trait_invocation_behavior() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 3fc3b3d590..cdd0b01560 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -156,6 +156,7 @@ fn fix_to_pox_contract() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); @@ -795,6 +796,7 @@ fn verify_auto_unlock_behavior() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 5a45b35e86..dfddcb8464 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -87,6 +87,7 @@ fn microblocks_disabled() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.append(&mut initial_balances); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 049df9f83a..e16332744f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1436,6 +1436,7 @@ fn simple_neon_integration() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -1669,6 +1670,7 @@ fn flash_blocks_on_epoch_3() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -1936,6 +1938,7 @@ fn mine_multiple_per_tenure_integration() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -2150,6 +2153,7 @@ fn multiple_miners() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -2383,6 +2387,7 @@ fn correct_burn_outs() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -2706,6 +2711,7 @@ fn block_proposal_api_endpoint() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::BlockProposal], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -3078,6 +3084,7 @@ fn miner_writes_proposed_block_to_stackerdb() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -3193,6 +3200,7 @@ fn vote_for_aggregate_key_burn_op() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -3441,6 +3449,7 @@ fn follower_bootup() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -3764,6 +3773,7 @@ fn follower_bootup_across_multiple_cycles() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -3990,6 +4000,7 @@ fn burn_ops_integration_test() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -4593,6 +4604,7 @@ fn forked_tenure_is_ignored() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -5374,6 +5386,7 @@ fn nakamoto_attempt_time() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::BlockProposal], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -5671,6 +5684,7 @@ fn clarity_burn_state() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::MinedBlocks], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -5943,6 +5957,7 @@ fn signer_chainstate() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -6551,6 +6566,7 @@ fn continue_tenure_extend() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -6859,6 +6875,7 @@ fn check_block_times() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -7328,6 +7345,7 @@ fn check_block_info() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -7848,6 +7866,7 @@ fn check_block_info_rewards() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -8194,6 +8213,7 @@ fn mock_mining() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); @@ -8461,6 +8481,7 @@ fn utxo_check_on_startup_panic() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut epochs = NAKAMOTO_INTEGRATION_EPOCHS.to_vec(); @@ -8541,6 +8562,7 @@ fn utxo_check_on_startup_recover() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut epochs = NAKAMOTO_INTEGRATION_EPOCHS.to_vec(); @@ -8622,6 +8644,7 @@ fn v3_signer_api_endpoint() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::BlockProposal], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -8806,6 +8829,7 @@ fn skip_mining_long_tx() { naka_conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{observer_port}"), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 0294876931..1053852cb9 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1028,6 +1028,7 @@ fn bitcoind_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -1141,6 +1142,7 @@ fn confirm_unparsed_ongoing_ops() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -1472,6 +1474,7 @@ fn deep_contract() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -1576,6 +1579,7 @@ fn bad_microblock_pubkey() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -1661,6 +1665,7 @@ fn liquid_ustx_integration() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -1789,6 +1794,7 @@ fn lockup_integration() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -1905,6 +1911,7 @@ fn stx_transfer_btc_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.push(InitialBalance { @@ -2174,6 +2181,7 @@ fn stx_delegate_btc_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -2462,6 +2470,7 @@ fn stack_stx_burn_op_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -2867,6 +2876,7 @@ fn vote_for_aggregate_key_burn_op_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -3469,6 +3479,7 @@ fn microblock_fork_poison_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -3710,6 +3721,7 @@ fn microblock_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -4695,6 +4707,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -4891,6 +4904,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -5085,6 +5099,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -5351,6 +5366,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -5523,6 +5539,7 @@ fn block_replay_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -5655,6 +5672,7 @@ fn cost_voting_integration() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -5977,6 +5995,7 @@ fn mining_events_integration_test() { EventKeyType::MinedBlocks, EventKeyType::MinedMicroblocks, ], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -6244,6 +6263,7 @@ fn block_limit_hit_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -6499,6 +6519,7 @@ fn microblock_limit_hit_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -6649,6 +6670,7 @@ fn block_large_tx_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.push(InitialBalance { @@ -6787,6 +6809,7 @@ fn microblock_large_tx_integration_test_FLAKY() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances.push(InitialBalance { @@ -6925,6 +6948,7 @@ fn pox_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let first_bal = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -7470,6 +7494,7 @@ fn atlas_integration_test() { .insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf_follower_node.node.always_use_affirmation_maps = false; @@ -8010,6 +8035,7 @@ fn antientropy_integration_test() { .insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf_follower_node.node.mine_microblocks = true; @@ -9012,6 +9038,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -9192,6 +9219,7 @@ fn use_latest_tip_integration_test() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -9596,6 +9624,7 @@ fn test_problematic_txs_are_not_stored() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -9748,6 +9777,7 @@ fn spawn_follower_node( conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.initial_balances = initial_conf.initial_balances.clone(); @@ -9847,6 +9877,7 @@ fn test_problematic_blocks_are_not_mined() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -10204,6 +10235,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -10603,6 +10635,7 @@ fn test_problematic_microblocks_are_not_mined() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -10987,6 +11020,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -11331,6 +11365,7 @@ fn push_boot_receipts() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -11379,6 +11414,7 @@ fn run_with_custom_wallet() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); // custom wallet @@ -11979,6 +12015,7 @@ fn min_txs() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.miner.min_tx_count = 4; @@ -12085,6 +12122,7 @@ fn filter_txs_by_type() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.miner.min_tx_count = 4; @@ -12201,6 +12239,7 @@ fn filter_txs_by_origin() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); conf.miner.min_tx_count = 4; diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 454ccde780..5dcbc9a16a 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -649,6 +649,7 @@ fn setup_stx_btc_node ()>( EventKeyType::BlockProposal, EventKeyType::BurnchainBlocks, ], + timeout_ms: 1000, }); } @@ -663,6 +664,7 @@ fn setup_stx_btc_node ()>( EventKeyType::MinedBlocks, EventKeyType::BurnchainBlocks, ], + timeout_ms: 1000, }); // The signers need some initial balances in order to pay for epoch 2.5 transaction votes diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6bffea2749..1f946fd7e0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2981,6 +2981,7 @@ fn signer_set_rollover() { EventKeyType::BlockProposal, EventKeyType::BurnchainBlocks, ], + timeout_ms: 1000, }); } naka_conf.node.rpc_bind = rpc_bind.clone(); diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index e24b5c5c24..a4dca66ea8 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -116,6 +116,7 @@ fn test_stackerdb_load_store() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, }); let privks = vec![ @@ -249,6 +250,7 @@ fn test_stackerdb_event_observer() { conf.events_observers.insert(EventObserverConfig { endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), events_keys: vec![EventKeyType::StackerDBChunks], + timeout_ms: 1000, }); let privks = vec![ From 58f16356e43a11843be62fa77b1e46483ef9bd7b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 8 Oct 2024 22:42:36 -0400 Subject: [PATCH 1298/1400] feat: support re-sending events to event observers across restarts Record events to be sent to event observers in a new sqlite database so that in the event that the node is killed before successfully sending, they can be re-sent on restart. --- CHANGELOG.md | 1 + Cargo.lock | 244 ++++++++++- testnet/stacks-node/Cargo.toml | 2 + testnet/stacks-node/src/config.rs | 12 + testnet/stacks-node/src/event_dispatcher.rs | 416 +++++++++++++++++-- testnet/stacks-node/src/node.rs | 2 +- testnet/stacks-node/src/run_loop/nakamoto.rs | 2 +- testnet/stacks-node/src/run_loop/neon.rs | 2 +- 8 files changed, 619 insertions(+), 62 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ccb9b5cac..50d149b730 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-block-info?` removed - Added `/v3/signer/{signer_pubkey}/{reward_cycle}` endpoint - Added optional `timeout_ms` to `events_observer` configuration +- Added support for re-sending events to event observers across restarts ## [2.5.0.0.7] diff --git a/Cargo.lock b/Cargo.lock index dc27c931cc..227cd9d768 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -190,6 +190,16 @@ dependencies = [ "serde_json", ] +[[package]] +name = "assert-json-diff" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "async-attributes" version = "1.1.2" @@ -626,7 +636,7 @@ checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" name = "clarity" version = "0.0.1" dependencies = [ - "assert-json-diff", + "assert-json-diff 1.1.0", "hashbrown", "integer-sqrt", "lazy_static", @@ -652,6 +662,16 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +[[package]] +name = "colored" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +dependencies = [ + "lazy_static", + "windows-sys 0.48.0", +] + [[package]] name = "concurrent-queue" version = "2.4.0" @@ -1347,7 +1367,26 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", + "http 0.2.11", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.1.0", "indexmap", "slab", "tokio", @@ -1390,7 +1429,7 @@ dependencies = [ "base64 0.21.7", "bytes", "headers-core", - "http", + "http 0.2.11", "httpdate", "mime", "sha1 0.10.6", @@ -1402,7 +1441,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http", + "http 0.2.11", ] [[package]] @@ -1457,6 +1496,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.6" @@ -1464,7 +1514,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", + "http 0.2.11", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", "pin-project-lite", ] @@ -1512,9 +1585,9 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", - "http", - "http-body", + "h2 0.3.24", + "http 0.2.11", + "http-body 0.4.6", "httparse", "httpdate", "itoa", @@ -1526,6 +1599,26 @@ dependencies = [ "want", ] +[[package]] +name = "hyper" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", +] + [[package]] name = "hyper-rustls" version = "0.24.2" @@ -1533,13 +1626,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "rustls", "tokio", "tokio-rustls", ] +[[package]] +name = "hyper-util" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "hyper 1.4.1", + "pin-project-lite", + "tokio", +] + [[package]] name = "iana-time-zone" version = "0.1.60" @@ -1725,7 +1833,7 @@ checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ "bitflags 2.4.2", "libc", - "redox_syscall", + "redox_syscall 0.4.1", ] [[package]] @@ -1792,6 +1900,16 @@ version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + [[package]] name = "log" version = "0.4.20" @@ -1892,6 +2010,30 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "mockito" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b34bd91b9e5c5b06338d392463e1318d683cf82ec3d3af4014609be6e2108d" +dependencies = [ + "assert-json-diff 2.0.2", + "bytes", + "colored", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "log", + "rand 0.8.5", + "regex", + "serde_json", + "serde_urlencoded", + "similar", + "tokio", +] + [[package]] name = "multer" version = "2.1.0" @@ -1901,7 +2043,7 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http", + "http 0.2.11", "httparse", "log", "memchr", @@ -2023,6 +2165,29 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall 0.5.7", + "smallvec", + "windows-targets 0.52.0", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -2388,6 +2553,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +dependencies = [ + "bitflags 2.4.2", +] + [[package]] name = "redox_users" version = "0.4.4" @@ -2461,10 +2635,10 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", - "http", - "http-body", - "hyper", + "h2 0.3.24", + "http 0.2.11", + "http-body 0.4.6", + "hyper 0.14.28", "hyper-rustls", "ipnet", "js-sys", @@ -2739,6 +2913,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + [[package]] name = "sct" version = "0.7.1" @@ -2969,6 +3149,12 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "similar" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" + [[package]] name = "siphasher" version = "0.3.11" @@ -3080,7 +3266,7 @@ dependencies = [ name = "stacks-common" version = "0.0.1" dependencies = [ - "assert-json-diff", + "assert-json-diff 1.1.0", "chrono", "curve25519-dalek 2.0.0", "ed25519-dalek", @@ -3124,6 +3310,7 @@ dependencies = [ "lazy_static", "libc", "libsigner", + "mockito", "mutants", "pico-args", "rand 0.8.5", @@ -3140,6 +3327,7 @@ dependencies = [ "stacks-signer", "stackslib", "stx-genesis", + "tempfile", "tikv-jemallocator", "tiny_http", "tokio", @@ -3190,7 +3378,7 @@ dependencies = [ name = "stackslib" version = "0.0.1" dependencies = [ - "assert-json-diff", + "assert-json-diff 1.1.0", "chrono", "clarity", "criterion", @@ -3369,6 +3557,19 @@ dependencies = [ "libc", ] +[[package]] +name = "tempfile" +version = "3.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fcd239983515c23a32fb82099f97d0b11b8c72f654ed659363a95c3dad7a53" +dependencies = [ + "cfg-if 1.0.0", + "fastrand 2.0.1", + "once_cell", + "rustix 0.38.31", + "windows-sys 0.52.0", +] + [[package]] name = "term" version = "0.7.0" @@ -3558,6 +3759,7 @@ dependencies = [ "libc", "mio 0.8.10", "num_cpus", + "parking_lot", "pin-project-lite", "socket2 0.5.5", "windows-sys 0.48.0", @@ -3702,7 +3904,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http", + "http 0.2.11", "httparse", "log", "rand 0.8.5", @@ -3865,8 +4067,8 @@ dependencies = [ "futures-channel", "futures-util", "headers", - "http", - "hyper", + "http 0.2.11", + "hyper 0.14.28", "log", "mime", "mime_guess", diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 0b9b59a0e7..958820b491 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -50,6 +50,8 @@ tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } mutants = "0.0.3" tiny_http = "0.12.0" http-types = "2.12" +tempfile = "3.3" +mockito = "1.5" [[bin]] name = "stacks-node" diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index e90d610040..5a9be5ab80 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1188,6 +1188,18 @@ impl Config { }) } + /// Returns the path working directory path, and ensures it exists. + pub fn get_working_dir(&self) -> PathBuf { + let path = PathBuf::from(&self.node.working_dir); + fs::create_dir_all(&path).unwrap_or_else(|_| { + panic!( + "Failed to create working directory at {}", + path.to_string_lossy() + ) + }); + path + } + fn get_burnchain_path(&self) -> PathBuf { let mut path = PathBuf::from(&self.node.working_dir); path.push(&self.burnchain.mode); diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index faf437c444..8c6d66cb8f 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -16,6 +16,7 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; +use std::path::PathBuf; use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::Mutex; use std::thread::sleep; @@ -25,6 +26,7 @@ use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; +use rusqlite::{params, Connection}; use serde_json::json; use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::burn::operations::BlockstackOperationType; @@ -56,6 +58,7 @@ use stacks::net::http::HttpRequestContents; use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; use stacks::util::hash::to_hex; +use stacks::util_lib::db::Error as db_error; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; @@ -68,7 +71,12 @@ use super::config::{EventKeyType, EventObserverConfig}; #[derive(Debug, Clone)] struct EventObserver { + /// Path to the database where pending payloads are stored. If `None`, then + /// the database is not used and events are not recoverable across restarts. + db_path: Option, + /// URL to which events will be sent endpoint: String, + /// Timeout for sending events to this observer timeout: Duration, } @@ -314,21 +322,90 @@ impl RewardSetEventPayload { } impl EventObserver { - pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { + fn init_db(db_path: &str) -> Result { + let conn = Connection::open(db_path)?; + conn.execute( + "CREATE TABLE IF NOT EXISTS pending_payloads ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + url TEXT NOT NULL, + payload TEXT NOT NULL, + timeout INTEGER NOT NULL + )", + [], + )?; + Ok(conn) + } + + fn insert_payload( + conn: &Connection, + url: &str, + payload: &serde_json::Value, + timeout: Duration, + ) -> Result<(), db_error> { + let payload_text = payload.to_string(); + let timeout_ms: u64 = timeout.as_millis().try_into().expect("Timeout too large"); + conn.execute( + "INSERT INTO pending_payloads (url, payload, timeout) VALUES (?1, ?2, ?3)", + params![url, payload_text, timeout_ms], + )?; + Ok(()) + } + + fn get_pending_payloads( + conn: &Connection, + ) -> Result, db_error> { + let mut stmt = conn.prepare("SELECT id, url, payload, timeout FROM pending_payloads")?; + let payload_iter = stmt.query_and_then( + [], + |row| -> Result<(i64, String, serde_json::Value, u64), db_error> { + let id: i64 = row.get(0)?; + let url: String = row.get(1)?; + let payload_text: String = row.get(2)?; + let payload: serde_json::Value = serde_json::from_str(&payload_text) + .map_err(|e| db_error::SerializationError(e))?; + let timeout_ms: u64 = row.get(3)?; + Ok((id, url, payload, timeout_ms)) + }, + )?; + payload_iter.collect() + } + + fn delete_payload(conn: &Connection, id: i64) -> Result<(), db_error> { + conn.execute("DELETE FROM pending_payloads WHERE id = ?1", params![id])?; + Ok(()) + } + + fn process_pending_payloads(conn: &Connection) { + let pending_payloads = match Self::get_pending_payloads(conn) { + Ok(payloads) => payloads, + Err(e) => { + error!( + "Event observer: failed to retrieve pending payloads from database"; + "error" => ?e + ); + return; + } + }; + + for (id, url, payload, timeout_ms) in pending_payloads { + let timeout = Duration::from_millis(timeout_ms); + Self::send_payload_directly(&payload, &url, timeout); + if let Err(e) = Self::delete_payload(conn, id) { + error!( + "Event observer: failed to delete pending payload from database"; + "error" => ?e + ); + } + } + } + + fn send_payload_directly(payload: &serde_json::Value, full_url: &str, timeout: Duration) { debug!( - "Event dispatcher: Sending payload"; "url" => %path, "payload" => ?payload + "Event dispatcher: Sending payload"; "url" => %full_url, "payload" => ?payload ); - let url = { - let joined_components = if path.starts_with('/') { - format!("{}{}", &self.endpoint, path) - } else { - format!("{}/{}", &self.endpoint, path) - }; - let url = format!("http://{}", joined_components); - Url::parse(&url) - .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {} as a URL", url)) - }; + let url = Url::parse(full_url) + .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {} as a URL", full_url)); let host = url.host_str().expect("Invalid URL: missing host"); let port = url.port_or_known_default().unwrap_or(80); @@ -347,7 +424,7 @@ impl EventObserver { .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); request.add_header("Connection".into(), "close".into()); - match send_http_request(host, port, request, self.timeout) { + match send_http_request(host, port, request, timeout) { Ok(response) => { if response.preamble().status_code == 200 { debug!( @@ -372,6 +449,55 @@ impl EventObserver { } } + fn new(working_dir: Option, endpoint: String, timeout: Duration) -> Self { + let db_path = if let Some(mut db_path) = working_dir { + db_path.push("event_observers.sqlite"); + + Self::init_db( + db_path + .to_str() + .expect("Failed to convert chainstate path to string"), + ) + .expect("Failed to initialize database for event observer"); + Some(db_path) + } else { + None + }; + + EventObserver { + db_path, + endpoint, + timeout, + } + } + + /// Send the payload to the given URL. + /// Before sending this payload, any pending payloads in the database will be sent first. + pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { + // Construct the full URL + let url_str = if path.starts_with('/') { + format!("{}{}", &self.endpoint, path) + } else { + format!("{}/{}", &self.endpoint, path) + }; + let full_url = format!("http://{}", url_str); + + if let Some(db_path) = &self.db_path { + let conn = + Connection::open(db_path).expect("Failed to open database for event observer"); + + // Insert the new payload into the database + Self::insert_payload(&conn, &full_url, payload, self.timeout) + .expect("Failed to insert payload into event observer database"); + + // Process all pending payloads + Self::process_pending_payloads(&conn); + } else { + // No database, just send the payload + Self::send_payload_directly(payload, &full_url, self.timeout); + } + } + fn make_new_mempool_txs_payload(transactions: Vec) -> serde_json::Value { let raw_txs = transactions .into_iter() @@ -1403,12 +1529,13 @@ impl EventDispatcher { } } - pub fn register_observer(&mut self, conf: &EventObserverConfig) { + pub fn register_observer(&mut self, conf: &EventObserverConfig, working_dir: PathBuf) { info!("Registering event observer at: {}", conf.endpoint); - let event_observer = EventObserver { - endpoint: conf.endpoint.clone(), - timeout: Duration::from_millis(conf.timeout_ms), - }; + let event_observer = EventObserver::new( + Some(working_dir), + conf.endpoint.clone(), + Duration::from_millis(conf.timeout_ms), + ); let observer_index = self.registered_observers.len() as u16; @@ -1492,16 +1619,14 @@ mod test { use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksBlockId}; + use tempfile::tempdir; use tiny_http::{Method, Response, Server, StatusCode}; use super::*; #[test] fn build_block_processed_event() { - let observer = EventObserver { - endpoint: "nowhere".to_string(), - timeout: Duration::from_secs(3), - }; + let observer = EventObserver::new(None, "nowhere".to_string(), Duration::from_secs(3)); let filtered_events = vec![]; let block = StacksBlock::genesis_block(); @@ -1559,10 +1684,7 @@ mod test { #[test] fn test_block_processed_event_nakamoto() { - let observer = EventObserver { - endpoint: "nowhere".to_string(), - timeout: Duration::from_secs(3), - }; + let observer = EventObserver::new(None, "nowhere".to_string(), Duration::from_secs(3)); let filtered_events = vec![]; let mut block_header = NakamotoBlockHeader::empty(); @@ -1679,6 +1801,231 @@ mod test { listener.local_addr().unwrap().port() } + #[test] + fn test_init_db() { + let dir = tempdir().unwrap(); + let db_path = dir.path().join("test_init_db.sqlite"); + let db_path_str = db_path.to_str().unwrap(); + + // Call init_db + let conn_result = EventObserver::init_db(db_path_str); + assert!(conn_result.is_ok(), "Failed to initialize the database"); + + // Check that the database file exists + assert!(db_path.exists(), "Database file was not created"); + + // Check that the table exists + let conn = conn_result.unwrap(); + let mut stmt = conn + .prepare( + "SELECT name FROM sqlite_master WHERE type='table' AND name='pending_payloads'", + ) + .unwrap(); + let table_exists = stmt.exists([]).unwrap(); + assert!(table_exists, "Table 'pending_payloads' does not exist"); + } + + #[test] + fn test_insert_and_get_pending_payloads() { + let dir = tempdir().unwrap(); + let db_path = dir.path().join("test_payloads.sqlite"); + let db_path_str = db_path.to_str().unwrap(); + + let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + + let url = "http://example.com/api"; + let payload = json!({"key": "value"}); + let timeout = Duration::from_secs(5); + + // Insert payload + let insert_result = EventObserver::insert_payload(&conn, url, &payload, timeout); + assert!(insert_result.is_ok(), "Failed to insert payload"); + + // Get pending payloads + let pending_payloads = + EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + assert_eq!(pending_payloads.len(), 1, "Expected one pending payload"); + + let (_id, retrieved_url, retrieved_payload, timeout_ms) = &pending_payloads[0]; + assert_eq!(retrieved_url, url, "URL does not match"); + assert_eq!(retrieved_payload, &payload, "Payload does not match"); + assert_eq!( + *timeout_ms, + timeout.as_millis() as u64, + "Timeout does not match" + ); + } + + #[test] + fn test_delete_payload() { + let dir = tempdir().unwrap(); + let db_path = dir.path().join("test_delete_payload.sqlite"); + let db_path_str = db_path.to_str().unwrap(); + + let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + + let url = "http://example.com/api"; + let payload = json!({"key": "value"}); + let timeout = Duration::from_secs(5); + + // Insert payload + EventObserver::insert_payload(&conn, url, &payload, timeout) + .expect("Failed to insert payload"); + + // Get pending payloads + let pending_payloads = + EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + assert_eq!(pending_payloads.len(), 1, "Expected one pending payload"); + + let (id, _, _, _) = pending_payloads[0]; + + // Delete payload + let delete_result = EventObserver::delete_payload(&conn, id); + assert!(delete_result.is_ok(), "Failed to delete payload"); + + // Verify that the pending payloads list is empty + let pending_payloads = + EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + assert_eq!(pending_payloads.len(), 0, "Expected no pending payloads"); + } + + #[test] + fn test_process_pending_payloads() { + use mockito::Matcher; + + let dir = tempdir().unwrap(); + let db_path = dir.path().join("test_process_payloads.sqlite"); + let db_path_str = db_path.to_str().unwrap(); + + let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + + let payload = json!({"key": "value"}); + let timeout = Duration::from_secs(5); + + // Create a mock server + let mut server = mockito::Server::new(); + let _m = server + .mock("POST", "/api") + .match_header("content-type", Matcher::Regex("application/json.*".into())) + .match_body(Matcher::Json(payload.clone())) + .with_status(200) + .create(); + + let url = &format!("{}/api", &server.url()); + + // Insert payload + EventObserver::insert_payload(&conn, url, &payload, timeout) + .expect("Failed to insert payload"); + + // Process pending payloads + EventObserver::process_pending_payloads(&conn); + + // Verify that the pending payloads list is empty + let pending_payloads = + EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + assert_eq!(pending_payloads.len(), 0, "Expected no pending payloads"); + + // Verify that the mock was called + _m.assert(); + } + + #[test] + fn test_new_event_observer_with_db() { + let dir = tempdir().unwrap(); + let working_dir = dir.path().to_path_buf(); + + let endpoint = "http://example.com".to_string(); + let timeout = Duration::from_secs(5); + + let observer = EventObserver::new(Some(working_dir.clone()), endpoint.clone(), timeout); + + // Verify fields + assert_eq!(observer.endpoint, endpoint); + assert_eq!(observer.timeout, timeout); + + // Verify that the database was initialized + let mut db_path = working_dir; + db_path.push("event_observers.sqlite"); + assert!(db_path.exists(), "Database file was not created"); + } + + #[test] + fn test_new_event_observer_without_db() { + let endpoint = "http://example.com".to_string(); + let timeout = Duration::from_secs(5); + + let observer = EventObserver::new(None, endpoint.clone(), timeout); + + // Verify fields + assert_eq!(observer.endpoint, endpoint); + assert_eq!(observer.timeout, timeout); + assert!(observer.db_path.is_none(), "Expected db_path to be None"); + } + + #[test] + fn test_send_payload_with_db() { + use mockito::Matcher; + + let dir = tempdir().unwrap(); + let working_dir = dir.path().to_path_buf(); + let payload = json!({"key": "value"}); + + // Create a mock server + let mut server = mockito::Server::new(); + let _m = server + .mock("POST", "/test") + .match_header("content-type", Matcher::Regex("application/json.*".into())) + .match_body(Matcher::Json(payload.clone())) + .with_status(200) + .create(); + + let endpoint = server.url().strip_prefix("http://").unwrap().to_string(); + let timeout = Duration::from_secs(5); + + let observer = EventObserver::new(Some(working_dir.clone()), endpoint, timeout); + + // Call send_payload + observer.send_payload(&payload, "/test"); + + // Verify that the payload was sent and database is empty + _m.assert(); + + // Verify that the database is empty + let db_path = observer.db_path.unwrap(); + let db_path_str = db_path.to_str().unwrap(); + let conn = Connection::open(db_path_str).expect("Failed to open database"); + let pending_payloads = + EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + assert_eq!(pending_payloads.len(), 0, "Expected no pending payloads"); + } + + #[test] + fn test_send_payload_without_db() { + use mockito::Matcher; + + let timeout = Duration::from_secs(5); + let payload = json!({"key": "value"}); + + // Create a mock server + let mut server = mockito::Server::new(); + let _m = server + .mock("POST", "/test") + .match_header("content-type", Matcher::Regex("application/json.*".into())) + .match_body(Matcher::Json(payload.clone())) + .with_status(200) + .create(); + + let endpoint = server.url().strip_prefix("http://").unwrap().to_string(); + + let observer = EventObserver::new(None, endpoint, timeout); + + // Call send_payload + observer.send_payload(&payload, "/test"); + + // Verify that the payload was sent + _m.assert(); + } + #[test] fn test_send_payload_success() { let port = get_random_port(); @@ -1701,10 +2048,8 @@ mod test { tx.send(()).unwrap(); }); - let observer = EventObserver { - endpoint: format!("127.0.0.1:{}", port), - timeout: Duration::from_secs(3), - }; + let observer = + EventObserver::new(None, format!("127.0.0.1:{}", port), Duration::from_secs(3)); let payload = json!({"key": "value"}); @@ -1752,10 +2097,8 @@ mod test { } }); - let observer = EventObserver { - endpoint: format!("127.0.0.1:{}", port), - timeout: Duration::from_secs(3), - }; + let observer = + EventObserver::new(None, format!("127.0.0.1:{}", port), Duration::from_secs(3)); let payload = json!({"key": "value"}); @@ -1799,10 +2142,7 @@ mod test { } }); - let observer = EventObserver { - endpoint: format!("127.0.0.1:{}", port), - timeout, - }; + let observer = EventObserver::new(None, format!("127.0.0.1:{}", port), timeout); let payload = json!({"key": "value"}); diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 39095a51d5..1895912ba5 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -340,7 +340,7 @@ impl Node { let mut event_dispatcher = EventDispatcher::new(); for observer in &config.events_observers { - event_dispatcher.register_observer(observer); + event_dispatcher.register_observer(observer, config.get_working_dir()); } let burnchain_config = config.get_burnchain(); diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 6e3222d99d..04afdd79ee 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -93,7 +93,7 @@ impl RunLoop { let mut event_dispatcher = EventDispatcher::new(); for observer in config.events_observers.iter() { - event_dispatcher.register_observer(observer); + event_dispatcher.register_observer(observer, config.get_working_dir()); } Self { diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 331e7e597c..a18a61988b 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -236,7 +236,7 @@ impl RunLoop { let mut event_dispatcher = EventDispatcher::new(); for observer in config.events_observers.iter() { - event_dispatcher.register_observer(observer); + event_dispatcher.register_observer(observer, config.get_working_dir()); } Self { From 1a08c136875e884a1242a2f39caa5ae987cd3e8a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 9 Oct 2024 09:51:37 -0500 Subject: [PATCH 1299/1400] docs: set 3.0 primary testnet activation height --- testnet/stacks-node/conf/testnet-follower-conf.toml | 2 +- testnet/stacks-node/conf/testnet-miner-conf.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index de0973f2c7..54814c610c 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -62,4 +62,4 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2000701 +start_height = 56_457 diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml index 9b0d88ad42..39af98b091 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -73,4 +73,4 @@ start_height = 6 [[burnchain.epochs]] epoch_name = "3.0" -start_height = 2000701 +start_height = 56_457 From c3edb2073aca6279b6a120d885b50abe015985a4 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 9 Oct 2024 09:53:59 -0500 Subject: [PATCH 1300/1400] chore: bump peer version epoch to 3.0 --- stacks-common/src/libcommon.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index 5059f6f049..1a13aa02ed 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -84,7 +84,7 @@ pub mod consts { /// this should be updated to the latest network epoch version supported by /// this node. this will be checked by the `validate_epochs()` method. - pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_2_5 as u32; + pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_3_0 as u32; /// set the fourth byte of the peer version pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH; From 1c3302ac1a344c2dd0c23cc194278126ee7a27ff Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 9 Oct 2024 12:33:56 -0400 Subject: [PATCH 1301/1400] refactor: add `test_observer::register` --- testnet/stacks-node/src/tests/epoch_205.rs | 27 +- testnet/stacks-node/src/tests/epoch_21.rs | 47 +--- testnet/stacks-node/src/tests/epoch_22.rs | 7 +- testnet/stacks-node/src/tests/epoch_23.rs | 7 +- testnet/stacks-node/src/tests/epoch_24.rs | 14 +- testnet/stacks-node/src/tests/epoch_25.rs | 7 +- .../src/tests/nakamoto_integrations.rs | 174 ++---------- .../src/tests/neon_integrations.rs | 264 ++++-------------- testnet/stacks-node/src/tests/stackerdb.rs | 12 +- 9 files changed, 105 insertions(+), 454 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 6ee9650987..0ad7000631 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -109,11 +109,10 @@ fn test_exact_block_costs() { .collect(); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], - timeout_ms: 1000, - }); + test_observer::register( + &mut conf, + &[EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + ); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -336,11 +335,7 @@ fn test_dynamic_db_method_costs() { }; test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -774,11 +769,7 @@ fn test_cost_limit_switch_version205() { }); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -1032,11 +1023,7 @@ fn bigger_microblock_streams_in_2_05() { conf.burnchain.pox_2_activation = Some(10_003); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index dd350026fe..4490fa5b07 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -71,12 +71,7 @@ fn advance_to_2_1( conf.burnchain.peer_host = "localhost".to_string(); conf.initial_balances.append(&mut initial_balances); conf.miner.block_reward_recipient = block_reward_recipient; - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); epochs[1].end_height = epoch_2_05; @@ -577,11 +572,7 @@ fn transition_fixes_bitcoin_rigidity() { ]; conf.initial_balances.append(&mut initial_balances); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); epochs[1].end_height = epoch_2_05; @@ -1474,12 +1465,7 @@ fn transition_removes_pox_sunset() { let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { address: spender_addr.clone(), @@ -1790,12 +1776,7 @@ fn transition_empty_blocks() { conf.burnchain.epochs = Some(epochs); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let keychain = Keychain::default(conf.node.seed.clone()); let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -4741,11 +4722,7 @@ fn trait_invocation_cross_epoch() { amount: 200_000_000, }]; conf.initial_balances.append(&mut initial_balances); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); epochs[1].end_height = epoch_2_05; epochs[2].start_height = epoch_2_05; @@ -4987,12 +4964,7 @@ fn test_v1_unlock_height_with_current_stackers() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); @@ -5253,12 +5225,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 8b5df5ddaf..fecf5c4652 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -134,12 +134,7 @@ fn disable_pox() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index e3fa85dfc0..7d0a5216a0 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -100,12 +100,7 @@ fn trait_invocation_behavior() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index cdd0b01560..9c57a732d0 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -152,12 +152,7 @@ fn fix_to_pox_contract() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); @@ -792,12 +787,7 @@ fn verify_auto_unlock_behavior() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index dfddcb8464..345aec4557 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -83,12 +83,7 @@ fn microblocks_disabled() { conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index ae1257f933..b3d2959f9c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1432,12 +1432,7 @@ fn simple_neon_integration() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -1680,12 +1675,7 @@ fn flash_blocks_on_epoch_3() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -1948,12 +1938,7 @@ fn mine_multiple_per_tenure_integration() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -2163,12 +2148,7 @@ fn multiple_miners() { ); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -2397,12 +2377,7 @@ fn correct_burn_outs() { let signers = TestSigners::new(vec![sender_signer_sk]); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -2721,12 +2696,7 @@ fn block_proposal_api_endpoint() { // only subscribe to the block proposal events test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::BlockProposal], - timeout_ms: 1000, - }); + test_observer::register(&mut conf, &[EventKeyType::BlockProposal]); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -3094,12 +3064,10 @@ fn miner_writes_proposed_block_to_stackerdb() { let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], - timeout_ms: 1000, - }); + test_observer::register( + &mut naka_conf, + &[EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + ); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -3210,12 +3178,7 @@ fn vote_for_aggregate_key_burn_op() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -3459,12 +3422,7 @@ fn follower_bootup() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -3783,12 +3741,7 @@ fn follower_bootup_across_multiple_cycles() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -4010,12 +3963,7 @@ fn burn_ops_integration_test() { ); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -4614,12 +4562,10 @@ fn forked_tenure_is_ignored() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent, EventKeyType::MinedBlocks], - timeout_ms: 1000, - }); + test_observer::register( + &mut naka_conf, + &[EventKeyType::AnyEvent, EventKeyType::MinedBlocks], + ); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -5396,12 +5342,7 @@ fn nakamoto_attempt_time() { // only subscribe to the block proposal events test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::BlockProposal], - timeout_ms: 1000, - }); + test_observer::register(&mut naka_conf, &[EventKeyType::BlockProposal]); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -5694,12 +5635,7 @@ fn clarity_burn_state() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::MinedBlocks], - timeout_ms: 1000, - }); + test_observer::register(&mut naka_conf, &[EventKeyType::MinedBlocks]); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -5967,12 +5903,7 @@ fn signer_chainstate() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -6576,12 +6507,7 @@ fn continue_tenure_extend() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -6885,12 +6811,7 @@ fn check_block_times() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -7355,12 +7276,7 @@ fn check_block_info() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -7876,12 +7792,7 @@ fn check_block_info_rewards() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -8223,12 +8134,7 @@ fn mock_mining() { let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller @@ -8491,12 +8397,7 @@ fn utxo_check_on_startup_panic() { naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut epochs = NAKAMOTO_INTEGRATION_EPOCHS.to_vec(); let (last, rest) = epochs.split_last_mut().unwrap(); @@ -8572,12 +8473,7 @@ fn utxo_check_on_startup_recover() { naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut epochs = NAKAMOTO_INTEGRATION_EPOCHS.to_vec(); let (last, rest) = epochs.split_last_mut().unwrap(); @@ -8654,12 +8550,7 @@ fn v3_signer_api_endpoint() { // only subscribe to the block proposal events test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::BlockProposal], - timeout_ms: 1000, - }); + test_observer::register(&mut conf, &[EventKeyType::BlockProposal]); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -8839,12 +8730,7 @@ fn skip_mining_long_tx() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut naka_conf); let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 1053852cb9..38b34d124d 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -206,7 +206,9 @@ pub mod test_observer { use warp::Filter; use {tokio, warp}; + use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::{MinedBlockEvent, MinedMicroblockEvent, MinedNakamotoBlockEvent}; + use crate::Config; pub const EVENT_OBSERVER_PORT: u16 = 50303; @@ -631,6 +633,18 @@ pub mod test_observer { Err(format!("Missing the following burn blocks: {missing:?}")) } } + + pub fn register(config: &mut Config, event_keys: &[EventKeyType]) { + config.events_observers.insert(EventObserverConfig { + endpoint: format!("localhost:{EVENT_OBSERVER_PORT}"), + events_keys: event_keys.to_vec(), + timeout_ms: 1000, + }); + } + + pub fn register_any(config: &mut Config) { + self::register(config, &[EventKeyType::AnyEvent]); + } } const PANIC_TIMEOUT_SECS: u64 = 600; @@ -1024,12 +1038,7 @@ fn bitcoind_integration_test() { conf.burnchain.max_rbf = 1000000; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -1138,12 +1147,7 @@ fn confirm_unparsed_ongoing_ops() { conf.burnchain.max_rbf = 1000000; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -1470,12 +1474,7 @@ fn deep_contract() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -1575,12 +1574,7 @@ fn bad_microblock_pubkey() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -1661,12 +1655,7 @@ fn liquid_ustx_integration() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -1790,12 +1779,7 @@ fn lockup_integration() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -1908,11 +1892,7 @@ fn stx_transfer_btc_integration_test() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { address: spender_addr.clone(), @@ -2178,11 +2158,7 @@ fn stx_delegate_btc_integration_test() { conf.burnchain.pox_2_activation = Some(3); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -2467,11 +2443,7 @@ fn stack_stx_burn_op_test() { conf.burnchain.pox_2_activation = Some(3); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -2873,11 +2845,7 @@ fn vote_for_aggregate_key_burn_op_test() { conf.burnchain.pox_2_activation = Some(3); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -3475,12 +3443,7 @@ fn microblock_fork_poison_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -3717,12 +3680,7 @@ fn microblock_integration_test() { conf.node.wait_time_for_microblocks = 0; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -4704,11 +4662,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -4901,11 +4855,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -5096,11 +5046,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -5363,11 +5309,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { conf.burnchain.epochs = Some(epochs); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -5535,12 +5477,7 @@ fn block_replay_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -5668,12 +5605,7 @@ fn cost_voting_integration() { conf.node.wait_time_for_blocks = 1_000; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -5987,16 +5919,14 @@ fn mining_events_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![ + test_observer::register( + &mut conf, + &[ EventKeyType::AnyEvent, EventKeyType::MinedBlocks, EventKeyType::MinedMicroblocks, ], - timeout_ms: 1000, - }); + ); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -6259,12 +6189,7 @@ fn block_limit_hit_integration_test() { conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -6515,12 +6440,7 @@ fn microblock_limit_hit_integration_test() { conf.burnchain.pox_2_activation = Some(10_003); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -6666,12 +6586,7 @@ fn block_large_tx_integration_test() { let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { address: spender_addr.clone().into(), @@ -6805,12 +6720,7 @@ fn microblock_large_tx_integration_test_FLAKY() { let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { address: addr.clone().into(), @@ -6941,16 +6851,11 @@ fn pox_integration_test() { let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); + test_observer::register_any(&mut conf); // required for testing post-sunset behavior conf.node.always_use_affirmation_maps = false; - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); - let first_bal = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let second_bal = 2_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let third_bal = 2_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -9035,11 +8940,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value amount: 10000000000, }); test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -9215,12 +9116,7 @@ fn use_latest_tip_integration_test() { conf.node.microblock_frequency = 1_000; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -9620,12 +9516,7 @@ fn test_problematic_txs_are_not_stored() { conf.burnchain.ast_precheck_size_height = Some(0); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -9774,11 +9665,7 @@ fn spawn_follower_node( conf.burnchain.peer_version, ); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.initial_balances = initial_conf.initial_balances.clone(); conf.burnchain.epochs = initial_conf.burnchain.epochs.clone(); @@ -9873,12 +9760,7 @@ fn test_problematic_blocks_are_not_mined() { conf.burnchain.ast_precheck_size_height = Some(210); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -10231,12 +10113,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { conf.burnchain.ast_precheck_size_height = Some(210); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -10631,12 +10508,7 @@ fn test_problematic_microblocks_are_not_mined() { conf.node.wait_time_for_microblocks = 0; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -11016,12 +10888,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { conf.connection_options.inv_sync_interval = 3; test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -11362,11 +11229,7 @@ fn push_boot_receipts() { } let (mut conf, _) = neon_integration_test_conf(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -11411,11 +11274,7 @@ fn run_with_custom_wallet() { } let (mut conf, _) = neon_integration_test_conf(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); // custom wallet conf.burnchain.wallet_name = "test_with_custom_wallet".to_string(); @@ -12011,12 +11870,7 @@ fn min_txs() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; @@ -12118,12 +11972,7 @@ fn filter_txs_by_type() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; @@ -12235,12 +12084,7 @@ fn filter_txs_by_origin() { let (mut conf, _miner_account) = neon_integration_test_conf(); test_observer::spawn(); - - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index a4dca66ea8..fbc47e0c3c 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -113,11 +113,7 @@ fn test_stackerdb_load_store() { } let (mut conf, _) = neon_integration_test_conf(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - }); + test_observer::register_any(&mut conf); let privks = vec![ // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R @@ -247,11 +243,7 @@ fn test_stackerdb_event_observer() { } let (mut conf, _) = neon_integration_test_conf(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::StackerDBChunks], - timeout_ms: 1000, - }); + test_observer::register(&mut conf, &[EventKeyType::StackerDBChunks]); let privks = vec![ // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R From b8432739a3368be4f9d517f9697bc4c5ffb76716 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 9 Oct 2024 10:02:46 -0700 Subject: [PATCH 1302/1400] fix: use O(n) instead of mn when checking pox bitvec --- stackslib/src/chainstate/nakamoto/mod.rs | 28 ++++++++++++++---------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index a8708b0a8d..e000c9c582 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -3810,6 +3810,15 @@ impl NakamotoChainState { active_reward_set: &RewardSet, ) -> Result<(), ChainstateError> { if !tenure_block_commit.treatment.is_empty() { + let address_to_indeces: HashMap<_, Vec<_>> = active_reward_set + .rewarded_addresses + .iter() + .enumerate() + .fold(HashMap::new(), |mut map, (ix, addr)| { + map.entry(addr).or_insert_with(Vec::new).push(ix); + map + }); + // our block commit issued a punishment, check the reward set and bitvector // to ensure that this was valid. for treated_addr in tenure_block_commit.treatment.iter() { @@ -3820,24 +3829,19 @@ impl NakamotoChainState { } // otherwise, we need to find the indices in the rewarded_addresses // corresponding to this address. - let address_indices = active_reward_set - .rewarded_addresses - .iter() - .enumerate() - .filter_map(|(ix, addr)| { - if addr == treated_addr.deref() { - Some(ix) - } else { - None - } - }); + let empty_vec = vec![]; + let address_indices = address_to_indeces + .get(treated_addr.deref()) + .unwrap_or(&empty_vec); + // if any of them are 0, punishment is okay. // if all of them are 1, punishment is not okay. // if all of them are 0, *must* have punished let bitvec_values: Result, ChainstateError> = address_indices + .iter() .map( |ix| { - let ix = u16::try_from(ix) + let ix = u16::try_from(*ix) .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?; let bitvec_value = block_bitvec.get(ix) .unwrap_or_else(|| { From 8dfd1cdcbac1aa1b13d19f87381584b68bba8a2b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 9 Oct 2024 13:21:33 -0700 Subject: [PATCH 1303/1400] Do not error if block submit fails in mock mining case in Nakamoto Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/relayer.rs | 29 ++++++++++--------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index dd6fd02a71..ef01f67f4b 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1066,20 +1066,23 @@ impl RelayerThread { // sign and broadcast let mut op_signer = self.keychain.generate_op_signer(); - let txid = self - .bitcoin_controller - .submit_operation( - last_committed.get_epoch_id().clone(), - BlockstackOperationType::LeaderBlockCommit( - last_committed.get_block_commit().clone(), - ), - &mut op_signer, - 1, - ) - .map_err(|e| { + let res = self.bitcoin_controller.submit_operation( + last_committed.get_epoch_id().clone(), + BlockstackOperationType::LeaderBlockCommit(last_committed.get_block_commit().clone()), + &mut op_signer, + 1, + ); + let txid = match res { + Ok(txid) => txid, + Err(e) => { + if self.config.node.mock_mining { + debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); + return Ok(()); + } warn!("Failed to submit block-commit bitcoin transaction: {e}"); - NakamotoNodeError::BurnchainSubmissionFailed(e) - })?; + return Err(NakamotoNodeError::BurnchainSubmissionFailed(e)); + } + }; info!( "Relayer: Submitted block-commit"; From e7a8fff17ca1799aae44f11e3ae6f4647bd0a623 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 10 Oct 2024 09:40:36 -0700 Subject: [PATCH 1304/1400] Continually get stacker_set in FIRST block of prepare phase and set pox_sync_sample_secs to positive int Signed-off-by: Jacinta Ferrant --- .../src/tests/nakamoto_integrations.rs | 38 ++++++++++++------- .../src/tests/neon_integrations.rs | 11 ------ 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5868111047..64f2960c68 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -255,7 +255,7 @@ pub fn check_nakamoto_empty_block_heuristics() { } } -pub fn get_stacker_set(http_origin: &str, cycle: u64) -> GetStackersResponse { +pub fn get_stacker_set(http_origin: &str, cycle: u64) -> Result { let client = reqwest::blocking::Client::new(); let path = format!("{http_origin}/v3/stacker_set/{cycle}"); let res = client @@ -263,10 +263,9 @@ pub fn get_stacker_set(http_origin: &str, cycle: u64) -> GetStackersResponse { .send() .unwrap() .json::() - .unwrap(); + .map_err(|e| format!("{e}"))?; info!("Stacker set response: {res}"); - let res = serde_json::from_value(res).unwrap(); - res + serde_json::from_value(res).map_err(|e| format!("{e}")) } pub fn get_stackerdb_slot_version( @@ -886,19 +885,21 @@ pub fn boot_to_epoch_3( signers.signer_keys = signer_sks.to_vec(); } - let prepare_phase_start = btc_regtest_controller + // the reward set is generally calculated in the first block of the prepare phase hence the + 1 + let reward_set_calculation = btc_regtest_controller .get_burnchain() .pox_constants .prepare_phase_start( btc_regtest_controller.get_burnchain().first_block_height, reward_cycle, - ); + ) + + 1; // Run until the prepare phase run_until_burnchain_height( btc_regtest_controller, &blocks_processed, - prepare_phase_start, + reward_set_calculation, &naka_conf, ); @@ -909,7 +910,11 @@ pub fn boot_to_epoch_3( let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) .expect("Failed to serialize aggregate public key"); let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); - let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); + wait_for(30, || { + Ok(get_stacker_set(&http_origin, reward_cycle + 1).is_ok()) + }) + .expect("Timed out waiting for stacker set"); + let signer_set = get_stacker_set(&http_origin, reward_cycle + 1).unwrap(); // Vote on the aggregate public key for signer_sk in signer_sks_unique.values() { let signer_index = @@ -1040,19 +1045,21 @@ pub fn boot_to_pre_epoch_3_boundary( signers.signer_keys = signer_sks.to_vec(); } - let prepare_phase_start = btc_regtest_controller + // the reward set is generally calculated in the first block of the prepare phase hence the + 1 + let reward_set_calculation = btc_regtest_controller .get_burnchain() .pox_constants .prepare_phase_start( btc_regtest_controller.get_burnchain().first_block_height, reward_cycle, - ); + ) + + 1; // Run until the prepare phase run_until_burnchain_height( btc_regtest_controller, &blocks_processed, - prepare_phase_start, + reward_set_calculation, &naka_conf, ); @@ -1063,7 +1070,11 @@ pub fn boot_to_pre_epoch_3_boundary( let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) .expect("Failed to serialize aggregate public key"); let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); - let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); + wait_for(30, || { + Ok(get_stacker_set(&http_origin, reward_cycle + 1).is_ok()) + }) + .expect("Timed out waiting for stacker set"); + let signer_set = get_stacker_set(&http_origin, reward_cycle + 1).unwrap(); // Vote on the aggregate public key for signer_sk in signer_sks_unique.values() { let signer_index = @@ -2566,7 +2577,7 @@ fn correct_burn_outs() { info!("first_epoch_3_cycle: {:?}", first_epoch_3_cycle); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - let stacker_response = get_stacker_set(&http_origin, first_epoch_3_cycle); + let stacker_response = get_stacker_set(&http_origin, first_epoch_3_cycle).unwrap(); assert!(stacker_response.stacker_set.signers.is_some()); assert_eq!( stacker_response.stacker_set.signers.as_ref().unwrap().len(), @@ -8168,6 +8179,7 @@ fn mock_mining() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.node.pox_sync_sample_secs = 5; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 0294876931..143ad89b8f 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -53,7 +53,6 @@ use stacks::net::api::getaccount::AccountEntryResponse; use stacks::net::api::getcontractsrc::ContractSrcResponse; use stacks::net::api::getinfo::RPCPeerInfoData; use stacks::net::api::getpoxinfo::RPCPoxInfoData; -use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::gettransaction_unconfirmed::UnconfirmedTransactionResponse; use stacks::net::api::postblock::StacksBlockAcceptedData; use stacks::net::api::postfeerate::RPCFeeEstimateResponse; @@ -1437,16 +1436,6 @@ pub fn get_contract_src( } } -pub fn get_stacker_set(http_origin: &str, reward_cycle: u64) -> GetStackersResponse { - let client = reqwest::blocking::Client::new(); - let path = format!("{}/v3/stacker_set/{}", http_origin, reward_cycle); - let res = client.get(&path).send().unwrap(); - - info!("Got stacker_set response {:?}", &res); - let res = res.json::().unwrap(); - res -} - #[test] #[ignore] fn deep_contract() { From 08abd6a032738ce6db5c13c181ea7c32776f395d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 10 Oct 2024 14:03:03 -0400 Subject: [PATCH 1305/1400] feat: make `chain_id` configurable --- testnet/stacks-node/src/config.rs | 559 +++++++++++++++++------------- 1 file changed, 320 insertions(+), 239 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 3852bf4224..35fb1c0231 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -106,240 +106,6 @@ pub struct LegacyMstxConfigFile { pub mstx_balance: Option>, } -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_config_file() { - assert_eq!( - format!("Invalid path: No such file or directory (os error 2)"), - ConfigFile::from_path("some_path").unwrap_err() - ); - assert_eq!( - format!("Invalid toml: unexpected character found: `/` at line 1 column 1"), - ConfigFile::from_str("//[node]").unwrap_err() - ); - assert!(ConfigFile::from_str("").is_ok()); - } - - #[test] - fn test_config() { - assert_eq!( - format!("node.seed should be a hex encoded string"), - Config::from_config_file( - ConfigFile::from_str( - r#" - [node] - seed = "invalid-hex-value" - "#, - ) - .unwrap(), - false - ) - .unwrap_err() - ); - - assert_eq!( - format!("node.local_peer_seed should be a hex encoded string"), - Config::from_config_file( - ConfigFile::from_str( - r#" - [node] - local_peer_seed = "invalid-hex-value" - "#, - ) - .unwrap(), - false - ) - .unwrap_err() - ); - - let expected_err_prefix = - "Invalid burnchain.peer_host: failed to lookup address information:"; - let actual_err_msg = Config::from_config_file( - ConfigFile::from_str( - r#" - [burnchain] - peer_host = "bitcoin2.blockstack.com" - "#, - ) - .unwrap(), - false, - ) - .unwrap_err(); - assert_eq!( - expected_err_prefix, - &actual_err_msg[..expected_err_prefix.len()] - ); - - assert!(Config::from_config_file(ConfigFile::from_str("").unwrap(), false).is_ok()); - } - - #[test] - fn should_load_legacy_mstx_balances_toml() { - let config = ConfigFile::from_str( - r#" - [[ustx_balance]] - address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" - amount = 10000000000000000 - - [[ustx_balance]] - address = "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" - amount = 10000000000000000 - - [[mstx_balance]] # legacy property name - address = "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" - amount = 10000000000000000 - - [[mstx_balance]] # legacy property name - address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" - amount = 10000000000000000 - "#, - ); - let config = config.unwrap(); - assert!(config.ustx_balance.is_some()); - let balances = config - .ustx_balance - .expect("Failed to parse stx balances from toml"); - assert_eq!(balances.len(), 4); - assert_eq!( - balances[0].address, - "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" - ); - assert_eq!( - balances[1].address, - "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" - ); - assert_eq!( - balances[2].address, - "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" - ); - assert_eq!( - balances[3].address, - "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" - ); - } - - #[test] - fn should_load_auth_token() { - let config = Config::from_config_file( - ConfigFile::from_str( - r#" - [connection_options] - auth_token = "password" - "#, - ) - .unwrap(), - false, - ) - .expect("Expected to be able to parse block proposal token from file"); - - assert_eq!( - config.connection_options.auth_token, - Some("password".to_string()) - ); - } - - #[test] - fn should_load_affirmation_map() { - let affirmation_string = "nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpa"; - let affirmation = - AffirmationMap::decode(affirmation_string).expect("Failed to decode affirmation map"); - let config = Config::from_config_file( - ConfigFile::from_str(&format!( - r#" - [[burnchain.affirmation_overrides]] - reward_cycle = 413 - affirmation = "{affirmation_string}" - "# - )) - .expect("Expected to be able to parse config file from string"), - false, - ) - .expect("Expected to be able to parse affirmation map from file"); - - assert_eq!(config.burnchain.affirmation_overrides.len(), 1); - assert_eq!(config.burnchain.affirmation_overrides.get(&0), None); - assert_eq!( - config.burnchain.affirmation_overrides.get(&413), - Some(&affirmation) - ); - } - - #[test] - fn should_fail_to_load_invalid_affirmation_map() { - let bad_affirmation_string = "bad_map"; - let file = ConfigFile::from_str(&format!( - r#" - [[burnchain.affirmation_overrides]] - reward_cycle = 1 - affirmation = "{bad_affirmation_string}" - "# - )) - .expect("Expected to be able to parse config file from string"); - - assert!(Config::from_config_file(file, false).is_err()); - } - - #[test] - fn should_load_empty_affirmation_map() { - let config = Config::from_config_file( - ConfigFile::from_str(r#""#) - .expect("Expected to be able to parse config file from string"), - false, - ) - .expect("Expected to be able to parse affirmation map from file"); - - assert!(config.burnchain.affirmation_overrides.is_empty()); - } - - #[test] - fn should_include_xenon_default_affirmation_overrides() { - let config = Config::from_config_file( - ConfigFile::from_str( - r#" - [burnchain] - chain = "bitcoin" - mode = "xenon" - "#, - ) - .expect("Expected to be able to parse config file from string"), - false, - ) - .expect("Expected to be able to parse affirmation map from file"); - // Should default add xenon affirmation overrides - assert_eq!(config.burnchain.affirmation_overrides.len(), 5); - } - - #[test] - fn should_override_xenon_default_affirmation_overrides() { - let affirmation_string = "aaapnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpa"; - let affirmation = - AffirmationMap::decode(affirmation_string).expect("Failed to decode affirmation map"); - - let config = Config::from_config_file( - ConfigFile::from_str(&format!( - r#" - [burnchain] - chain = "bitcoin" - mode = "xenon" - - [[burnchain.affirmation_overrides]] - reward_cycle = 413 - affirmation = "{affirmation_string}" - "#, - )) - .expect("Expected to be able to parse config file from string"), - false, - ) - .expect("Expected to be able to parse affirmation map from file"); - // Should default add xenon affirmation overrides, but overwrite with the configured one above - assert_eq!(config.burnchain.affirmation_overrides.len(), 5); - assert_eq!(config.burnchain.affirmation_overrides[&413], affirmation); - } -} - impl ConfigFile { pub fn from_path(path: &str) -> Result { let content = fs::read_to_string(path).map_err(|e| format!("Invalid path: {}", &e))?; @@ -1554,8 +1320,9 @@ pub struct AffirmationOverride { #[derive(Clone, Deserialize, Default, Debug)] pub struct BurnchainConfigFile { pub chain: Option, - pub burn_fee_cap: Option, pub mode: Option, + pub chain_id: Option, + pub burn_fee_cap: Option, pub commit_anchor_block_within: Option, pub peer_host: Option, pub peer_port: Option, @@ -1702,10 +1469,22 @@ impl BurnchainConfigFile { let mut config = BurnchainConfig { chain: self.chain.unwrap_or(default_burnchain_config.chain), - chain_id: if is_mainnet { - CHAIN_ID_MAINNET - } else { - CHAIN_ID_TESTNET + chain_id: match self.chain_id { + Some(chain_id) => { + if is_mainnet && chain_id != CHAIN_ID_MAINNET { + return Err(format!( + "Attempted to run mainnet node with chain_id {chain_id}", + )); + } + chain_id + } + None => { + if is_mainnet { + CHAIN_ID_MAINNET + } else { + CHAIN_ID_TESTNET + } + } }, peer_version: if is_mainnet { PEER_VERSION_MAINNET @@ -1841,6 +1620,7 @@ impl BurnchainConfigFile { Ok(config) } } + #[derive(Clone, Debug)] pub struct NodeConfig { pub name: String, @@ -3022,3 +2802,304 @@ pub struct InitialBalanceFile { pub address: String, pub amount: u64, } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_config_file() { + assert_eq!( + format!("Invalid path: No such file or directory (os error 2)"), + ConfigFile::from_path("some_path").unwrap_err() + ); + assert_eq!( + format!("Invalid toml: unexpected character found: `/` at line 1 column 1"), + ConfigFile::from_str("//[node]").unwrap_err() + ); + assert!(ConfigFile::from_str("").is_ok()); + } + + #[test] + fn test_config() { + assert_eq!( + format!("node.seed should be a hex encoded string"), + Config::from_config_file( + ConfigFile::from_str( + r#" + [node] + seed = "invalid-hex-value" + "#, + ) + .unwrap(), + false + ) + .unwrap_err() + ); + + assert_eq!( + format!("node.local_peer_seed should be a hex encoded string"), + Config::from_config_file( + ConfigFile::from_str( + r#" + [node] + local_peer_seed = "invalid-hex-value" + "#, + ) + .unwrap(), + false + ) + .unwrap_err() + ); + + let expected_err_prefix = + "Invalid burnchain.peer_host: failed to lookup address information:"; + let actual_err_msg = Config::from_config_file( + ConfigFile::from_str( + r#" + [burnchain] + peer_host = "bitcoin2.blockstack.com" + "#, + ) + .unwrap(), + false, + ) + .unwrap_err(); + assert_eq!( + expected_err_prefix, + &actual_err_msg[..expected_err_prefix.len()] + ); + + assert!(Config::from_config_file(ConfigFile::from_str("").unwrap(), false).is_ok()); + } + + #[test] + fn should_load_legacy_mstx_balances_toml() { + let config = ConfigFile::from_str( + r#" + [[ustx_balance]] + address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" + amount = 10000000000000000 + + [[ustx_balance]] + address = "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" + amount = 10000000000000000 + + [[mstx_balance]] # legacy property name + address = "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" + amount = 10000000000000000 + + [[mstx_balance]] # legacy property name + address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" + amount = 10000000000000000 + "#, + ); + let config = config.unwrap(); + assert!(config.ustx_balance.is_some()); + let balances = config + .ustx_balance + .expect("Failed to parse stx balances from toml"); + assert_eq!(balances.len(), 4); + assert_eq!( + balances[0].address, + "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" + ); + assert_eq!( + balances[1].address, + "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" + ); + assert_eq!( + balances[2].address, + "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" + ); + assert_eq!( + balances[3].address, + "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" + ); + } + + #[test] + fn should_load_auth_token() { + let config = Config::from_config_file( + ConfigFile::from_str( + r#" + [connection_options] + auth_token = "password" + "#, + ) + .unwrap(), + false, + ) + .expect("Expected to be able to parse block proposal token from file"); + + assert_eq!( + config.connection_options.auth_token, + Some("password".to_string()) + ); + } + + #[test] + fn should_load_affirmation_map() { + let affirmation_string = "nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpa"; + let affirmation = + AffirmationMap::decode(affirmation_string).expect("Failed to decode affirmation map"); + let config = Config::from_config_file( + ConfigFile::from_str(&format!( + r#" + [[burnchain.affirmation_overrides]] + reward_cycle = 413 + affirmation = "{affirmation_string}" + "# + )) + .expect("Expected to be able to parse config file from string"), + false, + ) + .expect("Expected to be able to parse affirmation map from file"); + + assert_eq!(config.burnchain.affirmation_overrides.len(), 1); + assert_eq!(config.burnchain.affirmation_overrides.get(&0), None); + assert_eq!( + config.burnchain.affirmation_overrides.get(&413), + Some(&affirmation) + ); + } + + #[test] + fn should_fail_to_load_invalid_affirmation_map() { + let bad_affirmation_string = "bad_map"; + let file = ConfigFile::from_str(&format!( + r#" + [[burnchain.affirmation_overrides]] + reward_cycle = 1 + affirmation = "{bad_affirmation_string}" + "# + )) + .expect("Expected to be able to parse config file from string"); + + assert!(Config::from_config_file(file, false).is_err()); + } + + #[test] + fn should_load_empty_affirmation_map() { + let config = Config::from_config_file( + ConfigFile::from_str(r#""#) + .expect("Expected to be able to parse config file from string"), + false, + ) + .expect("Expected to be able to parse affirmation map from file"); + + assert!(config.burnchain.affirmation_overrides.is_empty()); + } + + #[test] + fn should_include_xenon_default_affirmation_overrides() { + let config = Config::from_config_file( + ConfigFile::from_str( + r#" + [burnchain] + chain = "bitcoin" + mode = "xenon" + "#, + ) + .expect("Expected to be able to parse config file from string"), + false, + ) + .expect("Expected to be able to parse affirmation map from file"); + // Should default add xenon affirmation overrides + assert_eq!(config.burnchain.affirmation_overrides.len(), 5); + } + + #[test] + fn should_override_xenon_default_affirmation_overrides() { + let affirmation_string = "aaapnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpa"; + let affirmation = + AffirmationMap::decode(affirmation_string).expect("Failed to decode affirmation map"); + + let config = Config::from_config_file( + ConfigFile::from_str(&format!( + r#" + [burnchain] + chain = "bitcoin" + mode = "xenon" + + [[burnchain.affirmation_overrides]] + reward_cycle = 413 + affirmation = "{affirmation_string}" + "#, + )) + .expect("Expected to be able to parse config file from string"), + false, + ) + .expect("Expected to be able to parse affirmation map from file"); + // Should default add xenon affirmation overrides, but overwrite with the configured one above + assert_eq!(config.burnchain.affirmation_overrides.len(), 5); + assert_eq!(config.burnchain.affirmation_overrides[&413], affirmation); + } + + #[test] + fn test_into_config_default_chain_id() { + // Helper function to create BurnchainConfigFile with mode and optional chain_id + fn make_burnchain_config_file(mainnet: bool, chain_id: Option) -> BurnchainConfigFile { + let mut config = BurnchainConfigFile::default(); + if mainnet { + config.mode = Some("mainnet".to_string()); + } + config.chain_id = chain_id; + config + } + let default_burnchain_config = BurnchainConfig::default(); + + // **Case 1a:** Should panic when `is_mainnet` is true and `chain_id` != `CHAIN_ID_MAINNET` + { + let config_file = make_burnchain_config_file(true, Some(CHAIN_ID_TESTNET)); + + let result = config_file.into_config_default(default_burnchain_config.clone()); + + assert!( + result.is_err(), + "Expected error when chain_id != CHAIN_ID_MAINNET on mainnet" + ); + } + + // **Case 1b:** Should not panic when `is_mainnet` is true and `chain_id` == `CHAIN_ID_MAINNET` + { + let config_file = make_burnchain_config_file(true, Some(CHAIN_ID_MAINNET)); + + let config = config_file + .into_config_default(default_burnchain_config.clone()) + .expect("Should not panic"); + assert_eq!(config.chain_id, CHAIN_ID_MAINNET); + } + + // **Case 1c:** Should not panic when `is_mainnet` is false; chain_id should be as provided + { + let chain_id = 123456; + let config_file = make_burnchain_config_file(false, Some(chain_id)); + + let config = config_file + .into_config_default(default_burnchain_config.clone()) + .expect("Should not panic"); + assert_eq!(config.chain_id, chain_id); + } + + // **Case 2a:** Should not panic when `chain_id` is None and `is_mainnet` is true + { + let config_file = make_burnchain_config_file(true, None); + + let config = config_file + .into_config_default(default_burnchain_config.clone()) + .expect("Should not panic"); + assert_eq!(config.chain_id, CHAIN_ID_MAINNET); + } + + // **Case 2b:** Should not panic when `chain_id` is None and `is_mainnet` is false + { + let config_file = make_burnchain_config_file(false, None); + + let config = config_file + .into_config_default(default_burnchain_config.clone()) + .expect("Should not panic"); + assert_eq!(config.chain_id, CHAIN_ID_TESTNET); + } + } +} From 8f9efb9e80871d93328afd3ffc3f6fad7d5078ea Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 10 Oct 2024 14:31:57 -0400 Subject: [PATCH 1306/1400] feat: check for unknown fields in config file Error if an unknown field is found in a node's config file. This is helpful to catch errors in the config file. --- testnet/stacks-node/src/config.rs | 127 ++++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 35fb1c0231..e203dadb16 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -89,6 +89,7 @@ const INV_REWARD_CYCLES_TESTNET: u64 = 6; const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1000; #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct ConfigFile { pub __path: Option, // Only used for config file reloads pub burnchain: Option, @@ -1318,6 +1319,7 @@ pub struct AffirmationOverride { } #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct BurnchainConfigFile { pub chain: Option, pub mode: Option, @@ -2200,6 +2202,7 @@ impl Default for MinerConfig { } #[derive(Clone, Default, Deserialize, Debug)] +#[serde(deny_unknown_fields)] pub struct ConnectionOptionsFile { pub inbox_maxlen: Option, pub outbox_maxlen: Option, @@ -2383,6 +2386,7 @@ impl ConnectionOptionsFile { } #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct NodeConfigFile { pub name: Option, pub seed: Option, @@ -2517,6 +2521,7 @@ impl NodeConfigFile { } #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct FeeEstimationConfigFile { pub cost_estimator: Option, pub fee_estimator: Option, @@ -2528,6 +2533,7 @@ pub struct FeeEstimationConfigFile { } #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct MinerConfigFile { pub first_attempt_time_ms: Option, pub subsequent_attempt_time_ms: Option, @@ -2670,6 +2676,7 @@ impl MinerConfigFile { } } #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct AtlasConfigFile { pub attachments_max_size: Option, pub max_uninstantiated_attachments: Option, @@ -2698,6 +2705,7 @@ impl AtlasConfigFile { } #[derive(Clone, Deserialize, Default, Debug, Hash, PartialEq, Eq, PartialOrd)] +#[serde(deny_unknown_fields)] pub struct EventObserverConfigFile { pub endpoint: String, pub events_keys: Vec, @@ -2798,6 +2806,7 @@ pub struct InitialBalance { } #[derive(Clone, Deserialize, Default, Debug)] +#[serde(deny_unknown_fields)] pub struct InitialBalanceFile { pub address: String, pub amount: u64, @@ -2873,6 +2882,124 @@ mod tests { assert!(Config::from_config_file(ConfigFile::from_str("").unwrap(), false).is_ok()); } + #[test] + fn test_deny_unknown_fields() { + { + let err = ConfigFile::from_str( + r#" + [node] + name = "test" + unknown_field = "test" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [burnchain] + chain_id = 0x00000500 + unknown_field = "test" + chain = "bitcoin" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [node] + rpc_bind = "0.0.0.0:20443" + unknown_field = "test" + p2p_bind = "0.0.0.0:20444" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [[ustx_balance]] + address = "ST3AM1A56AK2C1XAFJ4115ZSV26EB49BVQ10MGCS0" + amount = 10000000000000000 + unknown_field = "test" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [[events_observer]] + endpoint = "localhost:30000" + unknown_field = "test" + events_keys = ["stackerdb", "block_proposal", "burn_blocks"] + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [connection_options] + inbox_maxlen = 100 + outbox_maxlen = 200 + unknown_field = "test" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [fee_estimation] + cost_estimator = "foo" + unknown_field = "test" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [miner] + first_attempt_time_ms = 180_000 + unknown_field = "test" + subsequent_attempt_time_ms = 360_000 + "#, + ) + .unwrap_err(); + println!("{}", err); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + + { + let err = ConfigFile::from_str( + r#" + [atlas] + attachments_max_size = 100 + unknown_field = "test" + "#, + ) + .unwrap_err(); + assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); + } + } + #[test] fn should_load_legacy_mstx_balances_toml() { let config = ConfigFile::from_str( From 9b86c5080e29d69951302ced5b5b06db842f7226 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 10 Oct 2024 14:55:33 -0400 Subject: [PATCH 1307/1400] test: parse all example configs in unit tests --- testnet/stacks-node/src/config.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index e203dadb16..e1742f3465 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2814,6 +2814,8 @@ pub struct InitialBalanceFile { #[cfg(test)] mod tests { + use std::path::Path; + use super::*; #[test] @@ -3000,6 +3002,26 @@ mod tests { } } + #[test] + fn test_example_confs() { + // For each config file in the ../conf/ directory, we should be able to parse it + let conf_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("conf"); + println!("Reading config files from: {:?}", conf_dir); + let conf_files = fs::read_dir(conf_dir).unwrap(); + + for entry in conf_files { + let entry = entry.unwrap(); + let path = entry.path(); + if path.is_file() { + let file_name = path.file_name().unwrap().to_str().unwrap(); + if file_name.ends_with(".toml") { + let _config = ConfigFile::from_path(path.to_str().unwrap()).unwrap(); + debug!("Parsed config file: {}", file_name); + } + } + } + } + #[test] fn should_load_legacy_mstx_balances_toml() { let config = ConfigFile::from_str( From 788a14925d2079bbdf6e524d0320826e2f2d6600 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 10 Oct 2024 15:16:40 -0400 Subject: [PATCH 1308/1400] chore: remove `retry_count` in commented lines --- testnet/stacks-node/conf/local-follower-conf.toml | 1 - testnet/stacks-node/conf/mainnet-follower-conf.toml | 1 - testnet/stacks-node/conf/mocknet-follower-conf.toml | 1 - testnet/stacks-node/conf/regtest-follower-conf.toml | 1 - testnet/stacks-node/conf/testnet-follower-conf.toml | 1 - 5 files changed, 5 deletions(-) diff --git a/testnet/stacks-node/conf/local-follower-conf.toml b/testnet/stacks-node/conf/local-follower-conf.toml index c828c18373..8186b57f54 100644 --- a/testnet/stacks-node/conf/local-follower-conf.toml +++ b/testnet/stacks-node/conf/local-follower-conf.toml @@ -15,7 +15,6 @@ peer_port = 18444 # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] # endpoint = "localhost:3700" -# retry_count = 255 # events_keys = ["*"] [[ustx_balance]] diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 2ecbc80686..6f6bab70d8 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -16,5 +16,4 @@ peer_port = 8333 # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] # endpoint = "localhost:3700" -# retry_count = 255 # events_keys = ["*"] diff --git a/testnet/stacks-node/conf/mocknet-follower-conf.toml b/testnet/stacks-node/conf/mocknet-follower-conf.toml index 3cb9beb5d7..e9a0e7a643 100644 --- a/testnet/stacks-node/conf/mocknet-follower-conf.toml +++ b/testnet/stacks-node/conf/mocknet-follower-conf.toml @@ -13,7 +13,6 @@ mode = "mocknet" # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] # endpoint = "localhost:3700" -# retry_count = 255 # events_keys = ["*"] [[ustx_balance]] diff --git a/testnet/stacks-node/conf/regtest-follower-conf.toml b/testnet/stacks-node/conf/regtest-follower-conf.toml index a2a71c8acb..151446fbaf 100644 --- a/testnet/stacks-node/conf/regtest-follower-conf.toml +++ b/testnet/stacks-node/conf/regtest-follower-conf.toml @@ -17,7 +17,6 @@ peer_port = 18444 # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] # endpoint = "localhost:3700" -# retry_count = 255 # events_keys = ["*"] [[ustx_balance]] diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index cb23477b27..5fe717bfb1 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -17,7 +17,6 @@ peer_port = 18333 # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] # endpoint = "localhost:3700" -# retry_count = 255 # events_keys = ["*"] [[ustx_balance]] From bae605d0a0a1e401b56d5ac603b2b761ff126053 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 10 Oct 2024 11:40:47 -0700 Subject: [PATCH 1309/1400] Add pox_sync_sample_secs to follower_bootup_across_multiple_cycles and fix prom monitoring issues Signed-off-by: Jacinta Ferrant --- .../src/tests/nakamoto_integrations.rs | 65 +++++++++++-------- 1 file changed, 38 insertions(+), 27 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5868111047..34c60406b8 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3750,6 +3750,7 @@ fn follower_bootup_across_multiple_cycles() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.node.pox_sync_sample_secs = 5; naka_conf.burnchain.max_rbf = 10_000_000; let sender_sk = Secp256k1PrivateKey::new(); @@ -6009,15 +6010,18 @@ fn signer_chainstate() { .unwrap() .stacks_block_height; let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); - assert!(res.contains(&expected_result)); + wait_for(10, || { + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); + Ok(res.contains(&expected_result)) + }) + .expect("Failed waiting for prometheus metrics to update") } info!("Nakamoto miner started..."); @@ -6619,15 +6623,18 @@ fn continue_tenure_extend() { #[cfg(feature = "monitoring_prom")] { let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); - assert!(res.contains(&expected_result)); + wait_for(10, || { + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = format!("stacks_node_stacks_tip_height {block_height_pre_3_0}"); + Ok(res.contains(&expected_result)) + }) + .expect("Prometheus metrics did not update"); } info!("Nakamoto miner started..."); @@ -6815,15 +6822,19 @@ fn continue_tenure_extend() { #[cfg(feature = "monitoring_prom")] { let prom_http_origin = format!("http://{}", prom_bind); - let client = reqwest::blocking::Client::new(); - let res = client - .get(&prom_http_origin) - .send() - .unwrap() - .text() - .unwrap(); - let expected_result = format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); - assert!(res.contains(&expected_result)); + wait_for(10, || { + let client = reqwest::blocking::Client::new(); + let res = client + .get(&prom_http_origin) + .send() + .unwrap() + .text() + .unwrap(); + let expected_result = + format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); + Ok(res.contains(&expected_result)) + }) + .expect("Prometheus metrics did not update"); } coord_channel From a3f32303e4ef7ad112f2737e4d990f6c98796f1e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 10 Oct 2024 17:07:43 -0400 Subject: [PATCH 1310/1400] fix: handled deprecated `mstx_balance` correctly --- testnet/stacks-node/src/config.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index e1742f3465..7bdffa76b6 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -95,6 +95,8 @@ pub struct ConfigFile { pub burnchain: Option, pub node: Option, pub ustx_balance: Option>, + /// Deprecated: use `ustx_balance` instead + pub mstx_balance: Option>, pub events_observer: Option>, pub connection_options: Option, pub fee_estimation: Option, @@ -102,11 +104,6 @@ pub struct ConfigFile { pub atlas: Option, } -#[derive(Clone, Deserialize, Default)] -pub struct LegacyMstxConfigFile { - pub mstx_balance: Option>, -} - impl ConfigFile { pub fn from_path(path: &str) -> Result { let content = fs::read_to_string(path).map_err(|e| format!("Invalid path: {}", &e))?; @@ -118,13 +115,16 @@ impl ConfigFile { pub fn from_str(content: &str) -> Result { let mut config: ConfigFile = toml::from_str(content).map_err(|e| format!("Invalid toml: {}", e))?; - let legacy_config: LegacyMstxConfigFile = toml::from_str(content).unwrap(); - if let Some(mstx_balance) = legacy_config.mstx_balance { - warn!("'mstx_balance' inside toml config is deprecated, replace with 'ustx_balance'"); - config.ustx_balance = match config.ustx_balance { - Some(balance) => Some([balance, mstx_balance].concat()), - None => Some(mstx_balance), - }; + if let Some(mstx_balance) = config.mstx_balance.take() { + warn!("'mstx_balance' in the config is deprecated; please use 'ustx_balance' instead."); + match config.ustx_balance { + Some(ref mut ustx_balance) => { + ustx_balance.extend(mstx_balance); + } + None => { + config.ustx_balance = Some(mstx_balance); + } + } } Ok(config) } From 31bc50569aefc5e1184fa1aa4d5e9355e917eb03 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Oct 2024 21:49:55 -0400 Subject: [PATCH 1311/1400] chore: log neighbors unconditionally in tests --- stackslib/src/net/p2p.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index d7ea9684f3..eb224f3e80 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -4998,7 +4998,7 @@ impl PeerNetwork { /// Log our neighbors. /// Used for testing and debuggin fn log_neighbors(&mut self) { - if self.get_connection_opts().log_neighbors_freq == 0 { + if !cfg!(test) && self.get_connection_opts().log_neighbors_freq == 0 { return; } From d4a06198edfbfc464372045d774802cfc86d5abd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Oct 2024 21:50:09 -0400 Subject: [PATCH 1312/1400] feat: track stackerdb peer eviction time --- stackslib/src/net/stackerdb/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 40fbc7711a..57d1a427dc 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -443,6 +443,8 @@ pub struct StackerDBSync { rounds: u128, /// Round when we last pushed push_round: u128, + /// time we last deliberately evicted a peer + last_eviction_time: u64, } impl StackerDBSyncResult { From ce9a1611693c42c26cdf97e9bc2bffb82f6e13ef Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Oct 2024 21:50:29 -0400 Subject: [PATCH 1313/1400] fix: (1) connect_begin() succeeds when at least one replica is connected. Subsequent calls to connect_begin() in subsequent passes will connect and keep connected more and more peers. (2) Evict peers periodically so we get some churn. (3) Always, always, always try to send up to request_capacity messages (doing a full cycle through the push schedule). --- stackslib/src/net/stackerdb/sync.rs | 69 ++++++++++++++++++++++++----- 1 file changed, 57 insertions(+), 12 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 08e6e978ea..d6610c20fc 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -79,6 +79,7 @@ impl StackerDBSync { num_attempted_connections: 0, rounds: 0, push_round: 0, + last_eviction_time: get_epoch_time_secs(), }; dbsync.reset(None, config); dbsync @@ -217,10 +218,32 @@ impl StackerDBSync { self.expected_versions.clear(); self.downloaded_chunks.clear(); - // reset comms, but keep all connected replicas pinned + // reset comms, but keep all connected replicas pinned. + // Randomly evict one every so often. self.comms.reset(); if let Some(network) = network { - for naddr in self.replicas.iter() { + let mut eviction_index = None; + if self.last_eviction_time + 60 < get_epoch_time_secs() { + self.last_eviction_time = get_epoch_time_secs(); + if self.replicas.len() > 0 { + eviction_index = Some(thread_rng().gen::() % self.replicas.len()); + } + } + + let mut remove_naddr = None; + for (i, naddr) in self.replicas.iter().enumerate() { + if let Some(eviction_index) = eviction_index.as_ref() { + if *eviction_index == i { + debug!( + "{:?}: {}: don't reuse connection for replica {:?}", + network.get_local_peer(), + &self.smart_contract_id, + &naddr, + ); + remove_naddr = Some(naddr.clone()); + continue; + } + } if let Some(event_id) = network.get_event_id(&naddr.to_neighbor_key(network)) { self.comms.pin_connection(event_id); debug!( @@ -232,6 +255,9 @@ impl StackerDBSync { ); } } + if let Some(naddr) = remove_naddr.take() { + self.replicas.remove(&naddr); + } } // reload from config @@ -668,7 +694,8 @@ impl StackerDBSync { /// We might not be connected to any yet. /// Clears self.replicas, and fills in self.connected_replicas with already-connected neighbors /// Returns Ok(true) if we can proceed to sync - /// Returns Ok(false) if we have no known peers + /// Returns Ok(false) if we should try this again + /// Returns Err(NoSuchNeighbor) if we don't have anyone to talk to /// Returns Err(..) on DB query error pub fn connect_begin(&mut self, network: &mut PeerNetwork) -> Result { if self.replicas.len() == 0 { @@ -686,7 +713,7 @@ impl StackerDBSync { ); if self.replicas.len() == 0 { // nothing to do - return Ok(false); + return Err(net_error::NoSuchNeighbor); } let naddrs = mem::replace(&mut self.replicas, HashSet::new()); @@ -729,11 +756,12 @@ impl StackerDBSync { ); self.num_attempted_connections += 1; self.num_connections += 1; + self.connected_replicas.insert(naddr); } Ok(false) => { // need to retry - self.replicas.insert(naddr); self.num_attempted_connections += 1; + self.replicas.insert(naddr); } Err(_e) => { debug!( @@ -746,7 +774,7 @@ impl StackerDBSync { } } } - Ok(self.replicas.len() == 0) + Ok(self.connected_replicas.len() > 0) } /// Finish up connecting to our replicas. @@ -1154,7 +1182,8 @@ impl StackerDBSync { ); // fill up our comms with $capacity requests - for _i in 0..self.request_capacity { + let mut num_sent = 0; + for _i in 0..self.chunk_push_priorities.len() { if self.comms.count_inflight() >= self.request_capacity { break; } @@ -1173,6 +1202,9 @@ impl StackerDBSync { chunk_push.chunk_data.slot_id, chunk_push.chunk_data.slot_version, ); + + // next-prioritized chunk + cur_priority = (cur_priority + 1) % self.chunk_push_priorities.len(); continue; }; @@ -1213,6 +1245,11 @@ impl StackerDBSync { // next-prioritized chunk cur_priority = (cur_priority + 1) % self.chunk_push_priorities.len(); + + num_sent += 1; + if num_sent > self.request_capacity { + break; + } } self.next_chunk_push_priority = cur_priority; Ok(self @@ -1370,14 +1407,22 @@ impl StackerDBSync { let mut blocked = true; match self.state { StackerDBSyncState::ConnectBegin => { - let done = self.connect_begin(network)?; + let done = match self.connect_begin(network) { + Ok(done) => done, + Err(net_error::NoSuchNeighbor) => { + // nothing to do + self.state = StackerDBSyncState::Finished; + blocked = false; + false + } + Err(e) => { + return Err(e); + } + }; if done { self.state = StackerDBSyncState::ConnectFinish; - } else { - // no replicas; try again - self.state = StackerDBSyncState::Finished; + blocked = false; } - blocked = false; } StackerDBSyncState::ConnectFinish => { let done = self.connect_try_finish(network)?; From 96cf7d567cc5e046b1bfdc644ad67fac0424b097 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 10 Oct 2024 21:52:18 -0400 Subject: [PATCH 1314/1400] fix: fix broken unit test --- stackslib/src/net/stackerdb/tests/sync.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index 565a97f422..746a3f0963 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -183,7 +183,12 @@ fn load_stackerdb(peer: &TestPeer, idx: usize) -> Vec<(SlotMetadata, Vec)> { fn check_sync_results(network_sync: &NetworkResult) { for res in network_sync.stacker_db_sync_results.iter() { - assert!(res.num_connections >= res.num_attempted_connections); + assert!( + res.num_connections <= res.num_attempted_connections, + "{} < {}", + res.num_connections, + res.num_attempted_connections + ); } } From 6e0afa8b03e0ab1ea75c9df8a0ff25949b36db63 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 10 Oct 2024 13:10:34 -0700 Subject: [PATCH 1315/1400] Increase pox_sync_sample_secs to 30 Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 7 +++---- testnet/stacks-node/src/tests/signer/v0.rs | 8 ++++---- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b9145d298c..e25b7799a9 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2108,7 +2108,7 @@ fn multiple_miners() { let node_2_p2p = 51025; let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.node.pox_sync_sample_secs = 5; + naka_conf.node.pox_sync_sample_secs = 30; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); @@ -3728,7 +3728,7 @@ fn follower_bootup_across_multiple_cycles() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.node.pox_sync_sample_secs = 5; + naka_conf.node.pox_sync_sample_secs = 30; naka_conf.burnchain.max_rbf = 10_000_000; let sender_sk = Secp256k1PrivateKey::new(); @@ -3852,7 +3852,6 @@ fn follower_bootup_across_multiple_cycles() { follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); follower_conf.node.p2p_address = format!("{localhost}:{p2p_port}"); - follower_conf.node.pox_sync_sample_secs = 30; let node_info = get_chain_info(&naka_conf); follower_conf.node.add_bootstrap_node( @@ -8120,7 +8119,7 @@ fn mock_mining() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.node.pox_sync_sample_secs = 5; + naka_conf.node.pox_sync_sample_secs = 30; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 8c48eda5e8..d1ceedfebf 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1472,7 +1472,7 @@ fn multiple_miners() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 5; + config.node.pox_sync_sample_secs = 30; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -1767,7 +1767,7 @@ fn miner_forking() { config.node.local_peer_seed = btc_miner_1_seed.clone(); config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); - config.node.pox_sync_sample_secs = 5; + config.node.pox_sync_sample_secs = 30; config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { @@ -3444,7 +3444,7 @@ fn multiple_miners_with_nakamoto_blocks() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 5; + config.node.pox_sync_sample_secs = 30; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -3707,7 +3707,7 @@ fn partial_tenure_fork() { config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); - config.node.pox_sync_sample_secs = 5; + config.node.pox_sync_sample_secs = 30; config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); From 3b5101fb012ceeba6d1a2402788021e9aa23acd7 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 11 Oct 2024 08:21:50 -0400 Subject: [PATCH 1316/1400] chore: cleanup unused imports --- testnet/stacks-node/src/tests/epoch_205.rs | 2 +- testnet/stacks-node/src/tests/epoch_21.rs | 2 +- testnet/stacks-node/src/tests/epoch_23.rs | 2 +- testnet/stacks-node/src/tests/epoch_24.rs | 2 +- testnet/stacks-node/src/tests/epoch_25.rs | 2 +- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- testnet/stacks-node/src/tests/stackerdb.rs | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 0ad7000631..0cf567058d 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -25,7 +25,7 @@ use stacks_common::types::chainstate::{ use stacks_common::util::hash::hex_bytes; use stacks_common::util::sleep_ms; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{EventKeyType, InitialBalance}; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::{ diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 4490fa5b07..ba95cbd55a 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -35,7 +35,7 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::sleep_ms; use crate::burnchains::bitcoin_regtest_controller::UTXO; -use crate::config::{Config, EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{Config, InitialBalance}; use crate::neon::RunLoopCounter; use crate::operations::BurnchainOpSigner; use crate::stacks_common::address::AddressHashMode; diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 7d0a5216a0..8028e3b92c 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -22,7 +22,7 @@ use stacks::core; use stacks::core::STACKS_EPOCH_MAX; use stacks_common::util::sleep_ms; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::*; diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 9c57a732d0..8a959aaf87 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -35,7 +35,7 @@ use stacks_common::util::hash::{bytes_to_hex, hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::InitialBalance; use crate::stacks_common::codec::StacksMessageCodec; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::{ diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 345aec4557..be95a65003 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -21,7 +21,7 @@ use stacks::core; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::StacksPrivateKey; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::wait_for; use crate::tests::neon_integrations::{ diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b9145d298c..52ee1383e8 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -91,7 +91,7 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use super::bitcoin_regtest::BitcoinCoreController; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{EventKeyType, InitialBalance}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index fbc47e0c3c..f7089c3f33 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -25,7 +25,7 @@ use {reqwest, serde_json}; use super::bitcoin_regtest::BitcoinCoreController; use crate::burnchains::BurnchainController; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{EventKeyType, InitialBalance}; use crate::tests::neon_integrations::{ neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, }; From e3e656b993b619784e6f2e5685c278f508b69c74 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 11 Oct 2024 11:26:04 -0400 Subject: [PATCH 1317/1400] fix: log reward cycle of reward set, and use mod 0 indexing to locate the reward set to use to validate blocks (instead of the mod 1 indexing) --- stackslib/src/net/download/nakamoto/tenure.rs | 15 ++++++--------- .../download/nakamoto/tenure_downloader_set.rs | 6 ++++++ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 98f102969a..53f9105156 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -325,15 +325,12 @@ impl TenureStartEnd { wt_start.winning_block_id.clone(), wt_end.winning_block_id.clone(), rc, - downloader_block_height_to_reward_cycle( - pox_constants, - first_burn_height, - wt_start.burn_height, - ) - .expect(&format!( - "FATAL: tenure from before system start ({} <= {})", - wt_start.burn_height, first_burn_height - )), + pox_constants + .block_height_to_reward_cycle(first_burn_height, wt_start.burn_height) + .expect(&format!( + "FATAL: tenure from before system start ({} <= {})", + wt_start.burn_height, first_burn_height + )), wt.processed, ); tenure_start_end.fetch_end_block = true; diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 160bad309e..88fdf77c7a 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -407,6 +407,12 @@ impl NakamotoTenureDownloaderSet { continue; }; + info!("Download tenure {}", &ch; + "tenure_start_block" => %tenure_info.start_block_id, + "tenrue_end_block" => %tenure_info.end_block_id, + "tenure_start_reward_cycle" => tenure_info.start_reward_cycle, + "tenure_end_reward_cycle" => tenure_info.end_reward_cycle); + debug!( "Download tenure {} (start={}, end={}) (rc {},{})", &ch, From 04b9392dfbbfee7c63b9a1e5fdc1e013cda79b03 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 11 Oct 2024 11:26:41 -0400 Subject: [PATCH 1318/1400] fix: log getnakamotoinv nack remote peer --- stackslib/src/net/inv/nakamoto.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 8971a8230f..3f4fcb6165 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -679,6 +679,7 @@ impl NakamotoTenureInv { } StacksMessageType::Nack(nack_data) => { info!("{:?}: remote peer NACKed our GetNakamotoInv", network.get_local_peer(); + "remote_peer" => %self.neighbor_address, "error_code" => nack_data.error_code); if nack_data.error_code != NackErrorCodes::NoSuchBurnchainBlock { From 616345bf20a6e3f8cbe1f389a94a7db8eb8733c4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 11 Oct 2024 14:26:47 -0400 Subject: [PATCH 1319/1400] fix: typo --- stackslib/src/net/download/nakamoto/tenure_downloader_set.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 88fdf77c7a..49b32c2634 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -409,7 +409,7 @@ impl NakamotoTenureDownloaderSet { info!("Download tenure {}", &ch; "tenure_start_block" => %tenure_info.start_block_id, - "tenrue_end_block" => %tenure_info.end_block_id, + "tenure_end_block" => %tenure_info.end_block_id, "tenure_start_reward_cycle" => tenure_info.start_reward_cycle, "tenure_end_reward_cycle" => tenure_info.end_reward_cycle); From 787e3e9628f55e5fd0a4f35a8aea0eb63400e3c0 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 11 Oct 2024 14:24:34 -0400 Subject: [PATCH 1320/1400] fix: always use chain_id from config when building txs --- testnet/stacks-node/src/tests/epoch_205.rs | 147 +++-- testnet/stacks-node/src/tests/epoch_21.rs | 80 ++- testnet/stacks-node/src/tests/epoch_22.rs | 25 +- testnet/stacks-node/src/tests/epoch_23.rs | 19 + testnet/stacks-node/src/tests/epoch_24.rs | 12 + testnet/stacks-node/src/tests/epoch_25.rs | 18 +- testnet/stacks-node/src/tests/integrations.rs | 184 ++++-- testnet/stacks-node/src/tests/mempool.rs | 149 ++++- testnet/stacks-node/src/tests/mod.rs | 51 +- .../src/tests/nakamoto_integrations.rs | 247 ++++++-- .../src/tests/neon_integrations.rs | 589 +++++++++++++----- testnet/stacks-node/src/tests/signer/v0.rs | 222 +++++-- testnet/stacks-node/src/tests/stackerdb.rs | 18 +- 13 files changed, 1358 insertions(+), 403 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 0cf567058d..076a5f61f3 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -78,8 +78,14 @@ fn test_exact_block_costs() { (ok 1))) "; - let contract_publish_tx = - make_contract_publish(&spender_sk, 0, 210_000, contract_name, contract_content); + let contract_publish_tx = make_contract_publish( + &spender_sk, + 0, + 210_000, + conf.burnchain.chain_id, + contract_name, + contract_content, + ); // make txs that alternate between let txs: Vec<_> = (1..transactions_to_broadcast + 1) @@ -89,6 +95,7 @@ fn test_exact_block_costs() { &spender_sk, nonce, 200_000, + conf.burnchain.chain_id, &spender_addr_c32, contract_name, "db-get2", @@ -99,6 +106,7 @@ fn test_exact_block_costs() { &spender_sk, nonce, 200_000, + conf.burnchain.chain_id, &spender_addr_c32, contract_name, "db-get2", @@ -307,14 +315,22 @@ fn test_dynamic_db_method_costs() { amount: 200_000_000, }); - let contract_publish_tx = - make_contract_publish(&spender_sk, 0, 210_000, contract_name, contract_content); + let contract_publish_tx = make_contract_publish( + &spender_sk, + 0, + 210_000, + conf.burnchain.chain_id, + contract_name, + contract_content, + ); + let chain_id = conf.burnchain.chain_id; let make_db_get1_call = |nonce| { make_contract_call( &spender_sk, nonce, 200_000, + chain_id, &spender_addr_c32, contract_name, "db-get1", @@ -327,6 +343,7 @@ fn test_dynamic_db_method_costs() { &spender_sk, nonce, 200_000, + chain_id, &spender_addr_c32, contract_name, "db-get2", @@ -800,6 +817,7 @@ fn test_cost_limit_switch_version205() { &creator_sk, 0, 1100000, + conf.burnchain.chain_id, "increment-contract", &giant_contract, ), @@ -828,6 +846,7 @@ fn test_cost_limit_switch_version205() { &alice_sk, 0, 1000, + conf.burnchain.chain_id, &creator_addr.into(), "increment-contract", "increment-many", @@ -862,6 +881,7 @@ fn test_cost_limit_switch_version205() { &bob_sk, 0, 1000, + conf.burnchain.chain_id, &creator_addr.into(), "increment-contract", "increment-many", @@ -902,65 +922,6 @@ fn bigger_microblock_streams_in_2_05() { .collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let txs: Vec> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - // almost fills a whole block - make_contract_publish_microblock_only( - spender_sk, - 0, - 1049230, - &format!("large-{}", ix), - &format!(" - ;; a single one of these transactions consumes over half the runtime budget - (define-constant BUFF_TO_BYTE (list - 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f - 0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f - 0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f - 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f - 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f - 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f - 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f - 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f - 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f - 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f - 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf - 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf - 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf - 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf - 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef - 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff - )) - (define-private (crash-me-folder (input (buff 1)) (ctr uint)) - (begin - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (+ u1 ctr) - ) - ) - (define-public (crash-me (name (string-ascii 128))) - (begin - (fold crash-me-folder BUFF_TO_BYTE u0) - (print name) - (ok u0) - ) - ) - (begin - (crash-me \"{}\")) - ", - &format!("large-contract-{}", &ix) - ) - ) - }) - .collect(); - let (mut conf, miner_account) = neon_integration_test_conf(); for spender_addr in spender_addrs.iter() { @@ -1022,6 +983,66 @@ fn bigger_microblock_streams_in_2_05() { ]); conf.burnchain.pox_2_activation = Some(10_003); + let txs: Vec> = spender_sks + .iter() + .enumerate() + .map(|(ix, spender_sk)| { + // almost fills a whole block + make_contract_publish_microblock_only( + spender_sk, + 0, + 1049230, + conf.burnchain.chain_id, + &format!("large-{}", ix), + &format!(" + ;; a single one of these transactions consumes over half the runtime budget + (define-constant BUFF_TO_BYTE (list + 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f + 0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f + 0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f + 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f + 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f + 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f + 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f + 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f + 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f + 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f + 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf + 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf + 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf + 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf + 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef + 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff + )) + (define-private (crash-me-folder (input (buff 1)) (ctr uint)) + (begin + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (unwrap-panic (index-of BUFF_TO_BYTE input)) + (+ u1 ctr) + ) + ) + (define-public (crash-me (name (string-ascii 128))) + (begin + (fold crash-me-folder BUFF_TO_BYTE u0) + (print name) + (ok u0) + ) + ) + (begin + (crash-me \"{}\")) + ", + &format!("large-contract-{}", &ix) + ) + ) + }) + .collect(); + test_observer::spawn(); test_observer::register_any(&mut conf); diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index ba95cbd55a..8f6c466318 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -360,6 +360,7 @@ fn transition_adds_burn_block_height() { &spender_sk, 0, (2 * contract.len()) as u64, + conf.burnchain.chain_id, "test-burn-headers", contract, ); @@ -373,6 +374,7 @@ fn transition_adds_burn_block_height() { &spender_sk, 1, (2 * contract.len()) as u64, + conf.burnchain.chain_id, &spender_addr_c32, "test-burn-headers", "test-burn-headers", @@ -1107,6 +1109,7 @@ fn transition_adds_get_pox_addr_recipients() { &spender_sk, 0, 300, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -1147,6 +1150,7 @@ fn transition_adds_get_pox_addr_recipients() { &spender_sk, 0, 300, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -1184,6 +1188,7 @@ fn transition_adds_get_pox_addr_recipients() { &spender_sks[0], 1, (2 * contract.len()) as u64, + conf.burnchain.chain_id, "test-get-pox-addrs", contract, ); @@ -1209,6 +1214,7 @@ fn transition_adds_get_pox_addr_recipients() { &spender_sks[0], 2, (2 * contract.len()) as u64, + conf.burnchain.chain_id, &spender_addr_c32, "test-get-pox-addrs", "test-get-pox-addrs", @@ -1578,6 +1584,7 @@ fn transition_removes_pox_sunset() { &spender_sk, 0, 260, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -1640,6 +1647,7 @@ fn transition_removes_pox_sunset() { &spender_sk, 1, 260 * 2, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -2273,6 +2281,7 @@ fn test_pox_reorgs_three_flaps() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -2296,7 +2305,9 @@ fn test_pox_reorgs_three_flaps() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up @@ -2810,6 +2821,7 @@ fn test_pox_reorg_one_flap() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -2833,7 +2845,9 @@ fn test_pox_reorg_one_flap() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up @@ -3235,6 +3249,7 @@ fn test_pox_reorg_flap_duel() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -3258,7 +3273,9 @@ fn test_pox_reorg_flap_duel() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up @@ -3670,6 +3687,7 @@ fn test_pox_reorg_flap_reward_cycles() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -3693,7 +3711,9 @@ fn test_pox_reorg_flap_reward_cycles() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up @@ -4099,6 +4119,7 @@ fn test_pox_missing_five_anchor_blocks() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -4122,7 +4143,9 @@ fn test_pox_missing_five_anchor_blocks() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up @@ -4500,6 +4523,7 @@ fn test_sortition_divergence_pre_21() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -4523,7 +4547,9 @@ fn test_sortition_divergence_pre_21() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up @@ -4798,13 +4824,34 @@ fn trait_invocation_cross_epoch() { let tip_info = get_chain_info(&conf); assert_eq!(tip_info.burn_block_height, epoch_2_05 + 1); - let tx = make_contract_publish(&spender_sk, 0, 10_000, "simple-trait", trait_contract); + let tx = make_contract_publish( + &spender_sk, + 0, + 10_000, + conf.burnchain.chain_id, + "simple-trait", + trait_contract, + ); let trait_txid = submit_tx(&http_origin, &tx); - let tx = make_contract_publish(&spender_sk, 1, 10_000, "impl-simple", impl_contract); + let tx = make_contract_publish( + &spender_sk, + 1, + 10_000, + conf.burnchain.chain_id, + "impl-simple", + impl_contract, + ); let impl_txid = submit_tx(&http_origin, &tx); - let tx = make_contract_publish(&spender_sk, 2, 10_000, "use-simple", use_contract); + let tx = make_contract_publish( + &spender_sk, + 2, + 10_000, + conf.burnchain.chain_id, + "use-simple", + use_contract, + ); let use_txid = submit_tx(&http_origin, &tx); // mine the transactions and advance to epoch 2.1 @@ -4815,7 +4862,14 @@ fn trait_invocation_cross_epoch() { let tip_info = get_chain_info(&conf); assert_eq!(tip_info.burn_block_height, epoch_2_1 + 1); - let tx = make_contract_publish(&spender_sk, 3, 10_000, "invoke-simple", invoke_contract); + let tx = make_contract_publish( + &spender_sk, + 3, + 10_000, + conf.burnchain.chain_id, + "invoke-simple", + invoke_contract, + ); let invoke_txid = submit_tx(&http_origin, &tx); for _ in 0..2 { @@ -4826,6 +4880,7 @@ fn trait_invocation_cross_epoch() { &spender_sk, 4, 10_000, + conf.burnchain.chain_id, &spender_addr_c32, "invoke-simple", "invocation-1", @@ -4837,6 +4892,7 @@ fn trait_invocation_cross_epoch() { &spender_sk, 5, 10_000, + conf.burnchain.chain_id, &spender_addr_c32, "invoke-simple", "invocation-2", @@ -5042,6 +5098,7 @@ fn test_v1_unlock_height_with_current_stackers() { &spender_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -5078,6 +5135,7 @@ fn test_v1_unlock_height_with_current_stackers() { &spender_sk, 1, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -5306,6 +5364,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { &spender_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -5354,6 +5413,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { &spender_sk, 1, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index fecf5c4652..9bffca7c8a 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -227,6 +227,7 @@ fn disable_pox() { &spender_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -275,6 +276,7 @@ fn disable_pox() { &spender_sk, 1, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -293,6 +295,7 @@ fn disable_pox() { &spender_2_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -324,6 +327,7 @@ fn disable_pox() { &spender_sk, 2, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-increase", @@ -350,6 +354,7 @@ fn disable_pox() { &spender_sk, aborted_increase_nonce, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-increase", @@ -761,6 +766,7 @@ fn pox_2_unlock_all() { &spender_sk, 0, tx_fee, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -811,6 +817,7 @@ fn pox_2_unlock_all() { &spender_sk, 1, tx_fee, + conf.burnchain.chain_id, "unlock-height", "(define-public (unlock-height (x principal)) (ok (get unlock-height (stx-account x))))", ); @@ -820,6 +827,7 @@ fn pox_2_unlock_all() { &spender_sk, 2, tx_fee, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -839,6 +847,7 @@ fn pox_2_unlock_all() { &spender_2_sk, 0, tx_fee, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -869,6 +878,7 @@ fn pox_2_unlock_all() { &spender_sk, 3, tx_fee, + conf.burnchain.chain_id, &to_addr(&spender_sk), "unlock-height", "unlock-height", @@ -888,6 +898,7 @@ fn pox_2_unlock_all() { &spender_sk, 4, tx_fee, + conf.burnchain.chain_id, &to_addr(&spender_sk), "unlock-height", "unlock-height", @@ -977,7 +988,14 @@ fn pox_2_unlock_all() { ); // perform a transfer - let tx = make_stacks_transfer(&spender_sk, 5, tx_fee, &spender_3_addr, 1_000_000); + let tx = make_stacks_transfer( + &spender_sk, + 5, + tx_fee, + conf.burnchain.chain_id, + &spender_3_addr, + 1_000_000, + ); info!("Submit stack transfer tx to {:?}", &http_origin); submit_tx(&http_origin, &tx); @@ -1508,6 +1526,7 @@ fn test_pox_reorg_one_flap() { pk, 0, 1360, + conf_template.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -1531,7 +1550,9 @@ fn test_pox_reorg_one_flap() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .map(|(i, pk)| { + make_random_tx_chain(pk, (25 * i) as u64, conf_template.burnchain.chain_id, false) + }) .collect(); // everyone locks up diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 8028e3b92c..2355f7521d 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -180,6 +180,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, "simple-trait", trait_contract, ); @@ -190,6 +191,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, "impl-simple", impl_contract, ); @@ -200,6 +202,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, "use-simple", use_contract, ); @@ -210,6 +213,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, "invoke-simple", invoke_contract, ); @@ -241,6 +245,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-1", @@ -253,6 +258,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-2", @@ -274,6 +280,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-1", @@ -286,6 +293,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-2", @@ -312,6 +320,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-1", @@ -324,6 +333,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-2", @@ -344,6 +354,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, "wrap-simple", wrapper_contract, ); @@ -359,6 +370,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "wrap-simple", "invocation-1", @@ -371,6 +383,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "wrap-simple", "invocation-2", @@ -397,6 +410,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "wrap-simple", "invocation-1", @@ -409,6 +423,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "wrap-simple", "invocation-2", @@ -431,6 +446,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "wrap-simple", "invocation-1", @@ -443,6 +459,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "wrap-simple", "invocation-2", @@ -464,6 +481,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-1", @@ -476,6 +494,7 @@ fn trait_invocation_behavior() { &spender_sk, spender_nonce, fee_amount, + conf.burnchain.chain_id, &contract_addr, "invoke-simple", "invocation-2", diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 8a959aaf87..26ad007ca7 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -249,6 +249,7 @@ fn fix_to_pox_contract() { &spender_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -298,6 +299,7 @@ fn fix_to_pox_contract() { &spender_sk, 1, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -332,6 +334,7 @@ fn fix_to_pox_contract() { &spender_sk, aborted_increase_nonce_2_2, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-increase", @@ -357,6 +360,7 @@ fn fix_to_pox_contract() { &spender_sk, aborted_increase_nonce_2_3, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-increase", @@ -395,6 +399,7 @@ fn fix_to_pox_contract() { &spender_sk, 4, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-3", "stack-stx", @@ -414,6 +419,7 @@ fn fix_to_pox_contract() { &spender_2_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-3", "stack-stx", @@ -445,6 +451,7 @@ fn fix_to_pox_contract() { &spender_sk, 5, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-3", "stack-increase", @@ -889,6 +896,7 @@ fn verify_auto_unlock_behavior() { &spender_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -938,6 +946,7 @@ fn verify_auto_unlock_behavior() { &spender_sk, 1, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -1023,6 +1032,7 @@ fn verify_auto_unlock_behavior() { &spender_sk, 2, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-3", "stack-stx", @@ -1042,6 +1052,7 @@ fn verify_auto_unlock_behavior() { &spender_2_sk, 0, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-3", "stack-stx", @@ -1123,6 +1134,7 @@ fn verify_auto_unlock_behavior() { &spender_sk, 3, 3000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-3", "stack-increase", diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index be95a65003..6af1bee626 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -162,7 +162,14 @@ fn microblocks_disabled() { // push us to block 205 next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let tx = make_stacks_transfer_mblock_only(&spender_1_sk, 0, 500, &spender_2_addr, 500); + let tx = make_stacks_transfer_mblock_only( + &spender_1_sk, + 0, + 500, + conf.burnchain.chain_id, + &spender_2_addr, + 500, + ); submit_tx(&http_origin, &tx); // wait until just before epoch 2.5 @@ -194,7 +201,14 @@ fn microblocks_disabled() { ); assert_eq!(account.nonce, 1); - let tx = make_stacks_transfer_mblock_only(&spender_1_sk, 1, 500, &spender_2_addr, 500); + let tx = make_stacks_transfer_mblock_only( + &spender_1_sk, + 1, + 500, + conf.burnchain.chain_id, + &spender_2_addr, + 500, + ); submit_tx(&http_origin, &tx); let mut last_block_height = get_chain_info(&conf).burn_block_height; diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 694d27ca15..236d76b000 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -25,7 +25,7 @@ use stacks::clarity_vm::clarity::ClarityConnection; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ - StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + StacksEpoch, StacksEpochId, CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, }; use stacks::net::api::callreadonly::CallReadOnlyRequestBody; @@ -211,8 +211,14 @@ fn integration_test_get_info() { if round == 1 { // block-height = 2 eprintln!("Tenure in 1 started!"); - let publish_tx = - make_contract_publish(&contract_sk, 0, 10, "get-info", GET_INFO_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 0, + 10, + CHAIN_ID_TESTNET, + "get-info", + GET_INFO_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -225,8 +231,14 @@ fn integration_test_get_info() { &StacksEpochId::Epoch21, ) .unwrap(); - let publish_tx = - make_contract_publish(&contract_sk, 1, 10, "other", OTHER_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 1, + 10, + CHAIN_ID_TESTNET, + "other", + OTHER_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -239,8 +251,14 @@ fn integration_test_get_info() { &StacksEpochId::Epoch21, ) .unwrap(); - let publish_tx = - make_contract_publish(&contract_sk, 2, 10, "main", CALL_READ_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 2, + 10, + CHAIN_ID_TESTNET, + "main", + CALL_READ_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -272,6 +290,7 @@ fn integration_test_get_info() { &contract_sk, 3, 10, + CHAIN_ID_TESTNET, "impl-trait-contract", IMPL_TRAIT_CONTRACT, ); @@ -294,6 +313,7 @@ fn integration_test_get_info() { &principal_sk, (round - 3).into(), 10, + CHAIN_ID_TESTNET, &to_addr(&contract_sk), "get-info", "update-info", @@ -319,6 +339,7 @@ fn integration_test_get_info() { &spender_sk, (round - 1).into(), 10, + CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), 100, ); @@ -797,8 +818,13 @@ fn integration_test_get_info() { eprintln!("Test: POST {} (valid)", path); // tx_xfer is 180 bytes long - let tx_xfer = make_stacks_transfer(&spender_sk, round.into(), 200, - &StacksAddress::from_string(ADDR_4).unwrap().into(), 123); + let tx_xfer = make_stacks_transfer( + &spender_sk, + round.into(), + 200, + CHAIN_ID_TESTNET, + &StacksAddress::from_string(ADDR_4).unwrap().into(), + 123); let res: String = client.post(&path) .header("Content-Type", "application/octet-stream") @@ -829,7 +855,8 @@ fn integration_test_get_info() { eprintln!("Test: POST {} (invalid)", path); // tx_xfer_invalid is 180 bytes long - let tx_xfer_invalid = make_stacks_transfer(&spender_sk, (round + 30).into(), 200, // bad nonce + // bad nonce + let tx_xfer_invalid = make_stacks_transfer(&spender_sk, (round + 30).into(), 200, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), 456); let tx_xfer_invalid_tx = StacksTransaction::consensus_deserialize(&mut &tx_xfer_invalid[..]).unwrap(); @@ -1114,8 +1141,14 @@ fn contract_stx_transfer() { if round == 1 { // block-height = 2 - let xfer_to_contract = - make_stacks_transfer(&sk_3, 0, 10, &contract_identifier.into(), 1000); + let xfer_to_contract = make_stacks_transfer( + &sk_3, + 0, + 10, + CHAIN_ID_TESTNET, + &contract_identifier.into(), + 1000, + ); tenure .mem_pool .submit_raw( @@ -1130,8 +1163,14 @@ fn contract_stx_transfer() { .unwrap(); } else if round == 2 { // block-height > 2 - let publish_tx = - make_contract_publish(&contract_sk, 0, 10, "faucet", FAUCET_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 0, + 10, + CHAIN_ID_TESTNET, + "faucet", + FAUCET_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -1146,8 +1185,14 @@ fn contract_stx_transfer() { .unwrap(); } else if round == 3 { // try to publish again - let publish_tx = - make_contract_publish(&contract_sk, 1, 10, "faucet", FAUCET_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 1, + 10, + CHAIN_ID_TESTNET, + "faucet", + FAUCET_CONTRACT, + ); let (consensus_hash, block_hash) = ( &tenure.parent_block.metadata.consensus_hash, @@ -1170,6 +1215,7 @@ fn contract_stx_transfer() { &sk_2, 0, 10, + CHAIN_ID_TESTNET, &to_addr(&contract_sk), "faucet", "spout", @@ -1194,6 +1240,7 @@ fn contract_stx_transfer() { &sk_3, 1 + i, 200, + CHAIN_ID_TESTNET, &contract_identifier.clone().into(), 1000, ); @@ -1215,8 +1262,14 @@ fn contract_stx_transfer() { .unwrap(); } // this one should fail because the nonce is already in the mempool - let xfer_to_contract = - make_stacks_transfer(&sk_3, 3, 190, &contract_identifier.clone().into(), 1000); + let xfer_to_contract = make_stacks_transfer( + &sk_3, + 3, + 190, + CHAIN_ID_TESTNET, + &contract_identifier.clone().into(), + 1000, + ); let xfer_to_contract = StacksTransaction::consensus_deserialize(&mut &xfer_to_contract[..]).unwrap(); match tenure @@ -1446,8 +1499,14 @@ fn mine_transactions_out_of_order() { if round == 1 { // block-height = 2 - let xfer_to_contract = - make_stacks_transfer(&sk, 1, 10, &contract_identifier.into(), 1000); + let xfer_to_contract = make_stacks_transfer( + &sk, + 1, + 10, + CHAIN_ID_TESTNET, + &contract_identifier.into(), + 1000, + ); tenure .mem_pool .submit_raw( @@ -1462,7 +1521,8 @@ fn mine_transactions_out_of_order() { .unwrap(); } else if round == 2 { // block-height > 2 - let publish_tx = make_contract_publish(&sk, 2, 10, "faucet", FAUCET_CONTRACT); + let publish_tx = + make_contract_publish(&sk, 2, 10, CHAIN_ID_TESTNET, "faucet", FAUCET_CONTRACT); tenure .mem_pool .submit_raw( @@ -1476,8 +1536,14 @@ fn mine_transactions_out_of_order() { ) .unwrap(); } else if round == 3 { - let xfer_to_contract = - make_stacks_transfer(&sk, 3, 10, &contract_identifier.into(), 1000); + let xfer_to_contract = make_stacks_transfer( + &sk, + 3, + 10, + CHAIN_ID_TESTNET, + &contract_identifier.into(), + 1000, + ); tenure .mem_pool .submit_raw( @@ -1491,8 +1557,14 @@ fn mine_transactions_out_of_order() { ) .unwrap(); } else if round == 4 { - let xfer_to_contract = - make_stacks_transfer(&sk, 0, 10, &contract_identifier.into(), 1000); + let xfer_to_contract = make_stacks_transfer( + &sk, + 0, + 10, + CHAIN_ID_TESTNET, + &contract_identifier.into(), + 1000, + ); tenure .mem_pool .submit_raw( @@ -1593,8 +1665,14 @@ fn mine_contract_twice() { if round == 1 { // block-height = 2 - let publish_tx = - make_contract_publish(&contract_sk, 0, 10, "faucet", FAUCET_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 0, + 10, + CHAIN_ID_TESTNET, + "faucet", + FAUCET_CONTRACT, + ); let (consensus_hash, block_hash) = ( &tenure.parent_block.metadata.consensus_hash, &tenure.parent_block.metadata.anchored_header.block_hash(), @@ -1691,8 +1769,14 @@ fn bad_contract_tx_rollback() { if round == 1 { // block-height = 2 - let xfer_to_contract = - make_stacks_transfer(&sk_3, 0, 10, &contract_identifier.into(), 1000); + let xfer_to_contract = make_stacks_transfer( + &sk_3, + 0, + 10, + CHAIN_ID_TESTNET, + &contract_identifier.into(), + 1000, + ); let (consensus_hash, block_hash) = ( &tenure.parent_block.metadata.consensus_hash, &tenure.parent_block.metadata.anchored_header.block_hash(), @@ -1711,7 +1795,8 @@ fn bad_contract_tx_rollback() { .unwrap(); } else if round == 2 { // block-height = 3 - let xfer_to_contract = make_stacks_transfer(&sk_3, 1, 10, &addr_2.into(), 1000); + let xfer_to_contract = + make_stacks_transfer(&sk_3, 1, 10, CHAIN_ID_TESTNET, &addr_2.into(), 1000); let (consensus_hash, block_hash) = ( &tenure.parent_block.metadata.consensus_hash, &tenure.parent_block.metadata.anchored_header.block_hash(), @@ -1730,7 +1815,8 @@ fn bad_contract_tx_rollback() { .unwrap(); // doesn't consistently get mined by the StacksBlockBuilder, because order matters! - let xfer_to_contract = make_stacks_transfer(&sk_3, 2, 10, &addr_2.into(), 3000); + let xfer_to_contract = + make_stacks_transfer(&sk_3, 2, 10, CHAIN_ID_TESTNET, &addr_2.into(), 3000); tenure .mem_pool .submit_raw( @@ -1744,8 +1830,14 @@ fn bad_contract_tx_rollback() { ) .unwrap(); - let publish_tx = - make_contract_publish(&contract_sk, 0, 10, "faucet", FAUCET_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 0, + 10, + CHAIN_ID_TESTNET, + "faucet", + FAUCET_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -1759,8 +1851,14 @@ fn bad_contract_tx_rollback() { ) .unwrap(); - let publish_tx = - make_contract_publish(&contract_sk, 1, 10, "faucet", FAUCET_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 1, + 10, + CHAIN_ID_TESTNET, + "faucet", + FAUCET_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -2014,6 +2112,7 @@ fn block_limit_runtime_test() { &contract_sk, 0, 10, + CHAIN_ID_TESTNET, "hello-contract", EXPENSIVE_CONTRACT.as_str(), ); @@ -2042,6 +2141,7 @@ fn block_limit_runtime_test() { sk, 0, 10, + CHAIN_ID_TESTNET, &to_addr(&contract_sk), "hello-contract", "do-it", @@ -2132,8 +2232,14 @@ fn mempool_errors() { if round == 1 { // block-height = 2 - let publish_tx = - make_contract_publish(&contract_sk, 0, 10, "get-info", GET_INFO_CONTRACT); + let publish_tx = make_contract_publish( + &contract_sk, + 0, + 10, + CHAIN_ID_TESTNET, + "get-info", + GET_INFO_CONTRACT, + ); eprintln!("Tenure in 1 started!"); tenure .mem_pool @@ -2176,6 +2282,7 @@ fn mempool_errors() { &spender_sk, 30, // bad nonce -- too much chaining 200, + CHAIN_ID_TESTNET, &send_to, 456, ); @@ -2217,6 +2324,7 @@ fn mempool_errors() { &spender_sk, 0, 1, // bad fee + CHAIN_ID_TESTNET, &send_to, 456, ); @@ -2250,6 +2358,7 @@ fn mempool_errors() { &contract_sk, 1, 2000, // not enough funds! + CHAIN_ID_TESTNET, &send_to, 456, ); @@ -2294,6 +2403,7 @@ fn mempool_errors() { 1 + MAXIMUM_MEMPOOL_TX_CHAINING, 1, 2000, + CHAIN_ID_TESTNET, &send_to, 1000, ); diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index 6221c6cf11..b701e70a15 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -112,8 +112,14 @@ fn mempool_setup_chainstate() { if round == 1 { eprintln!("Tenure in 1 started!"); - let publish_tx1 = - make_contract_publish(&contract_sk, 0, 100, "foo_contract", FOO_CONTRACT); + let publish_tx1 = make_contract_publish( + &contract_sk, + 0, + 100, + CHAIN_ID_TESTNET, + "foo_contract", + FOO_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -127,8 +133,14 @@ fn mempool_setup_chainstate() { ) .unwrap(); - let publish_tx2 = - make_contract_publish(&contract_sk, 1, 100, "trait-contract", TRAIT_CONTRACT); + let publish_tx2 = make_contract_publish( + &contract_sk, + 1, + 100, + CHAIN_ID_TESTNET, + "trait-contract", + TRAIT_CONTRACT, + ); tenure .mem_pool .submit_raw( @@ -146,6 +158,7 @@ fn mempool_setup_chainstate() { &contract_sk, 2, 100, + CHAIN_ID_TESTNET, "use-trait-contract", USE_TRAIT_CONTRACT, ); @@ -166,6 +179,7 @@ fn mempool_setup_chainstate() { &contract_sk, 3, 100, + CHAIN_ID_TESTNET, "implement-trait-contract", IMPLEMENT_TRAIT_CONTRACT, ); @@ -186,6 +200,7 @@ fn mempool_setup_chainstate() { &contract_sk, 4, 100, + CHAIN_ID_TESTNET, "bad-trait-contract", BAD_TRAIT_CONTRACT, ); @@ -234,8 +249,14 @@ fn mempool_setup_chainstate() { // let's throw some transactions at it. // first a couple valid ones: - let tx_bytes = - make_contract_publish(&contract_sk, 5, 1000, "bar_contract", FOO_CONTRACT); + let tx_bytes = make_contract_publish( + &contract_sk, + 5, + 1000, + CHAIN_ID_TESTNET, + "bar_contract", + FOO_CONTRACT, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state @@ -252,6 +273,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 200, + CHAIN_ID_TESTNET, &contract_addr, "foo_contract", "bar", @@ -269,7 +291,8 @@ fn mempool_setup_chainstate() { ) .unwrap(); - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 200, &other_addr, 1000); + let tx_bytes = + make_stacks_transfer(&contract_sk, 5, 200, CHAIN_ID_TESTNET, &other_addr, 1000); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state @@ -321,6 +344,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 200, + CHAIN_ID_TESTNET, &bad_addr, "foo_contract", "bar", @@ -354,7 +378,8 @@ fn mempool_setup_chainstate() { .unwrap() .into(); - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 200, &bad_addr, 1000); + let tx_bytes = + make_stacks_transfer(&contract_sk, 5, 200, CHAIN_ID_TESTNET, &bad_addr, 1000); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -373,7 +398,8 @@ fn mempool_setup_chainstate() { }); // bad fees - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 0, &other_addr, 1000); + let tx_bytes = + make_stacks_transfer(&contract_sk, 5, 0, CHAIN_ID_TESTNET, &other_addr, 1000); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -393,7 +419,8 @@ fn mempool_setup_chainstate() { }); // bad nonce - let tx_bytes = make_stacks_transfer(&contract_sk, 0, 200, &other_addr, 1000); + let tx_bytes = + make_stacks_transfer(&contract_sk, 0, 200, CHAIN_ID_TESTNET, &other_addr, 1000); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -413,7 +440,14 @@ fn mempool_setup_chainstate() { }); // not enough funds - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 110000, &other_addr, 1000); + let tx_bytes = make_stacks_transfer( + &contract_sk, + 5, + 110000, + CHAIN_ID_TESTNET, + &other_addr, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -434,7 +468,14 @@ fn mempool_setup_chainstate() { // sender == recipient let contract_princ = PrincipalData::from(contract_addr.clone()); - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 300, &contract_princ, 1000); + let tx_bytes = make_stacks_transfer( + &contract_sk, + 5, + 300, + CHAIN_ID_TESTNET, + &contract_princ, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -457,7 +498,14 @@ fn mempool_setup_chainstate() { let mut mainnet_recipient = to_addr(&other_sk); mainnet_recipient.version = C32_ADDRESS_VERSION_MAINNET_SINGLESIG; let mainnet_princ = mainnet_recipient.into(); - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 300, &mainnet_princ, 1000); + let tx_bytes = make_stacks_transfer( + &contract_sk, + 5, + 300, + CHAIN_ID_TESTNET, + &mainnet_princ, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -488,6 +536,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 300, + CHAIN_ID_TESTNET, TransactionAnchorMode::OnChainOnly, TransactionVersion::Mainnet, ); @@ -510,7 +559,8 @@ fn mempool_setup_chainstate() { }); // send amount must be positive - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 300, &other_addr, 0); + let tx_bytes = + make_stacks_transfer(&contract_sk, 5, 300, CHAIN_ID_TESTNET, &other_addr, 0); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -530,7 +580,14 @@ fn mempool_setup_chainstate() { }); // not enough funds - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 110000, &other_addr, 1000); + let tx_bytes = make_stacks_transfer( + &contract_sk, + 5, + 110000, + CHAIN_ID_TESTNET, + &other_addr, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -549,7 +606,14 @@ fn mempool_setup_chainstate() { false }); - let tx_bytes = make_stacks_transfer(&contract_sk, 5, 99700, &other_addr, 1000); + let tx_bytes = make_stacks_transfer( + &contract_sk, + 5, + 99700, + CHAIN_ID_TESTNET, + &other_addr, + 1000, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -572,6 +636,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 200, + CHAIN_ID_TESTNET, &contract_addr, "bar_contract", "bar", @@ -599,6 +664,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 200, + CHAIN_ID_TESTNET, &contract_addr, "foo_contract", "foobar", @@ -626,6 +692,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 200, + CHAIN_ID_TESTNET, &contract_addr, "foo_contract", "bar", @@ -649,8 +716,14 @@ fn mempool_setup_chainstate() { false }); - let tx_bytes = - make_contract_publish(&contract_sk, 5, 1000, "foo_contract", FOO_CONTRACT); + let tx_bytes = make_contract_publish( + &contract_sk, + 5, + 1000, + CHAIN_ID_TESTNET, + "foo_contract", + FOO_CONTRACT, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -685,7 +758,14 @@ fn mempool_setup_chainstate() { signature: MessageSignature([1; 65]), }; - let tx_bytes = make_poison(&contract_sk, 5, 1000, microblock_1, microblock_2); + let tx_bytes = make_poison( + &contract_sk, + 5, + 1000, + CHAIN_ID_TESTNET, + microblock_1, + microblock_2, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -716,7 +796,14 @@ fn mempool_setup_chainstate() { signature: MessageSignature([0; 65]), }; - let tx_bytes = make_poison(&contract_sk, 5, 1000, microblock_1, microblock_2); + let tx_bytes = make_poison( + &contract_sk, + 5, + 1000, + CHAIN_ID_TESTNET, + microblock_1, + microblock_2, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -750,7 +837,14 @@ fn mempool_setup_chainstate() { microblock_1.sign(&other_sk).unwrap(); microblock_2.sign(&other_sk).unwrap(); - let tx_bytes = make_poison(&contract_sk, 5, 1000, microblock_1, microblock_2); + let tx_bytes = make_poison( + &contract_sk, + 5, + 1000, + CHAIN_ID_TESTNET, + microblock_1, + microblock_2, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -765,7 +859,7 @@ fn mempool_setup_chainstate() { eprintln!("Err: {:?}", e); assert!(matches!(e, MemPoolRejection::Other(_))); - let tx_bytes = make_coinbase(&contract_sk, 5, 1000); + let tx_bytes = make_coinbase(&contract_sk, 5, 1000, CHAIN_ID_TESTNET); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -823,7 +917,14 @@ fn mempool_setup_chainstate() { microblock_1.sign(&secret_key).unwrap(); microblock_2.sign(&secret_key).unwrap(); - let tx_bytes = make_poison(&contract_sk, 5, 1000, microblock_1, microblock_2); + let tx_bytes = make_poison( + &contract_sk, + 5, + 1000, + CHAIN_ID_TESTNET, + microblock_1, + microblock_2, + ); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state @@ -848,6 +949,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 250, + CHAIN_ID_TESTNET, &contract_addr, "use-trait-contract", "baz", @@ -875,6 +977,7 @@ fn mempool_setup_chainstate() { &contract_sk, 5, 250, + CHAIN_ID_TESTNET, &contract_addr, "use-trait-contract", "baz", diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index ba88584f39..cfa1653287 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -94,6 +94,7 @@ lazy_static! { .unwrap(), 0, 10, + CHAIN_ID_TESTNET, "store", STORE_CONTRACT ); @@ -134,6 +135,7 @@ pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( sender_nonce: u64, payer_nonce: u64, tx_fee: u64, + chain_id: u32, anchor_mode: TransactionAnchorMode, version: TransactionVersion, ) -> Vec { @@ -144,6 +146,7 @@ pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( sender_nonce, Some(payer_nonce), tx_fee, + chain_id, anchor_mode, version, ) @@ -154,12 +157,14 @@ pub fn serialize_sign_standard_single_sig_tx( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, ) -> Vec { serialize_sign_standard_single_sig_tx_anchor_mode( payload, sender, nonce, tx_fee, + chain_id, TransactionAnchorMode::OnChainOnly, ) } @@ -169,6 +174,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, anchor_mode: TransactionAnchorMode, ) -> Vec { serialize_sign_standard_single_sig_tx_anchor_mode_version( @@ -176,6 +182,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode( sender, nonce, tx_fee, + chain_id, anchor_mode, TransactionVersion::Testnet, ) @@ -186,6 +193,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, anchor_mode: TransactionAnchorMode, version: TransactionVersion, ) -> Vec { @@ -196,6 +204,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version( nonce, None, tx_fee, + chain_id, anchor_mode, version, ) @@ -208,6 +217,7 @@ pub fn serialize_sign_tx_anchor_mode_version( sender_nonce: u64, payer_nonce: Option, tx_fee: u64, + chain_id: u32, anchor_mode: TransactionAnchorMode, version: TransactionVersion, ) -> Vec { @@ -234,7 +244,7 @@ pub fn serialize_sign_tx_anchor_mode_version( let mut unsigned_tx = StacksTransaction::new(version, auth, payload); unsigned_tx.anchor_mode = anchor_mode; unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; - unsigned_tx.chain_id = CHAIN_ID_TESTNET; + unsigned_tx.chain_id = chain_id; let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); tx_signer.sign_origin(sender).unwrap(); @@ -255,6 +265,7 @@ pub fn make_contract_publish_versioned( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, contract_name: &str, contract_content: &str, version: Option, @@ -265,23 +276,33 @@ pub fn make_contract_publish_versioned( let payload = TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); - serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee) + serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) } pub fn make_contract_publish( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, contract_name: &str, contract_content: &str, ) -> Vec { - make_contract_publish_versioned(sender, nonce, tx_fee, contract_name, contract_content, None) + make_contract_publish_versioned( + sender, + nonce, + tx_fee, + chain_id, + contract_name, + contract_content, + None, + ) } pub fn make_contract_publish_microblock_only_versioned( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, contract_name: &str, contract_content: &str, version: Option, @@ -297,6 +318,7 @@ pub fn make_contract_publish_microblock_only_versioned( sender, nonce, tx_fee, + chain_id, TransactionAnchorMode::OffChainOnly, ) } @@ -305,6 +327,7 @@ pub fn make_contract_publish_microblock_only( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, contract_name: &str, contract_content: &str, ) -> Vec { @@ -312,6 +335,7 @@ pub fn make_contract_publish_microblock_only( sender, nonce, tx_fee, + chain_id, contract_name, contract_content, None, @@ -392,12 +416,13 @@ pub fn make_stacks_transfer( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, recipient: &PrincipalData, amount: u64, ) -> Vec { let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee) + serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) } pub fn make_sponsored_stacks_transfer_on_testnet( @@ -406,6 +431,7 @@ pub fn make_sponsored_stacks_transfer_on_testnet( sender_nonce: u64, payer_nonce: u64, tx_fee: u64, + chain_id: u32, recipient: &PrincipalData, amount: u64, ) -> Vec { @@ -418,6 +444,7 @@ pub fn make_sponsored_stacks_transfer_on_testnet( sender_nonce, payer_nonce, tx_fee, + chain_id, TransactionAnchorMode::OnChainOnly, TransactionVersion::Testnet, ) @@ -427,6 +454,7 @@ pub fn make_stacks_transfer_mblock_only( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, recipient: &PrincipalData, amount: u64, ) -> Vec { @@ -437,6 +465,7 @@ pub fn make_stacks_transfer_mblock_only( sender, nonce, tx_fee, + chain_id, TransactionAnchorMode::OffChainOnly, ) } @@ -445,22 +474,24 @@ pub fn make_poison( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, header_1: StacksMicroblockHeader, header_2: StacksMicroblockHeader, ) -> Vec { let payload = TransactionPayload::PoisonMicroblock(header_1, header_2); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee) + serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) } -pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64) -> Vec { +pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec { let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee) + serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) } pub fn make_contract_call( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, contract_addr: &StacksAddress, contract_name: &str, function_name: &str, @@ -476,13 +507,14 @@ pub fn make_contract_call( function_args: function_args.iter().map(|x| x.clone()).collect(), }; - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee) + serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) } pub fn make_contract_call_mblock_only( sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, + chain_id: u32, contract_addr: &StacksAddress, contract_name: &str, function_name: &str, @@ -503,6 +535,7 @@ pub fn make_contract_call_mblock_only( sender, nonce, tx_fee, + chain_id, TransactionAnchorMode::OffChainOnly, ) } @@ -921,7 +954,7 @@ fn should_succeed_handling_malformed_and_valid_txs() { 1 => { // On round 1, publish the KV contract let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let publish_contract = make_contract_publish(&contract_sk, 0, 10, "store", STORE_CONTRACT); + let publish_contract = make_contract_publish(&contract_sk, 0, 10, CHAIN_ID_TESTNET, "store", STORE_CONTRACT); tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,publish_contract, &ExecutionCost::max_value(), &StacksEpochId::Epoch20, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 52ee1383e8..eb76ac3e7a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -78,7 +78,7 @@ use stacks::util_lib::signed_structured_data::pox4::{ use stacks_common::address::AddressHashMode; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; -use stacks_common::consts::{CHAIN_ID_TESTNET, STACKS_EPOCH_MAX}; +use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash, @@ -848,7 +848,7 @@ pub fn boot_to_epoch_3( &signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, 12_u128, u128::MAX, 1, @@ -862,6 +862,7 @@ pub fn boot_to_epoch_3( &stacker_sk, 0, 1000, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "stack-stx", @@ -924,6 +925,7 @@ pub fn boot_to_epoch_3( signer_sk, 0, 300, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), SIGNERS_VOTING_NAME, SIGNERS_VOTING_FUNCTION_NAME, @@ -1008,7 +1010,7 @@ pub fn boot_to_pre_epoch_3_boundary( &signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, 12_u128, u128::MAX, 1, @@ -1022,6 +1024,7 @@ pub fn boot_to_pre_epoch_3_boundary( &stacker_sk, 0, 1000, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "stack-stx", @@ -1084,6 +1087,7 @@ pub fn boot_to_pre_epoch_3_boundary( signer_sk, 0, 300, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), SIGNERS_VOTING_NAME, SIGNERS_VOTING_FUNCTION_NAME, @@ -1245,7 +1249,7 @@ pub fn setup_epoch_3_reward_set( &signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, lock_period, u128::MAX, 1, @@ -1258,6 +1262,7 @@ pub fn setup_epoch_3_reward_set( &stacker_sk, 0, 1000, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "stack-stx", @@ -1527,7 +1532,14 @@ fn simple_neon_integration() { } // Submit a TX - let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -1787,7 +1799,14 @@ fn flash_blocks_on_epoch_3() { } // Submit a TX - let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -2025,8 +2044,14 @@ fn mine_multiple_per_tenure_integration() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); loop { @@ -2277,8 +2302,14 @@ fn multiple_miners() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); wait_for(20, || { @@ -2484,7 +2515,7 @@ fn correct_burn_outs() { &sender_signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, 1_u128, u128::MAX, 1, @@ -2496,6 +2527,7 @@ fn correct_burn_outs() { &account.0, account.2.nonce, 1000, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "stack-stx", @@ -2837,6 +2869,7 @@ fn block_proposal_api_endpoint() { &account_keys[0], 0, 100, + conf.burnchain.chain_id, &to_addr(&account_keys[1]).into(), 10000, ); @@ -3514,7 +3547,7 @@ fn follower_bootup() { &node_info.node_public_key.unwrap(), naka_conf.node.p2p_bind ), - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, PEER_VERSION_TESTNET, ); @@ -3574,8 +3607,14 @@ fn follower_bootup() { let sender_nonce = account .nonce .max(last_nonce.as_ref().map(|ln| *ln + 1).unwrap_or(0)); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); last_nonce = Some(sender_nonce); @@ -3861,7 +3900,7 @@ fn follower_bootup_across_multiple_cycles() { &node_info.node_public_key.unwrap(), naka_conf.node.p2p_bind ), - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, PEER_VERSION_TESTNET, ); @@ -4153,6 +4192,7 @@ fn burn_ops_integration_test() { &signer_sk_1, 1, 500, + naka_conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "set-signer-key-authorization", @@ -4355,8 +4395,14 @@ fn burn_ops_integration_test() { .expect("Mutex poisoned") .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, 200, &stacker_addr_1.into(), 10000); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + 200, + naka_conf.burnchain.chain_id, + &stacker_addr_1.into(), + 10000, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); @@ -4766,8 +4812,14 @@ fn forked_tenure_is_ignored() { // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in Tenure C to mine a second block"); @@ -4965,6 +5017,7 @@ fn check_block_heights() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract0_name, contract_clarity1, ); @@ -5050,6 +5103,7 @@ fn check_block_heights() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract1_name, contract_clarity1, Some(ClarityVersion::Clarity2), @@ -5066,6 +5120,7 @@ fn check_block_heights() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract3_name, contract_clarity3, ); @@ -5173,8 +5228,14 @@ fn check_block_heights() { .expect("Mutex poisoned") .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); @@ -5465,8 +5526,14 @@ fn nakamoto_attempt_time() { let mut sender_nonce = account.nonce; for _ in 0..txs_per_block { - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, tx_fee, &recipient, amount); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + tx_fee, + naka_conf.burnchain.chain_id, + &recipient, + amount, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); } @@ -5558,8 +5625,14 @@ fn nakamoto_attempt_time() { 'submit_txs: loop { let acct = &mut account[acct_idx]; for _ in 0..MAXIMUM_MEMPOOL_TX_CHAINING { - let transfer_tx = - make_stacks_transfer(&acct.privk, acct.nonce, tx_fee, &recipient, amount); + let transfer_tx = make_stacks_transfer( + &acct.privk, + acct.nonce, + tx_fee, + naka_conf.burnchain.chain_id, + &recipient, + amount, + ); submit_tx(&http_origin, &transfer_tx); tx_total_size += transfer_tx.len(); tx_count += 1; @@ -5709,6 +5782,7 @@ fn clarity_burn_state() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract_name, contract, ); @@ -5740,6 +5814,7 @@ fn clarity_burn_state() { &sender_sk, sender_nonce, tx_fee, + naka_conf.burnchain.chain_id, &sender_addr, contract_name, "bar", @@ -5828,6 +5903,7 @@ fn clarity_burn_state() { &sender_sk, sender_nonce, tx_fee, + naka_conf.burnchain.chain_id, &sender_addr, contract_name, "bar", @@ -6123,8 +6199,14 @@ fn signer_chainstate() { // submit a tx to trigger an intermediate block let sender_nonce = i; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); let timer = Instant::now(); @@ -6638,7 +6720,14 @@ fn continue_tenure_extend() { .unwrap(); // Submit a TX - let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -6870,6 +6959,7 @@ fn check_block_times() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract0_name, contract_clarity1, ); @@ -6913,6 +7003,7 @@ fn check_block_times() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract1_name, contract_clarity1, Some(ClarityVersion::Clarity2), @@ -6930,6 +7021,7 @@ fn check_block_times() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract3_name, contract_clarity3, ); @@ -7025,8 +7117,14 @@ fn check_block_times() { info!("Mining Nakamoto block"); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); @@ -7106,8 +7204,14 @@ fn check_block_times() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); loop { @@ -7353,6 +7457,7 @@ fn check_block_info() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract0_name, contract_clarity1, ); @@ -7391,6 +7496,7 @@ fn check_block_info() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract1_name, contract_clarity1, Some(ClarityVersion::Clarity2), @@ -7423,6 +7529,7 @@ fn check_block_info() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract3_name, contract_clarity3, ); @@ -7529,8 +7636,14 @@ fn check_block_info() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); @@ -7636,8 +7749,14 @@ fn check_block_info() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); loop { @@ -7862,6 +7981,7 @@ fn check_block_info_rewards() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract0_name, contract_clarity1, ); @@ -7900,6 +8020,7 @@ fn check_block_info_rewards() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract1_name, contract_clarity1, Some(ClarityVersion::Clarity2), @@ -7925,6 +8046,7 @@ fn check_block_info_rewards() { &sender_sk, sender_nonce, deploy_fee, + naka_conf.burnchain.chain_id, contract3_name, contract_clarity3, ); @@ -7945,8 +8067,14 @@ fn check_block_info_rewards() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); @@ -7972,8 +8100,14 @@ fn check_block_info_rewards() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); loop { @@ -8234,7 +8368,7 @@ fn mock_mining() { &node_info.node_public_key.unwrap(), naka_conf.node.p2p_bind ), - CHAIN_ID_TESTNET, + naka_conf.burnchain.chain_id, PEER_VERSION_TESTNET, ); @@ -8299,8 +8433,14 @@ fn mock_mining() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra block let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); loop { @@ -8679,8 +8819,14 @@ fn v3_signer_api_endpoint() { .get_stacks_blocks_processed(); // submit a tx so that the miner will mine an extra stacks block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); wait_for(30, || { @@ -8834,6 +8980,7 @@ fn skip_mining_long_tx() { &sender_2_sk, 0, 9_000, + naka_conf.burnchain.chain_id, "large_contract", &format!( "(define-constant INP_LIST (list {input_list})) @@ -8857,8 +9004,14 @@ fn skip_mining_long_tx() { TEST_SKIP_P2P_BROADCAST.lock().unwrap().replace(false); } else { - let transfer_tx = - make_stacks_transfer(&sender_1_sk, i - 1, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_1_sk, + i - 1, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); wait_for(30, || { diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 14ec15447f..d6373a3b44 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1517,6 +1517,7 @@ fn deep_contract() { &spender_sk, 0, 1000, + conf.burnchain.chain_id, "test-publish", &exceeds_stack_depth_list, ); @@ -1694,11 +1695,25 @@ fn liquid_ustx_integration() { let _sort_height = channel.get_sortitions_processed(); - let publish = make_contract_publish(&spender_sk, 0, 1000, "caller", caller_src); + let publish = make_contract_publish( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + "caller", + caller_src, + ); let replaced_txid = submit_tx(&http_origin, &publish); - let publish = make_contract_publish(&spender_sk, 0, 1100, "caller", caller_src); + let publish = make_contract_publish( + &spender_sk, + 0, + 1100, + conf.burnchain.chain_id, + "caller", + caller_src, + ); submit_tx(&http_origin, &publish); let dropped_txs = test_observer::get_memtx_drops(); @@ -1715,6 +1730,7 @@ fn liquid_ustx_integration() { &spender_sk, 1, 1000, + conf.burnchain.chain_id, &spender_addr, "caller", "execute", @@ -2274,6 +2290,7 @@ fn stx_delegate_btc_integration_test() { &recipient_sk, 0, 293, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "delegate-stack-stx", @@ -2562,6 +2579,7 @@ fn stack_stx_burn_op_test() { &signer_sk_1, 0, 500, + conf.burnchain.chain_id, &boot_code_addr(false), POX_4_NAME, "set-signer-key-authorization", @@ -2923,6 +2941,7 @@ fn vote_for_aggregate_key_burn_op_test() { &spender_sk, 0, 500, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-4", "stack-stx", @@ -3145,6 +3164,7 @@ fn bitcoind_resubmission_test() { &spender_sk, 0, 100, + conf.burnchain.chain_id, &PrincipalData::from(StacksAddress::burn_address(false)), 1000, ); @@ -3488,12 +3508,24 @@ fn microblock_fork_poison_integration_test() { info!("Test microblock"); let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let unconfirmed_tx_bytes = - make_stacks_transfer_mblock_only(&spender_sk, 0, 1000, &recipient.into(), 1000); + let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ); let unconfirmed_tx = StacksTransaction::consensus_deserialize(&mut &unconfirmed_tx_bytes[..]).unwrap(); - let second_unconfirmed_tx_bytes = - make_stacks_transfer_mblock_only(&second_spender_sk, 0, 1000, &recipient.into(), 1500); + let second_unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( + &second_spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1500, + ); let second_unconfirmed_tx = StacksTransaction::consensus_deserialize(&mut &second_unconfirmed_tx_bytes[..]).unwrap(); @@ -3722,7 +3754,14 @@ fn microblock_integration_test() { // okay, let's push a transaction that is marked microblock only! let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let tx = make_stacks_transfer_mblock_only(&spender_sk, 0, 1000, &recipient.into(), 1000); + let tx = make_stacks_transfer_mblock_only( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ); submit_tx(&http_origin, &tx); info!("Try to mine a microblock-only tx"); @@ -3752,12 +3791,24 @@ fn microblock_integration_test() { // push another two transactions that are marked microblock only let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let unconfirmed_tx_bytes = - make_stacks_transfer_mblock_only(&spender_sk, 1, 1000, &recipient.into(), 1000); + let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( + &spender_sk, + 1, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ); let unconfirmed_tx = StacksTransaction::consensus_deserialize(&mut &unconfirmed_tx_bytes[..]).unwrap(); - let second_unconfirmed_tx_bytes = - make_stacks_transfer_mblock_only(&second_spender_sk, 0, 1000, &recipient.into(), 1500); + let second_unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( + &second_spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1500, + ); let second_unconfirmed_tx = StacksTransaction::consensus_deserialize(&mut &second_unconfirmed_tx_bytes[..]).unwrap(); @@ -4080,6 +4131,7 @@ fn microblock_integration_test() { &spender_sk, next_nonce, 1000, + conf.burnchain.chain_id, &recipient.into(), 1000, ); @@ -4163,6 +4215,14 @@ fn filter_low_fee_tx_integration_test() { .collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let (mut conf, _) = neon_integration_test_conf(); + for spender_addr in spender_addrs.iter() { + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 1049230, + }); + } + let txs: Vec<_> = spender_sks .iter() .enumerate() @@ -4171,22 +4231,28 @@ fn filter_low_fee_tx_integration_test() { if ix < 5 { // low-fee - make_stacks_transfer(&spender_sk, 0, 1000 + (ix as u64), &recipient.into(), 1000) + make_stacks_transfer( + &spender_sk, + 0, + 1000 + (ix as u64), + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ) } else { // high-fee - make_stacks_transfer(&spender_sk, 0, 2000 + (ix as u64), &recipient.into(), 1000) + make_stacks_transfer( + &spender_sk, + 0, + 2000 + (ix as u64), + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ) } }) .collect(); - let (mut conf, _) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() @@ -4257,15 +4323,6 @@ fn filter_long_runtime_tx_integration_test() { .collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let txs: Vec<_> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - make_stacks_transfer(&spender_sk, 0, 1000 + (ix as u64), &recipient.into(), 1000) - }) - .collect(); - let (mut conf, _) = neon_integration_test_conf(); for spender_addr in spender_addrs.iter() { conf.initial_balances.push(InitialBalance { @@ -4278,6 +4335,22 @@ fn filter_long_runtime_tx_integration_test() { conf.miner.first_attempt_time_ms = 0; conf.miner.subsequent_attempt_time_ms = 0; + let txs: Vec<_> = spender_sks + .iter() + .enumerate() + .map(|(ix, spender_sk)| { + let recipient = StacksAddress::from_string(ADDR_4).unwrap(); + make_stacks_transfer( + &spender_sk, + 0, + 1000 + (ix as u64), + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ) + }) + .collect(); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() @@ -4343,8 +4416,6 @@ fn miner_submit_twice() { (define-private (bar) (foo 56)) "; - let tx_1 = make_contract_publish(&spender_sk, 0, 50_000, "first-contract", contract_content); - let tx_2 = make_contract_publish(&spender_sk, 1, 50_000, "second-contract", contract_content); let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { @@ -4357,6 +4428,23 @@ fn miner_submit_twice() { conf.miner.first_attempt_time_ms = 20; conf.miner.subsequent_attempt_time_ms = 30_000; + let tx_1 = make_contract_publish( + &spender_sk, + 0, + 50_000, + conf.burnchain.chain_id, + "first-contract", + contract_content, + ); + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 50_000, + conf.burnchain.chain_id, + "second-contract", + contract_content, + ); + // note: this test depends on timing of how long it takes to assemble a block, // but it won't flake if the miner behaves correctly: a correct miner should // always be able to mine both transactions by the end of this test. an incorrect @@ -4435,18 +4523,28 @@ fn size_check_integration_test() { .collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let (mut conf, miner_account) = neon_integration_test_conf(); + // make a bunch of txs that will only fit one per block. let txs: Vec<_> = spender_sks .iter() .enumerate() .map(|(ix, spender_sk)| { if ix % 2 == 0 { - make_contract_publish(spender_sk, 0, 1049230, "large-0", &giant_contract) + make_contract_publish( + spender_sk, + 0, + 1049230, + conf.burnchain.chain_id, + "large-0", + &giant_contract, + ) } else { let tx = make_contract_publish_microblock_only( spender_sk, 0, 1049230, + conf.burnchain.chain_id, "large-0", &giant_contract, ); @@ -4457,8 +4555,6 @@ fn size_check_integration_test() { }) .collect(); - let (mut conf, miner_account) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { conf.initial_balances.push(InitialBalance { address: spender_addr.clone(), @@ -4603,6 +4699,8 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { .collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let (mut conf, miner_account) = neon_integration_test_conf(); + let txs: Vec> = spender_sks .iter() .enumerate() @@ -4613,6 +4711,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { spender_sk, 0, 1100000, + conf.burnchain.chain_id, "large-0", &giant_contract, )] @@ -4623,6 +4722,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { spender_sk, i as u64, 1100000, + conf.burnchain.chain_id, &format!("small-{}", i), &small_contract, ); @@ -4633,8 +4733,6 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { }) .collect(); - let (mut conf, miner_account) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { conf.initial_balances.push(InitialBalance { address: spender_addr.clone(), @@ -4810,20 +4908,6 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { .collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let txs: Vec<_> = spender_sks - .iter() - .map(|spender_sk| { - let tx = make_contract_publish_microblock_only( - spender_sk, - 0, - 600000, - "small", - &small_contract, - ); - tx - }) - .collect(); - let (mut conf, miner_account) = neon_integration_test_conf(); for spender_addr in spender_addrs.iter() { @@ -4843,6 +4927,21 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + let txs: Vec<_> = spender_sks + .iter() + .map(|spender_sk| { + let tx = make_contract_publish_microblock_only( + spender_sk, + 0, + 600000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + tx + }) + .collect(); + test_observer::spawn(); test_observer::register_any(&mut conf); @@ -4997,20 +5096,6 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { .collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let txs: Vec> = spender_sks - .iter() - .map(|spender_sk| { - let tx = make_contract_publish_microblock_only( - spender_sk, - 0, - 1149230, - "small", - &small_contract, - ); - tx - }) - .collect(); - let (mut conf, miner_account) = neon_integration_test_conf(); for spender_addr in spender_addrs.iter() { @@ -5027,6 +5112,21 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { conf.node.max_microblocks = 65536; conf.burnchain.max_rbf = 1000000; + let txs: Vec> = spender_sks + .iter() + .map(|spender_sk| { + let tx = make_contract_publish_microblock_only( + spender_sk, + 0, + 1149230, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + tx + }) + .collect(); + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); epochs[1].block_limit = core::BLOCK_LIMIT_MAINNET_20; conf.burnchain.epochs = Some(epochs); @@ -5160,6 +5260,27 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { let spender_addrs_c32: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let (mut conf, miner_account) = neon_integration_test_conf(); + + for spender_addr in spender_addrs.iter() { + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 1049230, + }); + } + + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 15000; + conf.miner.microblock_attempt_time_ms = 120_000; + + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].block_limit = core::BLOCK_LIMIT_MAINNET_20; + conf.burnchain.epochs = Some(epochs); + let txs: Vec> = spender_sks .iter() .enumerate() @@ -5170,6 +5291,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { spender_sk, 0, 1049230, + conf.burnchain.chain_id, &format!("large-{}", ix), &format!(" ;; a single one of these transactions consumes over half the runtime budget @@ -5224,6 +5346,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { spender_sk, i as u64, 210000, + conf.burnchain.chain_id, &format!("small-{}-{}", ix, i), &format!(" ;; a single one of these transactions consumes over half the runtime budget @@ -5276,27 +5399,6 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { }) .collect(); - let (mut conf, miner_account) = neon_integration_test_conf(); - - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } - - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 0; - conf.node.microblock_frequency = 15000; - conf.miner.microblock_attempt_time_ms = 120_000; - - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].block_limit = core::BLOCK_LIMIT_MAINNET_20; - conf.burnchain.epochs = Some(epochs); - test_observer::spawn(); test_observer::register_any(&mut conf); @@ -5514,7 +5616,14 @@ fn block_replay_integration_test() { assert_eq!(account.nonce, 0); let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let tx = make_stacks_transfer(&spender_sk, 0, 1000, &recipient.into(), 1000); + let tx = make_stacks_transfer( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ); submit_tx(&http_origin, &tx); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -5652,9 +5761,30 @@ fn cost_voting_integration() { assert_eq!(res.nonce, 0); let transactions = vec![ - make_contract_publish(&spender_sk, 0, 1000, "cost-definer", cost_definer_src), - make_contract_publish(&spender_sk, 1, 1000, "caller", caller_src), - make_contract_publish(&spender_sk, 2, 1000, "voter", power_vote_src), + make_contract_publish( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + "cost-definer", + cost_definer_src, + ), + make_contract_publish( + &spender_sk, + 1, + 1000, + conf.burnchain.chain_id, + "caller", + caller_src, + ), + make_contract_publish( + &spender_sk, + 2, + 1000, + conf.burnchain.chain_id, + "voter", + power_vote_src, + ), ]; for tx in transactions.into_iter() { @@ -5668,6 +5798,7 @@ fn cost_voting_integration() { &spender_sk, 3, 1000, + conf.burnchain.chain_id, &spender_addr, "voter", "propose-vote-confirm", @@ -5678,6 +5809,7 @@ fn cost_voting_integration() { &spender_sk, 4, 1000, + conf.burnchain.chain_id, &spender_addr, "caller", "execute-2", @@ -5729,6 +5861,7 @@ fn cost_voting_integration() { &spender_sk, 5, 1000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "cost-voting", "confirm-miners", @@ -5779,6 +5912,7 @@ fn cost_voting_integration() { &spender_sk, 6, 1000, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "cost-voting", "confirm-miners", @@ -5823,6 +5957,7 @@ fn cost_voting_integration() { &spender_sk, 7, 1000, + conf.burnchain.chain_id, &spender_addr, "caller", "execute-2", @@ -5884,11 +6019,6 @@ fn mining_events_integration_test() { let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); let addr_2 = to_addr(&spender_sk_2); - let tx = make_contract_publish(&spender_sk, 0, 600000, "small", &small_contract); - let tx_2 = make_contract_publish(&spender_sk, 1, 610000, "small", &small_contract); - let mb_tx = - make_contract_publish_microblock_only(&spender_sk_2, 0, 620000, "small", &small_contract); - let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { @@ -5907,6 +6037,31 @@ fn mining_events_integration_test() { conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + let tx = make_contract_publish( + &spender_sk, + 0, + 600000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 610000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + let mb_tx = make_contract_publish_microblock_only( + &spender_sk_2, + 0, + 620000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + test_observer::spawn(); test_observer::register( &mut conf, @@ -6146,15 +6301,6 @@ fn block_limit_hit_integration_test() { let third_spender_sk = StacksPrivateKey::new(); let third_spender_addr: PrincipalData = to_addr(&third_spender_sk).into(); - // included in first block - let tx = make_contract_publish(&spender_sk, 0, 555_000, "over", &oversize_contract_src); - // contract limit hit; included in second block - let tx_2 = make_contract_publish(&spender_sk, 1, 555_000, "over-2", &oversize_contract_src); - // skipped over since contract limit was hit; included in second block - let tx_3 = make_contract_publish(&second_spender_sk, 0, 150_000, "max", &max_contract_src); - // included in first block - let tx_4 = make_stacks_transfer(&third_spender_sk, 0, 180, &PrincipalData::from(addr), 100); - let (mut conf, _miner_account) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { @@ -6177,6 +6323,43 @@ fn block_limit_hit_integration_test() { conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + // included in first block + let tx = make_contract_publish( + &spender_sk, + 0, + 555_000, + conf.burnchain.chain_id, + "over", + &oversize_contract_src, + ); + // contract limit hit; included in second block + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 555_000, + conf.burnchain.chain_id, + "over-2", + &oversize_contract_src, + ); + // skipped over since contract limit was hit; included in second block + let tx_3 = make_contract_publish( + &second_spender_sk, + 0, + 150_000, + conf.burnchain.chain_id, + "max", + &max_contract_src, + ); + // included in first block + let tx_4 = make_stacks_transfer( + &third_spender_sk, + 0, + 180, + conf.burnchain.chain_id, + &PrincipalData::from(addr), + 100, + ); + test_observer::spawn(); test_observer::register_any(&mut conf); @@ -6330,39 +6513,6 @@ fn microblock_limit_hit_integration_test() { let third_spender_sk = StacksPrivateKey::new(); let third_spender_addr: PrincipalData = to_addr(&third_spender_sk).into(); - // included in the first block - let tx = make_contract_publish_microblock_only( - &spender_sk, - 0, - 555_000, - "over", - &oversize_contract_src, - ); - // contract limit hit; included in second block - let tx_2 = make_contract_publish_microblock_only( - &spender_sk, - 1, - 555_000, - "over-2", - &oversize_contract_src, - ); - // skipped over since contract limit was hit; included in second block - let tx_3 = make_contract_publish_microblock_only( - &second_spender_sk, - 0, - 150_000, - "max", - &max_contract_src, - ); - // included in first block - let tx_4 = make_stacks_transfer_mblock_only( - &third_spender_sk, - 0, - 180, - &PrincipalData::from(addr), - 100, - ); - let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { @@ -6428,6 +6578,43 @@ fn microblock_limit_hit_integration_test() { ]); conf.burnchain.pox_2_activation = Some(10_003); + // included in the first block + let tx = make_contract_publish_microblock_only( + &spender_sk, + 0, + 555_000, + conf.burnchain.chain_id, + "over", + &oversize_contract_src, + ); + // contract limit hit; included in second block + let tx_2 = make_contract_publish_microblock_only( + &spender_sk, + 1, + 555_000, + conf.burnchain.chain_id, + "over-2", + &oversize_contract_src, + ); + // skipped over since contract limit was hit; included in second block + let tx_3 = make_contract_publish_microblock_only( + &second_spender_sk, + 0, + 150_000, + conf.burnchain.chain_id, + "max", + &max_contract_src, + ); + // included in first block + let tx_4 = make_stacks_transfer_mblock_only( + &third_spender_sk, + 0, + 180, + conf.burnchain.chain_id, + &PrincipalData::from(addr), + 100, + ); + test_observer::spawn(); test_observer::register_any(&mut conf); @@ -6569,10 +6756,6 @@ fn block_large_tx_integration_test() { let spender_sk = StacksPrivateKey::new(); let spender_addr = to_addr(&spender_sk); - // higher fee for tx means it will get mined first - let tx = make_contract_publish(&spender_sk, 0, 671_000, "small", &small_contract_src); - let tx_2 = make_contract_publish(&spender_sk, 1, 670_000, "over", &oversize_contract_src); - let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); test_observer::register_any(&mut conf); @@ -6593,6 +6776,24 @@ fn block_large_tx_integration_test() { conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + // higher fee for tx means it will get mined first + let tx = make_contract_publish( + &spender_sk, + 0, + 671_000, + conf.burnchain.chain_id, + "small", + &small_contract_src, + ); + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 670_000, + conf.burnchain.chain_id, + "over", + &oversize_contract_src, + ); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() @@ -6691,21 +6892,6 @@ fn microblock_large_tx_integration_test_FLAKY() { let spender_sk = StacksPrivateKey::new(); let addr = to_addr(&spender_sk); - let tx = make_contract_publish_microblock_only( - &spender_sk, - 0, - 150_000, - "small", - &small_contract_src, - ); - let tx_2 = make_contract_publish_microblock_only( - &spender_sk, - 1, - 670_000, - "over", - &oversize_contract_src, - ); - let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); @@ -6728,6 +6914,23 @@ fn microblock_large_tx_integration_test_FLAKY() { conf.burnchain.max_rbf = 10_000_000; conf.node.wait_time_for_blocks = 1_000; + let tx = make_contract_publish_microblock_only( + &spender_sk, + 0, + 150_000, + conf.burnchain.chain_id, + "small", + &small_contract_src, + ); + let tx_2 = make_contract_publish_microblock_only( + &spender_sk, + 1, + 670_000, + conf.burnchain.chain_id, + "over", + &oversize_contract_src, + ); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() @@ -6981,6 +7184,7 @@ fn pox_integration_test() { &spender_sk, 0, 260, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -7096,6 +7300,7 @@ fn pox_integration_test() { &spender_2_sk, 0, 260, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -7119,6 +7324,7 @@ fn pox_integration_test() { &spender_3_sk, 0, 260, + conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -7461,6 +7667,7 @@ fn atlas_integration_test() { &user_1, 0, 260, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "namespace-preorder", @@ -7520,6 +7727,7 @@ fn atlas_integration_test() { &user_1, 1, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "namespace-reveal", @@ -7582,6 +7790,7 @@ fn atlas_integration_test() { &user_1, 2, 500, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-import", @@ -7699,6 +7908,7 @@ fn atlas_integration_test() { }; let burnchain_config = Burnchain::regtest(&conf_follower_node.get_burn_db_path()); + let chain_id = conf_follower_node.burnchain.chain_id; let http_origin = format!("http://{}", &conf_follower_node.node.rpc_bind); eprintln!("Chain bootstrapped..."); @@ -7777,6 +7987,7 @@ fn atlas_integration_test() { &user_1, 2 + i, 500, + chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-import", @@ -8245,6 +8456,7 @@ fn atlas_stress_integration_test() { &user_1, 0, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "namespace-preorder", @@ -8304,6 +8516,7 @@ fn atlas_stress_integration_test() { &user_1, 1, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "namespace-reveal", @@ -8392,6 +8605,7 @@ fn atlas_stress_integration_test() { &user_1, 2 + (batch_size * i + j) as u64, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-import", @@ -8461,6 +8675,7 @@ fn atlas_stress_integration_test() { &user_1, 2 + (batch_size as u64) * (batches as u64), 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "namespace-ready", @@ -8521,6 +8736,7 @@ fn atlas_stress_integration_test() { &users[batches * batch_size + j], 0, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-preorder", @@ -8580,6 +8796,7 @@ fn atlas_stress_integration_test() { &users[batches * batch_size + j], 1, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-register", @@ -8643,6 +8860,7 @@ fn atlas_stress_integration_test() { &users[batches * batch_size + j], 2, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-update", @@ -8705,6 +8923,7 @@ fn atlas_stress_integration_test() { &users[batches * batch_size + j], 3, 1000, + conf_bootstrap_node.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "bns", "name-renewal", @@ -8960,6 +9179,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value &spender_sk, 0, 110000, + conf.burnchain.chain_id, "increment-contract", &max_contract_src, ), @@ -8977,6 +9197,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value &spender_sk, i, // nonce i * 100000, // payment + conf.burnchain.chain_id, &spender_addr.into(), "increment-contract", "increment-many", @@ -9172,15 +9393,27 @@ fn use_latest_tip_integration_test() { // Make microblock with two transactions. let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let transfer_tx = - make_stacks_transfer_mblock_only(&spender_sk, 0, 1000, &recipient.into(), 1000); + let transfer_tx = make_stacks_transfer_mblock_only( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ); let caller_src = " (define-public (execute) (ok stx-liquid-supply)) "; - let publish_tx = - make_contract_publish_microblock_only(&spender_sk, 1, 1000, "caller", caller_src); + let publish_tx = make_contract_publish_microblock_only( + &spender_sk, + 1, + 1000, + conf.burnchain.chain_id, + "caller", + caller_src, + ); let tx_1 = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap(); let tx_2 = StacksTransaction::consensus_deserialize(&mut &publish_tx[..]).unwrap(); @@ -9527,6 +9760,7 @@ fn test_problematic_txs_are_not_stored() { &spender_sk_1, 0, (tx_edge_body.len() * 100) as u64, + conf.burnchain.chain_id, "test-edge", &tx_edge_body, ); @@ -9544,6 +9778,7 @@ fn test_problematic_txs_are_not_stored() { &spender_sk_2, 0, (tx_exceeds_body.len() * 100) as u64, + conf.burnchain.chain_id, "test-exceeds", &tx_exceeds_body, ); @@ -9561,6 +9796,7 @@ fn test_problematic_txs_are_not_stored() { &spender_sk_3, 0, (tx_high_body.len() * 100) as u64, + conf.burnchain.chain_id, "test-high", &tx_high_body, ); @@ -9770,6 +10006,7 @@ fn test_problematic_blocks_are_not_mined() { &spender_sk_2, 0, (tx_exceeds_body.len() * 100) as u64, + conf.burnchain.chain_id, "test-exceeds", &tx_exceeds_body, ); @@ -9787,6 +10024,7 @@ fn test_problematic_blocks_are_not_mined() { &spender_sk_3, 0, (tx_high_body.len() * 100) as u64, + conf.burnchain.chain_id, "test-high", &tx_high_body, ); @@ -10123,6 +10361,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { &spender_sk_2, 0, (tx_exceeds_body.len() * 100) as u64, + conf.burnchain.chain_id, "test-exceeds", &tx_exceeds_body, ); @@ -10139,6 +10378,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { &spender_sk_3, 0, (tx_high_body.len() * 100) as u64, + conf.burnchain.chain_id, "test-high", &tx_high_body, ); @@ -10518,6 +10758,7 @@ fn test_problematic_microblocks_are_not_mined() { &spender_sk_2, 0, (tx_exceeds_body.len() * 100) as u64, + conf.burnchain.chain_id, "test-exceeds", &tx_exceeds_body, ); @@ -10536,6 +10777,7 @@ fn test_problematic_microblocks_are_not_mined() { &spender_sk_3, 0, (tx_high_body.len() * 100) as u64, + conf.burnchain.chain_id, "test-high", &tx_high_body, ); @@ -10898,6 +11140,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { &spender_sk_2, 0, (tx_exceeds_body.len() * 100) as u64, + conf.burnchain.chain_id, "test-exceeds", &tx_exceeds_body, ); @@ -10916,6 +11159,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { &spender_sk_3, 0, (tx_high_body.len() * 100) as u64, + conf.burnchain.chain_id, "test-high", &tx_high_body, ); @@ -11389,6 +11633,7 @@ enum TxChainStrategy { pub fn make_expensive_tx_chain( privk: &StacksPrivateKey, fee_plus: u64, + chain_id: u32, mblock_only: bool, ) -> Vec> { let addr = to_addr(&privk); @@ -11403,6 +11648,7 @@ pub fn make_expensive_tx_chain( privk, nonce, 1049230 + nonce + fee_plus, + chain_id, &contract_name, &make_runtime_sized_contract(256, nonce, &addr_prefix), ) @@ -11411,6 +11657,7 @@ pub fn make_expensive_tx_chain( privk, nonce, 1049230 + nonce + fee_plus, + chain_id, &contract_name, &make_runtime_sized_contract(256, nonce, &addr_prefix), ) @@ -11423,6 +11670,7 @@ pub fn make_expensive_tx_chain( pub fn make_random_tx_chain( privk: &StacksPrivateKey, fee_plus: u64, + chain_id: u32, mblock_only: bool, ) -> Vec> { let addr = to_addr(&privk); @@ -11448,6 +11696,7 @@ pub fn make_random_tx_chain( privk, nonce, 1049230 + nonce + fee_plus + random_extra_fee, + chain_id, &contract_name, &make_runtime_sized_contract(random_iters, nonce, &addr_prefix), ) @@ -11456,6 +11705,7 @@ pub fn make_random_tx_chain( privk, nonce, 1049230 + nonce + fee_plus + random_extra_fee, + chain_id, &contract_name, &make_runtime_sized_contract(random_iters, nonce, &addr_prefix), ) @@ -11465,7 +11715,7 @@ pub fn make_random_tx_chain( chain } -fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64) -> Vec> { +fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) -> Vec> { let addr = to_addr(&privk); let mut chain = vec![]; @@ -11488,6 +11738,7 @@ fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64) -> Vec> privk, nonce, 1049230 + nonce + fee_plus + random_extra_fee, + chain_id, &contract_name, &make_runtime_sized_contract(1, nonce, &addr_prefix), ); @@ -11547,6 +11798,8 @@ fn test_competing_miners_build_on_same_chain( confs.push(conf); } + let chain_id = confs[0].burnchain.chain_id; + let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); for i in 1..num_miners { let chain_id = confs[0].burnchain.chain_id; @@ -11674,8 +11927,12 @@ fn test_competing_miners_build_on_same_chain( .iter() .enumerate() .map(|(i, pk)| match chain_strategy { - TxChainStrategy::Expensive => make_expensive_tx_chain(pk, (25 * i) as u64, mblock_only), - TxChainStrategy::Random => make_random_tx_chain(pk, (25 * i) as u64, mblock_only), + TxChainStrategy::Expensive => { + make_expensive_tx_chain(pk, (25 * i) as u64, chain_id, mblock_only) + } + TxChainStrategy::Random => { + make_random_tx_chain(pk, (25 * i) as u64, chain_id, mblock_only) + } }) .collect(); let mut cnt = 0; @@ -11755,6 +12012,7 @@ fn test_competing_miners_build_anchor_blocks_and_microblocks_on_same_chain() { #[ignore] fn microblock_miner_multiple_attempts() { let (mut conf, miner_account) = neon_integration_test_conf(); + let chain_id = conf.burnchain.chain_id; conf.node.mine_microblocks = true; conf.miner.microblock_attempt_time_ms = 2_000; @@ -11823,7 +12081,7 @@ fn microblock_miner_multiple_attempts() { let all_txs: Vec<_> = privks .iter() .enumerate() - .map(|(i, pk)| make_mblock_tx_chain(pk, (25 * i) as u64)) + .map(|(i, pk)| make_mblock_tx_chain(pk, (25 * i) as u64, chain_id)) .collect(); let _handle = thread::spawn(move || { @@ -11923,6 +12181,7 @@ fn min_txs() { &spender_sk, i as u64, 1000, + conf.burnchain.chain_id, &format!("test-publish-{}", &i), &code, ); @@ -12026,6 +12285,7 @@ fn filter_txs_by_type() { &spender_sk, i as u64, 1000, + conf.burnchain.chain_id, &format!("test-publish-{}", &i), &code, ); @@ -12136,6 +12396,7 @@ fn filter_txs_by_origin() { &spender_sk, i as u64, 1000, + conf.burnchain.chain_id, &format!("test-publish-{}", &i), &code, ); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 8c48eda5e8..347c7c6c58 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -139,6 +139,7 @@ impl SignerTest { &stacker_sk, 0, 1000, + self.running_nodes.conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "stack-stx", @@ -1140,8 +1141,14 @@ fn forked_tenure_testing( let start_time = Instant::now(); // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in Tenure C to mine a second block"); while mined_blocks.load(Ordering::SeqCst) <= blocks_before { @@ -2083,8 +2090,14 @@ fn end_of_tenure() { let start_height = info.stacks_tip_height; // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block proposal"); @@ -2194,8 +2207,14 @@ fn retry_on_rejection() { let start_time = Instant::now(); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; info!("Submitted tx {tx} in to mine the first Nakamoto block"); @@ -2231,8 +2250,14 @@ fn retry_on_rejection() { .load(Ordering::SeqCst); // submit a tx so that the miner will mine a block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block proposal"); @@ -2338,8 +2363,14 @@ fn signers_broadcast_signed_blocks() { // submit a tx so that the miner will mine a blockn let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); debug!("Transaction sent; waiting for block-mining"); @@ -2484,8 +2515,14 @@ fn empty_sortition() { // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); @@ -3040,8 +3077,14 @@ fn signer_set_rollover() { info!("---- Mining a block to trigger the signer set -----"); // submit a tx so that the miner will mine an extra block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); signer_test.mine_nakamoto_block(short_timeout); let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); @@ -3091,6 +3134,7 @@ fn signer_set_rollover() { &stacker_sk, 0, 1000, + signer_test.running_nodes.conf.burnchain.chain_id, &StacksAddress::burn_address(false), "pox-4", "stack-stx", @@ -3163,8 +3207,14 @@ fn signer_set_rollover() { info!("---- Mining a block to verify new signer set -----"); let sender_nonce = 1; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); signer_test.mine_nakamoto_block(short_timeout); let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); @@ -3248,8 +3298,14 @@ fn min_gap_between_blocks() { // Submit a tx so that the miner will mine a block let sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block to be processed. Ensure it does not arrive before the gap is exceeded"); @@ -3574,8 +3630,14 @@ fn multiple_miners_with_nakamoto_blocks() { let blocks_processed_before = blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); // submit a tx so that the miner will mine an extra block - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); @@ -3915,8 +3977,14 @@ fn partial_tenure_fork() { // submit a tx so that the miner will mine an extra block let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); // This may fail if the forking miner wins too many tenures and this account's // nonces get too high (TooMuchChaining) match submit_tx_fallible(&http_origin, &transfer_tx) { @@ -4100,8 +4168,14 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let blocks_before = mined_blocks.load(Ordering::SeqCst); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); wait_for(short_timeout_secs, || { @@ -4143,8 +4217,14 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .unwrap() .replace(rejecting_signers.clone()); test_observer::clear(); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; info!("Submitted tx {tx} to mine block N+1"); @@ -4171,8 +4251,14 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .unwrap() .replace(Vec::new()); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} to mine block N+1'"); @@ -4271,8 +4357,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { // submit a tx so that the miner will mine a stacks block N let mut sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); @@ -4325,8 +4417,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .stacks_client .get_peer_info() .expect("Failed to get peer info"); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; info!("Submitted tx {tx} in to mine block N+1"); @@ -4381,8 +4479,14 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { .replace(Vec::new()); // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N+2"); wait_for(30, || { @@ -4474,8 +4578,14 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); @@ -4516,8 +4626,14 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { test_observer::clear(); // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+1"); @@ -4684,8 +4800,14 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to mine block N"); @@ -4727,8 +4849,14 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .get_peer_info() .expect("Failed to get peer info"); - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); sender_nonce += 1; let tx = submit_tx(&http_origin, &transfer_tx); @@ -4890,8 +5018,14 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } // Induce block N+2 to get mined - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+2"); diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index f7089c3f33..aa620d349b 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -206,7 +206,14 @@ fn test_stackerdb_load_store() { let http_origin = format!("http://{}", &conf.node.rpc_bind); eprintln!("Send contract-publish..."); - let tx = make_contract_publish(&privks[0], 0, 10_000, "hello-world", stackerdb_contract); + let tx = make_contract_publish( + &privks[0], + 0, + 10_000, + conf.burnchain.chain_id, + "hello-world", + stackerdb_contract, + ); submit_tx(&http_origin, &tx); // mine it @@ -336,7 +343,14 @@ fn test_stackerdb_event_observer() { let http_origin = format!("http://{}", &conf.node.rpc_bind); eprintln!("Send contract-publish..."); - let tx = make_contract_publish(&privks[0], 0, 10_000, "hello-world", stackerdb_contract); + let tx = make_contract_publish( + &privks[0], + 0, + 10_000, + conf.burnchain.chain_id, + "hello-world", + stackerdb_contract, + ); submit_tx(&http_origin, &tx); // mine it From e6d54fbe3193d05d809d82594d4e13e229e0c5cd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 11 Oct 2024 14:25:02 -0400 Subject: [PATCH 1321/1400] test: add integration test verifying `chain_id` config --- .../src/tests/nakamoto_integrations.rs | 332 ++++++++++++++++++ 1 file changed, 332 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index eb76ac3e7a..4b33112115 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3958,6 +3958,338 @@ fn follower_bootup_across_multiple_cycles() { follower_thread.join().unwrap(); } +/// Boot up a node and a follower with a non-default chain id +#[test] +#[ignore] +fn follower_bootup_custom_chain_id() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.burnchain.chain_id = 0x87654321; + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let tenure_count = 5; + let inter_blocks_per_tenure = 9; + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr.clone()).to_string(), + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, + ); + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr.clone()).to_string(), + 100000, + ); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + test_observer::register_any(&mut naka_conf); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + let mut follower_conf = naka_conf.clone(); + follower_conf.node.miner = false; + follower_conf.events_observers.clear(); + follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir); + follower_conf.node.seed = vec![0x01; 32]; + follower_conf.node.local_peer_seed = vec![0x02; 32]; + + let rpc_port = gen_random_port(); + let p2p_port = gen_random_port(); + + let localhost = "127.0.0.1"; + follower_conf.node.rpc_bind = format!("{localhost}:{rpc_port}"); + follower_conf.node.p2p_bind = format!("{localhost}:{p2p_port}"); + follower_conf.node.data_url = format!("http://{localhost}:{rpc_port}"); + follower_conf.node.p2p_address = format!("{localhost}:{p2p_port}"); + follower_conf.node.pox_sync_sample_secs = 30; + + let node_info = get_chain_info(&naka_conf); + follower_conf.node.add_bootstrap_node( + &format!( + "{}@{}", + &node_info.node_public_key.unwrap(), + naka_conf.node.p2p_bind + ), + naka_conf.burnchain.chain_id, + PEER_VERSION_TESTNET, + ); + + let mut follower_run_loop = boot_nakamoto::BootRunLoop::new(follower_conf.clone()).unwrap(); + let follower_run_loop_stopper = follower_run_loop.get_termination_switch(); + let follower_coord_channel = follower_run_loop.coordinator_channels(); + + debug!( + "Booting follower-thread ({},{})", + &follower_conf.node.p2p_bind, &follower_conf.node.rpc_bind + ); + debug!( + "Booting follower-thread: neighbors = {:?}", + &follower_conf.node.bootstrap_node + ); + + // spawn a follower thread + let follower_thread = thread::Builder::new() + .name("follower-thread".into()) + .spawn(move || follower_run_loop.start(None, 0)) + .unwrap(); + + debug!("Booted follower-thread"); + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + debug!("follower_bootup: Miner runs tenure {}", tenure_ix); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); + + let mut last_tip = BlockHeaderHash([0x00; 32]); + let mut last_nonce = None; + + debug!( + "follower_bootup: Miner mines interum blocks for tenure {}", + tenure_ix + ); + + // mine the interim blocks + for _ in 0..inter_blocks_per_tenure { + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + let account = loop { + // submit a tx so that the miner will mine an extra block + let Ok(account) = get_account_result(&http_origin, &sender_addr) else { + debug!("follower_bootup: Failed to load miner account"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + break account; + }; + + let sender_nonce = account + .nonce + .max(last_nonce.as_ref().map(|ln| *ln + 1).unwrap_or(0)); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + last_nonce = Some(sender_nonce); + + let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap(); + + debug!("follower_bootup: Miner account: {:?}", &account); + debug!("follower_bootup: Miner sent {}: {:?}", &tx.txid(), &tx); + + let now = get_epoch_time_secs(); + while get_epoch_time_secs() < now + 10 { + let Ok(info) = get_chain_info_result(&naka_conf) else { + debug!("follower_bootup: Could not get miner chain info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + + let Ok(follower_info) = get_chain_info_result(&follower_conf) else { + debug!("follower_bootup: Could not get follower chain info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + + if follower_info.burn_block_height < info.burn_block_height { + debug!("follower_bootup: Follower is behind miner's burnchain view"); + thread::sleep(Duration::from_millis(100)); + continue; + } + + if info.stacks_tip == last_tip { + debug!( + "follower_bootup: Miner stacks tip hasn't changed ({})", + &info.stacks_tip + ); + thread::sleep(Duration::from_millis(100)); + continue; + } + + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + if blocks_processed > blocks_processed_before { + break; + } + + debug!("follower_bootup: No blocks processed yet"); + thread::sleep(Duration::from_millis(100)); + } + + // compare chain tips + loop { + let Ok(info) = get_chain_info_result(&naka_conf) else { + debug!("follower_bootup: failed to load tip info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + + let Ok(follower_info) = get_chain_info_result(&follower_conf) else { + debug!("follower_bootup: Could not get follower chain info"); + thread::sleep(Duration::from_millis(100)); + continue; + }; + if info.stacks_tip == follower_info.stacks_tip { + debug!( + "follower_bootup: Follower has advanced to miner's tip {}", + &info.stacks_tip + ); + } else { + debug!( + "follower_bootup: Follower has NOT advanced to miner's tip: {} != {}", + &info.stacks_tip, follower_info.stacks_tip + ); + } + + last_tip = info.stacks_tip; + break; + } + } + + debug!("follower_bootup: Wait for next block-commit"); + let start_time = Instant::now(); + while commits_submitted.load(Ordering::SeqCst) <= commits_before { + if start_time.elapsed() >= Duration::from_secs(20) { + panic!("Timed out waiting for block-commit"); + } + thread::sleep(Duration::from_millis(100)); + } + debug!("follower_bootup: Block commit submitted"); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + assert_eq!( + tip.stacks_block_height, + block_height_pre_3_0 + ((inter_blocks_per_tenure + 1) * tenure_count), + "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" + ); + + // wait for follower to reach the chain tip + loop { + sleep_ms(1000); + let follower_node_info = get_chain_info(&follower_conf); + + info!( + "Follower tip is now {}/{}", + &follower_node_info.stacks_tip_consensus_hash, &follower_node_info.stacks_tip + ); + if follower_node_info.stacks_tip_consensus_hash == tip.consensus_hash + && follower_node_info.stacks_tip == tip.anchored_header.block_hash() + { + break; + } + } + + // Verify both nodes have the correct chain id + let miner_info = get_chain_info(&naka_conf); + assert_eq!(miner_info.network_id, 0x87654321); + + let follower_info = get_chain_info(&follower_conf); + assert_eq!(follower_info.network_id, 0x87654321); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + follower_coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + follower_run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); + follower_thread.join().unwrap(); +} + #[test] #[ignore] /// Test out various burn operations being processed in Nakamoto. From 7b238b5a5217d19a9e50a0f0206679cf5ce98198 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 11 Oct 2024 15:04:42 -0400 Subject: [PATCH 1322/1400] chore: address PR feedback and fix failing unit test --- stackslib/src/net/stackerdb/sync.rs | 36 ++++++++++++----------- stackslib/src/net/stackerdb/tests/sync.rs | 9 +++++- 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index d6610c20fc..237f582d26 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -226,24 +226,29 @@ impl StackerDBSync { if self.last_eviction_time + 60 < get_epoch_time_secs() { self.last_eviction_time = get_epoch_time_secs(); if self.replicas.len() > 0 { - eviction_index = Some(thread_rng().gen::() % self.replicas.len()); + eviction_index = Some(thread_rng().gen_range(0..self.replicas.len())); } } - let mut remove_naddr = None; - for (i, naddr) in self.replicas.iter().enumerate() { - if let Some(eviction_index) = eviction_index.as_ref() { - if *eviction_index == i { - debug!( - "{:?}: {}: don't reuse connection for replica {:?}", - network.get_local_peer(), - &self.smart_contract_id, - &naddr, - ); - remove_naddr = Some(naddr.clone()); - continue; - } + let remove_naddr = eviction_index.and_then(|idx| { + let removed = self.replicas.iter().nth(idx).cloned(); + if let Some(naddr) = removed.as_ref() { + debug!( + "{:?}: {}: don't reuse connection for replica {:?}", + network.get_local_peer(), + &self.smart_contract_id, + &naddr, + ); } + removed + }); + + if let Some(naddr) = remove_naddr { + self.replicas.remove(&naddr); + } + + // retain the remaining replica connections + for naddr in self.replicas.iter() { if let Some(event_id) = network.get_event_id(&naddr.to_neighbor_key(network)) { self.comms.pin_connection(event_id); debug!( @@ -255,9 +260,6 @@ impl StackerDBSync { ); } } - if let Some(naddr) = remove_naddr.take() { - self.replicas.remove(&naddr); - } } // reload from config diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index 746a3f0963..5f6e8a7bed 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -199,7 +199,14 @@ fn test_reconnect(network: &mut PeerNetwork) { .expect("FATAL: did not replace stacker dbs"); for (_sc, stacker_db_sync) in stacker_db_syncs.iter_mut() { - stacker_db_sync.connect_begin(network).unwrap(); + match stacker_db_sync.connect_begin(network) { + Ok(x) => {} + Err(net_error::PeerNotConnected) => {} + Err(net_error::NoSuchNeighbor) => {} + Err(e) => { + panic!("Failed to connect_begin: {:?}", &e); + } + } } network.stacker_db_syncs = Some(stacker_db_syncs); From c4b4635da08cc817dd41ea65dce257e1de7c2311 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 11 Oct 2024 20:27:03 -0700 Subject: [PATCH 1323/1400] Do not issue a BurnchainTipChanged error unless there is a new sortition Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/miner.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index af539db5b1..dad7719c44 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1201,11 +1201,14 @@ impl BlockMinerThread { } /// Check if the tenure needs to change -- if so, return a BurnchainTipChanged error + /// The tenure should change if there is a new burnchain tip with a valid sortition fn check_burn_tip_changed(&self, sortdb: &SortitionDB) -> Result<(), NakamotoNodeError> { let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if cur_burn_chain_tip.consensus_hash != self.burn_block.consensus_hash { + if cur_burn_chain_tip.consensus_hash != self.burn_block.consensus_hash + && cur_burn_chain_tip.sortition_id != self.burn_block.sortition_id + { info!("Miner: Cancel block assembly; burnchain tip has changed"); self.globals.counters.bump_missed_tenures(); Err(NakamotoNodeError::BurnchainTipChanged) From 98f187c1aef54a6c706acc95e68bf20c5f042c8d Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sat, 12 Oct 2024 13:16:01 -0700 Subject: [PATCH 1324/1400] feat: update continue_tenure_extend to ensure naka blocks after TenureExtend --- .../src/tests/nakamoto_integrations.rs | 39 ++++++++++++++++--- 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e25b7799a9..eb0ba19648 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -91,7 +91,7 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use super::bitcoin_regtest::BitcoinCoreController; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{EventKeyType, InitialBalance}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; @@ -6506,10 +6506,10 @@ fn continue_tenure_extend() { // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; - let send_fee = 100; + let send_fee = 200; naka_conf.add_initial_balance( PrincipalData::from(sender_addr.clone()).to_string(), - send_amt * 2 + send_fee, + (send_amt + send_fee) * 20, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); @@ -6519,6 +6519,7 @@ fn continue_tenure_extend() { ); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); + let mut transfer_nonce = 0; test_observer::spawn(); test_observer::register_any(&mut naka_conf); @@ -6637,7 +6638,8 @@ fn continue_tenure_extend() { .unwrap(); // Submit a TX - let transfer_tx = make_stacks_transfer(&sender_sk, 0, send_fee, &recipient, send_amt); + let transfer_tx = + make_stacks_transfer(&sender_sk, transfer_nonce, send_fee, &recipient, send_amt); let transfer_tx_hex = format!("0x{}", to_hex(&transfer_tx)); let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -6694,6 +6696,20 @@ fn continue_tenure_extend() { }) .unwrap(); + // Mine 3 nakamoto tenures + for i in 0..3 { + info!("Triggering Nakamoto blocks after extend ({})", i + 1); + transfer_nonce += 1; + let transfer_tx = + make_stacks_transfer(&sender_sk, transfer_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + wait_for(10, || { + let sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(sender_nonce >= transfer_nonce) + }) + .expect("Timed out waiting for transfer TX to confirm"); + } + info!("Resuming commit ops to mine regular tenures."); test_skip_commit_op.0.lock().unwrap().replace(false); @@ -6731,7 +6747,9 @@ fn continue_tenure_extend() { let mut tenure_extends = vec![]; let mut tenure_block_founds = vec![]; let mut transfer_tx_included = false; + let mut last_block_had_extend = false; for block in test_observer::get_blocks() { + let mut has_extend = false; for tx in block["transactions"].as_array().unwrap() { let raw_tx = tx["raw_tx"].as_str().unwrap(); if raw_tx == &transfer_tx_hex { @@ -6745,12 +6763,21 @@ fn continue_tenure_extend() { let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); match &parsed.payload { TransactionPayload::TenureChange(payload) => match payload.cause { - TenureChangeCause::Extended => tenure_extends.push(parsed), - TenureChangeCause::BlockFound => tenure_block_founds.push(parsed), + TenureChangeCause::Extended => { + has_extend = true; + tenure_extends.push(parsed); + } + TenureChangeCause::BlockFound => { + if last_block_had_extend { + panic!("Expected a Nakamoto block to happen after tenure extend block"); + } + tenure_block_founds.push(parsed); + } }, _ => {} }; } + last_block_had_extend = has_extend; } assert!( !tenure_extends.is_empty(), From 738db8d1fa0ff020034a191e4026fea41ca34243 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sat, 12 Oct 2024 15:14:06 -0700 Subject: [PATCH 1325/1400] feat: signer test for naka blocks after TenureExtend --- .../stacks-node/src/nakamoto_node/miner.rs | 6 +- .../src/nakamoto_node/sign_coordinator.rs | 8 +- testnet/stacks-node/src/tests/signer/v0.rs | 125 +++++++++++++++++- 3 files changed, 133 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index dad7719c44..a27f617fff 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -383,7 +383,7 @@ impl BlockMinerThread { "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); - return Err(e); + continue; } _ => { error!("Error while gathering signatures: {e:?}. Will try mining again."; @@ -1247,7 +1247,9 @@ impl ParentStacksBlockInfo { let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { + if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash + && burn_chain_tip.sortition_id != check_burn_block.sortition_id + { info!( "New canonical burn chain tip detected. Will not try to mine."; "new_consensus_hash" => %burn_chain_tip.consensus_hash, diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index f570009be5..cf0eaa6769 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -251,11 +251,13 @@ impl SignCoordinator { } /// Check if the tenure needs to change - fn check_burn_tip_changed(sortdb: &SortitionDB, consensus_hash: &ConsensusHash) -> bool { + fn check_burn_tip_changed(sortdb: &SortitionDB, burn_block: &BlockSnapshot) -> bool { let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if cur_burn_chain_tip.consensus_hash != *consensus_hash { + if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash + && cur_burn_chain_tip.sortition_id != burn_block.sortition_id + { info!("SignCoordinator: Cancel signature aggregation; burnchain tip has changed"); true } else { @@ -365,7 +367,7 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } - if Self::check_burn_tip_changed(&sortdb, &burn_tip.consensus_hash) { + if Self::check_burn_tip_changed(&sortdb, &burn_tip) { debug!("SignCoordinator: Exiting due to new burnchain tip"); return Err(NakamotoNodeError::BurnchainTipChanged); } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d1ceedfebf..52427daf46 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -34,6 +34,7 @@ use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoC use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; +use stacks::chainstate::stacks::{StacksTransaction, TenureChangeCause, TransactionPayload}; use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; @@ -42,7 +43,7 @@ use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STA use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::MerkleHashFunc; +use stacks::util::hash::{hex_bytes, MerkleHashFunc}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -4942,6 +4943,128 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { assert_ne!(block_n_2, block_n); } +#[test] +#[ignore] +/// Test that we can mine a tenure extend and then continue mining afterwards. +fn continue_after_tenure_extend() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let send_amt = 100; + let send_fee = 180; + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), (send_amt + send_fee) * 5)], + ); + let timeout = Duration::from_secs(200); + let coord_channel = signer_test.running_nodes.coord_channel.clone(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + info!("------------------------- Mine Normal Tenure -------------------------"); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + + info!("------------------------- Extend Tenure -------------------------"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); + + // It's possible that we have a pending block commit already. + // Mine two BTC blocks to "flush" this commit. + + let mut blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + + for i in 0..2 { + info!( + "------------- After pausing commits, triggering 2 BTC blocks: ({} of 2) -----------", + i + 1 + ); + + blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + wait_for(60, || { + let blocks_processed_after = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed_after > blocks_processed_before) + }) + .expect("Timed out waiting for tenure extend block"); + } + + // The last block should have a single instruction in it, the tenure extend + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) + if payload.cause == TenureChangeCause::Extended => {} + _ => panic!("Expected tenure extend transaction, got {:?}", parsed), + }; + + // Verify that the miner can continue mining in the tenure with the tenure extend + info!("------------------------- Mine After Tenure Extend -------------------------"); + let mut sender_nonce = 0; + blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + for _ in 0..5 { + // submit a tx so that the miner will mine an extra block + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal"); + wait_for(30, || { + let blocks_processed_after = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed_after > blocks_processed_before) + }) + .expect("Timed out waiting for block proposal"); + blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + info!("Block {blocks_processed_before} processed, continuing"); + } + + signer_test.shutdown(); +} + #[test] #[ignore] /// Test that signers can successfully sign a block proposal in the 0th tenure of a reward cycle From ff8f55d7657695cf656438f2376c7772fbb2839a Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sat, 12 Oct 2024 15:43:14 -0700 Subject: [PATCH 1326/1400] fix: add new signer test to bitcoin-tests --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 9 ++------- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 4115118eaf..7ad1014cde 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -118,6 +118,7 @@ jobs: - tests::signer::v0::mine_2_nakamoto_reward_cycles - tests::signer::v0::signer_set_rollover - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle + - tests::signer::v0::continue_after_tenure_extend - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 52427daf46..6336cc5d87 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4988,18 +4988,13 @@ fn continue_after_tenure_extend() { // It's possible that we have a pending block commit already. // Mine two BTC blocks to "flush" this commit. - let mut blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - for i in 0..2 { info!( "------------- After pausing commits, triggering 2 BTC blocks: ({} of 2) -----------", i + 1 ); - blocks_processed_before = coord_channel + let mut blocks_processed_before = coord_channel .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); @@ -5035,7 +5030,7 @@ fn continue_after_tenure_extend() { // Verify that the miner can continue mining in the tenure with the tenure extend info!("------------------------- Mine After Tenure Extend -------------------------"); let mut sender_nonce = 0; - blocks_processed_before = coord_channel + let mut blocks_processed_before = coord_channel .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); From db9d22e4463971452793acfa66ec622096237a59 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Mon, 14 Oct 2024 13:32:42 +0200 Subject: [PATCH 1327/1400] Return `tenure_height` in `v2/info` --- stacks-signer/src/client/mod.rs | 1 + stackslib/src/net/api/getinfo.rs | 46 +++++++++++++++++++++++++++----- 2 files changed, 41 insertions(+), 6 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 3fe9f09354..2cb8155f61 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -322,6 +322,7 @@ pub(crate) mod tests { stacks_tip_consensus_hash: generate_random_consensus_hash(), unanchored_tip: None, unanchored_seq: Some(0), + tenure_height: None, exit_at_block_height: None, is_fully_synced: false, genesis_chainstate_hash: Sha256Sum::zero(), diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index 237205f63a..cae7190849 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -27,11 +27,12 @@ use stacks_common::util::hash::{Hash160, Sha256Sum}; use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::Txid; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; use crate::core::mempool::MemPoolDB; use crate::net::http::{ parse_json, Error, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, - HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, + HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, }; use crate::net::httpcore::{ HttpPreambleExtensions, RPCRequestHandler, StacksHttpRequest, StacksHttpResponse, @@ -81,6 +82,7 @@ pub struct RPCPeerInfoData { pub genesis_chainstate_hash: Sha256Sum, pub unanchored_tip: Option, pub unanchored_seq: Option, + pub tenure_height: Option, pub exit_at_block_height: Option, pub is_fully_synced: bool, #[serde(default)] @@ -106,6 +108,7 @@ impl RPCPeerInfoData { chainstate: &StacksChainState, exit_at_block_height: Option, genesis_chainstate_hash: &Sha256Sum, + coinbase_height: Option, ibd: bool, ) -> RPCPeerInfoData { let server_version = version_string( @@ -148,7 +151,7 @@ impl RPCPeerInfoData { stacks_tip_consensus_hash: network.stacks_tip.consensus_hash.clone(), unanchored_tip: unconfirmed_tip, unanchored_seq: unconfirmed_seq, - exit_at_block_height: exit_at_block_height, + exit_at_block_height, is_fully_synced, genesis_chainstate_hash: genesis_chainstate_hash.clone(), node_public_key: Some(public_key_buf), @@ -169,6 +172,7 @@ impl RPCPeerInfoData { .map(|cid| format!("{}", cid)) .collect(), ), + tenure_height: coinbase_height, } } } @@ -217,16 +221,46 @@ impl RPCRequestHandler for RPCPeerInfoRequestHandler { node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { let ibd = node.ibd; - let rpc_peer_info = - node.with_node_state(|network, _sortdb, chainstate, _mempool, rpc_args| { - RPCPeerInfoData::from_network( + + let rpc_peer_info: Result = + node.with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { + let header = self + .get_stacks_chain_tip(&preamble, sortdb, chainstate) + .map_err(|e| { + StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!("Failed to load chain tip: {:?}", &e)), + ) + })?; + + let coinbase_height = NakamotoChainState::get_coinbase_height( + &mut chainstate.index_conn(), + &header.index_block_hash(), + ) + .map_err(|e| { + StacksHttpResponse::new_error( + &preamble, + &HttpServerError::new(format!("Failed to load coinbase height: {:?}", &e)), + ) + })?; + + Ok(RPCPeerInfoData::from_network( network, chainstate, rpc_args.exit_at_block_height.clone(), &rpc_args.genesis_chainstate_hash, + coinbase_height, ibd, - ) + )) }); + + let rpc_peer_info = match rpc_peer_info { + Ok(rpc_peer_info) => rpc_peer_info, + Err(response) => { + return response.try_into_contents().map_err(NetError::from); + } + }; + let mut preamble = HttpResponsePreamble::ok_json(&preamble); preamble.set_canonical_stacks_tip_height(Some(node.canonical_stacks_tip_height())); let body = HttpResponseContents::try_from_json(&rpc_peer_info)?; From 53018c16e070b776fce5b4b12e2f380b4314de37 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Mon, 14 Oct 2024 13:33:32 +0200 Subject: [PATCH 1328/1400] Add unit tests --- stackslib/src/net/api/tests/getinfo.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/api/tests/getinfo.rs b/stackslib/src/net/api/tests/getinfo.rs index 7d8aeff01c..2a0ae5eaf9 100644 --- a/stackslib/src/net/api/tests/getinfo.rs +++ b/stackslib/src/net/api/tests/getinfo.rs @@ -50,7 +50,7 @@ fn test_try_parse_request() { .try_parse_request(&parsed_preamble.expect_request(), &bytes[offset..]) .unwrap(); - // parsed request consumes headers that would not be in a constructed reqeuest + // parsed request consumes headers that would not be in a constructed request parsed_request.clear_headers(); parsed_request.add_header( "X-Canonical-Stacks-Tip-Height".to_string(), @@ -66,7 +66,7 @@ fn test_getinfo_compat() { let old_getinfo_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false}"#; let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d"}"#; let getinfo_no_pubkey_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; - let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; + let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf", "tenure_height": 2423}"#; // they all parse for json_obj in &[ @@ -102,4 +102,6 @@ fn test_try_make_response() { Some(1) ); let resp = response.decode_peer_info().unwrap(); + + assert_eq!(resp.tenure_height, Some(1)); } From f5d818fcdfd85aea005c746b4e278b16fee514f2 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Mon, 14 Oct 2024 13:33:47 +0200 Subject: [PATCH 1329/1400] Add integration tests --- .../src/tests/nakamoto_integrations.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e25b7799a9..5c058ffee4 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -91,7 +91,7 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use super::bitcoin_regtest::BitcoinCoreController; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{EventKeyType, InitialBalance}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; @@ -5014,6 +5014,11 @@ fn check_block_heights() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); + + // With the first Nakamoto block, the chain tip and the number of tenures + // must be the same (before Nakamoto every block counts as a tenure) + assert_eq!(info.tenure_height.unwrap(), info.stacks_tip_height); + let mut last_burn_block_height; let mut last_stacks_block_height = info.stacks_tip_height as u128; let mut last_tenure_height = last_stacks_block_height as u128; @@ -5145,6 +5150,9 @@ fn check_block_heights() { ); last_tenure_height = bh1; + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_eq!(info.tenure_height.unwrap(), bh3 as u64); + let sbh = heights3 .get("stacks-block-height") .unwrap() @@ -5247,6 +5255,9 @@ fn check_block_heights() { "Tenure height should not have changed" ); + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_eq!(info.tenure_height.unwrap(), bh3 as u64); + let sbh = heights3 .get("stacks-block-height") .unwrap() @@ -5287,6 +5298,12 @@ fn check_block_heights() { "Should have mined 1 + (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" ); + let info = get_chain_info_result(&naka_conf).unwrap(); + assert_eq!( + info.tenure_height.unwrap(), + block_height_pre_3_0 + tenure_count + ); + coord_channel .lock() .expect("Mutex poisoned") From e415848c46f6e0b576cabf6e8e9b446612cdc2f4 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Mon, 14 Oct 2024 13:34:01 +0200 Subject: [PATCH 1330/1400] Cleanup unused imports --- stackslib/src/net/api/tests/gettenureinfo.rs | 1 - stackslib/src/net/tests/relay/nakamoto.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/stackslib/src/net/api/tests/gettenureinfo.rs b/stackslib/src/net/api/tests/gettenureinfo.rs index db53a5daca..51a8a11785 100644 --- a/stackslib/src/net/api/tests/gettenureinfo.rs +++ b/stackslib/src/net/api/tests/gettenureinfo.rs @@ -25,7 +25,6 @@ use stacks_common::types::net::PeerHost; use stacks_common::types::Address; use super::test_rpc; -use crate::net::api::getinfo::RPCPeerInfoData; use crate::net::api::tests::TestRPC; use crate::net::api::*; use crate::net::connection::ConnectionOptions; diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index fb9db70d5b..606f1f3fb2 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -54,7 +54,6 @@ use crate::chainstate::stacks::tests::{ use crate::chainstate::stacks::{Error as ChainstateError, *}; use crate::clarity_vm::clarity::ClarityConnection; use crate::core::*; -use crate::net::api::getinfo::RPCPeerInfoData; use crate::net::asn::*; use crate::net::chat::*; use crate::net::codec::*; From ad16188d029fa82da82c4a46137308322c7e032d Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Mon, 14 Oct 2024 13:34:41 +0200 Subject: [PATCH 1331/1400] Update OpenAPI specs --- docs/rpc/api/core-node/get-info.example.json | 1 + docs/rpc/api/core-node/get-info.schema.json | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/docs/rpc/api/core-node/get-info.example.json b/docs/rpc/api/core-node/get-info.example.json index afc42e6f68..77ece128c3 100644 --- a/docs/rpc/api/core-node/get-info.example.json +++ b/docs/rpc/api/core-node/get-info.example.json @@ -11,6 +11,7 @@ "stacks_tip": "b1807a2d3f7f8c7922f7c1d60d7c34145ade05d789640dc7dc9ec1021e07bb54", "stacks_tip_consensus_hash": "17f76e597bab45646956f38dd39573085d72cbc0", "unanchored_tip": "0000000000000000000000000000000000000000000000000000000000000000", + "tenure_height": 523, "exit_at_block_height": null, "is_fully_synced": false } diff --git a/docs/rpc/api/core-node/get-info.schema.json b/docs/rpc/api/core-node/get-info.schema.json index 16b560ed5e..e997a2d19c 100644 --- a/docs/rpc/api/core-node/get-info.schema.json +++ b/docs/rpc/api/core-node/get-info.schema.json @@ -17,6 +17,7 @@ "stacks_tip", "stacks_tip_consensus_hash", "unanchored_tip", + "tenure_height", "exit_at_block_height", "is_fully_synced" ], @@ -69,6 +70,10 @@ "type": "string", "description": "the latest microblock hash if any microblocks were processed. if no microblock has been processed for the current block, a 000.., hex array is returned" }, + "tenure_height": { + "type": "integer", + "description": "the latest Stacks tenure height" + }, "exit_at_block_height": { "type": "integer", "description": "the block height at which the testnet network will be reset. not applicable for mainnet" From 30354d75b15b5bd0aa3ab89ed0f25f4fce786d2c Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Mon, 14 Oct 2024 13:42:43 +0200 Subject: [PATCH 1332/1400] Update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ccb9b5cac..eeb514f6b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-tenure-info?` added - `get-block-info?` removed - Added `/v3/signer/{signer_pubkey}/{reward_cycle}` endpoint +- Added `tenure_height` to `/v2/info` endpoint - Added optional `timeout_ms` to `events_observer` configuration ## [2.5.0.0.7] From cc89b148491ea624f04ac264f1a1be7f607aa606 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 14 Oct 2024 08:48:07 -0700 Subject: [PATCH 1333/1400] Fix occasional port bind error happening in mock mining by checking if in feature == testing and reserving test observer port Signed-off-by: Jacinta Ferrant --- stackslib/src/net/poll.rs | 2 +- testnet/stacks-node/src/tests/mod.rs | 7 +++++- .../src/tests/nakamoto_integrations.rs | 24 +++++++++---------- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/stackslib/src/net/poll.rs b/stackslib/src/net/poll.rs index bdda12e6d4..0362745f90 100644 --- a/stackslib/src/net/poll.rs +++ b/stackslib/src/net/poll.rs @@ -89,7 +89,7 @@ impl NetworkState { } fn bind_address(addr: &SocketAddr) -> Result { - if !cfg!(test) { + if !cfg!(test) && !cfg!(feature = "testing") { mio_net::TcpListener::bind(addr).map_err(|e| { error!("Failed to bind to {:?}: {:?}", addr, e); net_error::BindError diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index ba88584f39..c054be7245 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -23,6 +23,7 @@ use clarity::vm::events::STXEventType; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; use lazy_static::lazy_static; +use neon_integrations::test_observer::EVENT_OBSERVER_PORT; use rand::Rng; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::stacks::db::StacksChainState; @@ -109,11 +110,15 @@ pub fn gen_random_port() -> u16 { let mut rng = rand::thread_rng(); let range_len = (1024..u16::MAX).len(); loop { + // Note it needs to be +1 because we reserve one port for the event observer assert!( - USED_PORTS.lock().unwrap().len() < range_len, + USED_PORTS.lock().unwrap().len() + 1 < range_len, "No more available ports" ); let port = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 + if port == EVENT_OBSERVER_PORT { + continue; + } if insert_new_port(port) { return port; } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e25b7799a9..4cf9736559 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -8132,16 +8132,16 @@ fn mock_mining() { let send_amt = 100; let send_fee = 180; - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); let localhost = "127.0.0.1"; - naka_conf.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); - naka_conf.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); - naka_conf.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); - naka_conf.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + naka_conf.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + naka_conf.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + naka_conf.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + naka_conf.node.p2p_address = format!("{localhost}:{node_1_p2p}"); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.add_initial_balance( @@ -8221,10 +8221,10 @@ fn mock_mining() { follower_conf.node.seed = vec![0x01; 32]; follower_conf.node.local_peer_seed = vec![0x02; 32]; - follower_conf.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - follower_conf.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - follower_conf.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - follower_conf.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + follower_conf.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + follower_conf.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + follower_conf.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + follower_conf.node.p2p_address = format!("{localhost}:{node_2_p2p}"); let node_info = get_chain_info(&naka_conf); follower_conf.node.add_bootstrap_node( From d526f07b665759121401bbd035cc48495b44a32a Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 14 Oct 2024 09:17:36 -0700 Subject: [PATCH 1334/1400] Initialize the USED_PORTS with the EVENT_OBSERVER_PORT Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 9 +++++---- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index c054be7245..a9e36c55df 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -101,7 +101,11 @@ lazy_static! { } lazy_static! { - static ref USED_PORTS: Mutex> = Mutex::new(HashSet::new()); + static ref USED_PORTS: Mutex> = Mutex::new({ + let mut set = HashSet::new(); + set.insert(EVENT_OBSERVER_PORT); + set + }); } /// Generate a random port number between 1024 and 65534 (inclusive) and insert it into the USED_PORTS set. @@ -116,9 +120,6 @@ pub fn gen_random_port() -> u16 { "No more available ports" ); let port = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 - if port == EVENT_OBSERVER_PORT { - continue; - } if insert_new_port(port) { return port; } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 4cf9736559..02484405cb 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -91,7 +91,7 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; use super::bitcoin_regtest::BitcoinCoreController; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::config::{EventKeyType, InitialBalance}; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; From 2ea393699899af2d4885434125f4a19007400c71 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 14 Oct 2024 09:21:41 -0700 Subject: [PATCH 1335/1400] Fix range check Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index a9e36c55df..4393b0ab90 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -116,7 +116,7 @@ pub fn gen_random_port() -> u16 { loop { // Note it needs to be +1 because we reserve one port for the event observer assert!( - USED_PORTS.lock().unwrap().len() + 1 < range_len, + USED_PORTS.lock().unwrap().len() < range_len, "No more available ports" ); let port = rng.gen_range(1024..u16::MAX); // use a non-privileged port between 1024 and 65534 From daeed4cdeb5db514fb2ce39c10813a3147a1f7e8 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 14 Oct 2024 10:03:20 -0700 Subject: [PATCH 1336/1400] Remove outdated comment Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 4393b0ab90..d2d0760b70 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -114,7 +114,6 @@ pub fn gen_random_port() -> u16 { let mut rng = rand::thread_rng(); let range_len = (1024..u16::MAX).len(); loop { - // Note it needs to be +1 because we reserve one port for the event observer assert!( USED_PORTS.lock().unwrap().len() < range_len, "No more available ports" From 3412534e22194d906be042afb9690c7de17a04e6 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 14 Oct 2024 14:42:42 -0400 Subject: [PATCH 1337/1400] feat: ensure pending payloads are retrieved in order --- testnet/stacks-node/src/event_dispatcher.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 8c6d66cb8f..cd9053caaa 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -354,7 +354,8 @@ impl EventObserver { fn get_pending_payloads( conn: &Connection, ) -> Result, db_error> { - let mut stmt = conn.prepare("SELECT id, url, payload, timeout FROM pending_payloads")?; + let mut stmt = + conn.prepare("SELECT id, url, payload, timeout FROM pending_payloads ORDER BY id")?; let payload_iter = stmt.query_and_then( [], |row| -> Result<(i64, String, serde_json::Value, u64), db_error> { From be32137b097777f9dd80123a10d2bb5fdba8e83a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 14 Oct 2024 15:44:32 -0400 Subject: [PATCH 1338/1400] test: add test to verify payload resend --- testnet/stacks-node/src/event_dispatcher.rs | 112 ++++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index cd9053caaa..771df60318 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -321,6 +321,10 @@ impl RewardSetEventPayload { } } +#[cfg(test)] +static TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD: std::sync::Mutex> = + std::sync::Mutex::new(None); + impl EventObserver { fn init_db(db_path: &str) -> Result { let conn = Connection::open(db_path)?; @@ -377,6 +381,16 @@ impl EventObserver { } fn process_pending_payloads(conn: &Connection) { + #[cfg(test)] + if TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD + .lock() + .unwrap() + .unwrap_or(false) + { + warn!("Fault injection: skipping retry of payload"); + return; + } + let pending_payloads = match Self::get_pending_payloads(conn) { Ok(payloads) => payloads, Err(e) => { @@ -2171,4 +2185,102 @@ mod test { rx.recv_timeout(Duration::from_secs(5)) .expect("Server did not receive request in time"); } + + #[test] + fn test_send_payload_with_db_force_restart() { + let port = get_random_port(); + let timeout = Duration::from_secs(3); + let dir = tempdir().unwrap(); + let working_dir = dir.path().to_path_buf(); + + // Set up a channel to notify when the server has processed the request + let (tx, rx) = channel(); + + info!("Starting mock server on port {}", port); + // Start a mock server in a separate thread + let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + thread::spawn(move || { + let mut attempt = 0; + let mut _request_holder = None; + while let Ok(mut request) = server.recv() { + attempt += 1; + match attempt { + 1 => { + debug!("Mock server received request attempt 1"); + // Do not reply, forcing the sender to timeout and retry, + // but don't drop the request or it will receive a 500 error, + _request_holder = Some(request); + } + 2 => { + debug!("Mock server received request attempt 2"); + + // Verify the payload + let mut payload = String::new(); + request.as_reader().read_to_string(&mut payload).unwrap(); + let expected_payload = r#"{"key":"value"}"#; + assert_eq!(payload, expected_payload); + + // Simulate a successful response on the second attempt + let response = Response::from_string("HTTP/1.1 200 OK"); + request.respond(response).unwrap(); + } + 3 => { + debug!("Mock server received request attempt 3"); + + // Verify the payload + let mut payload = String::new(); + request.as_reader().read_to_string(&mut payload).unwrap(); + let expected_payload = r#"{"key":"value2"}"#; + assert_eq!(payload, expected_payload); + + // Simulate a successful response on the second attempt + let response = Response::from_string("HTTP/1.1 200 OK"); + request.respond(response).unwrap(); + + // When we receive attempt 3 (message 1, re-sent message 1, message 2), + // notify the test that the request was processed successfully + tx.send(()).unwrap(); + break; + } + _ => panic!("Unexpected request attempt"), + } + } + }); + + let observer = EventObserver::new( + Some(working_dir.clone()), + format!("127.0.0.1:{}", port), + timeout, + ); + + let payload = json!({"key": "value"}); + let payload2 = json!({"key": "value2"}); + + // Disable retrying so that it sends the payload only once + // and that payload will be ignored by the test server. + TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD + .lock() + .unwrap() + .replace(true); + + info!("Sending payload 1"); + + // Send the payload + observer.send_payload(&payload, "/test"); + + // Re-enable retrying + TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD + .lock() + .unwrap() + .replace(false); + + info!("Sending payload 2"); + + // Send another payload + observer.send_payload(&payload2, "/test"); + + // Wait for the server to process the requests + rx.recv_timeout(Duration::from_secs(5)) + .expect("Server did not receive request in time"); + } } From f3562118b92667b8e898ae7d8ea5053716340182 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 14 Oct 2024 16:47:10 -0700 Subject: [PATCH 1339/1400] adding some sample configs for signer event_observer --- .../conf/mainnet-follower-conf.toml | 2 +- testnet/stacks-node/conf/mainnet-signer.toml | 24 ++++++ .../conf/testnet-follower-conf.toml | 15 ++++ testnet/stacks-node/conf/testnet-signer.toml | 80 +++++++++++++++++++ 4 files changed, 120 insertions(+), 1 deletion(-) create mode 100644 testnet/stacks-node/conf/mainnet-signer.toml create mode 100644 testnet/stacks-node/conf/testnet-signer.toml diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 291f333523..941b349034 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -11,5 +11,5 @@ peer_host = "127.0.0.1" # Used for sending events to a local stacks-blockchain-api service # [[events_observer]] # endpoint = "localhost:3700" -# retry_count = 255 # events_keys = ["*"] +# timeout_ms = 60_000 diff --git a/testnet/stacks-node/conf/mainnet-signer.toml b/testnet/stacks-node/conf/mainnet-signer.toml new file mode 100644 index 0000000000..226fcae806 --- /dev/null +++ b/testnet/stacks-node/conf/mainnet-signer.toml @@ -0,0 +1,24 @@ +[node] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* +rpc_bind = "0.0.0.0:20443" +p2p_bind = "0.0.0.0:20444" +prometheus_bind = "0.0.0.0:9153" + +[burnchain] +mode = "mainnet" +peer_host = "127.0.0.1" + +# Used for sending events to a local stacks-blockchain-api service +# [[events_observer]] +# endpoint = "localhost:3700" +# events_keys = ["*"] +# timeout_ms = 60_000 + +# Used if running a local stacks-signer service +# [[events_observer]] +# endpoint = "127.0.0.1:30000" +# events_keys = ["stackerdb", "block_proposal", "burn_blocks"] + +# Used if running a local stacks-signer service +# [connection_options] +# auth_token = "" # fill with a unique password diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index 54814c610c..80226c5b89 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -12,6 +12,21 @@ peer_port = 18444 pox_prepare_length = 100 pox_reward_length = 900 +# Used for sending events to a local stacks-blockchain-api service +# [[events_observer]] +# endpoint = "localhost:3700" +# events_keys = ["*"] +# timeout_ms = 60_000 + +# Used if running a local stacks-signer service +# [[events_observer]] +# endpoint = "127.0.0.1:30000" +# events_keys = ["stackerdb", "block_proposal", "burn_blocks"] + +# Used if running a local stacks-signer service +# [connection_options] +# auth_token = "" # fill with a unique password + [[ustx_balance]] address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" amount = 10000000000000000 diff --git a/testnet/stacks-node/conf/testnet-signer.toml b/testnet/stacks-node/conf/testnet-signer.toml new file mode 100644 index 0000000000..80226c5b89 --- /dev/null +++ b/testnet/stacks-node/conf/testnet-signer.toml @@ -0,0 +1,80 @@ +[node] +# working_dir = "/dir/to/save/chainstate" # defaults to: /tmp/stacks-node-[0-9]* +rpc_bind = "0.0.0.0:20443" +p2p_bind = "0.0.0.0:20444" +bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444" +prometheus_bind = "0.0.0.0:9153" + +[burnchain] +mode = "krypton" +peer_host = "bitcoin.regtest.hiro.so" +peer_port = 18444 +pox_prepare_length = 100 +pox_reward_length = 900 + +# Used for sending events to a local stacks-blockchain-api service +# [[events_observer]] +# endpoint = "localhost:3700" +# events_keys = ["*"] +# timeout_ms = 60_000 + +# Used if running a local stacks-signer service +# [[events_observer]] +# endpoint = "127.0.0.1:30000" +# events_keys = ["stackerdb", "block_proposal", "burn_blocks"] + +# Used if running a local stacks-signer service +# [connection_options] +# auth_token = "" # fill with a unique password + +[[ustx_balance]] +address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" +amount = 10000000000000000 + +[[ustx_balance]] +address = "ST319CF5WV77KYR1H3GT0GZ7B8Q4AQPY42ETP1VPF" +amount = 10000000000000000 + +[[ustx_balance]] +address = "ST221Z6TDTC5E0BYR2V624Q2ST6R0Q71T78WTAX6H" +amount = 10000000000000000 + +[[ustx_balance]] +address = "ST2TFVBMRPS5SSNP98DQKQ5JNB2B6NZM91C4K3P7B" +amount = 10000000000000000 + +[[burnchain.epochs]] +epoch_name = "1.0" +start_height = 0 + +[[burnchain.epochs]] +epoch_name = "2.0" +start_height = 0 + +[[burnchain.epochs]] +epoch_name = "2.05" +start_height = 1 + +[[burnchain.epochs]] +epoch_name = "2.1" +start_height = 2 + +[[burnchain.epochs]] +epoch_name = "2.2" +start_height = 3 + +[[burnchain.epochs]] +epoch_name = "2.3" +start_height = 4 + +[[burnchain.epochs]] +epoch_name = "2.4" +start_height = 5 + +[[burnchain.epochs]] +epoch_name = "2.5" +start_height = 6 + +[[burnchain.epochs]] +epoch_name = "3.0" +start_height = 56_457 From e2bd98230483a3ee18d03cbfccfc795dbdcb2004 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Mon, 14 Oct 2024 17:17:40 -0700 Subject: [PATCH 1340/1400] feat: add chain_id to signer config --- stacks-signer/src/client/stackerdb.rs | 1 + stacks-signer/src/client/stacks_client.rs | 28 +++++--- stacks-signer/src/config.rs | 71 ++++++++++++++++--- stacks-signer/src/main.rs | 2 +- stacks-signer/src/tests/chainstate.rs | 2 + .../tests/conf/signer-custom-chain-id.toml | 7 ++ .../src/tests/nakamoto_integrations.rs | 3 +- testnet/stacks-node/src/tests/signer/mod.rs | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 1 + 9 files changed, 97 insertions(+), 19 deletions(-) create mode 100644 stacks-signer/src/tests/conf/signer-custom-chain-id.toml diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index b3f6528232..0fc43350db 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -255,6 +255,7 @@ mod tests { Some(100_000), None, Some(9000), + None, ); let config = GlobalConfig::load_from_str(&signer_config[0]).unwrap(); let signer_config = generate_signer_config(&config, 5); diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 5caf9d3f42..cae6a210b7 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -45,7 +45,7 @@ use serde::Deserialize; use serde_json::json; use slog::{slog_debug, slog_warn}; use stacks_common::codec::StacksMessageCodec; -use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; +use stacks_common::consts::CHAIN_ID_MAINNET; use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; @@ -99,7 +99,7 @@ impl From<&GlobalConfig> for StacksClient { stacks_address: config.stacks_address, http_origin: format!("http://{}", config.node_host), tx_version: config.network.to_transaction_version(), - chain_id: config.network.to_chain_id(), + chain_id: config.to_chain_id(), stacks_node_client: reqwest::blocking::Client::new(), mainnet: config.network.is_mainnet(), auth_password: config.auth_password.clone(), @@ -114,6 +114,7 @@ impl StacksClient { node_host: String, auth_password: String, mainnet: bool, + chain_id: u32, ) -> Self { let pubkey = StacksPublicKey::from_private(&stacks_private_key); let tx_version = if mainnet { @@ -121,11 +122,6 @@ impl StacksClient { } else { TransactionVersion::Testnet }; - let chain_id = if mainnet { - CHAIN_ID_MAINNET - } else { - CHAIN_ID_TESTNET - }; let stacks_address = StacksAddress::p2pkh(mainnet, &pubkey); Self { stacks_private_key, @@ -145,7 +141,13 @@ impl StacksClient { node_host: String, auth_password: String, ) -> Result { - let mut stacks_client = Self::new(stacks_private_key, node_host, auth_password, true); + let mut stacks_client = Self::new( + stacks_private_key, + node_host, + auth_password, + true, + CHAIN_ID_MAINNET, + ); let pubkey = StacksPublicKey::from_private(&stacks_private_key); let info = stacks_client.get_peer_info()?; if info.network_id == CHAIN_ID_MAINNET { @@ -154,7 +156,7 @@ impl StacksClient { stacks_client.tx_version = TransactionVersion::Mainnet; } else { stacks_client.mainnet = false; - stacks_client.chain_id = CHAIN_ID_TESTNET; + stacks_client.chain_id = info.network_id; stacks_client.tx_version = TransactionVersion::Testnet; } stacks_client.stacks_address = StacksAddress::p2pkh(stacks_client.mainnet, &pubkey); @@ -1219,4 +1221,12 @@ mod tests { write_response(mock.server, response.as_bytes()); assert_eq!(h.join().unwrap().unwrap(), reward_cycle as u128); } + + #[test] + fn get_chain_id_from_config() { + let mock = MockServerClient::from_config( + GlobalConfig::load_from_file("./src/tests/conf/signer-custom-chain-id.toml").unwrap(), + ); + assert_eq!(mock.client.chain_id, 0x80000100); + } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 9f72e171e5..375ed1a171 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -76,14 +76,6 @@ impl std::fmt::Display for Network { } impl Network { - /// Converts a Network enum variant to a corresponding chain id - pub const fn to_chain_id(&self) -> u32 { - match self { - Self::Mainnet => CHAIN_ID_MAINNET, - Self::Testnet | Self::Mocknet => CHAIN_ID_TESTNET, - } - } - /// Convert a Network enum variant to a corresponding address version pub const fn to_address_version(&self) -> u8 { match self { @@ -163,6 +155,8 @@ pub struct GlobalConfig { pub first_proposal_burn_block_timing: Duration, /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, + /// An optional custom Chain ID + chain_id: Option, } /// Internal struct for loading up the config file @@ -190,6 +184,8 @@ struct RawConfigFile { pub first_proposal_burn_block_timing_secs: Option, /// How much time to wait for a miner to propose a block following a sortition in milliseconds pub block_proposal_timeout_ms: Option, + /// An optional custom Chain ID + pub chain_id: Option, } impl RawConfigFile { @@ -278,6 +274,7 @@ impl TryFrom for GlobalConfig { metrics_endpoint, first_proposal_burn_block_timing, block_proposal_timeout, + chain_id: raw_data.chain_id, }) } } @@ -308,6 +305,7 @@ impl GlobalConfig { Some(endpoint) => endpoint.to_string(), None => "None".to_string(), }; + let chain_id = format!("{:x}", self.to_chain_id()); format!( r#" Stacks node host: {node_host} @@ -315,6 +313,7 @@ Signer endpoint: {endpoint} Stacks address: {stacks_address} Public key: {public_key} Network: {network} +Chain ID: 0x{chain_id} Database path: {db_path} Metrics endpoint: {metrics_endpoint} "#, @@ -329,6 +328,14 @@ Metrics endpoint: {metrics_endpoint} metrics_endpoint = metrics_endpoint, ) } + + /// Get the chain ID for the network + pub fn to_chain_id(&self) -> u32 { + self.chain_id.unwrap_or_else(|| match self.network { + Network::Mainnet => CHAIN_ID_MAINNET, + Network::Testnet | Network::Mocknet => CHAIN_ID_TESTNET, + }) + } } impl Display for GlobalConfig { @@ -356,6 +363,7 @@ pub fn build_signer_config_tomls( max_tx_fee_ustx: Option, tx_fee_ustx: Option, mut metrics_port_start: Option, + chain_id: Option, ) -> Vec { let mut signer_config_tomls = vec![]; @@ -421,6 +429,15 @@ metrics_endpoint = "{metrics_endpoint}" metrics_port_start = Some(metrics_port + 1); } + if let Some(chain_id) = chain_id { + signer_config_toml = format!( + r#" +{signer_config_toml} +chain_id = {chain_id} +"# + ) + } + signer_config_tomls.push(signer_config_toml); } @@ -453,6 +470,7 @@ mod tests { None, None, Some(4000), + None, ); let config = @@ -460,6 +478,8 @@ mod tests { assert_eq!(config.auth_password, "melon"); assert_eq!(config.metrics_endpoint, Some("localhost:4000".to_string())); + let global_config = GlobalConfig::try_from(config).unwrap(); + assert_eq!(global_config.to_chain_id(), CHAIN_ID_TESTNET); } #[test] @@ -473,8 +493,10 @@ Signer endpoint: 127.0.0.1:30000 Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet +Chain ID: 0x80000000 Database path: :memory: Metrics endpoint: 0.0.0.0:9090 +Chain ID: 2147483648 "#; let expected_str_v6 = r#" @@ -483,6 +505,7 @@ Signer endpoint: [::1]:30000 Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet +Chain ID: 0x80000000 Database path: :memory: Metrics endpoint: 0.0.0.0:9090 "#; @@ -531,5 +554,37 @@ db_path = ":memory:" ); let config = GlobalConfig::load_from_str(&config_toml).unwrap(); assert_eq!(config.stacks_address.to_string(), expected_addr); + assert_eq!(config.to_chain_id(), CHAIN_ID_MAINNET); + } + + #[test] + fn test_custom_chain_id() { + let pk = StacksPrivateKey::from_hex( + "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", + ) + .unwrap(); + + let node_host = "localhost"; + let network = Network::Testnet; + let password = "melon"; + let config_tomls = build_signer_config_tomls( + &[pk], + node_host, + None, + &network, + password, + rand::random(), + 3000, + None, + None, + Some(4000), + Some(0x80000100), + ); + + let config = + RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); + assert_eq!(config.chain_id, Some(0x80000100)); + let global_config = GlobalConfig::try_from(config).unwrap(); + assert_eq!(global_config.to_chain_id(), 0x80000100); } } diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 520d455258..bb680aae0b 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -121,7 +121,7 @@ fn handle_generate_stacking_signature( &private_key, // args.reward_cycle.into(), args.method.topic(), - config.network.to_chain_id(), + config.to_chain_id(), args.period.into(), args.max_amount, args.auth_id, diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 432325daf2..886480f063 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -32,6 +32,7 @@ use clarity::util::vrf::VRFProof; use libsigner::BlockProposal; use slog::slog_info; use stacks_common::bitvec::BitVec; +use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::info; use stacks_common::types::chainstate::{ ConsensusHash, StacksBlockId, StacksPrivateKey, StacksPublicKey, TrieHash, @@ -96,6 +97,7 @@ fn setup_test_environment( SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 10000).to_string(), "FOO".into(), false, + CHAIN_ID_TESTNET, ); let signer_db_dir = "/tmp/stacks-node-tests/signer-units/"; diff --git a/stacks-signer/src/tests/conf/signer-custom-chain-id.toml b/stacks-signer/src/tests/conf/signer-custom-chain-id.toml new file mode 100644 index 0000000000..1d1de36f1f --- /dev/null +++ b/stacks-signer/src/tests/conf/signer-custom-chain-id.toml @@ -0,0 +1,7 @@ +stacks_private_key = "126e916e77359ccf521e168feea1fcb9626c59dc375cae00c7464303381c7dff01" +node_host = "127.0.0.1:20444" +endpoint = "localhost:30001" +network = "testnet" +auth_password = "12345" +db_path = ":memory:" +chain_id = 0x80000100 diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 690a47f71d..93f4ac4106 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -78,7 +78,7 @@ use stacks::util_lib::signed_structured_data::pox4::{ use stacks_common::address::AddressHashMode; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; -use stacks_common::consts::STACKS_EPOCH_MAX; +use stacks_common::consts::{CHAIN_ID_TESTNET, STACKS_EPOCH_MAX}; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash, @@ -6400,6 +6400,7 @@ fn signer_chainstate() { .clone() .unwrap_or("".into()), false, + CHAIN_ID_TESTNET, ); wait_for_first_naka_block_commit(60, &commits_submitted); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 5dcbc9a16a..2e67234285 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -173,6 +173,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = (0..new_num_signers) From 8e1e73161e015ff7a6e4f77e78d3655222d47df3 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 14 Oct 2024 21:24:43 -0400 Subject: [PATCH 1341/1400] test: improve fault injection in event dispatcher --- testnet/stacks-node/src/event_dispatcher.rs | 42 ++++++++++++--------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 771df60318..43714f3573 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -322,8 +322,7 @@ impl RewardSetEventPayload { } #[cfg(test)] -static TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD: std::sync::Mutex> = - std::sync::Mutex::new(None); +static TEST_EVENT_OBSERVER_SKIP_RETRY: std::sync::Mutex> = std::sync::Mutex::new(None); impl EventObserver { fn init_db(db_path: &str) -> Result { @@ -381,16 +380,6 @@ impl EventObserver { } fn process_pending_payloads(conn: &Connection) { - #[cfg(test)] - if TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD - .lock() - .unwrap() - .unwrap_or(false) - { - warn!("Fault injection: skipping retry of payload"); - return; - } - let pending_payloads = match Self::get_pending_payloads(conn) { Ok(payloads) => payloads, Err(e) => { @@ -405,6 +394,17 @@ impl EventObserver { for (id, url, payload, timeout_ms) in pending_payloads { let timeout = Duration::from_millis(timeout_ms); Self::send_payload_directly(&payload, &url, timeout); + + #[cfg(test)] + if TEST_EVENT_OBSERVER_SKIP_RETRY + .lock() + .unwrap() + .unwrap_or(false) + { + warn!("Fault injection: delete_payload"); + return; + } + if let Err(e) = Self::delete_payload(conn, id) { error!( "Event observer: failed to delete pending payload from database"; @@ -459,6 +459,17 @@ impl EventObserver { ); } } + + #[cfg(test)] + if TEST_EVENT_OBSERVER_SKIP_RETRY + .lock() + .unwrap() + .unwrap_or(false) + { + warn!("Fault injection: skipping retry of payload"); + return; + } + sleep(backoff); backoff *= 2; } @@ -2258,10 +2269,7 @@ mod test { // Disable retrying so that it sends the payload only once // and that payload will be ignored by the test server. - TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD - .lock() - .unwrap() - .replace(true); + TEST_EVENT_OBSERVER_SKIP_RETRY.lock().unwrap().replace(true); info!("Sending payload 1"); @@ -2269,7 +2277,7 @@ mod test { observer.send_payload(&payload, "/test"); // Re-enable retrying - TEST_EVENT_OBSERVER_SKIP_SEND_PAYLOAD + TEST_EVENT_OBSERVER_SKIP_RETRY .lock() .unwrap() .replace(false); From d253dd7f325ac1f2064aa58482430d186c259f78 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 15 Oct 2024 10:10:11 -0400 Subject: [PATCH 1342/1400] feat: support custom chain ids in blockstack-cli `--testnet` will use the default testnet chain id and `--testnet=0x80000100` will set it to `0x80000100`. --- stackslib/src/blockstack_cli.rs | 75 +++++++++++++++++++++++++++------ 1 file changed, 63 insertions(+), 12 deletions(-) diff --git a/stackslib/src/blockstack_cli.rs b/stackslib/src/blockstack_cli.rs index 6fb9f45ed6..dbecb0393d 100644 --- a/stackslib/src/blockstack_cli.rs +++ b/stackslib/src/blockstack_cli.rs @@ -73,7 +73,9 @@ For usage information on those methods, call `blockstack-cli [method] -h` `blockstack-cli` accepts flag options as well: - --testnet instruct the transaction generator to use a testnet version byte instead of MAINNET (default) + --testnet[=chain-id] + instruct the transaction generator to use a testnet version byte instead of MAINNET (default) + optionally, you can specify a custom chain ID to use for the transaction "; @@ -185,6 +187,7 @@ enum CliError { ClarityGeneralError(ClarityError), Message(String), Usage, + InvalidChainId(std::num::ParseIntError), } impl std::error::Error for CliError { @@ -204,6 +207,7 @@ impl std::fmt::Display for CliError { CliError::ClarityGeneralError(e) => write!(f, "Clarity error: {}", e), CliError::Message(e) => write!(f, "{}", e), CliError::Usage => write!(f, "{}", USAGE), + CliError::InvalidChainId(e) => write!(f, "Invalid chain ID: {}", e), } } } @@ -848,18 +852,26 @@ fn main() { } fn main_handler(mut argv: Vec) -> Result { - let tx_version = if let Some(ix) = argv.iter().position(|x| x == "--testnet") { - argv.remove(ix); - TransactionVersion::Testnet - } else { - TransactionVersion::Mainnet - }; + let mut tx_version = TransactionVersion::Mainnet; + let mut chain_id = CHAIN_ID_MAINNET; + + // Look for the `--testnet` flag + if let Some(ix) = argv.iter().position(|x| x.starts_with("--testnet")) { + let flag = argv.remove(ix); + + // Check if `--testnet=` is used + if let Some(custom_chain_id) = flag.split('=').nth(1) { + // Attempt to parse the custom chain ID from hex + chain_id = u32::from_str_radix(custom_chain_id.trim_start_matches("0x"), 16) + .map_err(|err| CliError::InvalidChainId(err))?; + } else { + // Use the default testnet chain ID + chain_id = CHAIN_ID_TESTNET; + } - let chain_id = if tx_version == TransactionVersion::Testnet { - CHAIN_ID_TESTNET - } else { - CHAIN_ID_MAINNET - }; + // Set the transaction version to Testnet + tx_version = TransactionVersion::Testnet; + } if let Some((method, args)) = argv.split_first() { match method.as_str() { @@ -1220,4 +1232,43 @@ mod test { let result = main_handler(to_string_vec(&header_args)).unwrap(); eprintln!("result:\n{}", result); } + + #[test] + fn custom_chain_id() { + // Standard chain id + let tt_args = [ + "--testnet", + "token-transfer", + "043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3", + "1", + "0", + "ST1A14RBKJ289E3DP89QAZE2RRHDPWP5RHMYFRCHV", + "10", + ]; + + let result = main_handler(to_string_vec(&tt_args)); + assert!(result.is_ok()); + + let result = result.unwrap(); + let tx = decode_transaction(&[result], TransactionVersion::Testnet).unwrap(); + assert!(tx.contains("chain_id\":2147483648")); + + // Custom chain id + let tt_args = [ + "--testnet=0x12345678", + "token-transfer", + "043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3", + "1", + "0", + "ST1A14RBKJ289E3DP89QAZE2RRHDPWP5RHMYFRCHV", + "10", + ]; + + let result = main_handler(to_string_vec(&tt_args)); + assert!(result.is_ok()); + + let result = result.unwrap(); + let tx = decode_transaction(&[result], TransactionVersion::Testnet).unwrap(); + assert!(tx.contains("chain_id\":305419896")); + } } From e64de155feb7ae89f7e67ee65e6858af15794daf Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 15 Oct 2024 08:54:45 -0700 Subject: [PATCH 1343/1400] Only return BurnchainTipChanged error iff the sortition id also changed Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/miner.rs | 2 +- testnet/stacks-node/src/neon_node.rs | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a27f617fff..ffc3c49fab 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -383,7 +383,7 @@ impl BlockMinerThread { "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); - continue; + return Err(e); } _ => { error!("Error while gathering signatures: {e:?}. Will try mining again."; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index dcfa855c9b..f192f88969 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -4324,7 +4324,9 @@ impl ParentStacksBlockInfo { let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { + if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash + && burn_chain_tip.sortition_id != check_burn_block.sortition_id + { info!( "New canonical burn chain tip detected. Will not try to mine."; "new_consensus_hash" => %burn_chain_tip.consensus_hash, From b6c2e5572166161fd14bd98831ac47021b59fa5f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 15 Oct 2024 15:03:31 -0700 Subject: [PATCH 1344/1400] Use the ACTUAL burnchain tip in run_sign_v0 instead of the election block snapshot Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/miner.rs | 26 +++---------------- .../src/nakamoto_node/sign_coordinator.rs | 4 +-- testnet/stacks-node/src/neon_node.rs | 4 +-- testnet/stacks-node/src/tests/signer/v0.rs | 16 +++++++++++- 4 files changed, 20 insertions(+), 30 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ffc3c49fab..a08c0ab353 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -531,22 +531,6 @@ impl BlockMinerThread { )) })?; - let tip = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - &new_block.header.consensus_hash, - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to retrieve chain tip: {:?}", - e - )) - }) - .and_then(|result| { - result.ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure("Failed to retrieve chain tip".into()) - }) - })?; - let reward_set = self.load_signer_set()?; if self.config.get_node_config(false).mock_mining { @@ -574,7 +558,7 @@ impl BlockMinerThread { let signature = coordinator.run_sign_v0( new_block, - &tip, + &self.burn_block, &self.burnchain, &sort_db, &mut chain_state, @@ -1206,9 +1190,7 @@ impl BlockMinerThread { let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if cur_burn_chain_tip.consensus_hash != self.burn_block.consensus_hash - && cur_burn_chain_tip.sortition_id != self.burn_block.sortition_id - { + if cur_burn_chain_tip.consensus_hash != self.burn_block.consensus_hash { info!("Miner: Cancel block assembly; burnchain tip has changed"); self.globals.counters.bump_missed_tenures(); Err(NakamotoNodeError::BurnchainTipChanged) @@ -1247,9 +1229,7 @@ impl ParentStacksBlockInfo { let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash - && burn_chain_tip.sortition_id != check_burn_block.sortition_id - { + if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { info!( "New canonical burn chain tip detected. Will not try to mine."; "new_consensus_hash" => %burn_chain_tip.consensus_hash, diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index cf0eaa6769..2694d1d9ca 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -255,9 +255,7 @@ impl SignCoordinator { let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash - && cur_burn_chain_tip.sortition_id != burn_block.sortition_id - { + if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash { info!("SignCoordinator: Cancel signature aggregation; burnchain tip has changed"); true } else { diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index f192f88969..dcfa855c9b 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -4324,9 +4324,7 @@ impl ParentStacksBlockInfo { let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash - && burn_chain_tip.sortition_id != check_burn_block.sortition_id - { + if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { info!( "New canonical burn chain tip detected. Will not try to mine."; "new_consensus_hash" => %burn_chain_tip.consensus_hash, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d888d41c70..e063279f97 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5121,7 +5121,11 @@ fn continue_after_tenure_extend() { // It's possible that we have a pending block commit already. // Mine two BTC blocks to "flush" this commit. - + let burn_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .burn_block_height; for i in 0..2 { info!( "------------- After pausing commits, triggering 2 BTC blocks: ({} of 2) -----------", @@ -5147,6 +5151,16 @@ fn continue_after_tenure_extend() { .expect("Timed out waiting for tenure extend block"); } + wait_for(30, || { + let new_burn_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .burn_block_height; + Ok(new_burn_height == burn_height + 2) + }) + .expect("Timed out waiting for burnchain to advance"); + // The last block should have a single instruction in it, the tenure extend let blocks = test_observer::get_blocks(); let last_block = blocks.last().unwrap(); From 07a06c0ccddd9d8e8e844bbfa4d8b3cbba0efbdc Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 16 Oct 2024 12:20:22 +0100 Subject: [PATCH 1345/1400] Update stackslib/src/net/api/getinfo.rs Co-authored-by: Aaron Blankstein --- stackslib/src/net/api/getinfo.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index cae7190849..3ffc9b6908 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -235,7 +235,7 @@ impl RPCRequestHandler for RPCPeerInfoRequestHandler { let coinbase_height = NakamotoChainState::get_coinbase_height( &mut chainstate.index_conn(), - &header.index_block_hash(), + &StacksBlockId::new(&network.stacks_tip.consensus_hash, &network.stacks_tip.block_hash), ) .map_err(|e| { StacksHttpResponse::new_error( From 6e4db07ebf5516f83fc28a1150ab0262c65ba4e7 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 16 Oct 2024 12:22:18 +0100 Subject: [PATCH 1346/1400] chore: reuse cached state also in `postmempoolquery.rs` --- stackslib/src/net/api/getinfo.rs | 14 ++++---------- stackslib/src/net/api/postmempoolquery.rs | 15 +++++++++------ 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index 3ffc9b6908..5f08044242 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -224,18 +224,12 @@ impl RPCRequestHandler for RPCPeerInfoRequestHandler { let rpc_peer_info: Result = node.with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { - let header = self - .get_stacks_chain_tip(&preamble, sortdb, chainstate) - .map_err(|e| { - StacksHttpResponse::new_error( - &preamble, - &HttpServerError::new(format!("Failed to load chain tip: {:?}", &e)), - ) - })?; - let coinbase_height = NakamotoChainState::get_coinbase_height( &mut chainstate.index_conn(), - &StacksBlockId::new(&network.stacks_tip.consensus_hash, &network.stacks_tip.block_hash), + &StacksBlockId::new( + &network.stacks_tip.consensus_hash, + &network.stacks_tip.block_hash, + ), ) .map_err(|e| { StacksHttpResponse::new_error( diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index 2155863220..c6a830569c 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -116,9 +116,9 @@ impl StacksMemPoolStream { Self { tx_query, - last_randomized_txid: last_randomized_txid, + last_randomized_txid, num_txs: 0, - max_txs: max_txs, + max_txs, coinbase_height, corked: false, finished: false, @@ -276,10 +276,13 @@ impl RPCRequestHandler for RPCMempoolQueryRequestHandler { let page_id = self.page_id.take(); let stream_res = node.with_node_state(|network, sortdb, chainstate, mempool, _rpc_args| { - let header = self.get_stacks_chain_tip(&preamble, sortdb, chainstate) - .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load chain tip: {:?}", &e))))?; - - let coinbase_height = NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &header.index_block_hash()) + let coinbase_height = NakamotoChainState::get_coinbase_height( + &mut chainstate.index_conn(), + &StacksBlockId::new( + &network.stacks_tip.consensus_hash, + &network.stacks_tip.block_hash + ), + ) .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load coinbase height: {:?}", &e))))? .unwrap_or(0); From f5e71b3da64022c312856f036ff7560563455197 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 16 Oct 2024 12:23:29 +0100 Subject: [PATCH 1347/1400] feat: make the deserializer more backwards compatible Co-authored-by: Aaron Blankstein --- stackslib/src/net/api/getinfo.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index 5f08044242..846e5a18d2 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -82,6 +82,8 @@ pub struct RPCPeerInfoData { pub genesis_chainstate_hash: Sha256Sum, pub unanchored_tip: Option, pub unanchored_seq: Option, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] pub tenure_height: Option, pub exit_at_block_height: Option, pub is_fully_synced: bool, From 2497d07c5a2715b85e5bec589e82f0ef508f1466 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Wed, 16 Oct 2024 12:27:18 +0100 Subject: [PATCH 1348/1400] chore: formatting --- stackslib/src/net/api/postmempoolquery.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index c6a830569c..3710db7dc8 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -279,7 +279,7 @@ impl RPCRequestHandler for RPCMempoolQueryRequestHandler { let coinbase_height = NakamotoChainState::get_coinbase_height( &mut chainstate.index_conn(), &StacksBlockId::new( - &network.stacks_tip.consensus_hash, + &network.stacks_tip.consensus_hash, &network.stacks_tip.block_hash ), ) From 9c11fd6af24a66389d745ea14e21c067d60eabe0 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 16 Oct 2024 09:53:39 -0700 Subject: [PATCH 1349/1400] feat: better exposing of signer version --- stacks-signer/src/cli.rs | 27 ++------------------------ stacks-signer/src/lib.rs | 11 +++++++++++ stacks-signer/src/main.rs | 7 ++++++- stacks-signer/src/monitoring/server.rs | 2 ++ 4 files changed, 21 insertions(+), 26 deletions(-) diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 3b74635cbc..97829b6977 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -29,7 +29,6 @@ use clarity::util::hash::Sha256Sum; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; use clarity::vm::Value; -use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use stacks_common::address::{ b58, AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, @@ -39,31 +38,9 @@ use stacks_common::address::{ use stacks_common::define_u8_enum; use stacks_common::types::chainstate::StacksPrivateKey; -extern crate alloc; +use crate::VERSION_STRING; -const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH"); -const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT"); -#[cfg(debug_assertions)] -const BUILD_TYPE: &str = "debug"; -#[cfg(not(debug_assertions))] -const BUILD_TYPE: &str = "release"; - -lazy_static! { - static ref VERSION_STRING: String = { - let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); - let git_branch = GIT_BRANCH.unwrap_or(""); - let git_commit = GIT_COMMIT.unwrap_or(""); - format!( - "{} ({}:{}, {} build, {} [{}])", - pkg_version, - git_branch, - git_commit, - BUILD_TYPE, - std::env::consts::OS, - std::env::consts::ARCH - ) - }; -} +extern crate alloc; #[derive(Parser, Debug)] #[command(author, version, about)] diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 20c2bc2ca8..3555435eaa 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -48,8 +48,10 @@ mod tests; use std::fmt::{Debug, Display}; use std::sync::mpsc::{channel, Receiver, Sender}; +use blockstack_lib::version_string; use chainstate::SortitionsView; use config::GlobalConfig; +use lazy_static::lazy_static; use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait}; use runloop::SignerResult; use slog::{slog_info, slog_warn}; @@ -59,6 +61,14 @@ use crate::client::StacksClient; use crate::config::SignerConfig; use crate::runloop::RunLoop; +lazy_static! { + /// The version string for the signer + pub static ref VERSION_STRING: String = { + let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); + version_string("stacks-signer", pkg_version) + }; +} + /// A trait which provides a common `Signer` interface for `v0` and `v1` pub trait Signer: Debug + Display { /// Create a new `Signer` instance @@ -113,6 +123,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner /// Create a new spawned signer pub fn new(config: GlobalConfig) -> Self { let endpoint = config.endpoint; + info!("Stacks signer version {:?}", VERSION_STRING.as_str()); info!("Starting signer with config: {:?}", config); warn!( "Reminder: The signer is primarily designed for use with a local or subnet network stacks node. \ diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index bb680aae0b..56f322b185 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -47,6 +47,7 @@ use stacks_signer::config::GlobalConfig; use stacks_signer::monitor_signers::SignerMonitor; use stacks_signer::utils::stackerdb_session; use stacks_signer::v0::SpawnedSigner; +use stacks_signer::VERSION_STRING; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; @@ -157,7 +158,11 @@ fn handle_generate_stacking_signature( fn handle_check_config(args: RunSignerArgs) { let config = GlobalConfig::try_from(&args.config).unwrap(); - println!("Config: {}", config); + println!( + "Signer version: {}\nConfig: \n{}", + VERSION_STRING.to_string(), + config + ); } fn handle_generate_vote(args: GenerateVoteArgs, do_print: bool) -> MessageSignature { diff --git a/stacks-signer/src/monitoring/server.rs b/stacks-signer/src/monitoring/server.rs index ffde008c9f..f5e3cceef1 100644 --- a/stacks-signer/src/monitoring/server.rs +++ b/stacks-signer/src/monitoring/server.rs @@ -28,6 +28,7 @@ use crate::client::{ClientError, StacksClient}; use crate::config::{GlobalConfig, Network}; use crate::monitoring::prometheus::gather_metrics_string; use crate::monitoring::{update_signer_nonce, update_stacks_tip_height}; +use crate::VERSION_STRING; #[derive(thiserror::Error, Debug)] /// Monitoring server errors @@ -215,6 +216,7 @@ impl MonitoringServer { "signerPublicKey": to_hex(&self.public_key.to_bytes_compressed()), "network": self.network.to_string(), "stxAddress": self.stacks_client.get_signer_address().to_string(), + "version": VERSION_STRING.to_string(), })) .expect("Failed to serialize JSON") } From d7f0ba2404313eae7eb5f0782244dad93b2614e4 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 16 Oct 2024 12:27:52 -0500 Subject: [PATCH 1350/1400] feat: interpret block heights as tenure heights in Clarity 2 contracts executing post-3.0 --- clarity/src/vm/database/clarity_db.rs | 31 ++ clarity/src/vm/docs/mod.rs | 8 + clarity/src/vm/functions/database.rs | 18 ++ clarity/src/vm/test_util/mod.rs | 8 + stackslib/src/chainstate/nakamoto/mod.rs | 2 +- .../chainstate/stacks/boot/contract_tests.rs | 8 + stackslib/src/clarity_cli.rs | 8 + stackslib/src/clarity_vm/database/mod.rs | 110 +++++++ stackslib/src/cli.rs | 285 +++++++++++++++++- 9 files changed, 475 insertions(+), 3 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index cdf411fc3e..4f64bc08bb 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -127,6 +127,11 @@ pub trait HeadersDB { id_bhh: &StacksBlockId, epoch: &StacksEpochId, ) -> Option; + fn get_stacks_height_for_tenure_height( + &self, + tip: &StacksBlockId, + tenure_height: u32, + ) -> Option; } pub trait BurnStateDB { @@ -285,6 +290,13 @@ impl HeadersDB for NullHeadersDB { ) -> Option { None } + fn get_stacks_height_for_tenure_height( + &self, + _tip: &StacksBlockId, + _tenure_height: u32, + ) -> Option { + None + } } #[allow(clippy::panic)] @@ -915,6 +927,25 @@ impl<'a> ClarityDatabase<'a> { } } + pub fn get_block_height_for_tenure_height( + &mut self, + tenure_height: u32, + ) -> Result> { + let current_tenure_height = self.get_tenure_height()?; + if current_tenure_height < tenure_height { + return Ok(None); + } + if current_tenure_height == tenure_height { + return Ok(Some(self.get_current_block_height())); + } + let current_height = self.get_current_block_height(); + // query from the parent + let query_tip = self.get_index_block_header_hash(current_height.saturating_sub(1))?; + Ok(self + .headers_db + .get_stacks_height_for_tenure_height(&query_tip, tenure_height.into())) + } + /// Get the last-known burnchain block height. /// Note that this is _not_ the burnchain height in which this block was mined! /// This is the burnchain block height of the parent of the Stacks block at the current Stacks diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 65b08e3102..d718ff5366 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2863,6 +2863,14 @@ mod test { ) -> Option { Some(12000) } + + fn get_stacks_height_for_tenure_height( + &self, + _tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + Some(tenure_height) + } } struct DocBurnStateDB {} diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index f048a59536..eecb5e2ba0 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -769,6 +769,24 @@ pub fn special_get_block_info( _ => return Ok(Value::none()), }; + let height_value = if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity3 { + if env.global_context.epoch_id < StacksEpochId::Epoch30 { + height_value + } else { + // interpretting height_value as a tenure height + let height_opt = env + .global_context + .database + .get_block_height_for_tenure_height(height_value)?; + match height_opt { + Some(x) => x, + None => return Ok(Value::none()), + } + } + } else { + height_value + }; + let current_block_height = env.global_context.database.get_current_block_height(); if height_value >= current_block_height { return Ok(Value::none()); diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index 2df79766a2..f2b6d4dd09 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -229,6 +229,14 @@ impl HeadersDB for UnitTestHeaderDB { // if the block is defined at all, then return a constant self.get_burn_block_height_for_block(id_bhh).map(|_| 3000) } + + fn get_stacks_height_for_tenure_height( + &self, + _tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + Some(tenure_height) + } } impl BurnStateDB for UnitTestBurnStateDB { diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e000c9c582..8abbe058f5 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -3886,7 +3886,7 @@ impl NakamotoChainState { /// Append a Nakamoto Stacks block to the Stacks chain state. /// NOTE: This does _not_ set the block as processed! The caller must do this. - fn append_block<'a>( + pub(crate) fn append_block<'a>( chainstate_tx: &mut ChainstateTx, clarity_instance: &'a mut ClarityInstance, burn_dbconn: &mut SortitionHandleConn, diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 04b74ba2e9..4d4e875ba3 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -649,6 +649,14 @@ impl HeadersDB for TestSimHeadersDB { // if the block is defined at all, then return a constant self.get_burn_block_height_for_block(id_bhh).map(|_| 3000) } + + fn get_stacks_height_for_tenure_height( + &self, + _tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + Some(tenure_height) + } } #[test] diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 21cf55dea6..f23be191ff 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -766,6 +766,14 @@ impl HeadersDB for CLIHeadersDB { // if the block is defined at all, then return a constant get_cli_block_height(&self.conn(), id_bhh).map(|_| 3000) } + + fn get_stacks_height_for_tenure_height( + &self, + _tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + Some(tenure_height) + } } fn get_eval_input(invoked_by: &str, args: &[String]) -> EvalInput { diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 81f0bac43c..b12c4470ef 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -47,7 +47,22 @@ pub trait GetTenureStartId { tip: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result, DBError>; + fn get_tenure_ch_at_cb_height( + &self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError>; fn conn(&self) -> &Connection; + fn get_tenure_block_id_at_cb_height( + &self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError> { + let Some(tenure_ch) = self.get_tenure_ch_at_cb_height(tip, coinbase_height)? else { + return Ok(None); + }; + self.get_tenure_block_id(tip, &tenure_ch) + } } impl GetTenureStartId for StacksDBConn<'_> { @@ -66,6 +81,21 @@ impl GetTenureStartId for StacksDBConn<'_> { .map(|block_id| TenureBlockId::from(block_id))) } + fn get_tenure_ch_at_cb_height( + &self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError> { + let opt_out = self + .get_indexed( + tip, + &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), + )? + .map(|hex_inp| nakamoto_keys::parse_consensus_hash(&hex_inp)) + .flatten(); + Ok(opt_out) + } + fn conn(&self) -> &Connection { self.sqlite() } @@ -87,6 +117,21 @@ impl GetTenureStartId for StacksDBTx<'_> { .map(|block_id| TenureBlockId::from(block_id))) } + fn get_tenure_ch_at_cb_height( + &self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError> { + let opt_out = self + .get_indexed_ref( + tip, + &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), + )? + .map(|hex_inp| nakamoto_keys::parse_consensus_hash(&hex_inp)) + .flatten(); + Ok(opt_out) + } + fn conn(&self) -> &Connection { self.sqlite() } @@ -105,6 +150,15 @@ impl GetTenureStartId for MARF { fn conn(&self) -> &Connection { self.sqlite_conn() } + + fn get_tenure_ch_at_cb_height( + &self, + tip: &StacksBlockId, + coinbase_height: u64, + ) -> Result, DBError> { + let dbconn = StacksDBConn::new(self, ()); + dbconn.get_tenure_ch_at_cb_height(tip, coinbase_height) + } } pub struct HeadersDBConn<'a>(pub StacksDBConn<'a>); @@ -188,6 +242,22 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { }) } + fn get_stacks_height_for_tenure_height( + &self, + tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + let tenure_block_id = + GetTenureStartId::get_tenure_block_id_at_cb_height(&self.0, tip, tenure_height.into()) + .expect("FATAL: bad DB data for tenure height lookups")?; + get_stacks_header_column(self.0.conn(), &tenure_block_id.0, "block_height", |r| { + u64::from_row(r) + .expect("FATAL: malformed block_height") + .try_into() + .expect("FATAL: blockchain too long") + }) + } + fn get_vrf_seed_for_block( &self, id_bhh: &StacksBlockId, @@ -417,6 +487,25 @@ impl<'a> HeadersDB for ChainstateTx<'a> { let tenure_id_bhh = get_first_block_in_tenure(self.deref(), id_bhh, Some(epoch)); get_matured_reward(self.deref(), &tenure_id_bhh, epoch).map(|x| x.total().into()) } + + fn get_stacks_height_for_tenure_height( + &self, + tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + let tenure_block_id = GetTenureStartId::get_tenure_block_id_at_cb_height( + self.deref(), + tip, + tenure_height.into(), + ) + .expect("FATAL: bad DB data for tenure height lookups")?; + get_stacks_header_column(self.deref(), &tenure_block_id.0, "block_height", |r| { + u64::from_row(r) + .expect("FATAL: malformed block_height") + .try_into() + .expect("FATAL: blockchain too long") + }) + } } impl HeadersDB for MARF { @@ -572,6 +661,27 @@ impl HeadersDB for MARF { let tenure_id_bhh = get_first_block_in_tenure(self, id_bhh, Some(epoch)); get_matured_reward(self, &tenure_id_bhh, epoch).map(|x| x.total().into()) } + + fn get_stacks_height_for_tenure_height( + &self, + tip: &StacksBlockId, + tenure_height: u32, + ) -> Option { + let tenure_block_id = + GetTenureStartId::get_tenure_block_id_at_cb_height(self, tip, tenure_height.into()) + .expect("FATAL: bad DB data for tenure height lookups")?; + get_stacks_header_column( + self.sqlite_conn(), + &tenure_block_id.0, + "block_height", + |r| { + u64::from_row(r) + .expect("FATAL: malformed block_height") + .try_into() + .expect("FATAL: blockchain too long") + }, + ) + } } /// Select a specific column from the headers table, specifying whether to use diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 493ab18de5..9ff6e55644 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -22,6 +22,7 @@ use std::time::Instant; use std::{env, fs, io, process, thread}; use clarity::types::chainstate::SortitionId; +use db::blocks::DummyEventDispatcher; use db::ChainstateTx; use regex::Regex; use rusqlite::{Connection, OpenFlags}; @@ -30,12 +31,16 @@ use stacks_common::types::sqlite::NO_PARAMS; use crate::burnchains::db::BurnchainDB; use crate::burnchains::PoxConstants; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle, SortitionHandleContext}; +use crate::chainstate::burn::db::sortdb::{ + get_ancestor_sort_id, SortitionDB, SortitionHandle, SortitionHandleContext, +}; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use crate::chainstate::coordinator::OnChainRewardSetProvider; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::blocks::StagingBlock; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::miner::*; -use crate::chainstate::stacks::*; +use crate::chainstate::stacks::{Error as ChainstateError, *}; use crate::clarity_vm::clarity::ClarityInstance; use crate::core::*; use crate::util_lib::db::IndexDBTx; @@ -519,3 +524,279 @@ fn replay_block( } }; } + +fn replay_block_nakamoto( + sort_db: &mut SortitionDB, + stacks_chain_state: &mut StacksChainState, + mut chainstate_tx: ChainstateTx, + clarity_instance: &mut ClarityInstance, + block: &NakamotoBlock, + block_size: u64, +) -> Result<(), ChainstateError> { + // find corresponding snapshot + let next_ready_block_snapshot = + SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &block.header.consensus_hash)? + .unwrap_or_else(|| { + panic!( + "CORRUPTION: staging Nakamoto block {}/{} does not correspond to a burn block", + &block.header.consensus_hash, + &block.header.block_hash() + ) + }); + + debug!("Process staging Nakamoto block"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "burn_block_hash" => %next_ready_block_snapshot.burn_header_hash + ); + + let elected_height = sort_db + .get_consensus_hash_height(&block.header.consensus_hash)? + .ok_or_else(|| ChainstateError::NoSuchBlockError)?; + let elected_in_cycle = sort_db + .pox_constants + .block_height_to_reward_cycle(sort_db.first_block_height, elected_height) + .ok_or_else(|| { + ChainstateError::InvalidStacksBlock( + "Elected in block height before first_block_height".into(), + ) + })?; + let active_reward_set = OnChainRewardSetProvider::(None) + .read_reward_set_nakamoto_of_cycle( + elected_in_cycle, + stacks_chain_state, + sort_db, + &block.header.parent_block_id, + true, + ) + .map_err(|e| { + warn!( + "Cannot process Nakamoto block: could not load reward set that elected the block"; + "err" => ?e, + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "parent_block_id" => %block.header.parent_block_id, + ); + ChainstateError::NoSuchBlockError + })?; + let (mut chainstate_tx, clarity_instance) = stacks_chain_state.chainstate_tx_begin()?; + + // find parent header + let Some(parent_header_info) = + NakamotoChainState::get_block_header(&chainstate_tx.tx, &block.header.parent_block_id)? + else { + // no parent; cannot process yet + info!("Cannot process Nakamoto block: missing parent header"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "parent_block_id" => %block.header.parent_block_id + ); + return Ok(()); + }; + + // sanity check -- must attach to parent + let parent_block_id = StacksBlockId::new( + &parent_header_info.consensus_hash, + &parent_header_info.anchored_header.block_hash(), + ); + if parent_block_id != block.header.parent_block_id { + drop(chainstate_tx); + + let msg = "Discontinuous Nakamoto Stacks block"; + warn!("{}", &msg; + "child parent_block_id" => %block.header.parent_block_id, + "expected parent_block_id" => %parent_block_id, + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id() + ); + return Err(ChainstateError::InvalidStacksBlock(msg.into())); + } + + // set the sortition handle's pointer to the block's burnchain view. + // this is either: + // (1) set by the tenure change tx if one exists + // (2) the same as parent block id + + let burnchain_view = if let Some(tenure_change) = block.get_tenure_tx_payload() { + if let Some(ref parent_burn_view) = parent_header_info.burn_view { + // check that the tenure_change's burn view descends from the parent + let parent_burn_view_sn = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + parent_burn_view, + )? + .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "parent_block_id" => %block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock( + "Failed to load burn view of parent block ID".into(), + ) + })?; + let handle = sort_db.index_handle_at_ch(&tenure_change.burn_view_consensus_hash)?; + let connected_sort_id = get_ancestor_sort_id( + &handle, + parent_burn_view_sn.block_height, + &handle.context.chain_tip, + )? + .ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: could not find parent block's burnchain view"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "parent_block_id" => %block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock( + "Failed to load burn view of parent block ID".into(), + ) + })?; + if connected_sort_id != parent_burn_view_sn.sortition_id { + warn!( + "Cannot process Nakamoto block: parent block's burnchain view does not connect to own burn view"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "parent_block_id" => %block.header.parent_block_id + ); + return Err(ChainstateError::InvalidStacksBlock( + "Does not connect to burn view of parent block ID".into(), + )); + } + } + tenure_change.burn_view_consensus_hash + } else { + parent_header_info.burn_view.clone().ok_or_else(|| { + warn!( + "Cannot process Nakamoto block: parent block does not have a burnchain view and current block has no tenure tx"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "parent_block_id" => %block.header.parent_block_id + ); + ChainstateError::InvalidStacksBlock("Failed to load burn view of parent block ID".into()) + })? + }; + let Some(burnchain_view_sn) = + SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &burnchain_view)? + else { + // This should be checked already during block acceptance and parent block processing + // - The check for expected burns returns `NoSuchBlockError` if the burnchain view + // could not be found for a block with a tenure tx. + // We error here anyways, but the check during block acceptance makes sure that the staging + // db doesn't get into a situation where it continuously tries to retry such a block (because + // such a block shouldn't land in the staging db). + warn!( + "Cannot process Nakamoto block: failed to find Sortition ID associated with burnchain view"; + "consensus_hash" => %block.header.consensus_hash, + "stacks_block_hash" => %block.header.block_hash(), + "stacks_block_id" => %block.header.block_id(), + "burn_view_consensus_hash" => %burnchain_view, + ); + return Ok(()); + }; + + // find commit and sortition burns if this is a tenure-start block + let Ok(new_tenure) = block.is_wellformed_tenure_start_block() else { + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: invalid tenure change tx(s)".into(), + )); + }; + + let (commit_burn, sortition_burn) = if new_tenure { + // find block-commit to get commit-burn + let block_commit = SortitionDB::get_block_commit( + sort_db.conn(), + &next_ready_block_snapshot.winning_block_txid, + &next_ready_block_snapshot.sortition_id, + )? + .expect("FATAL: no block-commit for tenure-start block"); + + let sort_burn = + SortitionDB::get_block_burn_amount(sort_db.conn(), &next_ready_block_snapshot)?; + (block_commit.burn_fee, sort_burn) + } else { + (0, 0) + }; + + // attach the block to the chain state and calculate the next chain tip. + let pox_constants = sort_db.pox_constants.clone(); + + // NOTE: because block status is updated in a separate transaction, we need `chainstate_tx` + // and `clarity_instance` to go out of scope before we can issue the it (since we need a + // mutable reference to `stacks_chain_state` to start it). This means ensuring that, in the + // `Ok(..)` case, the `clarity_commit` gets dropped beforehand. In order to do this, we first + // run `::append_block()` here, and capture both the Ok(..) and Err(..) results as + // Option<..>'s. Then, if we errored, we can explicitly drop the `Ok(..)` option (even + // though it will always be None), which gets the borrow-checker to believe that it's safe + // to access `stacks_chain_state` again. In the `Ok(..)` case, it's instead sufficient so + // simply commit the block before beginning the second transaction to mark it processed. + + let mut burn_view_handle = sort_db.index_handle(&burnchain_view_sn.sortition_id); + let (ok_opt, err_opt) = match NakamotoChainState::append_block( + &mut chainstate_tx, + clarity_instance, + &mut burn_view_handle, + &burnchain_view, + &pox_constants, + &parent_header_info, + &next_ready_block_snapshot.burn_header_hash, + next_ready_block_snapshot + .block_height + .try_into() + .expect("Failed to downcast u64 to u32"), + next_ready_block_snapshot.burn_header_timestamp, + &block, + block_size, + commit_burn, + sortition_burn, + &active_reward_set, + ) { + Ok(next_chain_tip_info) => (Some(next_chain_tip_info), None), + Err(e) => (None, Some(e)), + }; + + if let Some(e) = err_opt { + // force rollback + drop(ok_opt); + drop(chainstate_tx); + + warn!( + "Failed to append {}/{}: {:?}", + &block.header.consensus_hash, + &block.header.block_hash(), + &e; + "stacks_block_id" => %block.header.block_id() + ); + + // as a separate transaction, mark this block as processed and orphaned. + // This is done separately so that the staging blocks DB, which receives writes + // from the network to store blocks, will be available for writes while a block is + // being processed. Therefore, it's *very important* that block-processing happens + // within the same, single thread. Also, it's *very important* that this update + // succeeds, since *we have already processed* the block. + return Err(e); + }; + + let (receipt, clarity_commit, reward_set_data) = ok_opt.expect("FATAL: unreachable"); + + assert_eq!( + receipt.header.anchored_header.block_hash(), + block.header.block_hash() + ); + assert_eq!(receipt.header.consensus_hash, block.header.consensus_hash); + + info!( + "Advanced to new tip! {}/{}", + &receipt.header.consensus_hash, + &receipt.header.anchored_header.block_hash() + ); + Ok(()) +} From 9abf6f1ea0ef1b47d28cf3c99c241d3f955140df Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 16 Oct 2024 14:47:41 -0400 Subject: [PATCH 1351/1400] test: update `check_block_times` integration test --- .../src/tests/nakamoto_integrations.rs | 533 ++++++++++-------- 1 file changed, 289 insertions(+), 244 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 93f4ac4106..094b547824 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7220,6 +7220,155 @@ fn continue_tenure_extend() { run_loop_thread.join().unwrap(); } +fn get_block_times( + naka_conf: &Config, + sender_addr: &StacksAddress, + block_height: u128, + tenure_height: u128, +) -> (u128, u128, u128, u128, u128, u128, u128) { + let contract0_name = "test-contract-0"; + let contract1_name = "test-contract-1"; + let contract3_name = "test-contract-3"; + + info!("Getting block times at block {block_height}, {tenure_height}..."); + + let time0_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-time", + vec![&clarity::vm::Value::UInt(block_height)], + ); + let time0 = time0_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time_now0_value = call_read_only( + &naka_conf, + &sender_addr, + contract0_name, + "get-last-time", + vec![], + ); + let time0_now = time_now0_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time1_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-time", + vec![&clarity::vm::Value::UInt(block_height)], + ); + let time1 = time1_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time1_now_value = call_read_only( + &naka_conf, + &sender_addr, + contract1_name, + "get-last-time", + vec![], + ); + let time1_now = time1_now_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time3_tenure_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-time", + vec![&clarity::vm::Value::UInt(tenure_height)], + ); + let time3_tenure = time3_tenure_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time3_block_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-block-time", + vec![&clarity::vm::Value::UInt(block_height)], + ); + let time3_block = time3_block_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + let time3_now_tenure_value = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-last-tenure-time", + vec![], + ); + let time3_now_tenure = time3_now_tenure_value + .expect_optional() + .unwrap() + .unwrap() + .expect_u128() + .unwrap(); + + info!("Reported times:"; + "time0" => time0, + "time0_now" => time0_now, + "time1" => time1, + "time1_now" => time1_now, + "time3_block" => time3_block, + "time3_tenure" => time3_tenure, + "time3_now_tenure" => time3_now_tenure + ); + + assert_eq!( + time0, time1, + "Time from pre- and post-epoch 3.0 contracts should match" + ); + assert_eq!( + time0_now, time1_now, + "Time from pre- and post-epoch 3.0 contracts should match" + ); + assert_eq!( + time0, time3_tenure, + "Tenure time should match Clarity 2 block time" + ); + assert_eq!(time0_now, time1_now, "Time should match across contracts"); + assert_eq!( + time0_now, time3_now_tenure, + "Clarity 3 tenure time should match Clarity 2 block time" + ); + + ( + time0, + time0_now, + time1, + time1_now, + time3_tenure, + time3_block, + time3_now_tenure, + ) +} + #[test] #[ignore] /// Verify the timestamps using `get-block-info?`, `get-stacks-block-info?`, and `get-tenure-info?`. @@ -7267,7 +7416,6 @@ fn check_block_times() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); @@ -7284,8 +7432,10 @@ fn check_block_times() { // Deploy this version with the Clarity 1 / 2 before epoch 3 let contract0_name = "test-contract-0"; - let contract_clarity1 = - "(define-read-only (get-time (height uint)) (get-block-info? time height))"; + let contract_clarity1 = r#" + (define-read-only (get-time (height uint)) (get-block-info? time height)) + (define-read-only (get-last-time) (get-block-info? time (- block-height u1))) + "#; let contract_tx0 = make_contract_publish( &sender_sk, @@ -7312,6 +7462,19 @@ fn check_block_times() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); + let epochs = naka_conf.burnchain.epochs.clone().unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_start = epoch_3.start_height; + let mut last_stacks_block_height = 0; + let mut last_tenure_height = 0; + next_block_and(&mut btc_regtest_controller, 60, || { + let info = get_chain_info_result(&naka_conf).unwrap(); + last_stacks_block_height = info.stacks_tip_height as u128; + last_tenure_height = last_stacks_block_height; + Ok(info.burn_block_height == epoch_3_start) + }) + .unwrap(); + let time0_value = call_read_only( &naka_conf, &sender_addr, @@ -7327,8 +7490,6 @@ fn check_block_times() { .unwrap(); info!("Time from pre-epoch 3.0: {}", time0); - wait_for_first_naka_block_commit(60, &commits_submitted); - // This version uses the Clarity 1 / 2 function let contract1_name = "test-contract-1"; let contract_tx1 = make_contract_publish_versioned( @@ -7345,9 +7506,11 @@ fn check_block_times() { // This version uses the Clarity 3 functions let contract3_name = "test-contract-3"; - let contract_clarity3 = - "(define-read-only (get-block-time (height uint)) (get-stacks-block-info? time height)) - (define-read-only (get-tenure-time (height uint)) (get-tenure-info? time height))"; + let contract_clarity3 = r#" + (define-read-only (get-block-time (height uint)) (get-stacks-block-info? time height)) + (define-read-only (get-tenure-time (height uint)) (get-tenure-info? time height)) + (define-read-only (get-last-tenure-time) (get-tenure-info? time (- tenure-height u1))) + "#; let contract_tx3 = make_contract_publish( &sender_sk, @@ -7360,258 +7523,140 @@ fn check_block_times() { submit_tx(&http_origin, &contract_tx3); sender_nonce += 1; - // sleep to ensure seconds have changed - thread::sleep(Duration::from_secs(3)); - - next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) - .unwrap(); - - // make sure that the contracts are published - wait_for(30, || { + let mut stacks_block_height = 0; + wait_for(60, || { let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; - Ok(cur_sender_nonce >= sender_nonce) + let info = get_chain_info_result(&naka_conf).unwrap(); + stacks_block_height = info.stacks_tip_height as u128; + Ok(stacks_block_height > last_stacks_block_height && cur_sender_nonce == sender_nonce) }) .expect("Timed out waiting for contracts to publish"); + last_stacks_block_height = stacks_block_height; - let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info.stacks_tip_height); - let last_stacks_block_height = info.stacks_tip_height as u128; - let last_tenure_height = last_stacks_block_height as u128; - - let time0_value = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let time0 = time0_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - - let time1_value = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let time1 = time1_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert_eq!( - time0, time1, - "Time from pre- and post-epoch 3.0 contracts should match" - ); - - let time3_tenure_value = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-tenure-time", - vec![&clarity::vm::Value::UInt(last_tenure_height - 2)], - ); - let time3_tenure = time3_tenure_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert_eq!( - time0, time3_tenure, - "Tenure time should match Clarity 2 block time" - ); - - let time3_block_value = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-block-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let time3_block = time3_block_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() + // Repeat these tests for 5 tenures + for _ in 0..5 { + next_block_and(&mut btc_regtest_controller, 60, || { + let info = get_chain_info_result(&naka_conf).unwrap(); + stacks_block_height = info.stacks_tip_height as u128; + Ok(stacks_block_height > last_stacks_block_height) + }) .unwrap(); + last_stacks_block_height = stacks_block_height; + last_tenure_height += 1; + info!("New tenure {last_tenure_height}, Stacks height: {last_stacks_block_height}"); - // Sleep to ensure the seconds have changed - thread::sleep(Duration::from_secs(2)); - - // Mine a Nakamoto block - info!("Mining Nakamoto block"); - - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - naka_conf.burnchain.chain_id, - &recipient, - send_amt, - ); - sender_nonce += 1; - submit_tx(&http_origin, &transfer_tx); + let (time0, _time0_now, _time1, _time1_now, _time3_tenure, time3_block, _time3_now_tenure) = + get_block_times( + &naka_conf, + &sender_addr, + last_stacks_block_height - 1, + last_tenure_height - 1, + ); - // make sure that the contracts are published - wait_for(30, || { - let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; - Ok(cur_sender_nonce >= sender_nonce) - }) - .expect("Timed out waiting for transfer to complete"); + // Mine a Nakamoto block + info!("Mining Nakamoto block"); - let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info.stacks_tip_height); - let last_stacks_block_height = info.stacks_tip_height as u128; - - let time0a_value = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let time0a = time0a_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert!( - time0a - time0 >= 1, - "get-block-info? time should have changed. time_0 = {time0}. time_0_a = {time0a}" - ); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); - let time1a_value = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let time1a = time1a_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert_eq!( - time0a, time1a, - "Time from pre- and post-epoch 3.0 contracts should match" - ); + // wait for the block to be mined + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + let info = get_chain_info_result(&naka_conf).unwrap(); + stacks_block_height = info.stacks_tip_height as u128; + Ok(stacks_block_height > last_stacks_block_height && cur_sender_nonce == sender_nonce) + }) + .expect("Timed out waiting for block"); + last_stacks_block_height = stacks_block_height; + + info!("New Stacks block {last_stacks_block_height} in tenure {last_tenure_height}"); + + let ( + time0a, + _time0a_now, + _time1a, + _time1a_now, + _time3a_tenure, + time3a_block, + _time3a_now_tenure, + ) = get_block_times( + &naka_conf, + &sender_addr, + last_stacks_block_height - 1, + last_tenure_height - 1, + ); - let time3a_block_value = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-block-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let time3a_block = time3a_block_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert!( - time3a_block - time3_block >= 1, - "get-stacks-block-info? time should have changed" - ); + assert!( + time0a - time0 >= 1, + "get-block-info? time should have changed. time_0 = {time0}. time_0_a = {time0a}" + ); + assert!( + time3a_block - time3_block >= 1, + "get-stacks-block-info? time should have changed" + ); - // Sleep to ensure the seconds have changed - thread::sleep(Duration::from_secs(1)); + // Mine a Nakamoto block + info!("Mining Nakamoto block"); - // Mine a Nakamoto block - info!("Mining Nakamoto block"); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - naka_conf.burnchain.chain_id, - &recipient, - send_amt, - ); - submit_tx(&http_origin, &transfer_tx); + // wait for the block to be mined + wait_for(30, || { + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + let info = get_chain_info_result(&naka_conf).unwrap(); + stacks_block_height = info.stacks_tip_height as u128; + Ok(stacks_block_height > last_stacks_block_height && cur_sender_nonce == sender_nonce) + }) + .expect("Timed out waiting for block"); + last_stacks_block_height = stacks_block_height; + + let ( + time0b, + _time0b_now, + time1b, + _time1b_now, + _time3b_tenure, + time3b_block, + _time3b_now_tenure, + ) = get_block_times( + &naka_conf, + &sender_addr, + last_stacks_block_height - 1, + last_tenure_height - 1, + ); - loop { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); + assert_eq!( + time0a, time0b, + "get-block-info? time should not have changed" + ); + assert_eq!( + time0b, time1b, + "Time from pre- and post-epoch 3.0 contracts should match" + ); + assert!( + time3b_block - time3a_block >= 1, + "get-stacks-block-info? time should have changed" + ); } - let time0b_value = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height)], - ); - let time0b = time0b_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert_eq!( - time0a, time0b, - "get-block-info? time should not have changed" - ); - - let time1b_value = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height)], - ); - let time1b = time1b_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - assert_eq!( - time0b, time1b, - "Time from pre- and post-epoch 3.0 contracts should match" - ); - - let time3b_block_value = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-block-time", - vec![&clarity::vm::Value::UInt(last_stacks_block_height)], - ); - let time3b_block = time3b_block_value - .expect_optional() - .unwrap() - .unwrap() - .expect_u128() - .unwrap(); - - assert!( - time3b_block - time3a_block >= 1, - "get-stacks-block-info? time should have changed" - ); - coord_channel .lock() .expect("Mutex poisoned") From 249136c0133b6f20c64b5b1a0a5c798b959886a6 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 16 Oct 2024 15:01:04 -0400 Subject: [PATCH 1352/1400] fix: use tenure height for Clarity 2 contracts --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 094b547824..50385ead8b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7230,14 +7230,14 @@ fn get_block_times( let contract1_name = "test-contract-1"; let contract3_name = "test-contract-3"; - info!("Getting block times at block {block_height}, {tenure_height}..."); + info!("Getting block times at block {block_height}, tenure {tenure_height}..."); let time0_value = call_read_only( &naka_conf, &sender_addr, contract0_name, "get-time", - vec![&clarity::vm::Value::UInt(block_height)], + vec![&clarity::vm::Value::UInt(tenure_height)], ); let time0 = time0_value .expect_optional() @@ -7265,7 +7265,7 @@ fn get_block_times( &sender_addr, contract1_name, "get-time", - vec![&clarity::vm::Value::UInt(block_height)], + vec![&clarity::vm::Value::UInt(tenure_height)], ); let time1 = time1_value .expect_optional() From 5f3197c8da91807232e151b08b6538882a48a18c Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 16 Oct 2024 14:53:08 -0500 Subject: [PATCH 1353/1400] use tenure-height as block-height for 2.x blocks, fix interpretation of marf key --- clarity/src/vm/database/clarity_db.rs | 10 +- stackslib/src/chainstate/nakamoto/keys.rs | 2 +- stackslib/src/clarity_vm/database/mod.rs | 53 +++---- .../src/tests/nakamoto_integrations.rs | 129 ++++++++---------- 4 files changed, 86 insertions(+), 108 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 4f64bc08bb..3a37edf75c 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -935,10 +935,14 @@ impl<'a> ClarityDatabase<'a> { if current_tenure_height < tenure_height { return Ok(None); } - if current_tenure_height == tenure_height { - return Ok(Some(self.get_current_block_height())); - } let current_height = self.get_current_block_height(); + // check if we're querying a 2.x block + let id_bhh = self.get_index_block_header_hash(tenure_height)?; + let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; + if !epoch.uses_nakamoto_blocks() { + return Ok(Some(tenure_height)); + } + // query from the parent let query_tip = self.get_index_block_header_hash(current_height.saturating_sub(1))?; Ok(self diff --git a/stackslib/src/chainstate/nakamoto/keys.rs b/stackslib/src/chainstate/nakamoto/keys.rs index 2944c70aff..bf33c4448e 100644 --- a/stackslib/src/chainstate/nakamoto/keys.rs +++ b/stackslib/src/chainstate/nakamoto/keys.rs @@ -23,7 +23,7 @@ pub fn ongoing_tenure_id() -> &'static str { "nakamoto::tenures::ongoing_tenure_id" } -/// MARF key to map the coinbase height of a tenure to its consensus hash +/// MARF key to map the coinbase height of a tenure to its first block ID pub fn ongoing_tenure_coinbase_height(coinbase_height: u64) -> String { format!( "nakamoto::tenures::ongoing_tenure_coinbase_height::{}", diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index b12c4470ef..e26f9be6ba 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -47,22 +47,12 @@ pub trait GetTenureStartId { tip: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result, DBError>; - fn get_tenure_ch_at_cb_height( - &self, - tip: &StacksBlockId, - coinbase_height: u64, - ) -> Result, DBError>; - fn conn(&self) -> &Connection; fn get_tenure_block_id_at_cb_height( &self, tip: &StacksBlockId, coinbase_height: u64, - ) -> Result, DBError> { - let Some(tenure_ch) = self.get_tenure_ch_at_cb_height(tip, coinbase_height)? else { - return Ok(None); - }; - self.get_tenure_block_id(tip, &tenure_ch) - } + ) -> Result, DBError>; + fn conn(&self) -> &Connection; } impl GetTenureStartId for StacksDBConn<'_> { @@ -81,17 +71,17 @@ impl GetTenureStartId for StacksDBConn<'_> { .map(|block_id| TenureBlockId::from(block_id))) } - fn get_tenure_ch_at_cb_height( + fn get_tenure_block_id_at_cb_height( &self, tip: &StacksBlockId, coinbase_height: u64, - ) -> Result, DBError> { + ) -> Result, DBError> { let opt_out = self .get_indexed( tip, &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), )? - .map(|hex_inp| nakamoto_keys::parse_consensus_hash(&hex_inp)) + .map(|hex_inp| nakamoto_keys::parse_block_id(&hex_inp)) .flatten(); Ok(opt_out) } @@ -117,17 +107,17 @@ impl GetTenureStartId for StacksDBTx<'_> { .map(|block_id| TenureBlockId::from(block_id))) } - fn get_tenure_ch_at_cb_height( + fn get_tenure_block_id_at_cb_height( &self, tip: &StacksBlockId, coinbase_height: u64, - ) -> Result, DBError> { + ) -> Result, DBError> { let opt_out = self .get_indexed_ref( tip, &nakamoto_keys::ongoing_tenure_coinbase_height(coinbase_height), )? - .map(|hex_inp| nakamoto_keys::parse_consensus_hash(&hex_inp)) + .map(|hex_inp| nakamoto_keys::parse_block_id(&hex_inp)) .flatten(); Ok(opt_out) } @@ -151,13 +141,13 @@ impl GetTenureStartId for MARF { self.sqlite_conn() } - fn get_tenure_ch_at_cb_height( + fn get_tenure_block_id_at_cb_height( &self, tip: &StacksBlockId, coinbase_height: u64, - ) -> Result, DBError> { + ) -> Result, DBError> { let dbconn = StacksDBConn::new(self, ()); - dbconn.get_tenure_ch_at_cb_height(tip, coinbase_height) + dbconn.get_tenure_block_id_at_cb_height(tip, coinbase_height) } } @@ -250,7 +240,7 @@ impl<'a> HeadersDB for HeadersDBConn<'a> { let tenure_block_id = GetTenureStartId::get_tenure_block_id_at_cb_height(&self.0, tip, tenure_height.into()) .expect("FATAL: bad DB data for tenure height lookups")?; - get_stacks_header_column(self.0.conn(), &tenure_block_id.0, "block_height", |r| { + get_stacks_header_column(self.0.conn(), &tenure_block_id, "block_height", |r| { u64::from_row(r) .expect("FATAL: malformed block_height") .try_into() @@ -499,7 +489,7 @@ impl<'a> HeadersDB for ChainstateTx<'a> { tenure_height.into(), ) .expect("FATAL: bad DB data for tenure height lookups")?; - get_stacks_header_column(self.deref(), &tenure_block_id.0, "block_height", |r| { + get_stacks_header_column(self.deref(), &tenure_block_id, "block_height", |r| { u64::from_row(r) .expect("FATAL: malformed block_height") .try_into() @@ -670,17 +660,12 @@ impl HeadersDB for MARF { let tenure_block_id = GetTenureStartId::get_tenure_block_id_at_cb_height(self, tip, tenure_height.into()) .expect("FATAL: bad DB data for tenure height lookups")?; - get_stacks_header_column( - self.sqlite_conn(), - &tenure_block_id.0, - "block_height", - |r| { - u64::from_row(r) - .expect("FATAL: malformed block_height") - .try_into() - .expect("FATAL: blockchain too long") - }, - ) + get_stacks_header_column(self.sqlite_conn(), &tenure_block_id, "block_height", |r| { + u64::from_row(r) + .expect("FATAL: malformed block_height") + .try_into() + .expect("FATAL: blockchain too long") + }) } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 50385ead8b..c81946cf66 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -70,6 +70,7 @@ use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, }; +use stacks::types::chainstate::StacksBlockId; use stacks::util::hash::hex_bytes; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -7671,6 +7672,8 @@ fn assert_block_info( miner: &Value, miner_spend: &clarity::vm::Value, ) { + info!("block info tuple data: {tuple0:#?}"); + assert!(tuple0 .get("burnchain-header-hash") .unwrap() @@ -7816,7 +7819,7 @@ fn check_block_info() { // Deploy this version with the Clarity 1 / 2 before epoch 3 let contract0_name = "test-contract-0"; - let contract_clarity1 = "(define-read-only (get-info (height uint)) + let contract_clarity1 = "(define-read-only (get-block-info (height uint)) { burnchain-header-hash: (get-block-info? burnchain-header-hash height), id-header-hash: (get-block-info? id-header-hash height), @@ -7859,7 +7862,7 @@ fn check_block_info() { &naka_conf, &sender_addr, contract0_name, - "get-info", + "get-block-info", vec![&clarity::vm::Value::UInt(1)], ); let tuple0 = result0.expect_tuple().unwrap().data_map; @@ -7929,25 +7932,36 @@ fn check_block_info() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; + let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let tuple0 = result0.expect_tuple().unwrap().data_map; + let last_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) + .unwrap() + .unwrap() + .into(); + + let get_block_info = |contract_name: &str, query_height: u128| { + let result = call_read_only( + &naka_conf, + &sender_addr, + contract_name, + "get-block-info", + vec![&clarity::vm::Value::UInt(query_height)], + ); + result.expect_tuple().unwrap().data_map + }; + + let tuple0 = get_block_info(contract0_name, last_tenure_height - 1); assert_block_info(&tuple0, &miner, &miner_spend); - let result1 = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let tuple1 = result1.expect_tuple().unwrap().data_map; + let tuple1 = get_block_info(contract1_name, last_tenure_height - 1); assert_eq!(tuple0, tuple1); let result3_tenure = call_read_only( @@ -7981,14 +7995,8 @@ fn check_block_info() { tuple0.get("miner-spend-winner") ); - let result3_block = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-block-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let tuple3_block1 = result3_block.expect_tuple().unwrap().data_map; + // this will point to the last block in the prior tenure (which should have been a 2.x block) + let tuple3_block1 = get_block_info(contract3_name, last_stacks_block_height - 2); assert_eq!( tuple3_block1.get("id-header-hash"), tuple0.get("id-header-hash") @@ -8038,25 +8046,17 @@ fn check_block_info() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; + let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let last_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) + .unwrap() + .unwrap() + .into(); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let tuple0 = result0.expect_tuple().unwrap().data_map; + let tuple0 = get_block_info(contract0_name, last_tenure_height); assert_block_info(&tuple0, &miner, &miner_spend); - let result1 = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let tuple1 = result1.expect_tuple().unwrap().data_map; + let tuple1 = get_block_info(contract1_name, last_tenure_height); assert_eq!(tuple0, tuple1); let result3_tenure = call_read_only( @@ -8102,11 +8102,15 @@ fn check_block_info() { let tuple3_block2 = result3_block.expect_tuple().unwrap().data_map; // There should have been a block change, so these should be different. assert_ne!(tuple3_block1, tuple3_block2); + + // tuple 0 fetches the id-header-hash for the first block of the tenure (block1) + + let tuple3_block1 = get_block_info(contract3_name, last_stacks_block_height - 2); assert_eq!( - tuple3_block2.get("id-header-hash"), + tuple3_block1.get("id-header-hash"), tuple0.get("id-header-hash") ); - assert_eq!(tuple3_block2.get("header-hash"), tuple0.get("header-hash")); + assert_eq!(tuple3_block1.get("header-hash"), tuple0.get("header-hash")); assert!(tuple3_block2 .get("time") .unwrap() @@ -8150,25 +8154,17 @@ fn check_block_info() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; + let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let last_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) + .unwrap() + .unwrap() + .into(); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let tuple0 = result0.expect_tuple().unwrap().data_map; + let tuple0 = get_block_info(contract0_name, last_tenure_height); assert_block_info(&tuple0, &miner, &miner_spend); - let result1 = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let tuple1 = result1.expect_tuple().unwrap().data_map; + let tuple1 = get_block_info(contract1_name, last_tenure_height); assert_eq!(tuple0, tuple1); let result3_tenure = call_read_only( @@ -8181,21 +8177,14 @@ fn check_block_info() { let tuple3_tenure1a = result3_tenure.expect_tuple().unwrap().data_map; assert_eq!(tuple3_tenure1, tuple3_tenure1a); - let result3_block = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-block-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let tuple3_block3 = result3_block.expect_tuple().unwrap().data_map; + let tuple3_block3 = get_block_info(contract3_name, last_stacks_block_height - 1); // There should have been a block change, so these should be different. assert_ne!(tuple3_block3, tuple3_block2); assert_eq!( - tuple3_block3.get("id-header-hash"), + tuple3_block1.get("id-header-hash"), tuple0.get("id-header-hash") ); - assert_eq!(tuple3_block3.get("header-hash"), tuple0.get("header-hash")); + assert_eq!(tuple3_block1.get("header-hash"), tuple0.get("header-hash")); assert!(tuple3_block3 .get("time") .unwrap() From 1789dccc1b3aaaf955cae9a1b1460b504514b110 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 16 Oct 2024 13:52:41 -0700 Subject: [PATCH 1354/1400] Add multiple_miners_with_custom_chain_id test to test chain id in the signer in an integration test Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + stacks-signer/src/config.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 282 +++++++++++++++++++++ 3 files changed, 284 insertions(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 4115118eaf..be3d562d46 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -118,6 +118,7 @@ jobs: - tests::signer::v0::mine_2_nakamoto_reward_cycles - tests::signer::v0::signer_set_rollover - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle + - tests::signer::v0::multiple_miners_with_custom_chain_id - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 375ed1a171..3392906682 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -156,7 +156,7 @@ pub struct GlobalConfig { /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, /// An optional custom Chain ID - chain_id: Option, + pub chain_id: Option, } /// Internal struct for loading up the config file diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 89192d274d..448bd4c644 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5170,3 +5170,285 @@ fn signing_in_0th_tenure_of_reward_cycle() { } assert_eq!(signer_test.get_current_reward_cycle(), next_reward_cycle); } + +/// This test involves two miners with a custom chain id, each mining tenures with 6 blocks each. +/// Half of the signers are attached to each miner, so the test also verifies that +/// the signers' messages successfully make their way to the active miner. +#[test] +#[ignore] +fn multiple_miners_with_custom_chain_id() { + let num_signers = 5; + let max_nakamoto_tenures = 20; + let inter_blocks_per_tenure = 5; + + // setup sender + recipient for a test stx transfer + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = 51024; + let node_1_p2p = 51023; + let node_2_rpc = 51026; + let node_2_p2p = 51025; + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + let chain_id = 0x87654321; + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![( + sender_addr.clone(), + (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, + )], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + signer_config.chain_id = Some(chain_id) + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.chain_id = chain_id; + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + None, + ); + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_mined_blocks: blocks_mined2, + .. + } = run_loop_2.counters(); + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); + let mut btc_blocks_mined = 1; + let mut miner_1_tenures = 0; + let mut miner_2_tenures = 0; + let mut sender_nonce = 0; + while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { + if btc_blocks_mined > max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + signer_test.mine_block_wait_on_processing( + &[&rl1_coord_channels, &rl2_coord_channels], + &[&rl1_commits, &rl2_commits], + Duration::from_secs(30), + ); + btc_blocks_mined += 1; + + // wait for the new block to be processed + wait_for(60, || { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + + info!( + "Nakamoto blocks mined: {}", + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst) + ); + + // mine the interim blocks + info!("Mining interim blocks"); + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + wait_for(60, || { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + info!( + "Mined interim block {}:{}", + btc_blocks_mined, interim_block_ix + ); + } + + let blocks = get_nakamoto_headers(&conf); + let mut seen_burn_hashes = HashSet::new(); + miner_1_tenures = 0; + miner_2_tenures = 0; + for header in blocks.iter() { + if seen_burn_hashes.contains(&header.burn_header_hash) { + continue; + } + seen_burn_hashes.insert(header.burn_header_hash.clone()); + + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + if miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_1_tenures += 1; + } + if miner_2_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_2_tenures += 1; + } + } + info!( + "Miner 1 tenures: {}, Miner 2 tenures: {}", + miner_1_tenures, miner_2_tenures + ); + } + + info!( + "New chain info 1: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_1_height, peer_2_height); + assert_eq!( + peer_1_height, + pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) + ); + assert_eq!( + btc_blocks_mined, + u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() + ); + + // Verify both nodes have the correct chain id + let miner1_info = get_chain_info(&signer_test.running_nodes.conf); + assert_eq!(miner1_info.network_id, chain_id); + + let miner2_info = get_chain_info(&conf_node_2); + assert_eq!(miner2_info.network_id, chain_id); + + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} From d0418f1476c4e7d7c52ff6fab12aebf75ba45814 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 16 Oct 2024 16:14:25 -0500 Subject: [PATCH 1355/1400] test: update the check_block_info_rewards test --- .../src/tests/nakamoto_integrations.rs | 147 ++++++++---------- 1 file changed, 62 insertions(+), 85 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c81946cf66..2a7a6fb497 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7932,7 +7932,6 @@ fn check_block_info() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; - let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let (chainstate, _) = StacksChainState::open( naka_conf.is_mainnet(), naka_conf.burnchain.chain_id, @@ -7941,6 +7940,7 @@ fn check_block_info() { ) .unwrap(); + let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let last_tenure_height: u128 = NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) .unwrap() @@ -8070,19 +8070,13 @@ fn check_block_info() { // There should have been a tenure change, so these should be different. assert_ne!(tuple3_tenure0, tuple3_tenure1); assert_eq!( - tuple3_tenure1.get("burnchain-header-hash"), - tuple0.get("burnchain-header-hash") - ); - assert_eq!( - tuple3_tenure1.get("miner-address"), - tuple0.get("miner-address") - ); - assert_eq!(tuple3_tenure1.get("time"), tuple0.get("time")); - assert_eq!(tuple3_tenure1.get("vrf-seed"), tuple0.get("vrf-seed")); - assert_eq!( - tuple3_tenure1.get("block-reward"), - tuple0.get("block-reward") + tuple3_tenure1["burnchain-header-hash"], + tuple0["burnchain-header-hash"] ); + assert_eq!(tuple3_tenure1["miner-address"], tuple0["miner-address"]); + assert_eq!(tuple3_tenure1["time"], tuple0["time"]); + assert_eq!(tuple3_tenure1["vrf-seed"], tuple0["vrf-seed"]); + assert_eq!(tuple3_tenure1["block-reward"], tuple0["block-reward"]); assert_eq!( tuple3_tenure1.get("miner-spend-total"), tuple0.get("miner-spend-total") @@ -8106,14 +8100,9 @@ fn check_block_info() { // tuple 0 fetches the id-header-hash for the first block of the tenure (block1) let tuple3_block1 = get_block_info(contract3_name, last_stacks_block_height - 2); - assert_eq!( - tuple3_block1.get("id-header-hash"), - tuple0.get("id-header-hash") - ); - assert_eq!(tuple3_block1.get("header-hash"), tuple0.get("header-hash")); - assert!(tuple3_block2 - .get("time") - .unwrap() + assert_eq!(tuple3_block1["id-header-hash"], tuple0["id-header-hash"]); + assert_eq!(tuple3_block1["header-hash"], tuple0["header-hash"]); + assert!(tuple3_block2["time"] .clone() .expect_optional() .unwrap() @@ -8180,14 +8169,9 @@ fn check_block_info() { let tuple3_block3 = get_block_info(contract3_name, last_stacks_block_height - 1); // There should have been a block change, so these should be different. assert_ne!(tuple3_block3, tuple3_block2); - assert_eq!( - tuple3_block1.get("id-header-hash"), - tuple0.get("id-header-hash") - ); - assert_eq!(tuple3_block1.get("header-hash"), tuple0.get("header-hash")); - assert!(tuple3_block3 - .get("time") - .unwrap() + assert_eq!(tuple3_block1["id-header-hash"], tuple0["id-header-hash"]); + assert_eq!(tuple3_block1["header-hash"], tuple0["header-hash"]); + assert!(tuple3_block3["time"] .clone() .expect_optional() .unwrap() @@ -8329,7 +8313,7 @@ fn check_block_info_rewards() { // Deploy this version with the Clarity 1 / 2 before epoch 3 let contract0_name = "test-contract-0"; - let contract_clarity1 = "(define-read-only (get-info (height uint)) + let contract_clarity1 = "(define-read-only (get-block-info (height uint)) { burnchain-header-hash: (get-block-info? burnchain-header-hash height), id-header-hash: (get-block-info? id-header-hash height), @@ -8354,6 +8338,17 @@ fn check_block_info_rewards() { sender_nonce += 1; submit_tx(&http_origin, &contract_tx0); + let get_block_info = |contract_name: &str, query_height: u128| { + let result = call_read_only( + &naka_conf, + &sender_addr, + contract_name, + "get-block-info", + vec![&clarity::vm::Value::UInt(query_height)], + ); + result.expect_tuple().unwrap().data_map + }; + boot_to_epoch_3( &naka_conf, &blocks_processed, @@ -8368,14 +8363,7 @@ fn check_block_info_rewards() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-info", - vec![&clarity::vm::Value::UInt(1)], - ); - let tuple0 = result0.expect_tuple().unwrap().data_map; + let tuple0 = get_block_info(contract0_name, 1); info!("Info from pre-epoch 3.0: {:?}", tuple0); wait_for_first_naka_block_commit(60, &commits_submitted); @@ -8489,8 +8477,22 @@ fn check_block_info_rewards() { let info = get_chain_info_result(&naka_conf).unwrap(); info!("Chain info: {:?}", info); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let last_stacks_block_height = info.stacks_tip_height as u128; let last_nakamoto_block = last_stacks_block_height; + let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let last_nakamoto_block_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) + .unwrap() + .unwrap() + .into(); // Mine more than 2 burn blocks to get the last block's reward matured // (only 2 blocks maturation time in tests) @@ -8511,36 +8513,32 @@ fn check_block_info_rewards() { let last_stacks_block_height = info.stacks_tip_height as u128; let blocks = test_observer::get_blocks(); + let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let last_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) + .unwrap() + .unwrap() + .into(); + // Check the block reward is now matured in one of the tenure-change blocks let mature_height = last_stacks_block_height - 4; let expected_reward = get_expected_reward_for_height(&blocks, mature_height); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-info", - vec![&clarity::vm::Value::UInt(mature_height)], + let tuple0 = get_block_info(contract0_name, last_tenure_height - 4); + info!( + "block rewards"; + "fetched" => %tuple0["block-reward"], + "expected" => expected_reward, ); - let tuple0 = result0.expect_tuple().unwrap().data_map; assert_eq!( - tuple0 - .get("block-reward") - .unwrap() + tuple0["block-reward"] .clone() .expect_optional() .unwrap() .unwrap(), - Value::UInt(expected_reward as u128) + Value::UInt(expected_reward) ); - let result1 = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-info", - vec![&clarity::vm::Value::UInt(mature_height)], - ); - let tuple1 = result1.expect_tuple().unwrap().data_map; + let tuple1 = get_block_info(contract1_name, last_tenure_height - 4); assert_eq!(tuple0, tuple1); let result3_tenure = call_read_only( @@ -8551,41 +8549,23 @@ fn check_block_info_rewards() { vec![&clarity::vm::Value::UInt(mature_height)], ); let tuple3_tenure = result3_tenure.expect_tuple().unwrap().data_map; - assert_eq!( - tuple3_tenure.get("block-reward"), - tuple0.get("block-reward") - ); + assert_eq!(tuple3_tenure["block-reward"], tuple0["block-reward"]); // Check the block reward is now matured in one of the Nakamoto blocks let expected_reward = get_expected_reward_for_height(&blocks, last_nakamoto_block); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_nakamoto_block)], - ); - let tuple0 = result0.expect_tuple().unwrap().data_map; + let tuple0 = get_block_info(contract0_name, last_nakamoto_block_tenure_height); + assert_eq!( - tuple0 - .get("block-reward") - .unwrap() + tuple0["block-reward"] .clone() .expect_optional() .unwrap() .unwrap(), - Value::UInt(expected_reward as u128) + Value::UInt(expected_reward) ); - let result1 = call_read_only( - &naka_conf, - &sender_addr, - contract1_name, - "get-info", - vec![&clarity::vm::Value::UInt(last_nakamoto_block)], - ); - let tuple1 = result1.expect_tuple().unwrap().data_map; + let tuple1 = get_block_info(contract1_name, last_nakamoto_block_tenure_height); assert_eq!(tuple0, tuple1); let result3_tenure = call_read_only( @@ -8596,10 +8576,7 @@ fn check_block_info_rewards() { vec![&clarity::vm::Value::UInt(last_nakamoto_block)], ); let tuple3_tenure = result3_tenure.expect_tuple().unwrap().data_map; - assert_eq!( - tuple3_tenure.get("block-reward"), - tuple0.get("block-reward") - ); + assert_eq!(tuple3_tenure["block-reward"], tuple0["block-reward"]); coord_channel .lock() From 5202e880b84b233cc1a7a63cab5d912269811061 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 16 Oct 2024 16:30:12 -0500 Subject: [PATCH 1356/1400] fix: check current_height vs supplied tenure_height --- clarity/src/vm/database/clarity_db.rs | 4 +++- clarity/src/vm/tests/contracts.rs | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 3a37edf75c..2ee31a8b41 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -936,13 +936,15 @@ impl<'a> ClarityDatabase<'a> { return Ok(None); } let current_height = self.get_current_block_height(); + if current_height <= tenure_height { + return Ok(None); + } // check if we're querying a 2.x block let id_bhh = self.get_index_block_header_hash(tenure_height)?; let epoch = self.get_stacks_epoch_for_block(&id_bhh)?; if !epoch.uses_nakamoto_blocks() { return Ok(Some(tenure_height)); } - // query from the parent let query_tip = self.get_index_block_header_hash(current_height.saturating_sub(1))?; Ok(self diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs index 3c4dc14b2e..9cb5aea4b1 100644 --- a/clarity/src/vm/tests/contracts.rs +++ b/clarity/src/vm/tests/contracts.rs @@ -139,7 +139,7 @@ fn test_get_block_info_eval( .unwrap(); let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); - + eprintln!("{}", contracts[i]); let eval_result = env.eval_read_only(&contract_identifier, "(test-func)"); match expected[i] { // any (some UINT) is okay for checking get-block-info? time From b9e1bb99635f3df32ef3eb6847d4a53250cb85dd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 16 Oct 2024 18:37:56 -0400 Subject: [PATCH 1357/1400] test: resolve issues with `check_block_times` --- .../src/tests/nakamoto_integrations.rs | 50 ++++++++++--------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 2a7a6fb497..0aab59c4b9 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7294,7 +7294,7 @@ fn get_block_times( &sender_addr, contract3_name, "get-tenure-time", - vec![&clarity::vm::Value::UInt(tenure_height)], + vec![&clarity::vm::Value::UInt(block_height)], ); let time3_tenure = time3_tenure_value .expect_optional() @@ -7349,15 +7349,7 @@ fn get_block_times( time0_now, time1_now, "Time from pre- and post-epoch 3.0 contracts should match" ); - assert_eq!( - time0, time3_tenure, - "Tenure time should match Clarity 2 block time" - ); assert_eq!(time0_now, time1_now, "Time should match across contracts"); - assert_eq!( - time0_now, time3_now_tenure, - "Clarity 3 tenure time should match Clarity 2 block time" - ); ( time0, @@ -7394,7 +7386,7 @@ fn check_block_times() { let deploy_fee = 3000; naka_conf.add_initial_balance( PrincipalData::from(sender_addr.clone()).to_string(), - 3 * deploy_fee + (send_amt + send_fee) * 2, + 3 * deploy_fee + (send_amt + send_fee) * 12, ); naka_conf.add_initial_balance( PrincipalData::from(sender_signer_addr.clone()).to_string(), @@ -7471,7 +7463,7 @@ fn check_block_times() { next_block_and(&mut btc_regtest_controller, 60, || { let info = get_chain_info_result(&naka_conf).unwrap(); last_stacks_block_height = info.stacks_tip_height as u128; - last_tenure_height = last_stacks_block_height; + last_tenure_height = last_stacks_block_height + 1; Ok(info.burn_block_height == epoch_3_start) }) .unwrap(); @@ -7510,7 +7502,7 @@ fn check_block_times() { let contract_clarity3 = r#" (define-read-only (get-block-time (height uint)) (get-stacks-block-info? time height)) (define-read-only (get-tenure-time (height uint)) (get-tenure-info? time height)) - (define-read-only (get-last-tenure-time) (get-tenure-info? time (- tenure-height u1))) + (define-read-only (get-last-tenure-time) (get-tenure-info? time (- stacks-block-height u1))) "#; let contract_tx3 = make_contract_publish( @@ -7546,7 +7538,7 @@ fn check_block_times() { last_tenure_height += 1; info!("New tenure {last_tenure_height}, Stacks height: {last_stacks_block_height}"); - let (time0, _time0_now, _time1, _time1_now, _time3_tenure, time3_block, _time3_now_tenure) = + let (time0, time0_now, _time1, _time1_now, time3_tenure, time3_block, time3_now_tenure) = get_block_times( &naka_conf, &sender_addr, @@ -7554,6 +7546,15 @@ fn check_block_times() { last_tenure_height - 1, ); + assert_eq!( + time0, time3_tenure, + "Tenure time should match Clarity 2 block time" + ); + assert_eq!( + time0_now, time3_now_tenure, + "Clarity 3 tenure time should match Clarity 2 block time in the first block of a tenure" + ); + // Mine a Nakamoto block info!("Mining Nakamoto block"); @@ -7588,7 +7589,7 @@ fn check_block_times() { _time1a_now, _time3a_tenure, time3a_block, - _time3a_now_tenure, + time3a_now_tenure, ) = get_block_times( &naka_conf, &sender_addr, @@ -7596,9 +7597,9 @@ fn check_block_times() { last_tenure_height - 1, ); - assert!( - time0a - time0 >= 1, - "get-block-info? time should have changed. time_0 = {time0}. time_0_a = {time0a}" + assert_eq!( + time0a, time0, + "get-block-info? time should not have changed" ); assert!( time3a_block - time3_block >= 1, @@ -7618,6 +7619,7 @@ fn check_block_times() { send_amt, ); submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; // wait for the block to be mined wait_for(30, || { @@ -7632,11 +7634,11 @@ fn check_block_times() { let ( time0b, _time0b_now, - time1b, + _time1b, _time1b_now, _time3b_tenure, time3b_block, - _time3b_now_tenure, + time3b_now_tenure, ) = get_block_times( &naka_conf, &sender_addr, @@ -7645,17 +7647,17 @@ fn check_block_times() { ); assert_eq!( - time0a, time0b, + time0b, time0a, "get-block-info? time should not have changed" ); - assert_eq!( - time0b, time1b, - "Time from pre- and post-epoch 3.0 contracts should match" - ); assert!( time3b_block - time3a_block >= 1, "get-stacks-block-info? time should have changed" ); + assert_eq!( + time3b_now_tenure, time3a_now_tenure, + "get-tenure-info? time should not have changed" + ); } coord_channel From ec5a03a37ca0b998fe4773db7686a10691a636ef Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 16 Oct 2024 20:53:16 -0500 Subject: [PATCH 1358/1400] test: stricter (and more clear?) assertions in check_block_info --- .../src/tests/nakamoto_integrations.rs | 543 +++++++++++------- 1 file changed, 321 insertions(+), 222 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 0aab59c4b9..99a65ed4cc 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7750,6 +7750,17 @@ fn assert_block_info( ); } +fn parse_block_id(optional_buff32: &Value) -> StacksBlockId { + let bytes = optional_buff32 + .clone() + .expect_optional() + .unwrap() + .unwrap() + .expect_buff(32) + .unwrap(); + StacksBlockId::from_vec(&bytes).unwrap() +} + #[test] #[ignore] /// Verify all properties in `get-block-info?`, `get-stacks-block-info?`, and `get-tenure-info?`. @@ -7782,6 +7793,7 @@ fn check_block_info() { ); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); + let contract3_name = "test-contract-3"; test_observer::spawn(); test_observer::register_any(&mut naka_conf); @@ -7812,6 +7824,36 @@ fn check_block_info() { let mut sender_nonce = 0; + let get_block_info = |contract_name: &str, query_height: u128| { + let result = call_read_only( + &naka_conf, + &sender_addr, + contract_name, + "get-block-info", + vec![&clarity::vm::Value::UInt(query_height)], + ); + result.expect_tuple().unwrap().data_map + }; + + let get_tenure_info = |query_height: u128| { + let result = call_read_only( + &naka_conf, + &sender_addr, + contract3_name, + "get-tenure-info", + vec![&clarity::vm::Value::UInt(query_height)], + ); + result.expect_tuple().unwrap().data_map + }; + + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let miner = clarity::vm::Value::Principal( PrincipalData::parse_standard_principal("ST25WA53N4PWF8XZGQH2J5A4CGCWV4JADPM8MHTRV") .unwrap() @@ -7834,6 +7876,25 @@ fn check_block_info() { miner-spend-winner: (get-block-info? miner-spend-winner height), } )"; + // This version uses the Clarity 3 functions + let contract_clarity3 = "(define-read-only (get-block-info (height uint)) + { + id-header-hash: (get-stacks-block-info? id-header-hash height), + header-hash: (get-stacks-block-info? header-hash height), + time: (get-stacks-block-info? time height), + } + ) + (define-read-only (get-tenure-info (height uint)) + { + burnchain-header-hash: (get-tenure-info? burnchain-header-hash height), + miner-address: (get-tenure-info? miner-address height), + time: (get-tenure-info? time height), + vrf-seed: (get-tenure-info? vrf-seed height), + block-reward: (get-tenure-info? block-reward height), + miner-spend-total: (get-tenure-info? miner-spend-total height), + miner-spend-winner: (get-tenure-info? miner-spend-winner height), + } + )"; let contract_tx0 = make_contract_publish( &sender_sk, @@ -7855,20 +7916,13 @@ fn check_block_info() { &mut btc_regtest_controller, ); - info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + let info = get_chain_info(&naka_conf); + let last_pre_nakamoto_block_height = info.stacks_tip_height.into(); - info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); - let result0 = call_read_only( - &naka_conf, - &sender_addr, - contract0_name, - "get-block-info", - vec![&clarity::vm::Value::UInt(1)], - ); - let tuple0 = result0.expect_tuple().unwrap().data_map; - info!("Info from pre-epoch 3.0: {:?}", tuple0); + let c0_block_ht_1_pre_3 = get_block_info(contract0_name, 1); + info!("Info from pre-epoch 3.0: {:?}", c0_block_ht_1_pre_3); wait_for_first_naka_block_commit(60, &commits_submitted); @@ -7886,27 +7940,6 @@ fn check_block_info() { sender_nonce += 1; submit_tx(&http_origin, &contract_tx1); - // This version uses the Clarity 3 functions - let contract3_name = "test-contract-3"; - let contract_clarity3 = "(define-read-only (get-block-info (height uint)) - { - id-header-hash: (get-stacks-block-info? id-header-hash height), - header-hash: (get-stacks-block-info? header-hash height), - time: (get-stacks-block-info? time height), - } - ) - (define-read-only (get-tenure-info (height uint)) - { - burnchain-header-hash: (get-tenure-info? burnchain-header-hash height), - miner-address: (get-tenure-info? miner-address height), - time: (get-tenure-info? time height), - vrf-seed: (get-tenure-info? vrf-seed height), - block-reward: (get-tenure-info? block-reward height), - miner-spend-total: (get-tenure-info? miner-spend-total height), - miner-spend-winner: (get-tenure-info? miner-spend-winner height), - } - )"; - let contract_tx3 = make_contract_publish( &sender_sk, sender_nonce, @@ -7919,8 +7952,6 @@ fn check_block_info() { submit_tx(&http_origin, &contract_tx3); // sleep to ensure seconds have changed - thread::sleep(Duration::from_secs(3)); - next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -7931,98 +7962,148 @@ fn check_block_info() { }) .expect("Timed out waiting for contracts to publish"); - let info = get_chain_info_result(&naka_conf).unwrap(); + // the first test we want to do is around the behavior of + // looking up 2.x blocks. + + // look up block height 1 with all 3 contracts after nakamoto activates + let c0_block_ht_1_post_3 = get_block_info(contract0_name, 1); + let c1_block_ht_1_post_3 = get_block_info(contract1_name, 1); + let c3_block_ht_1_post_3 = get_block_info(contract3_name, 1); + assert_eq!(c0_block_ht_1_post_3, c0_block_ht_1_pre_3); + assert_eq!(c0_block_ht_1_post_3, c1_block_ht_1_post_3); + for (key, value) in c3_block_ht_1_post_3.iter() { + assert_eq!(&c0_block_ht_1_post_3[key], value); + } + + // look up last 2.x height with all 3 contracts + let c0_last_2x_block = get_block_info(contract0_name, last_pre_nakamoto_block_height); + let c1_last_2x_block = get_block_info(contract1_name, last_pre_nakamoto_block_height); + let c3_last_2x_block = get_block_info(contract3_name, last_pre_nakamoto_block_height); + assert_eq!(c0_last_2x_block, c1_last_2x_block); + for (key, value) in c3_last_2x_block.iter() { + assert_eq!(&c0_last_2x_block[key], value); + } + + // now we want to test the behavior of the first block in a tenure + // so, we'll issue a bitcoin block, and not submit any transactions + // (which will keep the miner from issuing any blocks after the first + // one in the tenure) + + let info = get_chain_info(&naka_conf); info!("Chain info: {:?}", info); let last_stacks_block_height = info.stacks_tip_height as u128; - let (chainstate, _) = StacksChainState::open( - naka_conf.is_mainnet(), - naka_conf.burnchain.chain_id, - &naka_conf.get_chainstate_path_str(), - None, - ) - .unwrap(); - let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let last_tenure_height: u128 = NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) .unwrap() .unwrap() .into(); + let last_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &last_stacks_tip, + &info.stacks_tip_consensus_hash, + ) + .unwrap() + .unwrap(); + let last_tenure_start_block_id = last_tenure_start_block_header.index_block_hash(); + let last_tenure_start_block_ht = last_tenure_start_block_header.stacks_block_height.into(); - let get_block_info = |contract_name: &str, query_height: u128| { - let result = call_read_only( - &naka_conf, - &sender_addr, - contract_name, - "get-block-info", - vec![&clarity::vm::Value::UInt(query_height)], - ); - result.expect_tuple().unwrap().data_map - }; - - let tuple0 = get_block_info(contract0_name, last_tenure_height - 1); - assert_block_info(&tuple0, &miner, &miner_spend); - - let tuple1 = get_block_info(contract1_name, last_tenure_height - 1); - assert_eq!(tuple0, tuple1); - - let result3_tenure = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-tenure-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 2)], - ); - let tuple3_tenure0 = result3_tenure.expect_tuple().unwrap().data_map; - assert_eq!( - tuple3_tenure0.get("burnchain-header-hash"), - tuple0.get("burnchain-header-hash") - ); - assert_eq!( - tuple3_tenure0.get("miner-address"), - tuple0.get("miner-address") - ); - assert_eq!(tuple3_tenure0.get("time"), tuple0.get("time")); - assert_eq!(tuple3_tenure0.get("vrf-seed"), tuple0.get("vrf-seed")); - assert_eq!( - tuple3_tenure0.get("block-reward"), - tuple0.get("block-reward") - ); - assert_eq!( - tuple3_tenure0.get("miner-spend-total"), - tuple0.get("miner-spend-total") - ); - assert_eq!( - tuple3_tenure0.get("miner-spend-winner"), - tuple0.get("miner-spend-winner") - ); + // lets issue the next bitcoin block + next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) + .unwrap(); - // this will point to the last block in the prior tenure (which should have been a 2.x block) - let tuple3_block1 = get_block_info(contract3_name, last_stacks_block_height - 2); - assert_eq!( - tuple3_block1.get("id-header-hash"), - tuple0.get("id-header-hash") - ); - assert_eq!(tuple3_block1.get("header-hash"), tuple0.get("header-hash")); - assert!(tuple3_block1 - .get("time") + let info = get_chain_info(&naka_conf); + info!("Chain info: {:?}", info); + let cur_stacks_block_height = info.stacks_tip_height as u128; + let cur_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let cur_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &cur_stacks_tip) + .unwrap() + .unwrap() + .into(); + let cur_tenure_start_block_id = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &cur_stacks_tip, + &info.stacks_tip_consensus_hash, + ) + .unwrap() + .unwrap() + .index_block_hash(); + + assert_eq!(cur_tenure_start_block_id, cur_stacks_tip); + assert_eq!(cur_stacks_block_height, last_stacks_block_height + 1); + assert_eq!(cur_tenure_height, last_tenure_height + 1); + + // first checks: get-block-info with the current tenure height should return None + let c0_cur_tenure = get_block_info(contract0_name, cur_tenure_height); + let c1_cur_tenure = get_block_info(contract1_name, cur_tenure_height); + // contract 3 uses the current stacks block height rather than current tenure. + let c3_cur_tenure = get_block_info(contract3_name, cur_stacks_block_height); + let c3_cur_tenure_ti = get_tenure_info(cur_stacks_block_height); + assert!(c0_cur_tenure["id-header-hash"] + .clone() + .expect_optional() .unwrap() + .is_none()); + assert!(c1_cur_tenure["id-header-hash"] .clone() .expect_optional() .unwrap() - .is_some()); - - // Sleep to ensure the seconds have changed - thread::sleep(Duration::from_secs(1)); - - // Mine a Nakamoto block - info!("Mining Nakamoto block"); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); + .is_none()); + assert!(c3_cur_tenure["id-header-hash"] + .clone() + .expect_optional() + .unwrap() + .is_none()); + assert!(c3_cur_tenure_ti["burnchain-header-hash"] + .clone() + .expect_optional() + .unwrap() + .is_none()); + + // second checks: get-block-info with prior tenure height should return Some + let c0_last_tenure = get_block_info(contract0_name, last_tenure_height); + let c1_last_tenure = get_block_info(contract1_name, last_tenure_height); + // contract 3 uses the current stacks block height rather than current tenure. + let c3_last_tenure_bi = get_block_info(contract3_name, last_stacks_block_height); + let c3_last_tenure_ti = get_tenure_info(last_stacks_block_height); + let c3_last_tenure_start_bi = get_block_info(contract3_name, last_tenure_start_block_ht); + + // assert that c0 and c1 returned some data + assert_block_info(&c0_last_tenure, &miner, &miner_spend); + assert_block_info(&c1_last_tenure, &miner, &miner_spend); + assert_eq!(c0_last_tenure, c1_last_tenure); + + let c3_fetched_id_hash = parse_block_id(&c3_last_tenure_bi["id-header-hash"]); + assert_eq!(c3_fetched_id_hash, last_stacks_tip); + + // c0 and c1 should have different block info data than c3 + assert_ne!( + c0_last_tenure["header-hash"], + c3_last_tenure_bi["header-hash"] + ); + assert_ne!( + c0_last_tenure["id-header-hash"], + c3_last_tenure_bi["id-header-hash"] + ); + assert_ne!(c0_last_tenure["time"], c3_last_tenure_bi["time"]); + // c0 and c1 should have the same burn data as the *tenure info* lookup in c3 + for (key, value) in c3_last_tenure_ti.iter() { + assert_eq!(&c0_last_tenure[key], value); + } + // c0 and c1 should have the same header hash data as the *block info* lookup in c3 using last tenure start block ht + for key in ["header-hash", "id-header-hash"] { + assert_eq!(&c0_last_tenure[key], &c3_last_tenure_start_bi[key]); + } + // c0 should have the same index hash as last_tenure start block id + assert_eq!( + parse_block_id(&c0_last_tenure["id-header-hash"]), + last_tenure_start_block_id + ); - // submit a tx so that the miner will mine an extra block + // Now we want to test the behavior of a new nakamoto block within the same tenure + // We'll force a nakamoto block by submitting a transfer, then waiting for the nonce to bump + info!("Mining an interim nakamoto block"); let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -8034,93 +8115,94 @@ fn check_block_info() { sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); - loop { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } + wait_for(30, || { + thread::sleep(Duration::from_secs(1)); + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Failed to process the submitted transfer tx in a new nakamoto block"); - let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); - let last_stacks_block_height = info.stacks_tip_height as u128; - let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); - let last_tenure_height: u128 = - NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) + let info = get_chain_info(&naka_conf); + let interim_stacks_block_height = info.stacks_tip_height as u128; + let interim_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); + let interim_tenure_height: u128 = + NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &interim_stacks_tip) .unwrap() .unwrap() .into(); - - let tuple0 = get_block_info(contract0_name, last_tenure_height); - assert_block_info(&tuple0, &miner, &miner_spend); - - let tuple1 = get_block_info(contract1_name, last_tenure_height); - assert_eq!(tuple0, tuple1); - - let result3_tenure = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-tenure-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], - ); - let tuple3_tenure1 = result3_tenure.expect_tuple().unwrap().data_map; - // There should have been a tenure change, so these should be different. - assert_ne!(tuple3_tenure0, tuple3_tenure1); + let interim_tenure_start_block_id = NakamotoChainState::get_tenure_start_block_header( + &mut chainstate.index_conn(), + &interim_stacks_tip, + &info.stacks_tip_consensus_hash, + ) + .unwrap() + .unwrap() + .index_block_hash(); + assert_eq!(interim_tenure_height, cur_tenure_height); + assert_eq!(interim_tenure_start_block_id, cur_tenure_start_block_id); + assert_eq!(interim_stacks_block_height, cur_stacks_block_height + 1); + + // querying the same block heights that returned data before should yield the identical result assert_eq!( - tuple3_tenure1["burnchain-header-hash"], - tuple0["burnchain-header-hash"] + c0_last_tenure, + get_block_info(contract0_name, last_tenure_height) ); - assert_eq!(tuple3_tenure1["miner-address"], tuple0["miner-address"]); - assert_eq!(tuple3_tenure1["time"], tuple0["time"]); - assert_eq!(tuple3_tenure1["vrf-seed"], tuple0["vrf-seed"]); - assert_eq!(tuple3_tenure1["block-reward"], tuple0["block-reward"]); assert_eq!( - tuple3_tenure1.get("miner-spend-total"), - tuple0.get("miner-spend-total") + c1_last_tenure, + get_block_info(contract1_name, last_tenure_height) ); assert_eq!( - tuple3_tenure1.get("miner-spend-winner"), - tuple0.get("miner-spend-winner") + c3_last_tenure_bi, + get_block_info(contract3_name, last_stacks_block_height) ); - - let result3_block = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-block-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + assert_eq!(c3_last_tenure_ti, get_tenure_info(last_stacks_block_height)); + assert_eq!( + c3_last_tenure_start_bi, + get_block_info(contract3_name, last_tenure_start_block_ht) + ); + + // querying for the current tenure should work now though + let c0_cur_tenure = get_block_info(contract0_name, cur_tenure_height); + let c1_cur_tenure = get_block_info(contract1_name, cur_tenure_height); + // contract 3 uses the current stacks block height rather than current tenure. + let c3_cur_tenure = get_block_info(contract3_name, cur_stacks_block_height); + let c3_cur_tenure_ti = get_tenure_info(cur_stacks_block_height); + assert_block_info(&c0_cur_tenure, &miner, &miner_spend); + assert_block_info(&c1_cur_tenure, &miner, &miner_spend); + assert_eq!(c0_cur_tenure, c1_cur_tenure); + + // c0 and c1 should have the same header hash data as the *block info* lookup in c3 using cur_stacks_block + // (because cur_stacks_tip == cur_tenure_start_block_id, as was asserted before) + for key in ["header-hash", "id-header-hash"] { + assert_eq!(&c0_cur_tenure[key], &c3_cur_tenure[key]); + } + // c0 should have the same index hash as cur_tenure start block id + assert_eq!( + parse_block_id(&c0_cur_tenure["id-header-hash"]), + cur_tenure_start_block_id, + "c0 should have the same index hash as cur_tenure_start_block_id" ); - let tuple3_block2 = result3_block.expect_tuple().unwrap().data_map; - // There should have been a block change, so these should be different. - assert_ne!(tuple3_block1, tuple3_block2); - - // tuple 0 fetches the id-header-hash for the first block of the tenure (block1) + // c0 and c1 should have the same burn data as the *tenure info* lookup in c3 + for (key, value) in c3_cur_tenure_ti.iter() { + assert_eq!(&c0_cur_tenure[key], value); + } - let tuple3_block1 = get_block_info(contract3_name, last_stacks_block_height - 2); - assert_eq!(tuple3_block1["id-header-hash"], tuple0["id-header-hash"]); - assert_eq!(tuple3_block1["header-hash"], tuple0["header-hash"]); - assert!(tuple3_block2["time"] + let c3_interim_bi = get_block_info(contract3_name, interim_stacks_block_height); + let c3_interim_ti = get_tenure_info(interim_stacks_block_height); + assert!(c3_interim_bi["id-header-hash"] .clone() .expect_optional() .unwrap() - .is_some()); - - // Sleep to ensure the seconds have changed - thread::sleep(Duration::from_secs(1)); - - // Mine a Nakamoto block - info!("Mining Nakamoto block"); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); + .is_none()); + assert!(c3_interim_ti["burnchain-header-hash"] + .clone() + .expect_optional() + .unwrap() + .is_none()); - // submit a tx so that the miner will mine an extra block + // Now we'll mine one more interim block so that we can test that the stacks-block-info outputs update + // again. + info!("Mining a second interim nakamoto block"); let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -8129,55 +8211,72 @@ fn check_block_info() { &recipient, send_amt, ); + sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); - loop { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } - - let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); - let last_stacks_block_height = info.stacks_tip_height as u128; - let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); - let last_tenure_height: u128 = - NakamotoChainState::get_coinbase_height(&mut chainstate.index_conn(), &last_stacks_tip) - .unwrap() - .unwrap() - .into(); - - let tuple0 = get_block_info(contract0_name, last_tenure_height); - assert_block_info(&tuple0, &miner, &miner_spend); + wait_for(30, || { + thread::sleep(Duration::from_secs(1)); + let cur_sender_nonce = get_account(&http_origin, &to_addr(&sender_sk)).nonce; + Ok(cur_sender_nonce >= sender_nonce) + }) + .expect("Failed to process the submitted transfer tx in a new nakamoto block"); - let tuple1 = get_block_info(contract1_name, last_tenure_height); - assert_eq!(tuple0, tuple1); + let info = get_chain_info(&naka_conf); + assert_eq!( + info.stacks_tip_height as u128, + interim_stacks_block_height + 1 + ); - let result3_tenure = call_read_only( - &naka_conf, - &sender_addr, - contract3_name, - "get-tenure-info", - vec![&clarity::vm::Value::UInt(last_stacks_block_height - 1)], + // querying for the current tenure should work the same as before + assert_eq!( + c0_cur_tenure, + get_block_info(contract0_name, cur_tenure_height) ); - let tuple3_tenure1a = result3_tenure.expect_tuple().unwrap().data_map; - assert_eq!(tuple3_tenure1, tuple3_tenure1a); + assert_eq!( + c1_cur_tenure, + get_block_info(contract1_name, cur_tenure_height) + ); + // contract 3 uses the current stacks block height rather than current tenure. + assert_eq!( + c3_cur_tenure, + get_block_info(contract3_name, cur_stacks_block_height) + ); + assert_eq!(c3_cur_tenure_ti, get_tenure_info(cur_stacks_block_height)); + + // querying using the first interim's block height should now work in contract 3 + let c3_interim_bi = get_block_info(contract3_name, interim_stacks_block_height); + let c3_interim_ti = get_tenure_info(interim_stacks_block_height); - let tuple3_block3 = get_block_info(contract3_name, last_stacks_block_height - 1); - // There should have been a block change, so these should be different. - assert_ne!(tuple3_block3, tuple3_block2); - assert_eq!(tuple3_block1["id-header-hash"], tuple0["id-header-hash"]); - assert_eq!(tuple3_block1["header-hash"], tuple0["header-hash"]); - assert!(tuple3_block3["time"] + // it will *not* work in contracts 1 and 2 + let c0_interim = get_block_info(contract0_name, interim_stacks_block_height); + let c1_interim = get_block_info(contract1_name, interim_stacks_block_height); + assert!(c0_interim["id-header-hash"] .clone() .expect_optional() .unwrap() - .is_some()); + .is_none()); + assert!(c1_interim["id-header-hash"] + .clone() + .expect_optional() + .unwrap() + .is_none()); + + assert_eq!(c3_interim_ti, c3_cur_tenure_ti, "Tenure info should be the same whether queried using the starting block or the interim block height"); + + // c0 and c1 should have different block info data than the interim block + assert_ne!(c0_cur_tenure["header-hash"], c3_interim_bi["header-hash"]); + assert_ne!( + c0_cur_tenure["id-header-hash"], + c3_interim_bi["id-header-hash"] + ); + assert_ne!(c0_cur_tenure["time"], c3_interim_bi["time"]); + + // c3 should have gotten the interim's tip + assert_eq!( + parse_block_id(&c3_interim_bi["id-header-hash"]), + interim_stacks_tip, + "Contract 3 should be able to fetch the StacksBlockId of the tip" + ); coord_channel .lock() From 55d502d683e5a5b64dfbd3171360d7b05bb1a55c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 17 Oct 2024 10:29:08 -0400 Subject: [PATCH 1359/1400] feat: add reward cycle check for 3.0 activation 3.0 can only activate in reward cycle 2 or later. --- testnet/stacks-node/src/config.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f4750c6242..02fb249306 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -550,6 +550,16 @@ impl Config { &burnchain.pox_constants ); } + let activation_reward_cycle = burnchain + .block_height_to_reward_cycle(epoch_30.start_height) + .expect("FATAL: Epoch 3.0 starts before the first burnchain block"); + if activation_reward_cycle < 2 { + panic!( + "FATAL: Epoch 3.0 must start at or after the second reward cycle. Epoch 3.0 start set to: {}. PoX Parameters: {:?}", + epoch_30.start_height, + &burnchain.pox_constants + ); + } } /// Connect to the MempoolDB using the configured cost estimation From 7144bb71a7fe5ee6e4cd2f9ee19a1ca4391e7459 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 17 Oct 2024 07:45:51 -0700 Subject: [PATCH 1360/1400] Fix comment Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3b619a46bd..009797a376 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7117,7 +7117,7 @@ fn continue_tenure_extend() { }) .unwrap(); - // Mine 3 nakamoto tenures + // Mine 3 nakamoto blocks for i in 0..3 { info!("Triggering Nakamoto blocks after extend ({})", i + 1); transfer_nonce += 1; From 097c4db2e376f56794b0585347cb692585758bbe Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Thu, 17 Oct 2024 16:44:44 +0100 Subject: [PATCH 1361/1400] feat: cache `coinbase_height` --- stackslib/src/net/api/getinfo.rs | 16 ++------------- stackslib/src/net/api/postmempoolquery.rs | 12 ++--------- stackslib/src/net/p2p.rs | 25 +++++++++++++++++++++-- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index 846e5a18d2..52f07e937e 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -225,20 +225,8 @@ impl RPCRequestHandler for RPCPeerInfoRequestHandler { let ibd = node.ibd; let rpc_peer_info: Result = - node.with_node_state(|network, sortdb, chainstate, _mempool, rpc_args| { - let coinbase_height = NakamotoChainState::get_coinbase_height( - &mut chainstate.index_conn(), - &StacksBlockId::new( - &network.stacks_tip.consensus_hash, - &network.stacks_tip.block_hash, - ), - ) - .map_err(|e| { - StacksHttpResponse::new_error( - &preamble, - &HttpServerError::new(format!("Failed to load coinbase height: {:?}", &e)), - ) - })?; + node.with_node_state(|network, _sortdb, chainstate, _mempool, rpc_args| { + let coinbase_height = network.stacks_tip.coinbase_height; Ok(RPCPeerInfoData::from_network( network, diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index 3710db7dc8..8e0c6f459c 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -275,16 +275,8 @@ impl RPCRequestHandler for RPCMempoolQueryRequestHandler { .ok_or(NetError::SendError("`mempool_query` not set".into()))?; let page_id = self.page_id.take(); - let stream_res = node.with_node_state(|network, sortdb, chainstate, mempool, _rpc_args| { - let coinbase_height = NakamotoChainState::get_coinbase_height( - &mut chainstate.index_conn(), - &StacksBlockId::new( - &network.stacks_tip.consensus_hash, - &network.stacks_tip.block_hash - ), - ) - .map_err(|e| StacksHttpResponse::new_error(&preamble, &HttpServerError::new(format!("Failed to load coinbase height: {:?}", &e))))? - .unwrap_or(0); + let stream_res = node.with_node_state(|network, _sortdb, _chainstate, mempool, _rpc_args| { + let coinbase_height = network.stacks_tip.coinbase_height.unwrap_or(0); let max_txs = network.connection_opts.mempool_max_tx_query; debug!( diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index eb224f3e80..05f6c1ac29 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -246,6 +246,7 @@ pub struct StacksTipInfo { pub consensus_hash: ConsensusHash, pub block_hash: BlockHeaderHash, pub height: u64, + pub coinbase_height: Option, pub is_nakamoto: bool, } @@ -255,6 +256,7 @@ impl StacksTipInfo { consensus_hash: ConsensusHash([0u8; 20]), block_hash: BlockHeaderHash([0u8; 32]), height: 0, + coinbase_height: None, is_nakamoto: false, } } @@ -4218,14 +4220,25 @@ impl PeerNetwork { let parent_tenure_start_header = NakamotoChainState::get_tenure_start_block_header(&mut chainstate.index_conn(), stacks_tip_block_id, &parent_header.consensus_hash)? .ok_or_else(|| { - debug!("{:?}: get_parent_stacks_tip: No tenure-start block for parent tenure {} off of child {} (parnet {})", self.get_local_peer(), &parent_header.consensus_hash, stacks_tip_block_id, &parent_block_id); + debug!("{:?}: get_parent_stacks_tip: No tenure-start block for parent tenure {} off of child {} (parent {})", self.get_local_peer(), &parent_header.consensus_hash, stacks_tip_block_id, &parent_block_id); net_error::DBError(db_error::NotFoundError) })?; + // TODO: Test this! + let parent_stacks_tip_block_hash = parent_tenure_start_header.anchored_header.block_hash(); + let parent_tenure_start_header_cbh = NakamotoChainState::get_coinbase_height( + &mut chainstate.index_conn(), + &StacksBlockId::new( + &parent_tenure_start_header.consensus_hash, + &parent_stacks_tip_block_hash, + ), + )?; + let parent_stacks_tip = StacksTipInfo { consensus_hash: parent_tenure_start_header.consensus_hash, - block_hash: parent_tenure_start_header.anchored_header.block_hash(), + block_hash: parent_stacks_tip_block_hash, height: parent_tenure_start_header.anchored_header.height(), + coinbase_height: parent_tenure_start_header_cbh, is_nakamoto: parent_tenure_start_header .anchored_header .as_stacks_nakamoto() @@ -4377,6 +4390,12 @@ impl PeerNetwork { self.stacks_tip.is_nakamoto }; + // TODO: Test this! + let stacks_tip_cbh = NakamotoChainState::get_coinbase_height( + &mut chainstate.index_conn(), + &new_stacks_tip_block_id, + )?; + let need_stackerdb_refresh = canonical_sn.canonical_stacks_tip_consensus_hash != self.burnchain_tip.canonical_stacks_tip_consensus_hash || burnchain_tip_changed @@ -4415,6 +4434,7 @@ impl PeerNetwork { consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), block_hash: FIRST_STACKS_BLOCK_HASH.clone(), height: 0, + coinbase_height: None, is_nakamoto: false, } } @@ -4610,6 +4630,7 @@ impl PeerNetwork { consensus_hash: stacks_tip_ch, block_hash: stacks_tip_bhh, height: stacks_tip_height, + coinbase_height: stacks_tip_cbh, is_nakamoto: stacks_tip_is_nakamoto, }; self.parent_stacks_tip = parent_stacks_tip; From 8c0f9675ccfe913829fee62e532e453f2b2a7f33 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Thu, 17 Oct 2024 16:45:22 +0100 Subject: [PATCH 1362/1400] nit: field init shorthand --- stackslib/src/net/p2p.rs | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 05f6c1ac29..5f249ae12c 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -105,7 +105,7 @@ struct NetworkHandleServer { impl NetworkHandle { pub fn new(chan_in: SyncSender) -> NetworkHandle { - NetworkHandle { chan_in: chan_in } + NetworkHandle { chan_in } } /// Send out a command to the p2p thread. Do not bother waiting for the response. @@ -175,7 +175,7 @@ impl NetworkHandle { impl NetworkHandleServer { pub fn new(chan_in: Receiver) -> NetworkHandleServer { - NetworkHandleServer { chan_in: chan_in } + NetworkHandleServer { chan_in } } pub fn pair(bufsz: usize) -> (NetworkHandleServer, NetworkHandle) { @@ -483,11 +483,11 @@ impl PeerNetwork { } let mut network = PeerNetwork { - peer_version: peer_version, - epochs: epochs, + peer_version, + epochs, - local_peer: local_peer, - chain_view: chain_view, + local_peer, + chain_view, chain_view_stable_consensus_hash: ConsensusHash([0u8; 20]), ast_rules: ASTRules::Typical, heaviest_affirmation_map: AffirmationMap::empty(), @@ -506,8 +506,8 @@ impl PeerNetwork { tenure_start_block_id: StacksBlockId([0x00; 32]), current_reward_sets: BTreeMap::new(), - peerdb: peerdb, - atlasdb: atlasdb, + peerdb, + atlasdb, peers: PeerMap::new(), sockets: HashMap::new(), @@ -523,8 +523,8 @@ impl PeerNetwork { p2p_network_handle: 0, http_network_handle: 0, - burnchain: burnchain, - connection_opts: connection_opts, + burnchain, + connection_opts, work_state: PeerNetworkWorkState::GetPublicIP, nakamoto_work_state: PeerNetworkWorkState::GetPublicIP, @@ -555,8 +555,8 @@ impl PeerNetwork { attachments_downloader: None, stacker_db_syncs: Some(stacker_db_sync_map), - stacker_db_configs: stacker_db_configs, - stackerdbs: stackerdbs, + stacker_db_configs, + stackerdbs, prune_outbound_counts: HashMap::new(), prune_inbound_counts: HashMap::new(), @@ -3493,7 +3493,7 @@ impl PeerNetwork { } let microblocks_data = MicroblocksData { index_anchor_block: anchor_block_id.clone(), - microblocks: microblocks, + microblocks, }; debug!( @@ -4041,7 +4041,7 @@ impl PeerNetwork { peer_version: nk.peer_version, network_id: nk.network_id, ts: get_epoch_time_secs(), - pubkey: pubkey, + pubkey, }, ); @@ -5205,7 +5205,7 @@ mod test { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x01, ]), - port: port, + port, }, public_key: Secp256k1PublicKey::from_hex( "02fa66b66f8971a8cd4d20ffded09674e030f0f33883f337f34b95ad4935bac0e3", From 4172711a50ed9ef07fc124651662616fa96bd50b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 17 Oct 2024 11:03:31 -0500 Subject: [PATCH 1363/1400] feat: gate block-info behavior on chain_id (leave inactive in primary testnet) --- clarity/src/vm/functions/database.rs | 36 +++++++++++-------- .../src/tests/nakamoto_integrations.rs | 4 +++ 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index eecb5e2ba0..ff14507ead 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -16,6 +16,7 @@ use std::cmp; +use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::chainstate::StacksBlockId; use stacks_common::types::StacksEpochId; @@ -769,22 +770,27 @@ pub fn special_get_block_info( _ => return Ok(Value::none()), }; - let height_value = if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity3 { - if env.global_context.epoch_id < StacksEpochId::Epoch30 { - height_value - } else { - // interpretting height_value as a tenure height - let height_opt = env - .global_context - .database - .get_block_height_for_tenure_height(height_value)?; - match height_opt { - Some(x) => x, - None => return Ok(Value::none()), - } - } - } else { + // interpret height as a tenure height IFF + // * clarity version is less than Clarity3 + // * the evaluated epoch is geq 3.0 + // * we are not on (classic) primary testnet + let interpret_height_as_tenure_height = env.contract_context.get_clarity_version() + < &ClarityVersion::Clarity3 + && env.global_context.epoch_id >= StacksEpochId::Epoch30 + && env.global_context.chain_id != CHAIN_ID_TESTNET; + + let height_value = if !interpret_height_as_tenure_height { height_value + } else { + // interpretting height_value as a tenure height + let height_opt = env + .global_context + .database + .get_block_height_for_tenure_height(height_value)?; + match height_opt { + Some(x) => x, + None => return Ok(Value::none()), + } }; let current_block_height = env.global_context.database.get_current_block_height(); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 99a65ed4cc..eec06c1291 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7374,6 +7374,7 @@ fn check_block_times() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.burnchain.chain_id = CHAIN_ID_TESTNET + 1; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); @@ -7771,6 +7772,8 @@ fn check_block_info() { let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + // change the chain id so that it isn't the same as primary testnet + naka_conf.burnchain.chain_id = CHAIN_ID_TESTNET + 1; let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); @@ -8362,6 +8365,7 @@ fn check_block_info_rewards() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.burnchain.chain_id = CHAIN_ID_TESTNET + 1; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); From 60ed0cf0458b56836dd464714f44bf94bdf83bc1 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 17 Oct 2024 10:00:50 -0700 Subject: [PATCH 1364/1400] Make the ports random in the v0 tests Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 82 +++++++++++----------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 448bd4c644..da2c4c3663 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -78,7 +78,7 @@ use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, }; -use crate::tests::{self, make_stacks_transfer}; +use crate::tests::{self, gen_random_port, make_stacks_transfer}; use crate::{nakamoto_node, BitcoinRegtestController, BurnchainController, Config, Keychain}; impl SignerTest { @@ -1449,10 +1449,10 @@ fn multiple_miners() { let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); let localhost = "127.0.0.1"; let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); @@ -1735,13 +1735,14 @@ fn miner_forking() { let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); - let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); - let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); let mut node_2_listeners = Vec::new(); // partition the signer set so that ~half are listening and using node 1 for RPC and events, @@ -1764,11 +1765,10 @@ fn miner_forking() { Duration::from_secs(first_proposal_burn_block_timing); }, |config| { - let localhost = "127.0.0.1"; - config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); - config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); - config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); - config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -2744,13 +2744,14 @@ fn multiple_miners_mock_sign_epoch_25() { let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); - let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); - let node_2_rpc_bind = format!("127.0.0.1:{}", node_2_rpc); + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); let mut node_2_listeners = Vec::new(); // partition the signer set so that ~half are listening and using node 1 for RPC and events, @@ -2768,11 +2769,10 @@ fn multiple_miners_mock_sign_epoch_25() { signer_config.node_host = node_host.to_string(); }, |config| { - let localhost = "127.0.0.1"; - config.node.rpc_bind = format!("{}:{}", localhost, node_1_rpc); - config.node.p2p_bind = format!("{}:{}", localhost, node_1_p2p); - config.node.data_url = format!("http://{}:{}", localhost, node_1_rpc); - config.node.p2p_address = format!("{}:{}", localhost, node_1_p2p); + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -3469,10 +3469,10 @@ fn multiple_miners_with_nakamoto_blocks() { let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); let localhost = "127.0.0.1"; let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); @@ -3746,14 +3746,14 @@ fn partial_tenure_fork() { let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; - - let node_1_rpc_bind = format!("127.0.0.1:{}", node_1_rpc); + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + // All signers are listening to node 1 let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, @@ -5193,10 +5193,10 @@ fn multiple_miners_with_custom_chain_id() { let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let node_1_rpc = 51024; - let node_1_p2p = 51023; - let node_2_rpc = 51026; - let node_2_p2p = 51025; + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); let localhost = "127.0.0.1"; let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); From f17721160a613078db39e4ee09763045d9fd89cf Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 17 Oct 2024 10:51:20 -0700 Subject: [PATCH 1365/1400] CRC: coinbase height always exists even if in pre nakamoto Signed-off-by: Jacinta Ferrant --- stacks-signer/src/client/mod.rs | 2 +- stackslib/src/net/api/getinfo.rs | 6 +-- stackslib/src/net/api/postmempoolquery.rs | 3 +- stackslib/src/net/api/tests/getinfo.rs | 2 +- stackslib/src/net/p2p.rs | 51 ++++++++++++++----- .../src/tests/nakamoto_integrations.rs | 11 ++-- 6 files changed, 48 insertions(+), 27 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 2cb8155f61..9885182d98 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -322,7 +322,7 @@ pub(crate) mod tests { stacks_tip_consensus_hash: generate_random_consensus_hash(), unanchored_tip: None, unanchored_seq: Some(0), - tenure_height: None, + tenure_height: thread_rng().next_u64(), exit_at_block_height: None, is_fully_synced: false, genesis_chainstate_hash: Sha256Sum::zero(), diff --git a/stackslib/src/net/api/getinfo.rs b/stackslib/src/net/api/getinfo.rs index 52f07e937e..d95b94803a 100644 --- a/stackslib/src/net/api/getinfo.rs +++ b/stackslib/src/net/api/getinfo.rs @@ -82,9 +82,7 @@ pub struct RPCPeerInfoData { pub genesis_chainstate_hash: Sha256Sum, pub unanchored_tip: Option, pub unanchored_seq: Option, - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] - pub tenure_height: Option, + pub tenure_height: u64, pub exit_at_block_height: Option, pub is_fully_synced: bool, #[serde(default)] @@ -110,7 +108,7 @@ impl RPCPeerInfoData { chainstate: &StacksChainState, exit_at_block_height: Option, genesis_chainstate_hash: &Sha256Sum, - coinbase_height: Option, + coinbase_height: u64, ibd: bool, ) -> RPCPeerInfoData { let server_version = version_string( diff --git a/stackslib/src/net/api/postmempoolquery.rs b/stackslib/src/net/api/postmempoolquery.rs index 8e0c6f459c..25da52a66d 100644 --- a/stackslib/src/net/api/postmempoolquery.rs +++ b/stackslib/src/net/api/postmempoolquery.rs @@ -276,8 +276,7 @@ impl RPCRequestHandler for RPCMempoolQueryRequestHandler { let page_id = self.page_id.take(); let stream_res = node.with_node_state(|network, _sortdb, _chainstate, mempool, _rpc_args| { - let coinbase_height = network.stacks_tip.coinbase_height.unwrap_or(0); - + let coinbase_height = network.stacks_tip.coinbase_height; let max_txs = network.connection_opts.mempool_max_tx_query; debug!( "Begin mempool query"; diff --git a/stackslib/src/net/api/tests/getinfo.rs b/stackslib/src/net/api/tests/getinfo.rs index 2a0ae5eaf9..173918145e 100644 --- a/stackslib/src/net/api/tests/getinfo.rs +++ b/stackslib/src/net/api/tests/getinfo.rs @@ -103,5 +103,5 @@ fn test_try_make_response() { ); let resp = response.decode_peer_info().unwrap(); - assert_eq!(resp.tenure_height, Some(1)); + assert_eq!(resp.tenure_height, 1); } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 5f249ae12c..054fefaf1d 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -246,7 +246,7 @@ pub struct StacksTipInfo { pub consensus_hash: ConsensusHash, pub block_hash: BlockHeaderHash, pub height: u64, - pub coinbase_height: Option, + pub coinbase_height: u64, pub is_nakamoto: bool, } @@ -256,7 +256,7 @@ impl StacksTipInfo { consensus_hash: ConsensusHash([0u8; 20]), block_hash: BlockHeaderHash([0u8; 32]), height: 0, - coinbase_height: None, + coinbase_height: 0, is_nakamoto: false, } } @@ -4224,21 +4224,35 @@ impl PeerNetwork { net_error::DBError(db_error::NotFoundError) })?; - // TODO: Test this! let parent_stacks_tip_block_hash = parent_tenure_start_header.anchored_header.block_hash(); - let parent_tenure_start_header_cbh = NakamotoChainState::get_coinbase_height( + let parent_stacks_tip_block_id = StacksBlockId::new( + &parent_tenure_start_header.consensus_hash, + &parent_stacks_tip_block_hash, + ); + let parent_coinbase_height = NakamotoChainState::get_coinbase_height( &mut chainstate.index_conn(), - &StacksBlockId::new( - &parent_tenure_start_header.consensus_hash, - &parent_stacks_tip_block_hash, - ), + &parent_stacks_tip_block_id, )?; + let coinbase_height = match parent_coinbase_height { + Some(cbh) => cbh, + None => { + if parent_tenure_start_header.is_epoch_2_block() { + // The coinbase height is the same as the stacks block height as + // every block contains a coinbase in epoch 2.x + parent_tenure_start_header.stacks_block_height + } else { + debug!("{:?}: get_parent_stacks_tip: No coinbase height found for nakamoto block {parent_stacks_tip_block_id}", self.get_local_peer()); + return Err(net_error::DBError(db_error::NotFoundError)); + } + } + }; + let parent_stacks_tip = StacksTipInfo { consensus_hash: parent_tenure_start_header.consensus_hash, block_hash: parent_stacks_tip_block_hash, height: parent_tenure_start_header.anchored_header.height(), - coinbase_height: parent_tenure_start_header_cbh, + coinbase_height, is_nakamoto: parent_tenure_start_header .anchored_header .as_stacks_nakamoto() @@ -4390,12 +4404,25 @@ impl PeerNetwork { self.stacks_tip.is_nakamoto }; - // TODO: Test this! let stacks_tip_cbh = NakamotoChainState::get_coinbase_height( &mut chainstate.index_conn(), &new_stacks_tip_block_id, )?; + let coinbase_height = match stacks_tip_cbh { + Some(cbh) => cbh, + None => { + if !stacks_tip_is_nakamoto { + // The coinbase height is the same as the stacks block height as + // every block contains a coinbase in epoch 2.x + stacks_tip_height + } else { + debug!("{:?}: No coinbase height found for nakamoto block {new_stacks_tip_block_id}", self.get_local_peer()); + return Err(net_error::DBError(db_error::NotFoundError)); + } + } + }; + let need_stackerdb_refresh = canonical_sn.canonical_stacks_tip_consensus_hash != self.burnchain_tip.canonical_stacks_tip_consensus_hash || burnchain_tip_changed @@ -4434,7 +4461,7 @@ impl PeerNetwork { consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), block_hash: FIRST_STACKS_BLOCK_HASH.clone(), height: 0, - coinbase_height: None, + coinbase_height: 0, is_nakamoto: false, } } @@ -4630,7 +4657,7 @@ impl PeerNetwork { consensus_hash: stacks_tip_ch, block_hash: stacks_tip_bhh, height: stacks_tip_height, - coinbase_height: stacks_tip_cbh, + coinbase_height, is_nakamoto: stacks_tip_is_nakamoto, }; self.parent_stacks_tip = parent_stacks_tip; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 5fdf79dcae..37b7a6c329 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5402,7 +5402,7 @@ fn check_block_heights() { // With the first Nakamoto block, the chain tip and the number of tenures // must be the same (before Nakamoto every block counts as a tenure) - assert_eq!(info.tenure_height.unwrap(), info.stacks_tip_height); + assert_eq!(info.tenure_height, info.stacks_tip_height); let mut last_burn_block_height; let mut last_stacks_block_height = info.stacks_tip_height as u128; @@ -5538,7 +5538,7 @@ fn check_block_heights() { last_tenure_height = bh1; let info = get_chain_info_result(&naka_conf).unwrap(); - assert_eq!(info.tenure_height.unwrap(), bh3 as u64); + assert_eq!(info.tenure_height, bh3 as u64); let sbh = heights3 .get("stacks-block-height") @@ -5649,7 +5649,7 @@ fn check_block_heights() { ); let info = get_chain_info_result(&naka_conf).unwrap(); - assert_eq!(info.tenure_height.unwrap(), bh3 as u64); + assert_eq!(info.tenure_height, bh3 as u64); let sbh = heights3 .get("stacks-block-height") @@ -5692,10 +5692,7 @@ fn check_block_heights() { ); let info = get_chain_info_result(&naka_conf).unwrap(); - assert_eq!( - info.tenure_height.unwrap(), - block_height_pre_3_0 + tenure_count - ); + assert_eq!(info.tenure_height, block_height_pre_3_0 + tenure_count); coord_channel .lock() From f78c778e6f81ce46e15bbe07cc9bef12cecd5196 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 17 Oct 2024 12:58:18 -0500 Subject: [PATCH 1366/1400] test: add TestPeer coverage for get-block-info? * add support to TestPeer for changing network_id/chain_id * test get-block-info? behavior in Clarity1 with primary testnet chain id and a different chain id --- stackslib/src/burnchains/tests/mod.rs | 16 +- .../chainstate/nakamoto/coordinator/tests.rs | 538 +++++++++++++++++- .../src/chainstate/nakamoto/tests/node.rs | 4 +- stackslib/src/chainstate/stacks/boot/mod.rs | 50 +- stackslib/src/chainstate/stacks/tests/mod.rs | 6 +- stackslib/src/net/mod.rs | 1 + stackslib/src/net/tests/mod.rs | 24 +- 7 files changed, 614 insertions(+), 25 deletions(-) diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 31e29c0b26..e7fa51a89c 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -123,11 +123,13 @@ pub struct TestMiner { pub nonce: u64, pub spent_at_nonce: HashMap, // how much uSTX this miner paid in a given tx's nonce pub test_with_tx_fees: bool, // set to true to make certain helper methods attach a pre-defined tx fee + pub chain_id: u32, } pub struct TestMinerFactory { pub key_seed: [u8; 32], pub next_miner_id: usize, + pub chain_id: u32, } impl TestMiner { @@ -136,6 +138,7 @@ impl TestMiner { privks: &Vec, num_sigs: u16, hash_mode: &AddressHashMode, + chain_id: u32, ) -> TestMiner { TestMiner { burnchain: burnchain.clone(), @@ -150,6 +153,7 @@ impl TestMiner { nonce: 0, spent_at_nonce: HashMap::new(), test_with_tx_fees: true, + chain_id, } } @@ -314,15 +318,7 @@ impl TestMinerFactory { TestMinerFactory { key_seed: [0u8; 32], next_miner_id: 1, - } - } - - pub fn from_u16(seed: u16) -> TestMinerFactory { - let mut bytes = [0u8; 32]; - (&mut bytes[0..2]).copy_from_slice(&seed.to_be_bytes()); - TestMinerFactory { - key_seed: bytes, - next_miner_id: seed as usize, + chain_id: CHAIN_ID_TESTNET, } } @@ -346,7 +342,7 @@ impl TestMinerFactory { } test_debug!("New miner: {:?} {}:{:?}", &hash_mode, num_sigs, &keys); - let mut m = TestMiner::new(burnchain, &keys, num_sigs, &hash_mode); + let mut m = TestMiner::new(burnchain, &keys, num_sigs, &hash_mode, self.chain_id); m.id = self.next_miner_id; self.next_miner_id += 1; m diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 0a59c1a67b..23146bb943 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -17,9 +17,12 @@ use std::collections::{HashMap, HashSet}; use std::sync::Mutex; +use clarity::consts::CHAIN_ID_TESTNET; use clarity::vm::clarity::ClarityConnection; -use clarity::vm::types::PrincipalData; -use clarity::vm::Value; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::database::clarity_db::NullBurnStateDB; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use clarity::vm::{ClarityVersion, Value}; use rand::prelude::SliceRandom; use rand::{thread_rng, Rng, RngCore}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; @@ -35,6 +38,7 @@ use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::vrf::VRFProof; +use crate::burnchains::tests::TestMiner; use crate::burnchains::{PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::operations::{ @@ -64,7 +68,7 @@ use crate::chainstate::stacks::events::TransactionOrigin; use crate::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksTransaction, StacksTransactionSigner, TenureChangeCause, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, - TransactionPayload, TransactionVersion, + TransactionPayload, TransactionSmartContract, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::StacksEpochExtension; @@ -76,6 +80,7 @@ use crate::stacks_common::codec::StacksMessageCodec; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::{query_rows, u64_to_sql}; use crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; +use crate::util_lib::strings::StacksString; impl<'a> NakamotoStagingBlocksConnRef<'a> { pub fn get_blocks_at_height(&self, height: u64) -> Vec { @@ -285,7 +290,7 @@ pub fn make_token_transfer( TokenTransferMemo([0x00; 34]), ), ); - stx_transfer.chain_id = 0x80000000; + stx_transfer.chain_id = chainstate.chain_id; stx_transfer.anchor_mode = TransactionAnchorMode::OnChainOnly; stx_transfer.set_tx_fee(fee); stx_transfer.auth.set_origin_nonce(nonce); @@ -297,6 +302,37 @@ pub fn make_token_transfer( stx_transfer_signed } +/// Make a token-transfer from a private key +pub fn make_contract( + chainstate: &mut StacksChainState, + name: &str, + code: &str, + private_key: &StacksPrivateKey, + version: ClarityVersion, + nonce: u64, + fee: u64, +) -> StacksTransaction { + let mut stx_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(private_key).unwrap(), + TransactionPayload::SmartContract( + TransactionSmartContract { + name: name.into(), + code_body: StacksString::from_str(code).unwrap(), + }, + Some(version), + ), + ); + stx_tx.chain_id = chainstate.chain_id; + stx_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + stx_tx.set_tx_fee(fee); + stx_tx.auth.set_origin_nonce(nonce); + + let mut tx_signer = StacksTransactionSigner::new(&stx_tx); + tx_signer.sign_origin(&private_key).unwrap(); + tx_signer.get_tx().unwrap() +} + /// Given the blocks and block-commits for a reward cycle, replay the sortitions on the given /// TestPeer, always processing the first block of the reward cycle before processing all /// subsequent blocks in random order. @@ -612,6 +648,67 @@ impl<'a> TestPeer<'a> { block } + pub fn mine_tenure(&mut self, block_builder: F) -> Vec<(NakamotoBlock, u64, ExecutionCost)> + where + F: FnMut( + &mut TestMiner, + &mut StacksChainState, + &SortitionDB, + &[(NakamotoBlock, u64, ExecutionCost)], + ) -> Vec, + { + let (burn_ops, mut tenure_change, miner_key) = + self.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (burn_height, _, consensus_hash) = self.next_burnchain_block(burn_ops.clone()); + let pox_constants = self.sortdb().pox_constants.clone(); + let first_burn_height = self.sortdb().first_block_height; + let mut test_signers = self.config.test_signers.clone().unwrap(); + + info!( + "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", + pox_constants.is_in_prepare_phase(first_burn_height, burn_height), + pox_constants.is_naka_signing_cycle_start(first_burn_height, burn_height) + ); + let vrf_proof = self.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let nakamoto_tip = + if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + nakamoto_parent_tenure.last().as_ref().unwrap().block_id() + } else { + let tip = { + let chainstate = &mut self.stacks_node.as_mut().unwrap().chainstate; + let sort_db = self.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + tip.index_block_hash() + }; + + let miner_addr = self.miner.origin_address().unwrap(); + let miner_acct = self.get_account(&nakamoto_tip, &miner_addr.to_account_principal()); + + let tenure_change_tx = self + .miner + .make_nakamoto_tenure_change_with_nonce(tenure_change.clone(), miner_acct.nonce); + + let coinbase_tx = + self.miner + .make_nakamoto_coinbase_with_nonce(None, vrf_proof, miner_acct.nonce + 1); + + self.make_nakamoto_tenure_and( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |_| {}, + block_builder, + |_| true, + ) + } + pub fn single_block_tenure( &mut self, sender_key: &StacksPrivateKey, @@ -764,6 +861,439 @@ fn block_descendant() { ); } +#[test] +fn block_info_primary_testnet() { + block_info_tests(true) +} + +#[test] +fn block_info_other_testnet() { + block_info_tests(false) +} + +fn block_info_tests(use_primary_testnet: bool) { + let private_key = StacksPrivateKey::from_seed(&[2]); + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key)); + + let num_stackers: u32 = 4; + let mut signing_key_seed = num_stackers.to_be_bytes().to_vec(); + signing_key_seed.extend_from_slice(&[1, 1, 1, 1]); + let signing_key = StacksPrivateKey::from_seed(signing_key_seed.as_slice()); + let test_stackers = (0..num_stackers) + .map(|index| TestStacker { + signer_private_key: signing_key.clone(), + stacker_private_key: StacksPrivateKey::from_seed(&index.to_be_bytes()), + amount: u64::MAX as u128 - 10000, + pox_addr: Some(PoxAddress::Standard( + StacksAddress::new( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + Hash160::from_data(&index.to_be_bytes()), + ), + Some(AddressHashMode::SerializeP2PKH), + )), + max_amount: None, + }) + .collect::>(); + let test_signers = TestSigners::new(vec![signing_key]); + let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 28; + + let chain_id = if use_primary_testnet { + CHAIN_ID_TESTNET + } else { + CHAIN_ID_TESTNET + 1 + }; + let mut boot_plan = + NakamotoBootPlan::new(&format!("{}.{use_primary_testnet}", function_name!())) + .with_test_stackers(test_stackers.clone()) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key) + .with_network_id(chain_id); + boot_plan.pox_constants = pox_constants; + + // Supply an empty vec to make sure we have no nakamoto blocks when this test begins + let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); + + let clar1_contract = " + (define-read-only (get-info (height uint)) (get-block-info? id-header-hash height)) + "; + let clar3_contract = " + (define-read-only (get-info (height uint)) (get-stacks-block-info? id-header-hash height)) + "; + + let clar1_contract_name = "clar1"; + let clar3_contract_name = "clar3"; + + let clar1_contract_id = QualifiedContractIdentifier { + issuer: addr.clone().into(), + name: clar1_contract_name.into(), + }; + let clar3_contract_id = QualifiedContractIdentifier { + issuer: addr.clone().into(), + name: clar3_contract_name.into(), + }; + + let get_tip_info = |peer: &mut TestPeer| { + peer.with_db_state(|sortdb, _, _, _| { + let (tip_ch, tip_bh, tip_height) = + SortitionDB::get_canonical_stacks_chain_tip_hash_and_height(sortdb.conn()).unwrap(); + let tip_block_id = StacksBlockId::new(&tip_ch, &tip_bh); + Ok((tip_block_id, tip_height)) + }) + .unwrap() + }; + + let get_info = |peer: &mut TestPeer, + version: ClarityVersion, + query_ht: u64, + tip_block_id: &StacksBlockId| { + let contract_id = match version { + ClarityVersion::Clarity1 => &clar1_contract_id, + ClarityVersion::Clarity2 => panic!(), + ClarityVersion::Clarity3 => &clar3_contract_id, + }; + peer.with_db_state(|sortdb, chainstate, _, _| { + let sortdb_handle = sortdb.index_handle_at_tip(); + let output = chainstate + .clarity_eval_read_only( + &sortdb_handle, + &tip_block_id, + contract_id, + &format!("(get-info u{query_ht})"), + ) + .expect_optional() + .unwrap() + .map(|value| StacksBlockId::from_vec(&value.expect_buff(32).unwrap()).unwrap()); + + info!("At stacks block {tip_block_id}, {contract_id} returned {output:?}"); + + Ok(output) + }) + .unwrap() + }; + + let (last_2x_block_id, last_2x_block_ht) = get_tip_info(&mut peer); + + peer.mine_tenure(|miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() > 0 { + return vec![]; + } + info!("Producing first nakamoto block, publishing our three contracts"); + let account = get_account(chainstate, sortdb, &addr); + let tx_0 = make_contract( + chainstate, + clar1_contract_name, + clar1_contract, + &private_key, + ClarityVersion::Clarity1, + account.nonce, + 1000, + ); + let tx_1 = make_contract( + chainstate, + clar3_contract_name, + clar3_contract, + &private_key, + ClarityVersion::Clarity3, + account.nonce + 1, + 1000, + ); + + vec![tx_0, tx_1] + }); + + let (tenure_1_start_block_id, tenure_1_block_ht) = get_tip_info(&mut peer); + assert_eq!( + get_info( + &mut peer, + ClarityVersion::Clarity1, + last_2x_block_ht, + &tenure_1_start_block_id + ) + .unwrap(), + last_2x_block_id, + ); + assert_eq!( + get_info( + &mut peer, + ClarityVersion::Clarity3, + last_2x_block_ht, + &tenure_1_start_block_id + ) + .unwrap(), + last_2x_block_id, + ); + assert!(get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_1_block_ht, + &tenure_1_start_block_id + ) + .is_none()); + assert!(get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_1_block_ht, + &tenure_1_start_block_id + ) + .is_none()); + + let recipient_addr = StacksAddress::p2pkh( + false, + &StacksPublicKey::from_private(&StacksPrivateKey::from_seed(&[2, 1, 2])), + ); + + let tenure_2_blocks: Vec<_> = peer + .mine_tenure(|miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() > 3 { + return vec![]; + } + info!("Producing block #{} in Tenure #2", blocks_so_far.len()); + let account = get_account(chainstate, sortdb, &addr); + let tx_0 = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![tx_0] + }) + .into_iter() + .map(|(block, ..)| block.header.block_id()) + .collect(); + + let (tenure_2_last_block_id, tenure_2_last_block_ht) = get_tip_info(&mut peer); + + assert_eq!(&tenure_2_last_block_id, tenure_2_blocks.last().unwrap()); + + let c3_tenure1_from_tenure2 = get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_1_block_ht, + &tenure_2_blocks[0], + ) + .unwrap(); + let c1_tenure1_from_tenure2 = get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_1_block_ht, + &tenure_2_blocks[0], + ) + .unwrap(); + + // note, since tenure_1 only has one block in it, tenure_1_block_ht is *also* the tenure height, so this should return the + // same value regardless of the `primary_tesnet` flag + assert_eq!(c1_tenure1_from_tenure2, c3_tenure1_from_tenure2); + assert_eq!(c1_tenure1_from_tenure2, tenure_1_start_block_id); + + let tenure_2_start_block_ht = tenure_1_block_ht + 1; + let tenure_2_tenure_ht = tenure_1_block_ht + 1; + + // make sure we can't look up block info from the block we're evaluating at + if use_primary_testnet { + assert!(get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_start_block_ht, + &tenure_2_blocks[0] + ) + .is_none()); + } else { + assert!(get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_tenure_ht, + &tenure_2_blocks[0] + ) + .is_none()); + } + assert!(get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_2_start_block_ht, + &tenure_2_blocks[0] + ) + .is_none()); + + // but we can from the next block in the tenure + let c1_tenure_2_start_block = if use_primary_testnet { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_start_block_ht, + &tenure_2_blocks[1], + ) + .unwrap() + } else { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_tenure_ht, + &tenure_2_blocks[1], + ) + .unwrap() + }; + let c3_tenure_2_start_block = get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_2_start_block_ht, + &tenure_2_blocks[1], + ) + .unwrap(); + assert_eq!(c1_tenure_2_start_block, c3_tenure_2_start_block); + assert_eq!(&c1_tenure_2_start_block, &tenure_2_blocks[0]); + + // try to query the middle block from the last block in the tenure + let c1_tenure_2_mid_block = if use_primary_testnet { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_start_block_ht + 1, + &tenure_2_blocks[2], + ) + } else { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_start_block_ht + 1, + &tenure_2_blocks[2], + ) + }; + let c3_tenure_2_mid_block = get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_2_start_block_ht + 1, + &tenure_2_blocks[2], + ) + .unwrap(); + assert_eq!(&c3_tenure_2_mid_block, &tenure_2_blocks[1]); + if use_primary_testnet { + assert_eq!(c1_tenure_2_mid_block.unwrap(), c3_tenure_2_mid_block); + } else { + // if interpreted as a tenure-height, this will return none, because there's no tenure at height +1 yet + assert!(c1_tenure_2_mid_block.is_none()); + + // query the tenure height again from the latest block for good measure + let start_block_result = get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_2_tenure_ht, + &tenure_2_blocks[2], + ) + .unwrap(); + assert_eq!(&start_block_result, &tenure_2_blocks[0]); + } + + let tenure_3_tenure_ht = tenure_2_tenure_ht + 1; + let tenure_3_start_block_ht = + tenure_2_start_block_ht + u64::try_from(tenure_2_blocks.len()).unwrap(); + + let tenure_3_blocks: Vec<_> = peer + .mine_tenure(|miner, chainstate, sortdb, blocks_so_far| { + if blocks_so_far.len() > 3 { + return vec![]; + } + info!("Producing block #{} in Tenure #3", blocks_so_far.len()); + let account = get_account(chainstate, sortdb, &addr); + let tx_0 = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![tx_0] + }) + .into_iter() + .map(|(block, ..)| block.header.block_id()) + .collect(); + + let (tenure_3_last_block_id, tenure_3_last_block_ht) = get_tip_info(&mut peer); + + assert_eq!(&tenure_3_last_block_id, tenure_3_blocks.last().unwrap()); + assert_eq!(tenure_3_start_block_ht, tenure_2_last_block_ht + 1); + + // query the current tenure information from the middle block + let c1_tenure_3_start_block = if use_primary_testnet { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_3_start_block_ht, + &tenure_3_blocks[1], + ) + .unwrap() + } else { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_3_tenure_ht, + &tenure_3_blocks[1], + ) + .unwrap() + }; + let c3_tenure_3_start_block = get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_3_start_block_ht, + &tenure_3_blocks[1], + ) + .unwrap(); + assert_eq!(c1_tenure_3_start_block, c3_tenure_3_start_block); + assert_eq!(&c1_tenure_3_start_block, &tenure_3_blocks[0]); + + // try to query the middle block from the last block in the tenure + let c1_tenure_3_mid_block = if use_primary_testnet { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_3_start_block_ht + 1, + &tenure_3_blocks[2], + ) + } else { + get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_3_start_block_ht + 1, + &tenure_3_blocks[2], + ) + }; + let c3_tenure_3_mid_block = get_info( + &mut peer, + ClarityVersion::Clarity3, + tenure_3_start_block_ht + 1, + &tenure_3_blocks[2], + ) + .unwrap(); + assert_eq!(&c3_tenure_3_mid_block, &tenure_3_blocks[1]); + if use_primary_testnet { + assert_eq!(c1_tenure_3_mid_block.unwrap(), c3_tenure_3_mid_block); + } else { + // if interpreted as a tenure-height, this will return none, because there's no tenure at height +1 yet + assert!(c1_tenure_3_mid_block.is_none()); + + // query the tenure height again from the latest block for good measure + let start_block_result = get_info( + &mut peer, + ClarityVersion::Clarity1, + tenure_3_tenure_ht, + &tenure_3_blocks[2], + ) + .unwrap(); + assert_eq!(&start_block_result, &tenure_3_blocks[0]); + } +} + #[test] // Test PoX Reward and Punish treatment in nakamoto // - create a 12 address PoX reward set diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index e7d6fef03f..6f929e0031 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -231,7 +231,7 @@ impl TestMiner { Some(vrf_proof), ), ); - tx_coinbase.chain_id = 0x80000000; + tx_coinbase.chain_id = self.chain_id; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; tx_coinbase.auth.set_origin_nonce(nonce); @@ -258,7 +258,7 @@ impl TestMiner { self.as_transaction_auth().unwrap(), TransactionPayload::TenureChange(tenure_change), ); - tx_tenure_change.chain_id = 0x80000000; + tx_tenure_change.chain_id = self.chain_id; tx_tenure_change.anchor_mode = TransactionAnchorMode::OnChainOnly; tx_tenure_change.auth.set_origin_nonce(nonce); diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 8562449dd3..fb36021152 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -1998,6 +1998,44 @@ pub mod test { make_tx(key, nonce, 0, payload) } + pub fn make_pox_4_lockup_chain_id( + key: &StacksPrivateKey, + nonce: u64, + amount: u128, + addr: &PoxAddress, + lock_period: u128, + signer_key: &StacksPublicKey, + burn_ht: u64, + signature_opt: Option>, + max_amount: u128, + auth_id: u128, + chain_id: u32, + ) -> StacksTransaction { + let addr_tuple = Value::Tuple(addr.as_clarity_tuple().unwrap()); + let signature = match signature_opt { + Some(sig) => Value::some(Value::buff_from(sig).unwrap()).unwrap(), + None => Value::none(), + }; + let payload = TransactionPayload::new_contract_call( + boot_code_test_addr(), + "pox-4", + "stack-stx", + vec![ + Value::UInt(amount), + addr_tuple, + Value::UInt(burn_ht as u128), + Value::UInt(lock_period), + signature, + Value::buff_from(signer_key.to_bytes_compressed()).unwrap(), + Value::UInt(max_amount), + Value::UInt(auth_id), + ], + ) + .unwrap(); + + make_tx_chain_id(key, nonce, 0, payload, chain_id) + } + pub fn make_pox_2_or_3_lockup( key: &StacksPrivateKey, nonce: u64, @@ -2450,11 +2488,21 @@ pub mod test { nonce: u64, tx_fee: u64, payload: TransactionPayload, + ) -> StacksTransaction { + make_tx_chain_id(key, nonce, tx_fee, payload, CHAIN_ID_TESTNET) + } + + fn make_tx_chain_id( + key: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + payload: TransactionPayload, + chain_id: u32, ) -> StacksTransaction { let auth = TransactionAuth::from_p2pkh(key).unwrap(); let addr = auth.origin().address_testnet(); let mut tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); - tx.chain_id = 0x80000000; + tx.chain_id = chain_id; tx.auth.set_origin_nonce(nonce); tx.set_post_condition_mode(TransactionPostConditionMode::Allow); tx.set_tx_fee(tx_fee); diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index cda74cb46d..8b66c019f0 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -1050,7 +1050,7 @@ pub fn make_coinbase_with_nonce( None, ), ); - tx_coinbase.chain_id = 0x80000000; + tx_coinbase.chain_id = miner.chain_id; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; tx_coinbase.auth.set_origin_nonce(nonce); @@ -1147,7 +1147,7 @@ pub fn make_contract_call( .unwrap(), ); - tx_contract_call.chain_id = 0x80000000; + tx_contract_call.chain_id = miner.chain_id; tx_contract_call.auth.set_origin_nonce(miner.get_nonce()); if miner.test_with_tx_fees { @@ -1179,7 +1179,7 @@ pub fn make_token_transfer( TransactionPayload::TokenTransfer((*recipient).clone().into(), amount, (*memo).clone()), ); - tx_stx_transfer.chain_id = 0x80000000; + tx_stx_transfer.chain_id = miner.chain_id; tx_stx_transfer .auth .set_origin_nonce(nonce.unwrap_or(miner.get_nonce())); diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index a33d1bc466..f0c5003740 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2379,6 +2379,7 @@ pub mod test { ) -> TestPeer<'a> { let test_path = TestPeer::make_test_path(&config); let mut miner_factory = TestMinerFactory::new(); + miner_factory.chain_id = config.network_id; let mut miner = miner_factory.next_miner(&config.burnchain, 1, 1, AddressHashMode::SerializeP2PKH); // manually set fees diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 07227c930e..6e61e7e610 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -48,7 +48,8 @@ use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::test::{ - key_to_stacks_addr, make_pox_4_lockup, make_signer_key_signature, with_sortdb, + key_to_stacks_addr, make_pox_4_lockup, make_pox_4_lockup_chain_id, make_signer_key_signature, + with_sortdb, }; use crate::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, @@ -66,6 +67,7 @@ use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::stackerdb::StackerDBConfig; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; use crate::util_lib::boot::boot_code_id; +use crate::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; /// One step of a simulated Nakamoto node's bootup procedure. #[derive(Debug, PartialEq, Clone)] @@ -91,6 +93,7 @@ pub struct NakamotoBootPlan { pub num_peers: usize, /// Whether to add an initial balance for `private_key`'s account pub add_default_balance: bool, + pub network_id: u32, } impl NakamotoBootPlan { @@ -106,6 +109,7 @@ impl NakamotoBootPlan { observer: Some(TestEventObserver::new()), num_peers: 0, add_default_balance: true, + network_id: TestPeerConfig::default().network_id, } } @@ -114,6 +118,11 @@ impl NakamotoBootPlan { self } + pub fn with_network_id(mut self, network_id: u32) -> Self { + self.network_id = network_id; + self + } + pub fn with_pox_constants(mut self, cycle_length: u32, prepare_length: u32) -> Self { let new_consts = PoxConstants::new( cycle_length, @@ -328,6 +337,7 @@ impl NakamotoBootPlan { observer: Option<&'a TestEventObserver>, ) -> (TestPeer<'a>, Vec) { let mut peer_config = TestPeerConfig::new(&self.test_name, 0, 0); + peer_config.network_id = self.network_id; peer_config.private_key = self.private_key.clone(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, @@ -487,16 +497,19 @@ impl NakamotoBootPlan { .clone() .unwrap_or(default_pox_addr.clone()); let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); - let signature = make_signer_key_signature( + let signature = make_pox_4_signer_key_signature( &pox_addr, &test_stacker.signer_private_key, reward_cycle.into(), &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, - 12_u128, + peer.config.network_id, + 12, max_amount, 1, - ); - make_pox_4_lockup( + ) + .unwrap() + .to_rsv(); + make_pox_4_lockup_chain_id( &test_stacker.stacker_private_key, 0, test_stacker.amount, @@ -507,6 +520,7 @@ impl NakamotoBootPlan { Some(signature), max_amount, 1, + peer.config.network_id, ) }) .collect(); From 708bfe51294e77c55be6071c586905b2ff2da4eb Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 17 Oct 2024 11:03:36 -0700 Subject: [PATCH 1367/1400] Fix getinfo_compat test Signed-off-by: Jacinta Ferrant --- stackslib/src/net/api/tests/getinfo.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/api/tests/getinfo.rs b/stackslib/src/net/api/tests/getinfo.rs index 173918145e..89054e3453 100644 --- a/stackslib/src/net/api/tests/getinfo.rs +++ b/stackslib/src/net/api/tests/getinfo.rs @@ -63,9 +63,9 @@ fn test_try_parse_request() { #[test] fn test_getinfo_compat() { - let old_getinfo_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false}"#; - let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d"}"#; - let getinfo_no_pubkey_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf"}"#; + let old_getinfo_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false, "tenure_height": 42}"#; + let getinfo_no_pubkey_hash_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d", "tenure_height": 42}"#; + let getinfo_no_pubkey_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf", "tenure_height": 0}"#; let getinfo_full_json = r#"{"peer_version":402653189,"pox_consensus":"b712eb731b613eebae814a8f416c5c15bc8391ec","burn_block_height":727631,"stable_pox_consensus":"53b5ed79842080500d7d83daa36aa1069dedf983","stable_burn_block_height":727624,"server_version":"stacks-node 0.0.1 (feat/faster-inv-generation:68f33190a, release build, linux [x86_64])","network_id":1,"parent_network_id":3652501241,"stacks_tip_height":52537,"stacks_tip":"b3183f2ac588e12319ff0fde78f97e62c92a218d87828c35710c29aaf7adbedc","stacks_tip_consensus_hash":"b712eb731b613eebae814a8f416c5c15bc8391ec","genesis_chainstate_hash":"74237aa39aa50a83de11a4f53e9d3bb7d43461d1de9873f402e5453ae60bc59b","unanchored_tip":"e76f68d607480e9984b4062b2691fb60a88423177898f5780b40ace17ae8982a","unanchored_seq":0,"exit_at_block_height":null,"is_fully_synced":false,"node_public_key":"029b27d345e7bd2a6627262cefe6e97d9bc482f41ec32ec76a7bec391bb441798d","node_public_key_hash":"046e6f832a83ff0da4a550907d3a44412cc1e4bf", "tenure_height": 2423}"#; // they all parse From d901503496a4c91671f9700a0e4df972beb1aafd Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 17 Oct 2024 14:20:39 -0500 Subject: [PATCH 1368/1400] add docs to address PR feedback --- clarity/src/vm/database/clarity_db.rs | 8 ++++++++ stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 2 +- stackslib/src/clarity_vm/database/mod.rs | 3 +++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 2ee31a8b41..50715fd98f 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -927,6 +927,14 @@ impl<'a> ClarityDatabase<'a> { } } + /// Return the block height for a given tenure height + /// if the block information is queryable for the tenure height. + /// The block information for a given tenure height is queryable iff: + /// * `tenure_height` falls in 2.x, and `tenure_height` < `current_height` + /// * `tenure_height` falls in 3.x, and the first block of the tenure + /// at `tenure_height` has a stacks block height less than `current_height` + /// + /// If the block information isn't queryable, return `Ok(None)` pub fn get_block_height_for_tenure_height( &mut self, tenure_height: u32, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 23146bb943..23bf3313e9 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -302,7 +302,7 @@ pub fn make_token_transfer( stx_transfer_signed } -/// Make a token-transfer from a private key +/// Make contract publish pub fn make_contract( chainstate: &mut StacksChainState, name: &str, diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index e26f9be6ba..44eeaa2e07 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -47,6 +47,9 @@ pub trait GetTenureStartId { tip: &StacksBlockId, tenure_id_consensus_hash: &ConsensusHash, ) -> Result, DBError>; + /// Return the StacksBlockId of the tenure start block for the + /// tenure with coinbase height `coinbase_height` in the fork + /// referenced by `tip`. fn get_tenure_block_id_at_cb_height( &self, tip: &StacksBlockId, From bb7e4eb779287b472ce082d5ddc395f9f1a21784 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 17 Oct 2024 16:21:15 -0400 Subject: [PATCH 1369/1400] chore: add `backoff` to dispatcher warning logs This could be helpful to debug issues communicating with API. --- testnet/stacks-node/src/event_dispatcher.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 43714f3573..a91b95cd1f 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -455,7 +455,8 @@ impl EventObserver { Err(err) => { warn!( "Event dispatcher: connection or request failed to {}:{} - {:?}", - &host, &port, err + &host, &port, err; + "backoff" => backoff ); } } From 2207ac4974b65bd4f78f99c4fe8c34592f31ae0a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 17 Oct 2024 16:40:43 -0400 Subject: [PATCH 1370/1400] feat: cap the backoff for sending events to timeout * 3 --- testnet/stacks-node/src/event_dispatcher.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index a91b95cd1f..448d71eb3c 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -429,6 +429,7 @@ impl EventObserver { .unwrap_or(PeerHost::DNS(host.to_string(), port)); let mut backoff = Duration::from_millis(100); + let max_backoff = timeout.saturating_mul(3); loop { let mut request = StacksHttpRequest::new_for_peer( peerhost.clone(), @@ -472,7 +473,7 @@ impl EventObserver { } sleep(backoff); - backoff *= 2; + backoff = std::cmp::min(backoff.saturating_mul(2), max_backoff); } } From 1c595adc0c7fb9b33c7fb06cf9b6acf75ab499d9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 17 Oct 2024 16:57:29 -0400 Subject: [PATCH 1371/1400] feat: various improvements to retry logic in event dispatcher - Create the request outside the loop - Cap the backoff timeout at 3x timeout - Print the retry attempt and backoff time in the log - Add a jitter to the backoff time --- testnet/stacks-node/src/event_dispatcher.rs | 31 +++++++++++++-------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 448d71eb3c..96eca2247a 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -26,6 +26,7 @@ use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; +use rand::Rng; use rusqlite::{params, Connection}; use serde_json::json; use stacks::burnchains::{PoxConstants, Txid}; @@ -429,17 +430,19 @@ impl EventObserver { .unwrap_or(PeerHost::DNS(host.to_string(), port)); let mut backoff = Duration::from_millis(100); + let mut attempts: i32 = 0; + // Cap the backoff at 3x the timeout let max_backoff = timeout.saturating_mul(3); - loop { - let mut request = StacksHttpRequest::new_for_peer( - peerhost.clone(), - "POST".into(), - url.path().into(), - HttpRequestContents::new().payload_json(payload.clone()), - ) - .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); - request.add_header("Connection".into(), "close".into()); + let mut request = StacksHttpRequest::new_for_peer( + peerhost.clone(), + "POST".into(), + url.path().into(), + HttpRequestContents::new().payload_json(payload.clone()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + loop { match send_http_request(host, port, request, timeout) { Ok(response) => { if response.preamble().status_code == 200 { @@ -457,7 +460,8 @@ impl EventObserver { warn!( "Event dispatcher: connection or request failed to {}:{} - {:?}", &host, &port, err; - "backoff" => backoff + "backoff" => backoff, + "attempts" => attempts ); } } @@ -473,7 +477,12 @@ impl EventObserver { } sleep(backoff); - backoff = std::cmp::min(backoff.saturating_mul(2), max_backoff); + let jitter: u64 = rand::thread_rng().gen_range(0..100); + backoff = std::cmp::min( + backoff.saturating_mul(2) + Duration::from_millis(jitter), + max_backoff, + ); + attempts = attempts.saturating_add(1); } } From 7d995db34b6215d1d7e48569493a17f910209111 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 17 Oct 2024 17:36:45 -0400 Subject: [PATCH 1372/1400] chore: fix log --- testnet/stacks-node/src/event_dispatcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 96eca2247a..72f6929ac2 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -460,7 +460,7 @@ impl EventObserver { warn!( "Event dispatcher: connection or request failed to {}:{} - {:?}", &host, &port, err; - "backoff" => backoff, + "backoff" => ?backoff, "attempts" => attempts ); } From 334cf0f970b2a03903f0cef03c21884a9e204ddd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 17 Oct 2024 17:53:25 -0400 Subject: [PATCH 1373/1400] chore: fix missing `clone` --- testnet/stacks-node/src/event_dispatcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 72f6929ac2..64c2b5ce90 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -443,7 +443,7 @@ impl EventObserver { .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); request.add_header("Connection".into(), "close".into()); loop { - match send_http_request(host, port, request, timeout) { + match send_http_request(host, port, request.clone(), timeout) { Ok(response) => { if response.preamble().status_code == 200 { debug!( From 607e3f115f811fb5d012ad03f19f6980f9ca8bea Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Fri, 18 Oct 2024 10:32:03 +0200 Subject: [PATCH 1374/1400] feat: add `tenure_height` to `/new_block` event payload --- stackslib/src/chainstate/coordinator/mod.rs | 1 + stackslib/src/chainstate/coordinator/tests.rs | 1 + stackslib/src/chainstate/nakamoto/mod.rs | 2 ++ stackslib/src/chainstate/stacks/db/blocks.rs | 11 ++++++++++- stackslib/src/chainstate/stacks/db/mod.rs | 1 + stackslib/src/cost_estimates/tests/common.rs | 1 + stackslib/src/net/mod.rs | 1 + testnet/stacks-node/src/event_dispatcher.rs | 10 ++++++++++ testnet/stacks-node/src/run_loop/mod.rs | 1 + 9 files changed, 28 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index c8415e3f69..5b7c7e89b6 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -180,6 +180,7 @@ pub trait BlockEventDispatcher { reward_set_data: &Option, signer_bitvec: &Option>, block_timestamp: Option, + coinbase_height: u64, ); /// called whenever a burn block is about to be diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index be5f862839..d566113fad 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -431,6 +431,7 @@ impl BlockEventDispatcher for NullEventDispatcher { _reward_set_data: &Option, _signer_bitvec: &Option>, _block_timestamp: Option, + _coinbase_height: u64, ) { assert!( false, diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 8abbe058f5..b8d0441591 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2118,6 +2118,7 @@ impl NakamotoChainState { &reward_set_data, &Some(signer_bitvec), Some(block_timestamp), + receipt.coinbase_height, ); } @@ -4382,6 +4383,7 @@ impl NakamotoChainState { evaluated_epoch, epoch_transition: applied_epoch_transition, signers_updated, + coinbase_height, }; Ok((epoch_receipt, clarity_commit, reward_set_data)) diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index a45a8d60cb..115678ada8 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -191,6 +191,7 @@ impl BlockEventDispatcher for DummyEventDispatcher { _reward_set_data: &Option, _signer_bitvec: &Option>, _block_timestamp: Option, + _coinbase_height: u64, ) { assert!( false, @@ -5809,8 +5810,10 @@ impl StacksChainState { .map(|(_, _, _, info)| info.clone()); if do_not_advance { + let regtest_genesis_header = StacksHeaderInfo::regtest_genesis(); + let coinbase_height = regtest_genesis_header.stacks_block_height; let epoch_receipt = StacksEpochReceipt { - header: StacksHeaderInfo::regtest_genesis(), + header: regtest_genesis_header, tx_receipts, matured_rewards, matured_rewards_info, @@ -5822,6 +5825,7 @@ impl StacksChainState { evaluated_epoch, epoch_transition: applied_epoch_transition, signers_updated: false, + coinbase_height, }; return Ok((epoch_receipt, clarity_commit, None)); @@ -5898,6 +5902,9 @@ impl StacksChainState { ); set_last_execution_cost_observed(&block_execution_cost, &block_limit); + // // The coinbase height is the same as the stacks block height in epoch 2.x + let coinbase_height = new_tip.stacks_block_height; + let epoch_receipt = StacksEpochReceipt { header: new_tip, tx_receipts, @@ -5911,6 +5918,7 @@ impl StacksChainState { evaluated_epoch, epoch_transition: applied_epoch_transition, signers_updated, + coinbase_height, }; Ok((epoch_receipt, clarity_commit, reward_set_data)) @@ -6411,6 +6419,7 @@ impl StacksChainState { &reward_set_data, &None, None, + next_staging_block.height, ); } diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 857bfaead4..160e2dc60e 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -230,6 +230,7 @@ pub struct StacksEpochReceipt { pub epoch_transition: bool, /// Was .signers updated during this block? pub signers_updated: bool, + pub coinbase_height: u64, } /// Headers we serve over the network diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index fe6527ff53..01f6c32ec7 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -52,5 +52,6 @@ pub fn make_block_receipt(tx_receipts: Vec) -> StacksE evaluated_epoch: StacksEpochId::Epoch20, epoch_transition: false, signers_updated: false, + coinbase_height: 1234, } } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index f0c5003740..2210160bee 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2037,6 +2037,7 @@ pub mod test { reward_set_data: &Option, _signer_bitvec: &Option>, _block_timestamp: Option, + _coinbase_height: u64, ) { self.blocks.lock().unwrap().push(TestEventObserverBlock { block: block.clone(), diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 43714f3573..e9fb79db88 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -748,6 +748,7 @@ impl EventObserver { reward_set_data: &Option, signer_bitvec_opt: &Option>, block_timestamp: Option, + coinbase_height: u64, ) -> serde_json::Value { // Serialize events to JSON let serialized_events: Vec = filtered_events @@ -809,6 +810,7 @@ impl EventObserver { "signer_bitvec": signer_bitvec_value, "reward_set": reward_set_value, "cycle_number": cycle_number_value, + "tenure_height": coinbase_height, }); let as_object_mut = payload.as_object_mut().unwrap(); @@ -1008,6 +1010,7 @@ impl BlockEventDispatcher for EventDispatcher { reward_set_data: &Option, signer_bitvec: &Option>, block_timestamp: Option, + coinbase_height: u64, ) { self.process_chain_tip( block, @@ -1026,6 +1029,7 @@ impl BlockEventDispatcher for EventDispatcher { reward_set_data, signer_bitvec, block_timestamp, + coinbase_height, ); } @@ -1209,6 +1213,7 @@ impl EventDispatcher { reward_set_data: &Option, signer_bitvec: &Option>, block_timestamp: Option, + coinbase_height: u64, ) { let all_receipts = receipts.to_owned(); let (dispatch_matrix, events) = self.create_dispatch_matrix_and_event_vector(&all_receipts); @@ -1261,6 +1266,7 @@ impl EventDispatcher { reward_set_data, signer_bitvec, block_timestamp, + coinbase_height, ); // Send payload @@ -1669,6 +1675,7 @@ mod test { let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); let block_timestamp = Some(123456); + let coinbase_height = 1234; let payload = observer.make_new_block_processed_payload( filtered_events, @@ -1687,6 +1694,7 @@ mod test { &None, &Some(signer_bitvec.clone()), block_timestamp, + coinbase_height, ); assert_eq!( payload @@ -1737,6 +1745,7 @@ mod test { let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); let block_timestamp = Some(123456); + let coinbase_height = 1234; let payload = observer.make_new_block_processed_payload( filtered_events, @@ -1755,6 +1764,7 @@ mod test { &None, &Some(signer_bitvec.clone()), block_timestamp, + coinbase_height, ); let event_signer_signature = payload diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index 2be02659cd..b2b9aa3f75 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -198,5 +198,6 @@ pub fn announce_boot_receipts( &None, &None, None, + 0, ); } From 2b36268ef97ce1308c8a796f788efc266a927cae Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 18 Oct 2024 09:31:51 -0400 Subject: [PATCH 1375/1400] chore: revert change moving request creation out of loop This avoids a clone in the happy case (where no retry is needed). --- testnet/stacks-node/src/event_dispatcher.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 64c2b5ce90..8142bc2166 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -434,16 +434,16 @@ impl EventObserver { // Cap the backoff at 3x the timeout let max_backoff = timeout.saturating_mul(3); - let mut request = StacksHttpRequest::new_for_peer( - peerhost.clone(), - "POST".into(), - url.path().into(), - HttpRequestContents::new().payload_json(payload.clone()), - ) - .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); - request.add_header("Connection".into(), "close".into()); loop { - match send_http_request(host, port, request.clone(), timeout) { + let mut request = StacksHttpRequest::new_for_peer( + peerhost.clone(), + "POST".into(), + url.path().into(), + HttpRequestContents::new().payload_json(payload.clone()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + match send_http_request(host, port, request, timeout) { Ok(response) => { if response.preamble().status_code == 200 { debug!( From cbb9456072ddbe2e8df441d741c8d2d056922e40 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Fri, 18 Oct 2024 16:33:28 +0200 Subject: [PATCH 1376/1400] test: verify tenure_heights in new block events --- .../src/tests/nakamoto_integrations.rs | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e13a2ae27d..bc03b26784 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4866,6 +4866,45 @@ fn burn_ops_integration_test() { "Stack-stx tx without a signer_key shouldn't have been submitted" ); assert!(transfer_stx_found, "Expected transfer STX op"); + + let mut last_tenture_height = 0; + for block in blocks.iter() { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + let mut block_has_tenure_change = false; + for tx in transactions.iter().rev() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx != "0x00" { + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + if let TransactionPayload::TenureChange(_tenure_change) = parsed.payload { + block_has_tenure_change = true; + } + } + } + // if `signer_bitvec` is set on a block, then it's a nakamoto block + let is_nakamoto_block = block.get("signer_bitvec").is_some(); + + let tenure_height = block.get("tenure_height").unwrap().as_u64().unwrap(); + let block_height = block.get("block_height").unwrap().as_u64().unwrap(); + + if is_nakamoto_block { + if block_has_tenure_change { + // tenure change block should have tenure height 1 more than the last tenure height + assert_eq!(last_tenture_height + 1, tenure_height); + last_tenture_height = tenure_height; + } else { + // tenure extend block should have the same tenure height as the last tenure height + assert_eq!(last_tenture_height, tenure_height); + } + last_tenture_height = block.get("block_height").unwrap().as_u64().unwrap(); + } else { + // epoch2.x block tenure height is the same as the block height + assert_eq!(tenure_height, block_height); + last_tenture_height = block_height; + } + } + assert!(delegate_stx_found, "Expected delegate STX op"); let sortdb = btc_regtest_controller.sortdb_mut(); let sortdb_conn = sortdb.conn(); From 23057d9aa876256f6c351be4c404e93cb33a5abb Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Fri, 18 Oct 2024 18:17:12 +0200 Subject: [PATCH 1377/1400] chore: fix test --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index bc03b26784..dfbedc2478 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4879,6 +4879,7 @@ fn burn_ops_integration_test() { StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); if let TransactionPayload::TenureChange(_tenure_change) = parsed.payload { block_has_tenure_change = true; + continue; } } } @@ -4897,7 +4898,6 @@ fn burn_ops_integration_test() { // tenure extend block should have the same tenure height as the last tenure height assert_eq!(last_tenture_height, tenure_height); } - last_tenture_height = block.get("block_height").unwrap().as_u64().unwrap(); } else { // epoch2.x block tenure height is the same as the block height assert_eq!(tenure_height, block_height); From e27254033af4e613a0f15ba21be1d1b28ef8aa33 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Fri, 18 Oct 2024 19:04:34 +0200 Subject: [PATCH 1378/1400] chore: move test --- .../src/tests/nakamoto_integrations.rs | 77 ++++++++++--------- 1 file changed, 39 insertions(+), 38 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index dfbedc2478..e2d6e0199a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4867,44 +4867,6 @@ fn burn_ops_integration_test() { ); assert!(transfer_stx_found, "Expected transfer STX op"); - let mut last_tenture_height = 0; - for block in blocks.iter() { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - let mut block_has_tenure_change = false; - for tx in transactions.iter().rev() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx != "0x00" { - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = - StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); - if let TransactionPayload::TenureChange(_tenure_change) = parsed.payload { - block_has_tenure_change = true; - continue; - } - } - } - // if `signer_bitvec` is set on a block, then it's a nakamoto block - let is_nakamoto_block = block.get("signer_bitvec").is_some(); - - let tenure_height = block.get("tenure_height").unwrap().as_u64().unwrap(); - let block_height = block.get("block_height").unwrap().as_u64().unwrap(); - - if is_nakamoto_block { - if block_has_tenure_change { - // tenure change block should have tenure height 1 more than the last tenure height - assert_eq!(last_tenture_height + 1, tenure_height); - last_tenture_height = tenure_height; - } else { - // tenure extend block should have the same tenure height as the last tenure height - assert_eq!(last_tenture_height, tenure_height); - } - } else { - // epoch2.x block tenure height is the same as the block height - assert_eq!(tenure_height, block_height); - last_tenture_height = block_height; - } - } - assert!(delegate_stx_found, "Expected delegate STX op"); let sortdb = btc_regtest_controller.sortdb_mut(); let sortdb_conn = sortdb.conn(); @@ -8366,6 +8328,45 @@ fn check_block_info() { "Contract 3 should be able to fetch the StacksBlockId of the tip" ); + let blocks = test_observer::get_blocks(); + let mut last_tenture_height = 0; + for block in blocks.iter() { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + let mut block_has_tenure_change = false; + for tx in transactions.iter().rev() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx != "0x00" { + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + if let TransactionPayload::TenureChange(_tenure_change) = parsed.payload { + block_has_tenure_change = true; + continue; + } + } + } + // if `signer_bitvec` is set on a block, then it's a nakamoto block + let is_nakamoto_block = block.get("signer_bitvec").is_some(); + + let tenure_height = block.get("tenure_height").unwrap().as_u64().unwrap(); + let block_height = block.get("block_height").unwrap().as_u64().unwrap(); + + if is_nakamoto_block { + if block_has_tenure_change { + // tenure change block should have tenure height 1 more than the last tenure height + assert_eq!(last_tenture_height + 1, tenure_height); + last_tenture_height = tenure_height; + } else { + // tenure extend block should have the same tenure height as the last tenure height + assert_eq!(last_tenture_height, tenure_height); + } + } else { + // epoch2.x block tenure height is the same as the block height + assert_eq!(tenure_height, block_height); + last_tenture_height = block_height; + } + } + coord_channel .lock() .expect("Mutex poisoned") From 858856851258241218d237c1ba68b017057ec77a Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Fri, 18 Oct 2024 22:22:08 +0200 Subject: [PATCH 1379/1400] chore: fix test --- .../stacks-node/src/tests/nakamoto_integrations.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e2d6e0199a..90334cce9b 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -8328,7 +8328,9 @@ fn check_block_info() { "Contract 3 should be able to fetch the StacksBlockId of the tip" ); - let blocks = test_observer::get_blocks(); + let mut blocks = test_observer::get_blocks(); + blocks.sort_by_key(|block| block["block_height"].as_u64().unwrap()); + let mut last_tenture_height = 0; for block in blocks.iter() { let transactions = block.get("transactions").unwrap().as_array().unwrap(); @@ -8346,11 +8348,15 @@ fn check_block_info() { } } // if `signer_bitvec` is set on a block, then it's a nakamoto block - let is_nakamoto_block = block.get("signer_bitvec").is_some(); - + let is_nakamoto_block = block.get("signer_bitvec").map_or(false, |v| !v.is_null()); let tenure_height = block.get("tenure_height").unwrap().as_u64().unwrap(); let block_height = block.get("block_height").unwrap().as_u64().unwrap(); + if block_height == 0 { + // genesis block + continue; + } + if is_nakamoto_block { if block_has_tenure_change { // tenure change block should have tenure height 1 more than the last tenure height From 540fcd4481d365952ae3904b8d5aa78b76536992 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 18 Oct 2024 16:13:33 -0700 Subject: [PATCH 1380/1400] chore: convert BlockAccepted to a struct --- libsigner/src/v0/messages.rs | 107 +++++++++++++++--- stacks-signer/src/v0/signer.rs | 32 +++--- .../src/nakamoto_node/sign_coordinator.rs | 13 ++- testnet/stacks-node/src/tests/signer/mod.rs | 11 +- testnet/stacks-node/src/tests/signer/v0.rs | 41 +++---- 5 files changed, 142 insertions(+), 62 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 47d317992d..102da15a1d 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -603,7 +603,7 @@ impl From<&BlockResponse> for BlockResponseTypePrefix { #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BlockResponse { /// The Nakamoto block was accepted and therefore signed - Accepted((Sha512Trunc256Sum, MessageSignature)), + Accepted(BlockAccepted), /// The Nakamoto block was rejected and therefore not signed Rejected(BlockRejection), } @@ -616,7 +616,7 @@ impl std::fmt::Display for BlockResponse { write!( f, "BlockAccepted: signer_sighash = {}, signature = {}", - a.0, a.1 + a.signer_signature_hash, a.signature ) } BlockResponse::Rejected(r) => { @@ -633,7 +633,10 @@ impl std::fmt::Display for BlockResponse { impl BlockResponse { /// Create a new accepted BlockResponse for the provided block signer signature hash and signature pub fn accepted(hash: Sha512Trunc256Sum, sig: MessageSignature) -> Self { - Self::Accepted((hash, sig)) + Self::Accepted(BlockAccepted { + signer_signature_hash: hash, + signature: sig, + }) } /// Create a new rejected BlockResponse for the provided block signer signature hash and rejection code and sign it with the provided private key @@ -651,9 +654,8 @@ impl StacksMessageCodec for BlockResponse { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &(BlockResponseTypePrefix::from(self) as u8))?; match self { - BlockResponse::Accepted((hash, sig)) => { - write_next(fd, hash)?; - write_next(fd, sig)?; + BlockResponse::Accepted(accepted) => { + write_next(fd, accepted)?; } BlockResponse::Rejected(rejection) => { write_next(fd, rejection)?; @@ -667,9 +669,8 @@ impl StacksMessageCodec for BlockResponse { let type_prefix = BlockResponseTypePrefix::try_from(type_prefix_byte)?; let response = match type_prefix { BlockResponseTypePrefix::Accepted => { - let hash = read_next::(fd)?; - let sig = read_next::(fd)?; - BlockResponse::Accepted((hash, sig)) + let accepted = read_next::(fd)?; + BlockResponse::Accepted(accepted) } BlockResponseTypePrefix::Rejected => { let rejection = read_next::(fd)?; @@ -680,6 +681,32 @@ impl StacksMessageCodec for BlockResponse { } } +/// A rejection response from a signer for a proposed block +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BlockAccepted { + /// The signer signature hash of the block that was accepted + pub signer_signature_hash: Sha512Trunc256Sum, + /// The signer's signature across the acceptance + pub signature: MessageSignature, +} + +impl StacksMessageCodec for BlockAccepted { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.signer_signature_hash)?; + write_next(fd, &self.signature)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let signer_signature_hash = read_next::(fd)?; + let signature = read_next::(fd)?; + Ok(Self { + signer_signature_hash, + signature, + }) + } +} + /// A rejection response from a signer for a proposed block #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BlockRejection { @@ -894,7 +921,7 @@ mod test { use clarity::consts::CHAIN_ID_MAINNET; use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; use clarity::types::PrivateKey; - use clarity::util::hash::MerkleTree; + use clarity::util::hash::{hex_bytes, MerkleTree}; use clarity::util::secp256k1::MessageSignature; use rand::rngs::mock; use rand::{thread_rng, Rng, RngCore}; @@ -958,8 +985,11 @@ mod test { #[test] fn serde_block_response() { - let response = - BlockResponse::Accepted((Sha512Trunc256Sum([0u8; 32]), MessageSignature::empty())); + let accepted = BlockAccepted { + signer_signature_hash: Sha512Trunc256Sum([0u8; 32]), + signature: MessageSignature::empty(), + }; + let response = BlockResponse::Accepted(accepted); let serialized_response = response.serialize_to_vec(); let deserialized_response = read_next::(&mut &serialized_response[..]) .expect("Failed to deserialize BlockResponse"); @@ -979,10 +1009,11 @@ mod test { #[test] fn serde_signer_message() { - let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(( - Sha512Trunc256Sum([2u8; 32]), - MessageSignature::empty(), - ))); + let accepted = BlockAccepted { + signer_signature_hash: Sha512Trunc256Sum([2u8; 32]), + signature: MessageSignature::empty(), + }; + let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)); let serialized_signer_message = signer_message.serialize_to_vec(); let deserialized_signer_message = read_next::(&mut &serialized_signer_message[..]) @@ -1122,4 +1153,48 @@ mod test { .expect("Failed to deserialize MockSignData"); assert_eq!(mock_block, deserialized_data); } + + #[test] + fn test_backwards_compatibility() { + let block_rejected_hex = "010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3"; + let block_rejected_bytes = hex_bytes(&block_rejected_hex).unwrap(); + let block_accepted_hex = "010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8"; + let block_accepted_bytes = hex_bytes(&block_accepted_hex).unwrap(); + let block_rejected = read_next::(&mut &block_rejected_bytes[..]) + .expect("Failed to deserialize BlockRejection"); + let block_accepted = read_next::(&mut &block_accepted_bytes[..]) + .expect("Failed to deserialize BlockRejection"); + + assert_eq!( + block_rejected, + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason_code: RejectCode::ValidationFailed(ValidateRejectCode::NoSuchTenure), + reason: "Block is not a tenure-start block, and has an unrecognized tenure consensus hash".to_string(), + signer_signature_hash: Sha512Trunc256Sum([145, 249, 95, 132, 183, 4, 95, 125, 206, 119, 87, 5, 44, 170, 152, 110, 240, 66, 203, 88, 247, 223, 80, 49, 163, 181, 181, 208, 227, 221, 166, 62]), + chain_id: CHAIN_ID_TESTNET, + signature: MessageSignature([ + 0, 111, 179, 73, 33, 46, 26, 26, 241, 163, 199, 18, 135, 141, 81, 89, 181, 236, + 20, 99, 106, 219, 111, 112, 190, 0, 166, 218, 74, 212, 248, 138, 153, 52, 216, + 169, 171, 178, 41, 98, 13, 216, 224, 242, 37, 214, 52, 1, 227, 108, 100, 129, + 127, 178, 158, 108, 5, 89, 29, 203, 233, 92, 81, 45, 243, + ]), + })) + ); + + assert_eq!( + block_accepted, + SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash: Sha512Trunc256Sum([ + 17, 113, 113, 73, 103, 124, 42, 201, 125, 21, 174, 89, 84, 247, 167, 22, 241, + 1, 0, 185, 203, 129, 162, 191, 39, 85, 27, 47, 46, 84, 239, 25 + ]), + signature: MessageSignature([ + 0, 28, 105, 79, 129, 52, 197, 201, 15, 47, 43, 205, 51, 14, 159, 66, 50, 4, + 136, 79, 0, 27, 93, 240, 5, 15, 54, 162, 196, 255, 121, 221, 147, 82, 43, 178, + 174, 57, 94, 168, 125, 228, 150, 72, 134, 68, 117, 7, 193, 131, 116, 183, 164, + 110, 226, 227, 113, 233, 191, 51, 47, 7, 6, 163, 232 + ]), + })) + ); + } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 5d068fad0f..fe626cb11f 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -25,14 +25,13 @@ use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; use clarity::util::secp256k1::Secp256k1PublicKey; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, RejectCode, - SignerMessage, + BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, + RejectCode, SignerMessage, }; use libsigner::{BlockProposal, SignerEvent}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::get_epoch_time_secs; -use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, error, info, warn}; @@ -494,8 +493,8 @@ impl Signer { block_response: &BlockResponse, ) { match block_response { - BlockResponse::Accepted((block_hash, signature)) => { - self.handle_block_signature(stacks_client, block_hash, signature); + BlockResponse::Accepted(accepted) => { + self.handle_block_signature(stacks_client, accepted); } BlockResponse::Rejected(block_rejection) => { self.handle_block_rejection(block_rejection); @@ -547,13 +546,13 @@ impl Signer { self.signer_db .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + let accepted = BlockAccepted { + signer_signature_hash: block_info.signer_signature_hash(), + signature, + }; // have to save the signature _after_ the block info - self.handle_block_signature( - stacks_client, - &block_info.signer_signature_hash(), - &signature, - ); - Some(BlockResponse::accepted(signer_signature_hash, signature)) + self.handle_block_signature(stacks_client, &accepted); + Some(BlockResponse::Accepted(accepted)) } /// Handle the block validate reject response. Returns our block response if we have one @@ -739,12 +738,11 @@ impl Signer { } /// Handle an observed signature from another signer - fn handle_block_signature( - &mut self, - stacks_client: &StacksClient, - block_hash: &Sha512Trunc256Sum, - signature: &MessageSignature, - ) { + fn handle_block_signature(&mut self, stacks_client: &StacksClient, accepted: &BlockAccepted) { + let BlockAccepted { + signer_signature_hash: block_hash, + signature, + } = accepted; debug!("{self}: Received a block-accept signature: ({block_hash}, {signature})"); // Have we already processed this block? diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 2694d1d9ca..1954f6eb12 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -20,7 +20,9 @@ use std::sync::Arc; use std::time::Duration; use hashbrown::{HashMap, HashSet}; -use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0}; +use libsigner::v0::messages::{ + BlockAccepted, BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0, +}; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -450,10 +452,11 @@ impl SignCoordinator { } match message { - SignerMessageV0::BlockResponse(BlockResponse::Accepted(( - response_hash, - signature, - ))) => { + SignerMessageV0::BlockResponse(BlockResponse::Accepted(accepted)) => { + let BlockAccepted { + signer_signature_hash: response_hash, + signature, + } = accepted; let block_sighash = block.header.signer_signature_hash(); if block_sighash != response_hash { warn!( diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 2e67234285..5fb318e234 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -36,7 +36,7 @@ use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; use clarity::vm::types::PrincipalData; -use libsigner::v0::messages::{BlockResponse, SignerMessage}; +use libsigner::v0::messages::{BlockAccepted, BlockResponse, SignerMessage}; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -578,10 +578,11 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + let BlockAccepted { + signer_signature_hash: hash, + signature, + } = accepted; if hash == *signer_signature_hash && expected_signers.iter().any(|pk| { pk.verify(hash.bits(), &signature) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 753883bead..6fc4c7078a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3411,7 +3411,7 @@ fn duplicate_signers() { }) .filter_map(|message| match message { SignerMessage::BlockResponse(BlockResponse::Accepted(m)) => { - info!("Message(accepted): {message:?}"); + info!("Message(accepted): {:?}", &m); Some(m) } _ => { @@ -3425,20 +3425,23 @@ fn duplicate_signers() { info!("------------------------- Assert there are {unique_signers} unique signatures and recovered pubkeys -------------------------"); // Pick a message hash - let (selected_sighash, _) = signer_accepted_responses + let accepted = signer_accepted_responses .iter() - .min_by_key(|(sighash, _)| *sighash) - .copied() + .min_by_key(|accepted| accepted.signer_signature_hash) .expect("No `BlockResponse::Accepted` messages recieved"); + let selected_sighash = accepted.signer_signature_hash; // Filter only resonses for selected block and collect unique pubkeys and signatures let (pubkeys, signatures): (HashSet<_>, HashSet<_>) = signer_accepted_responses .into_iter() - .filter(|(hash, _)| *hash == selected_sighash) - .map(|(msg, sig)| { - let pubkey = Secp256k1PublicKey::recover_to_pubkey(msg.bits(), &sig) - .expect("Failed to recover pubkey"); - (pubkey, sig) + .filter(|accepted| accepted.signer_signature_hash == selected_sighash) + .map(|accepted| { + let pubkey = Secp256k1PublicKey::recover_to_pubkey( + accepted.signer_signature_hash.bits(), + &accepted.signature, + ) + .expect("Failed to recover pubkey"); + (pubkey, accepted.signature) }) .unzip(); @@ -4652,10 +4655,11 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); match message { - SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => { - ignoring_signers - .iter() - .find(|key| key.verify(hash.bits(), &signature).is_ok()) + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + ignoring_signers.iter().find(|key| { + key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) + .is_ok() + }) } _ => None, } @@ -4896,12 +4900,11 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(( - hash, - signature, - ))) => { - if block.header.signer_signature_hash() == hash { - Some(signature) + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + if block.header.signer_signature_hash() + == accepted.signer_signature_hash + { + Some(accepted.signature) } else { None } From 5f4c42fc5a28bb8f19a2a212f6587d2f1c64b9bb Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 18 Oct 2024 16:54:27 -0700 Subject: [PATCH 1381/1400] feat: add signer message metadata to block responses --- libsigner/src/libsigner.rs | 10 ++ libsigner/src/v0/messages.rs | 148 +++++++++++++++++- stacks-signer/src/cli.rs | 3 +- stacks-signer/src/client/stackerdb.rs | 5 +- stacks-signer/src/lib.rs | 12 +- stacks-signer/src/main.rs | 3 +- stacks-signer/src/monitoring/server.rs | 2 +- stacks-signer/src/v0/signer.rs | 11 +- .../src/nakamoto_node/sign_coordinator.rs | 7 +- testnet/stacks-node/src/tests/signer/mod.rs | 17 +- testnet/stacks-node/src/tests/signer/v0.rs | 4 +- 11 files changed, 183 insertions(+), 39 deletions(-) diff --git a/libsigner/src/libsigner.rs b/libsigner/src/libsigner.rs index 878d428bfc..b1b760af6d 100644 --- a/libsigner/src/libsigner.rs +++ b/libsigner/src/libsigner.rs @@ -49,8 +49,10 @@ use std::cmp::Eq; use std::fmt::Debug; use std::hash::Hash; +use blockstack_lib::version_string; use clarity::codec::StacksMessageCodec; use clarity::vm::types::QualifiedContractIdentifier; +use lazy_static::lazy_static; pub use crate::error::{EventError, RPCError}; pub use crate::events::{ @@ -74,3 +76,11 @@ pub trait SignerMessage: StacksMessageCodec { /// The contract identifier for the message slot in stacker db fn msg_id(&self) -> Option; } + +lazy_static! { + /// The version string for the signer + pub static ref VERSION_STRING: String = { + let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); + version_string("stacks-signer", pkg_version) + }; +} diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 102da15a1d..264a4cc107 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -70,7 +70,7 @@ use crate::http::{decode_http_body, decode_http_request}; use crate::stacks_common::types::PublicKey; use crate::{ BlockProposal, EventError, MessageSlotID as MessageSlotIDTrait, - SignerMessage as SignerMessageTrait, + SignerMessage as SignerMessageTrait, VERSION_STRING, }; define_u8_enum!( @@ -615,15 +615,15 @@ impl std::fmt::Display for BlockResponse { BlockResponse::Accepted(a) => { write!( f, - "BlockAccepted: signer_sighash = {}, signature = {}", - a.signer_signature_hash, a.signature + "BlockAccepted: signer_sighash = {}, signature = {}, version = {}", + a.signer_signature_hash, a.signature, a.metadata.server_version ) } BlockResponse::Rejected(r) => { write!( f, - "BlockRejected: signer_sighash = {}, code = {}, reason = {}, signature = {}", - r.reason_code, r.reason, r.signer_signature_hash, r.signature + "BlockRejected: signer_sighash = {}, code = {}, reason = {}, signature = {}, version = {}", + r.reason_code, r.reason, r.signer_signature_hash, r.signature, r.metadata.server_version ) } } @@ -636,6 +636,7 @@ impl BlockResponse { Self::Accepted(BlockAccepted { signer_signature_hash: hash, signature: sig, + metadata: SignerMessageMetadata::default(), }) } @@ -681,6 +682,57 @@ impl StacksMessageCodec for BlockResponse { } } +/// Metadata for signer messages +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct SignerMessageMetadata { + /// The signer's server version + pub server_version: String, +} + +/// To ensure backwards compatibility, when deserializing, +/// if no bytes are found, return empty metadata +impl StacksMessageCodec for SignerMessageMetadata { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.server_version.as_bytes().to_vec())?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + match read_next::, _>(fd) { + Ok(server_version) => { + let server_version = String::from_utf8(server_version).map_err(|e| { + CodecError::DeserializeError(format!( + "Failed to decode server version: {:?}", + &e + )) + })?; + Ok(Self { server_version }) + } + Err(_) => { + // For backwards compatibility, return empty metadata + Ok(Self::empty()) + } + } + } +} + +impl Default for SignerMessageMetadata { + fn default() -> Self { + Self { + server_version: VERSION_STRING.to_string(), + } + } +} + +impl SignerMessageMetadata { + /// Empty metadata + pub fn empty() -> Self { + Self { + server_version: String::new(), + } + } +} + /// A rejection response from a signer for a proposed block #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BlockAccepted { @@ -688,25 +740,41 @@ pub struct BlockAccepted { pub signer_signature_hash: Sha512Trunc256Sum, /// The signer's signature across the acceptance pub signature: MessageSignature, + /// Signer message metadata + pub metadata: SignerMessageMetadata, } impl StacksMessageCodec for BlockAccepted { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &self.signer_signature_hash)?; write_next(fd, &self.signature)?; + write_next(fd, &self.metadata)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { let signer_signature_hash = read_next::(fd)?; let signature = read_next::(fd)?; + let metadata = read_next::(fd)?; Ok(Self { signer_signature_hash, signature, + metadata, }) } } +impl BlockAccepted { + /// Create a new BlockAccepted for the provided block signer signature hash and signature + pub fn new(signer_signature_hash: Sha512Trunc256Sum, signature: MessageSignature) -> Self { + Self { + signer_signature_hash, + signature, + metadata: SignerMessageMetadata::default(), + } + } +} + /// A rejection response from a signer for a proposed block #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BlockRejection { @@ -720,6 +788,8 @@ pub struct BlockRejection { pub signature: MessageSignature, /// The chain id pub chain_id: u32, + /// Signer message metadata + pub metadata: SignerMessageMetadata, } impl BlockRejection { @@ -741,6 +811,7 @@ impl BlockRejection { signer_signature_hash, signature: MessageSignature::empty(), chain_id, + metadata: SignerMessageMetadata::default(), }; rejection .sign(private_key) @@ -765,6 +836,7 @@ impl BlockRejection { signer_signature_hash: reject.signer_signature_hash, chain_id, signature: MessageSignature::empty(), + metadata: SignerMessageMetadata::default(), }; rejection .sign(private_key) @@ -814,6 +886,7 @@ impl StacksMessageCodec for BlockRejection { write_next(fd, &self.signer_signature_hash)?; write_next(fd, &self.chain_id)?; write_next(fd, &self.signature)?; + write_next(fd, &self.metadata)?; Ok(()) } @@ -826,12 +899,14 @@ impl StacksMessageCodec for BlockRejection { let signer_signature_hash = read_next::(fd)?; let chain_id = read_next::(fd)?; let signature = read_next::(fd)?; + let metadata = read_next::(fd)?; Ok(Self { reason, reason_code, signer_signature_hash, chain_id, signature, + metadata, }) } } @@ -988,6 +1063,7 @@ mod test { let accepted = BlockAccepted { signer_signature_hash: Sha512Trunc256Sum([0u8; 32]), signature: MessageSignature::empty(), + metadata: SignerMessageMetadata::default(), }; let response = BlockResponse::Accepted(accepted); let serialized_response = response.serialize_to_vec(); @@ -1012,6 +1088,7 @@ mod test { let accepted = BlockAccepted { signer_signature_hash: Sha512Trunc256Sum([2u8; 32]), signature: MessageSignature::empty(), + metadata: SignerMessageMetadata::default(), }; let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)); let serialized_signer_message = signer_message.serialize_to_vec(); @@ -1178,6 +1255,55 @@ mod test { 169, 171, 178, 41, 98, 13, 216, 224, 242, 37, 214, 52, 1, 227, 108, 100, 129, 127, 178, 158, 108, 5, 89, 29, 203, 233, 92, 81, 45, 243, ]), + metadata: SignerMessageMetadata::empty(), + })) + ); + + assert_eq!( + block_accepted, + SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash: Sha512Trunc256Sum([ + 17, 113, 113, 73, 103, 124, 42, 201, 125, 21, 174, 89, 84, 247, 167, 22, 241, + 1, 0, 185, 203, 129, 162, 191, 39, 85, 27, 47, 46, 84, 239, 25 + ]), + metadata: SignerMessageMetadata::empty(), + signature: MessageSignature([ + 0, 28, 105, 79, 129, 52, 197, 201, 15, 47, 43, 205, 51, 14, 159, 66, 50, 4, + 136, 79, 0, 27, 93, 240, 5, 15, 54, 162, 196, 255, 121, 221, 147, 82, 43, 178, + 174, 57, 94, 168, 125, 228, 150, 72, 134, 68, 117, 7, 193, 131, 116, 183, 164, + 110, 226, 227, 113, 233, 191, 51, 47, 7, 6, 163, 232 + ]), + })) + ); + } + + #[test] + fn test_block_response_metadata() { + let block_rejected_hex = "010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df30000000b48656c6c6f20776f726c64"; + let block_rejected_bytes = hex_bytes(&block_rejected_hex).unwrap(); + let block_accepted_hex = "010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e80000000b48656c6c6f20776f726c64"; + let block_accepted_bytes = hex_bytes(&block_accepted_hex).unwrap(); + let block_rejected = read_next::(&mut &block_rejected_bytes[..]) + .expect("Failed to deserialize BlockRejection"); + let block_accepted = read_next::(&mut &block_accepted_bytes[..]) + .expect("Failed to deserialize BlockRejection"); + + assert_eq!( + block_rejected, + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason_code: RejectCode::ValidationFailed(ValidateRejectCode::NoSuchTenure), + reason: "Block is not a tenure-start block, and has an unrecognized tenure consensus hash".to_string(), + signer_signature_hash: Sha512Trunc256Sum([145, 249, 95, 132, 183, 4, 95, 125, 206, 119, 87, 5, 44, 170, 152, 110, 240, 66, 203, 88, 247, 223, 80, 49, 163, 181, 181, 208, 227, 221, 166, 62]), + chain_id: CHAIN_ID_TESTNET, + signature: MessageSignature([ + 0, 111, 179, 73, 33, 46, 26, 26, 241, 163, 199, 18, 135, 141, 81, 89, 181, 236, + 20, 99, 106, 219, 111, 112, 190, 0, 166, 218, 74, 212, 248, 138, 153, 52, 216, + 169, 171, 178, 41, 98, 13, 216, 224, 242, 37, 214, 52, 1, 227, 108, 100, 129, + 127, 178, 158, 108, 5, 89, 29, 203, 233, 92, 81, 45, 243, + ]), + metadata: SignerMessageMetadata { + server_version: "Hello world".to_string(), + }, })) ); @@ -1188,6 +1314,9 @@ mod test { 17, 113, 113, 73, 103, 124, 42, 201, 125, 21, 174, 89, 84, 247, 167, 22, 241, 1, 0, 185, 203, 129, 162, 191, 39, 85, 27, 47, 46, 84, 239, 25 ]), + metadata: SignerMessageMetadata { + server_version: "Hello world".to_string(), + }, signature: MessageSignature([ 0, 28, 105, 79, 129, 52, 197, 201, 15, 47, 43, 205, 51, 14, 159, 66, 50, 4, 136, 79, 0, 27, 93, 240, 5, 15, 54, 162, 196, 255, 121, 221, 147, 82, 43, 178, @@ -1197,4 +1326,13 @@ mod test { })) ); } + + #[test] + fn test_empty_metadata() { + let serialized_metadata = [0u8; 0]; + let deserialized_metadata = + read_next::(&mut &serialized_metadata[..]) + .expect("Failed to deserialize SignerMessageMetadata"); + assert_eq!(deserialized_metadata, SignerMessageMetadata::empty()); + } } diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index 97829b6977..4e9067498d 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -29,6 +29,7 @@ use clarity::util::hash::Sha256Sum; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; use clarity::vm::Value; +use libsigner::VERSION_STRING; use serde::{Deserialize, Serialize}; use stacks_common::address::{ b58, AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, @@ -38,8 +39,6 @@ use stacks_common::address::{ use stacks_common::define_u8_enum; use stacks_common::types::chainstate::StacksPrivateKey; -use crate::VERSION_STRING; - extern crate alloc; #[derive(Parser, Debug)] diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 0fc43350db..117dd4814f 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -235,7 +235,9 @@ mod tests { use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; - use libsigner::v0::messages::{BlockRejection, BlockResponse, RejectCode, SignerMessage}; + use libsigner::v0::messages::{ + BlockRejection, BlockResponse, RejectCode, SignerMessage, SignerMessageMetadata, + }; use rand::{thread_rng, RngCore}; use super::*; @@ -283,6 +285,7 @@ mod tests { signer_signature_hash: block.header.signer_signature_hash(), chain_id: thread_rng().next_u32(), signature: MessageSignature::empty(), + metadata: SignerMessageMetadata::empty(), }; let signer_message = SignerMessage::BlockResponse(BlockResponse::Rejected(block_reject)); let ack = StackerDBChunkAckData { diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 3555435eaa..246015bfb7 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -48,11 +48,9 @@ mod tests; use std::fmt::{Debug, Display}; use std::sync::mpsc::{channel, Receiver, Sender}; -use blockstack_lib::version_string; use chainstate::SortitionsView; use config::GlobalConfig; -use lazy_static::lazy_static; -use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait}; +use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait, VERSION_STRING}; use runloop::SignerResult; use slog::{slog_info, slog_warn}; use stacks_common::{info, warn}; @@ -61,14 +59,6 @@ use crate::client::StacksClient; use crate::config::SignerConfig; use crate::runloop::RunLoop; -lazy_static! { - /// The version string for the signer - pub static ref VERSION_STRING: String = { - let pkg_version = option_env!("STACKS_NODE_VERSION").unwrap_or(env!("CARGO_PKG_VERSION")); - version_string("stacks-signer", pkg_version) - }; -} - /// A trait which provides a common `Signer` interface for `v0` and `v1` pub trait Signer: Debug + Display { /// Create a new `Signer` instance diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 56f322b185..a23918f6f8 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -32,7 +32,7 @@ use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_ke use clap::Parser; use clarity::types::chainstate::StacksPublicKey; use clarity::util::sleep_ms; -use libsigner::SignerSession; +use libsigner::{SignerSession, VERSION_STRING}; use libstackerdb::StackerDBChunkData; use slog::{slog_debug, slog_error}; use stacks_common::util::hash::to_hex; @@ -47,7 +47,6 @@ use stacks_signer::config::GlobalConfig; use stacks_signer::monitor_signers::SignerMonitor; use stacks_signer::utils::stackerdb_session; use stacks_signer::v0::SpawnedSigner; -use stacks_signer::VERSION_STRING; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; diff --git a/stacks-signer/src/monitoring/server.rs b/stacks-signer/src/monitoring/server.rs index f5e3cceef1..15267c44ee 100644 --- a/stacks-signer/src/monitoring/server.rs +++ b/stacks-signer/src/monitoring/server.rs @@ -19,6 +19,7 @@ use std::time::Instant; use clarity::util::hash::to_hex; use clarity::util::secp256k1::Secp256k1PublicKey; +use libsigner::VERSION_STRING; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::{debug, error, info, warn}; use tiny_http::{Response as HttpResponse, Server as HttpServer}; @@ -28,7 +29,6 @@ use crate::client::{ClientError, StacksClient}; use crate::config::{GlobalConfig, Network}; use crate::monitoring::prometheus::gather_metrics_string; use crate::monitoring::{update_signer_nonce, update_stacks_tip_height}; -use crate::VERSION_STRING; #[derive(thiserror::Error, Debug)] /// Monitoring server errors diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index fe626cb11f..2cb10a9817 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -546,10 +546,7 @@ impl Signer { self.signer_db .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - let accepted = BlockAccepted { - signer_signature_hash: block_info.signer_signature_hash(), - signature, - }; + let accepted = BlockAccepted::new(block_info.signer_signature_hash(), signature); // have to save the signature _after_ the block info self.handle_block_signature(stacks_client, &accepted); Some(BlockResponse::Accepted(accepted)) @@ -742,8 +739,12 @@ impl Signer { let BlockAccepted { signer_signature_hash: block_hash, signature, + metadata, } = accepted; - debug!("{self}: Received a block-accept signature: ({block_hash}, {signature})"); + debug!( + "{self}: Received a block-accept signature: ({block_hash}, {signature}, {})", + metadata.server_version + ); // Have we already processed this block? match self diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 1954f6eb12..697dddeb03 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -456,6 +456,7 @@ impl SignCoordinator { let BlockAccepted { signer_signature_hash: response_hash, signature, + metadata, } = accepted; let block_sighash = block.header.signer_signature_hash(); if block_sighash != response_hash { @@ -466,7 +467,8 @@ impl SignCoordinator { "response_hash" => %response_hash, "slot_id" => slot_id, "reward_cycle_id" => reward_cycle_id, - "response_hash" => %response_hash + "response_hash" => %response_hash, + "server_version" => %metadata.server_version ); continue; } @@ -514,7 +516,8 @@ impl SignCoordinator { "signer_weight" => signer_entry.weight, "total_weight_signed" => total_weight_signed, "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() + "stacks_block_id" => %block.header.block_id(), + "server_version" => metadata.server_version, ); gathered_signatures.insert(slot_id, signature); responded_signers.insert(signer_pubkey); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 5fb318e234..42b894398d 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -36,7 +36,7 @@ use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; use clarity::vm::types::PrincipalData; -use libsigner::v0::messages::{BlockAccepted, BlockResponse, SignerMessage}; +use libsigner::v0::messages::{BlockResponse, SignerMessage}; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -579,17 +579,16 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest { - let BlockAccepted { - signer_signature_hash: hash, - signature, - } = accepted; - if hash == *signer_signature_hash + if accepted.signer_signature_hash == *signer_signature_hash && expected_signers.iter().any(|pk| { - pk.verify(hash.bits(), &signature) - .expect("Failed to verify signature") + pk.verify( + accepted.signer_signature_hash.bits(), + &accepted.signature, + ) + .expect("Failed to verify signature") }) { - Some(signature) + Some(accepted.signature) } else { None } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6fc4c7078a..e02d5b62ca 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -25,7 +25,7 @@ use clarity::vm::StacksEpoch; use libsigner::v0::messages::{ BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, }; -use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use libsigner::{BlockProposal, SignerSession, StackerDBSession, VERSION_STRING}; use stacks::address::AddressHashMode; use stacks::burnchains::Txid; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -2567,10 +2567,12 @@ fn empty_sortition() { }; if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { reason_code, + metadata, .. })) = latest_msg { assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); + assert_eq!(metadata.server_version, VERSION_STRING.to_string()); found_rejections.push(*slot_id); } else { info!("Latest message from slot #{slot_id} isn't a block rejection, will wait to see if the signer updates to a rejection"); From a1208c89e1e8101944af7f1d39f5dd92f302662a Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 18 Oct 2024 16:56:27 -0700 Subject: [PATCH 1382/1400] feat: add metadata to mock signatures --- libsigner/src/v0/messages.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 264a4cc107..2436421fa8 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -439,6 +439,8 @@ pub struct MockSignature { signature: MessageSignature, /// The mock block proposal that was signed across pub mock_proposal: MockProposal, + /// The signature metadata + pub metadata: SignerMessageMetadata, } impl MockSignature { @@ -447,6 +449,7 @@ impl MockSignature { let mut sig = Self { signature: MessageSignature::empty(), mock_proposal, + metadata: SignerMessageMetadata::default(), }; sig.sign(stacks_private_key) .expect("Failed to sign MockSignature"); @@ -476,15 +479,18 @@ impl StacksMessageCodec for MockSignature { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &self.signature)?; self.mock_proposal.consensus_serialize(fd)?; + self.metadata.consensus_serialize(fd)?; Ok(()) } fn consensus_deserialize(fd: &mut R) -> Result { let signature = read_next::(fd)?; let mock_proposal = MockProposal::consensus_deserialize(fd)?; + let metadata = SignerMessageMetadata::consensus_deserialize(fd)?; Ok(Self { signature, mock_proposal, + metadata, }) } } @@ -1206,6 +1212,7 @@ mod test { let mut mock_signature = MockSignature { signature: MessageSignature::empty(), mock_proposal: random_mock_proposal(), + metadata: SignerMessageMetadata::default(), }; mock_signature .sign(&StacksPrivateKey::new()) From 899ff22c2b8205eb2f12af4833aa1b32eb59a321 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 18 Oct 2024 19:34:00 -0700 Subject: [PATCH 1383/1400] Do not assume every tenure has inter_blocks_per_tenure Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 37 ++++++++++++++++------ 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 753883bead..44ff3854b4 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3867,7 +3867,14 @@ fn partial_tenure_fork() { let mut min_miner_2_tenures = u64::MAX; let mut ignore_block = 0; - while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { + let mut miner_1_blocks = 0; + let mut miner_2_blocks = 0; + // Make sure that both miner 1 and 2 mine at least 1 block each + while miner_1_tenures < min_miner_1_tenures + || miner_2_tenures < min_miner_2_tenures + || miner_1_blocks == 0 + || miner_2_blocks == 0 + { if btc_blocks_mined >= max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } @@ -3959,6 +3966,7 @@ fn partial_tenure_fork() { min_miner_1_tenures = miner_1_tenures + 1; } + let mut blocks = inter_blocks_per_tenure; // mine (or attempt to mine) the interim blocks for interim_block_ix in 0..inter_blocks_per_tenure { let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); @@ -4030,6 +4038,7 @@ fn partial_tenure_fork() { Err(e) => { if e.to_string().contains("TooMuchChaining") { info!("TooMuchChaining error, skipping block"); + blocks = interim_block_ix; break; } else { panic!("Failed to submit tx: {}", e); @@ -4044,21 +4053,24 @@ fn partial_tenure_fork() { if miner == 1 { miner_1_tenures += 1; + miner_1_blocks += blocks; } else { miner_2_tenures += 1; + miner_2_blocks += blocks; } - info!( - "Miner 1 tenures: {}, Miner 2 tenures: {}, Miner 1 before: {}, Miner 2 before: {}", - miner_1_tenures, miner_2_tenures, mined_before_1, mined_before_2, - ); let mined_1 = blocks_mined1.load(Ordering::SeqCst); let mined_2 = blocks_mined2.load(Ordering::SeqCst); + + info!( + "Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}, Miner 1 before: {mined_before_1}, Miner 2 before: {mined_before_2}, Miner 1 blocks: {mined_1}, Miner 2 blocks: {mined_2}", + ); + if miner == 1 { - assert_eq!(mined_1, mined_before_1 + inter_blocks_per_tenure + 1); + assert_eq!(mined_1, mined_before_1 + blocks + 1); } else { if miner_2_tenures < min_miner_2_tenures { - assert_eq!(mined_2, mined_before_2 + inter_blocks_per_tenure + 1); + assert_eq!(mined_2, mined_before_2 + blocks + 1); } else { // Miner 2 should have mined 0 blocks after the fork assert_eq!(mined_2, mined_before_2); @@ -4078,11 +4090,16 @@ fn partial_tenure_fork() { assert_eq!(peer_2_height, ignore_block - 1); // The height may be higher than expected due to extra transactions waiting // to be mined during the forking miner's tenure. + // We cannot guarantee due to TooMuchChaining that the miner will mine inter_blocks_per_tenure + let min_num_miner_2_blocks = std::cmp::min( + miner_2_blocks, + min_miner_2_tenures * (inter_blocks_per_tenure + 1), + ); assert!( - peer_1_height - >= pre_nakamoto_peer_1_height - + (miner_1_tenures + min_miner_2_tenures - 1) * (inter_blocks_per_tenure + 1) + miner_2_tenures >= min_miner_2_tenures, + "Miner 2 failed to win its minimum number of tenures" ); + assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_num_miner_2_blocks,); assert_eq!( btc_blocks_mined, u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() From 9f9ad40c8ae57cae5a9dab9e7fd5bf8528ac51e2 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sun, 20 Oct 2024 12:12:50 -0700 Subject: [PATCH 1384/1400] fix: use hex instead of u8 bytes in fixture --- libsigner/src/v0/messages.rs | 48 +++++++++++------------------------- 1 file changed, 14 insertions(+), 34 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 2436421fa8..618aa20937 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -1254,14 +1254,9 @@ mod test { SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { reason_code: RejectCode::ValidationFailed(ValidateRejectCode::NoSuchTenure), reason: "Block is not a tenure-start block, and has an unrecognized tenure consensus hash".to_string(), - signer_signature_hash: Sha512Trunc256Sum([145, 249, 95, 132, 183, 4, 95, 125, 206, 119, 87, 5, 44, 170, 152, 110, 240, 66, 203, 88, 247, 223, 80, 49, 163, 181, 181, 208, 227, 221, 166, 62]), + signer_signature_hash: Sha512Trunc256Sum::from_hex("91f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e").unwrap(), chain_id: CHAIN_ID_TESTNET, - signature: MessageSignature([ - 0, 111, 179, 73, 33, 46, 26, 26, 241, 163, 199, 18, 135, 141, 81, 89, 181, 236, - 20, 99, 106, 219, 111, 112, 190, 0, 166, 218, 74, 212, 248, 138, 153, 52, 216, - 169, 171, 178, 41, 98, 13, 216, 224, 242, 37, 214, 52, 1, 227, 108, 100, 129, - 127, 178, 158, 108, 5, 89, 29, 203, 233, 92, 81, 45, 243, - ]), + signature: MessageSignature::from_hex("006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3").unwrap(), metadata: SignerMessageMetadata::empty(), })) ); @@ -1269,17 +1264,12 @@ mod test { assert_eq!( block_accepted, SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { - signer_signature_hash: Sha512Trunc256Sum([ - 17, 113, 113, 73, 103, 124, 42, 201, 125, 21, 174, 89, 84, 247, 167, 22, 241, - 1, 0, 185, 203, 129, 162, 191, 39, 85, 27, 47, 46, 84, 239, 25 - ]), + signer_signature_hash: Sha512Trunc256Sum::from_hex( + "11717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19" + ) + .unwrap(), metadata: SignerMessageMetadata::empty(), - signature: MessageSignature([ - 0, 28, 105, 79, 129, 52, 197, 201, 15, 47, 43, 205, 51, 14, 159, 66, 50, 4, - 136, 79, 0, 27, 93, 240, 5, 15, 54, 162, 196, 255, 121, 221, 147, 82, 43, 178, - 174, 57, 94, 168, 125, 228, 150, 72, 134, 68, 117, 7, 193, 131, 116, 183, 164, - 110, 226, 227, 113, 233, 191, 51, 47, 7, 6, 163, 232 - ]), + signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), })) ); } @@ -1300,14 +1290,9 @@ mod test { SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { reason_code: RejectCode::ValidationFailed(ValidateRejectCode::NoSuchTenure), reason: "Block is not a tenure-start block, and has an unrecognized tenure consensus hash".to_string(), - signer_signature_hash: Sha512Trunc256Sum([145, 249, 95, 132, 183, 4, 95, 125, 206, 119, 87, 5, 44, 170, 152, 110, 240, 66, 203, 88, 247, 223, 80, 49, 163, 181, 181, 208, 227, 221, 166, 62]), + signer_signature_hash: Sha512Trunc256Sum::from_hex("91f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e").unwrap(), chain_id: CHAIN_ID_TESTNET, - signature: MessageSignature([ - 0, 111, 179, 73, 33, 46, 26, 26, 241, 163, 199, 18, 135, 141, 81, 89, 181, 236, - 20, 99, 106, 219, 111, 112, 190, 0, 166, 218, 74, 212, 248, 138, 153, 52, 216, - 169, 171, 178, 41, 98, 13, 216, 224, 242, 37, 214, 52, 1, 227, 108, 100, 129, - 127, 178, 158, 108, 5, 89, 29, 203, 233, 92, 81, 45, 243, - ]), + signature: MessageSignature::from_hex("006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3").unwrap(), metadata: SignerMessageMetadata { server_version: "Hello world".to_string(), }, @@ -1317,19 +1302,14 @@ mod test { assert_eq!( block_accepted, SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { - signer_signature_hash: Sha512Trunc256Sum([ - 17, 113, 113, 73, 103, 124, 42, 201, 125, 21, 174, 89, 84, 247, 167, 22, 241, - 1, 0, 185, 203, 129, 162, 191, 39, 85, 27, 47, 46, 84, 239, 25 - ]), + signer_signature_hash: Sha512Trunc256Sum::from_hex( + "11717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19" + ) + .unwrap(), metadata: SignerMessageMetadata { server_version: "Hello world".to_string(), }, - signature: MessageSignature([ - 0, 28, 105, 79, 129, 52, 197, 201, 15, 47, 43, 205, 51, 14, 159, 66, 50, 4, - 136, 79, 0, 27, 93, 240, 5, 15, 54, 162, 196, 255, 121, 221, 147, 82, 43, 178, - 174, 57, 94, 168, 125, 228, 150, 72, 134, 68, 117, 7, 193, 131, 116, 183, 164, - 110, 226, 227, 113, 233, 191, 51, 47, 7, 6, 163, 232 - ]), + signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), })) ); } From 5ae2906816b222ffa4f0546715474e8bfc1e7071 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 21 Oct 2024 11:01:43 -0500 Subject: [PATCH 1385/1400] chore: signers should be more permissive about a slow miner wakeup --- stacks-signer/src/config.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 3392906682..c0514274e1 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -35,6 +35,7 @@ use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 45_000; +const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -239,8 +240,11 @@ impl TryFrom for GlobalConfig { StacksAddress::p2pkh_from_hash(raw_data.network.is_mainnet(), signer_hash); let event_timeout = Duration::from_millis(raw_data.event_timeout_ms.unwrap_or(EVENT_TIMEOUT_MS)); - let first_proposal_burn_block_timing = - Duration::from_secs(raw_data.first_proposal_burn_block_timing_secs.unwrap_or(30)); + let first_proposal_burn_block_timing = Duration::from_secs( + raw_data + .first_proposal_burn_block_timing_secs + .unwrap_or(DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS), + ); let db_path = raw_data.db_path.into(); let metrics_endpoint = match raw_data.metrics_endpoint { From cc8bddc3a58ceb4de48ef40e61f402d112da5fb4 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 21 Oct 2024 14:06:41 -0500 Subject: [PATCH 1386/1400] chore: set 3.0 mainnet activation height --- stackslib/src/core/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index ade8a82589..491ba21ca0 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -97,7 +97,7 @@ pub const BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT: u64 = 791_551; /// This is Epoch-2.5, activation height proposed in SIP-021 pub const BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT: u64 = 840_360; /// This is Epoch-3.0, activation height proposed in SIP-021 -pub const BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT: u64 = 2_000_000; +pub const BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT: u64 = 867_867; pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; From c184f5469cfadb71803410014e9ef3c555d5f219 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 21 Oct 2024 18:08:43 -0400 Subject: [PATCH 1387/1400] Fix default affirmation map settings for 3.0 --- testnet/stacks-node/src/config.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index f3c10b72d3..0658862246 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -853,12 +853,6 @@ impl Config { "Attempted to run mainnet node with `use_test_genesis_chainstate`" )); } - } else if node.require_affirmed_anchor_blocks { - // testnet requires that we use the 2.05 rules for anchor block affirmations, - // because reward cycle 360 (and possibly future ones) has a different anchor - // block choice in 2.05 rules than in 2.1 rules. - debug!("Set `require_affirmed_anchor_blocks` to `false` for non-mainnet config"); - node.require_affirmed_anchor_blocks = false; } if node.stacker || node.miner { @@ -1968,7 +1962,7 @@ impl Default for NodeConfig { marf_defer_hashing: true, pox_sync_sample_secs: 30, use_test_genesis_chainstate: None, - always_use_affirmation_maps: false, + always_use_affirmation_maps: true, require_affirmed_anchor_blocks: true, fault_injection_block_push_fail_probability: None, fault_injection_hide_blocks: false, From c4a88f6298302b06dc86bbd0853a223e99cd5381 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 22 Oct 2024 08:10:07 -0700 Subject: [PATCH 1388/1400] Only check if we advanced pre fork miner 2 blocks forward Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 307a3e8518..1744a3b4a8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3874,12 +3874,9 @@ fn partial_tenure_fork() { let mut miner_1_blocks = 0; let mut miner_2_blocks = 0; - // Make sure that both miner 1 and 2 mine at least 1 block each - while miner_1_tenures < min_miner_1_tenures - || miner_2_tenures < min_miner_2_tenures - || miner_1_blocks == 0 - || miner_2_blocks == 0 - { + let mut min_miner_2_blocks = 0; + + while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { if btc_blocks_mined >= max_nakamoto_tenures { panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); } @@ -3963,6 +3960,7 @@ fn partial_tenure_fork() { // Ensure that miner 2 runs at least one more tenure min_miner_2_tenures = miner_2_tenures + 1; fork_initiated = true; + min_miner_2_blocks = miner_2_blocks; } if miner == 2 && miner_2_tenures == min_miner_2_tenures { // This is the forking tenure. Ensure that miner 1 runs one more @@ -4096,15 +4094,9 @@ fn partial_tenure_fork() { // The height may be higher than expected due to extra transactions waiting // to be mined during the forking miner's tenure. // We cannot guarantee due to TooMuchChaining that the miner will mine inter_blocks_per_tenure - let min_num_miner_2_blocks = std::cmp::min( - miner_2_blocks, - min_miner_2_tenures * (inter_blocks_per_tenure + 1), - ); - assert!( - miner_2_tenures >= min_miner_2_tenures, - "Miner 2 failed to win its minimum number of tenures" - ); - assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_num_miner_2_blocks,); + // Must be at least the number of blocks mined by miner 1 and the number of blocks mined by miner 2 + // before the fork was initiated + assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_miner_2_blocks); assert_eq!( btc_blocks_mined, u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() From a7432bbe8965d43e23af3d7e9e7b1b28031213b0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 22 Oct 2024 13:00:45 -0500 Subject: [PATCH 1389/1400] feat: update tx estimator when tx yields too big tx error --- stackslib/src/chainstate/nakamoto/miner.rs | 11 +++- stackslib/src/chainstate/stacks/miner.rs | 68 ++++++++++++++++++++-- stackslib/src/chainstate/stacks/mod.rs | 10 ++-- 3 files changed, 79 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 4d99b53821..04401a0d9b 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -674,10 +674,19 @@ impl BlockBuilder for NakamotoBlockBuilder { tx.txid(), 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, &total_budget + ); + let mut measured_cost = cost_after; + let measured_cost = if measured_cost.sub(&cost_before).is_ok() { + Some(measured_cost) + } else { + warn!( + "Failed to compute measured cost of a too big transaction" ); + None + }; return TransactionResult::error( &tx, - Error::TransactionTooBigError, + Error::TransactionTooBigError(measured_cost), ); } else { warn!( diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index d3298855da..7fb08335a2 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1040,9 +1040,18 @@ impl<'a> StacksMicroblockBuilder<'a> { 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, &total_budget ); + let mut measured_cost = cost_after.clone(); + let measured_cost = if measured_cost.sub(cost_before).is_ok() { + Some(measured_cost) + } else { + warn!( + "Failed to compute measured cost of a too big transaction" + ); + None + }; return Ok(TransactionResult::error( &tx, - Error::TransactionTooBigError, + Error::TransactionTooBigError(measured_cost), )); } else { warn!( @@ -1323,7 +1332,22 @@ impl<'a> StacksMicroblockBuilder<'a> { return Ok(None); } } - Error::TransactionTooBigError => { + Error::TransactionTooBigError(measured_cost) => { + if update_estimator { + if let Some(measured_cost) = measured_cost { + if let Err(e) = estimator.notify_event( + &mempool_tx.tx.payload, + &measured_cost, + &block_limit, + &stacks_epoch_id, + ) { + warn!("Error updating estimator"; + "txid" => %mempool_tx.metadata.txid, + "error" => ?e); + } + } + } + invalidated_txs.push(mempool_tx.metadata.txid); } _ => {} @@ -2405,7 +2429,22 @@ impl StacksBlockBuilder { return Ok(None); } } - Error::TransactionTooBigError => { + Error::TransactionTooBigError(measured_cost) => { + if update_estimator { + if let Some(measured_cost) = measured_cost { + if let Err(e) = estimator.notify_event( + &txinfo.tx.payload, + &measured_cost, + &block_limit, + &stacks_epoch_id, + ) { + warn!("Error updating estimator"; + "txid" => %txinfo.metadata.txid, + "error" => ?e); + } + } + } + invalidated_txs.push(txinfo.metadata.txid); } Error::InvalidStacksTransaction(_, true) => { @@ -2714,9 +2753,18 @@ impl BlockBuilder for StacksBlockBuilder { 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, &total_budget ); + let mut measured_cost = cost_after; + let measured_cost = if measured_cost.sub(&cost_before).is_ok() { + Some(measured_cost) + } else { + warn!( + "Failed to compute measured cost of a too big transaction" + ); + None + }; return TransactionResult::error( &tx, - Error::TransactionTooBigError, + Error::TransactionTooBigError(measured_cost), ); } else { warn!( @@ -2795,9 +2843,19 @@ impl BlockBuilder for StacksBlockBuilder { 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, &total_budget ); + let mut measured_cost = cost_after; + let measured_cost = if measured_cost.sub(&cost_before).is_ok() { + Some(measured_cost) + } else { + warn!( + "Failed to compute measured cost of a too big transaction" + ); + None + }; + return TransactionResult::error( &tx, - Error::TransactionTooBigError, + Error::TransactionTooBigError(measured_cost), ); } else { warn!( diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 2ce250d991..8af9cf6ec7 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -104,7 +104,7 @@ pub enum Error { NotInSameFork, InvalidChainstateDB, BlockTooBigError, - TransactionTooBigError, + TransactionTooBigError(Option), BlockCostExceeded, NoTransactionsToMine, MicroblockStreamTooLongError, @@ -168,7 +168,9 @@ impl fmt::Display for Error { Error::NoSuchBlockError => write!(f, "No such Stacks block"), Error::InvalidChainstateDB => write!(f, "Invalid chainstate database"), Error::BlockTooBigError => write!(f, "Too much data in block"), - Error::TransactionTooBigError => write!(f, "Too much data in transaction"), + Error::TransactionTooBigError(ref c) => { + write!(f, "Too much data in transaction: measured_cost={c:?}") + } Error::BlockCostExceeded => write!(f, "Block execution budget exceeded"), Error::MicroblockStreamTooLongError => write!(f, "Too many microblocks in stream"), Error::IncompatibleSpendingConditionError => { @@ -246,7 +248,7 @@ impl error::Error for Error { Error::NoSuchBlockError => None, Error::InvalidChainstateDB => None, Error::BlockTooBigError => None, - Error::TransactionTooBigError => None, + Error::TransactionTooBigError(..) => None, Error::BlockCostExceeded => None, Error::MicroblockStreamTooLongError => None, Error::IncompatibleSpendingConditionError => None, @@ -291,7 +293,7 @@ impl Error { Error::NoSuchBlockError => "NoSuchBlockError", Error::InvalidChainstateDB => "InvalidChainstateDB", Error::BlockTooBigError => "BlockTooBigError", - Error::TransactionTooBigError => "TransactionTooBigError", + Error::TransactionTooBigError(..) => "TransactionTooBigError", Error::BlockCostExceeded => "BlockCostExceeded", Error::MicroblockStreamTooLongError => "MicroblockStreamTooLongError", Error::IncompatibleSpendingConditionError => "IncompatibleSpendingConditionError", From 5262c16873fe13c1a53c843e3a9c0215ddcd8411 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 22 Oct 2024 15:14:01 -0400 Subject: [PATCH 1390/1400] fix: retry `insert_payload` on failure --- testnet/stacks-node/src/event_dispatcher.rs | 39 +++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index b63f9d462e..bb05cd6128 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -355,6 +355,42 @@ impl EventObserver { Ok(()) } + /// Insert a payload into the database, retrying on failure. + fn insert_payload_with_retry( + conn: &Connection, + url: &str, + payload: &serde_json::Value, + timeout: Duration, + ) { + let mut attempts = 0i64; + let mut backoff = Duration::from_millis(100); // Initial backoff duration + let max_backoff = Duration::from_secs(5); // Cap the backoff duration + + loop { + match Self::insert_payload(conn, url, payload, timeout) { + Ok(_) => { + // Successful insert, break the loop + return; + } + Err(err) => { + // Log the error, then retry after a delay + warn!("Failed to insert payload into event observer database: {:?}", err; + "backoff" => ?backoff, + "attempts" => attempts + ); + + // Wait for the backoff duration + sleep(backoff); + + // Increase the backoff duration (with exponential backoff) + backoff = std::cmp::min(backoff.saturating_mul(2), max_backoff); + + attempts = attempts.saturating_add(1); + } + } + } + } + fn get_pending_payloads( conn: &Connection, ) -> Result, db_error> { @@ -524,8 +560,7 @@ impl EventObserver { Connection::open(db_path).expect("Failed to open database for event observer"); // Insert the new payload into the database - Self::insert_payload(&conn, &full_url, payload, self.timeout) - .expect("Failed to insert payload into event observer database"); + Self::insert_payload_with_retry(&conn, &full_url, payload, self.timeout); // Process all pending payloads Self::process_pending_payloads(&conn); From fb9f046603c9afd5705a4fdd02f42f7a694427f9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 22 Oct 2024 15:58:00 -0400 Subject: [PATCH 1391/1400] chore: update changelog for 3.0.0.0.0 --- CHANGELOG.md | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4025a66c3a..0304c8fbe5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,9 +5,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). -## [Unreleased] +## [3.0.0.0.0] -- Added support for Clarity 3 +### Added + +- Nakamoto consensus rules, activating in epoch 3.0 at block 867,867 +- Clarity 3, activating with epoch 3.0 - Keywords / variable - `tenure-height` added - `stacks-block-height` added @@ -16,10 +19,28 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - `get-stacks-block-info?` added - `get-tenure-info?` added - `get-block-info?` removed -- Added `/v3/signer/{signer_pubkey}/{reward_cycle}` endpoint -- Added `tenure_height` to `/v2/info` endpoint -- Added optional `timeout_ms` to `events_observer` configuration -- Added support for re-sending events to event observers across restarts +- New RPC endpoints + - `/v3/blocks/:block_id` + - `/v3/blocks/upload/` + - `/v3/signer/:signer_pubkey/:cycle_num` + - `/v3/sortitions` + - `/v3/stacker_set/:cycle_num` + - `/v3/tenures/:block_id` + - `/v3/tenures/fork_info/:start/:stop` + - `/v3/tenures/info` + - `/v3/tenures/tip/:consensus_hash` +- Re-send events to event observers across restarts +- Support custom chain-ids for testing +- Add `replay-block` command to CLI + +### Changed + +- Strict config file validation (unknown fields will cause the node to fail to start) +- Add optional `timeout_ms` to `events_observer` configuration +- Modified RPC endpoints + - Include `tenure_height` in `/v2/info` endpoint + - Include `block_time` and `tenure_height` in `/new/block` event payload +- Various improvements to logging, reducing log spam and improving log messages ## [2.5.0.0.7] From 98930ed1c54f5feec77217c348e5190795c2b98b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 22 Oct 2024 16:01:20 -0400 Subject: [PATCH 1392/1400] chore: add `[Unreleased]` and bold Nakamoto bullet --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0304c8fbe5..7548d55545 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,11 +5,13 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [Unreleased] + ## [3.0.0.0.0] ### Added -- Nakamoto consensus rules, activating in epoch 3.0 at block 867,867 +- **Nakamoto consensus rules, activating in epoch 3.0 at block 867,867** - Clarity 3, activating with epoch 3.0 - Keywords / variable - `tenure-height` added From 0cf78d269e019a21d10f4287f89d3bf3b2905adb Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 22 Oct 2024 16:02:13 -0400 Subject: [PATCH 1393/1400] chore: various improvements and bugfixes --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7548d55545..dfb9d8b809 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Include `tenure_height` in `/v2/info` endpoint - Include `block_time` and `tenure_height` in `/new/block` event payload - Various improvements to logging, reducing log spam and improving log messages +- Various improvements and bugfixes ## [2.5.0.0.7] From 1ba674fdd9eb58187df91f93e545b60be26763ba Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 22 Oct 2024 13:07:25 -0700 Subject: [PATCH 1394/1400] feat: update signer 3.0 changelog --- stacks-signer/CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index aa2b87deb7..489fd39cf7 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,24 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +## [3.0.0.0.0] + +### Added + +- Improved StackerDB message structures +- Improved mock signing during epoch 2.5 +- Include the `stacks-signer` binary version in startup logging and StackerDB messages +- Added a `monitor-signers` CLI command for better visibility into other signers on the network +- Support custom Chain ID in signer configuration +- Refresh the signer's sortition view when it sees a block proposal for a new tenure +- Fixed a race condition where a signer would try to update before StackerDB configuration was set + +### Changed + +- Migrate to new Stacks Node RPC endpoint `/v3/tenures/fork_info/:start/:stop` +- Improved chainstate storage for handling of forks and other state +- Updated prometheus metric labels to reduce high cardinality + ## [2.5.0.0.5.3] ### Added From 51879b990cce1fac8843632f2886d9e85759d190 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 22 Oct 2024 16:10:49 -0400 Subject: [PATCH 1395/1400] chore: add SIP link to changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dfb9d8b809..fe5e200d17 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,7 +11,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added -- **Nakamoto consensus rules, activating in epoch 3.0 at block 867,867** +- **Nakamoto consensus rules, activating in epoch 3.0 at block 867,867** (see [SIP-021](https://github.com/stacksgov/sips/blob/main/sips/sip-021/sip-021-nakamoto.md) for details) - Clarity 3, activating with epoch 3.0 - Keywords / variable - `tenure-height` added From 434317177bd294997bce13ba658870db06b96930 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 22 Oct 2024 14:13:59 -0700 Subject: [PATCH 1396/1400] PR comments - remove unused fields --- docs/mining.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/mining.md b/docs/mining.md index a2a914c998..4b3160d43b 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -13,14 +13,9 @@ seed = "YOUR PRIVATE KEY" #mock_mining = True [miner] -# Smallest allowed tx fee, in microSTX -min_tx_fee = 100 # Time to spend on the first attempt to make a block, in milliseconds. # This can be small, so your node gets a block-commit into the Bitcoin mempool early. first_attempt_time_ms = 1000 -# Time to spend on subsequent attempts to make a block, in milliseconds. -# This can be bigger -- new block-commits will be RBF'ed. -subsequent_attempt_time_ms = 60000 # Time to spend mining a Nakamoto block, in milliseconds. nakamoto_attempt_time_ms = 20000 From beb3e63f19e77b5db435e98fa1a6d7aac3b84cca Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 22 Oct 2024 14:15:37 -0700 Subject: [PATCH 1397/1400] remove unwanted field --- docs/mining.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/mining.md b/docs/mining.md index 4b3160d43b..34a299cd1c 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -13,9 +13,6 @@ seed = "YOUR PRIVATE KEY" #mock_mining = True [miner] -# Time to spend on the first attempt to make a block, in milliseconds. -# This can be small, so your node gets a block-commit into the Bitcoin mempool early. -first_attempt_time_ms = 1000 # Time to spend mining a Nakamoto block, in milliseconds. nakamoto_attempt_time_ms = 20000 From 5574a17530d1029c0037f0981caac62ae064c8a9 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 22 Oct 2024 14:17:21 -0700 Subject: [PATCH 1398/1400] uncomment signer event-observer for sample config --- testnet/stacks-node/conf/mainnet-signer.toml | 12 +++++------- testnet/stacks-node/conf/testnet-signer.toml | 12 +++++------- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/testnet/stacks-node/conf/mainnet-signer.toml b/testnet/stacks-node/conf/mainnet-signer.toml index 226fcae806..8683f076f2 100644 --- a/testnet/stacks-node/conf/mainnet-signer.toml +++ b/testnet/stacks-node/conf/mainnet-signer.toml @@ -14,11 +14,9 @@ peer_host = "127.0.0.1" # events_keys = ["*"] # timeout_ms = 60_000 -# Used if running a local stacks-signer service -# [[events_observer]] -# endpoint = "127.0.0.1:30000" -# events_keys = ["stackerdb", "block_proposal", "burn_blocks"] +[[events_observer]] +endpoint = "127.0.0.1:30000" +events_keys = ["stackerdb", "block_proposal", "burn_blocks"] -# Used if running a local stacks-signer service -# [connection_options] -# auth_token = "" # fill with a unique password +[connection_options] +auth_token = "" # fill with a unique password diff --git a/testnet/stacks-node/conf/testnet-signer.toml b/testnet/stacks-node/conf/testnet-signer.toml index 80226c5b89..f4a9bc3b71 100644 --- a/testnet/stacks-node/conf/testnet-signer.toml +++ b/testnet/stacks-node/conf/testnet-signer.toml @@ -18,14 +18,12 @@ pox_reward_length = 900 # events_keys = ["*"] # timeout_ms = 60_000 -# Used if running a local stacks-signer service -# [[events_observer]] -# endpoint = "127.0.0.1:30000" -# events_keys = ["stackerdb", "block_proposal", "burn_blocks"] +[[events_observer]] +endpoint = "127.0.0.1:30000" +events_keys = ["stackerdb", "block_proposal", "burn_blocks"] -# Used if running a local stacks-signer service -# [connection_options] -# auth_token = "" # fill with a unique password +[connection_options] +auth_token = "" # fill with a unique password [[ustx_balance]] address = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2" From 5b773d41e66be1b60a1802c03eebb6f5bcd9f24d Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 22 Oct 2024 14:20:36 -0700 Subject: [PATCH 1399/1400] adding commented event_observer for mainnet --- testnet/stacks-node/conf/mainnet-follower-conf.toml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index 941b349034..226fcae806 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -13,3 +13,12 @@ peer_host = "127.0.0.1" # endpoint = "localhost:3700" # events_keys = ["*"] # timeout_ms = 60_000 + +# Used if running a local stacks-signer service +# [[events_observer]] +# endpoint = "127.0.0.1:30000" +# events_keys = ["stackerdb", "block_proposal", "burn_blocks"] + +# Used if running a local stacks-signer service +# [connection_options] +# auth_token = "" # fill with a unique password From c1790367135fdc7b31f02603afd97418886339df Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 22 Oct 2024 16:55:37 -0700 Subject: [PATCH 1400/1400] Fix conf file to have valid port values in test Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/conf/testnet-miner-conf.toml | 4 ++-- testnet/stacks-node/src/config.rs | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml index 39af98b091..93455dcee5 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -10,8 +10,8 @@ mode = "krypton" peer_host = "127.0.0.1" username = "" password = "" -rpc_port = -peer_port = +rpc_port = 12345 # Bitcoin RPC port +peer_port = 6789 # Bitcoin P2P port pox_prepare_length = 100 pox_reward_length = 900 # Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 0658862246..0beed9471d 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -3033,8 +3033,9 @@ mod tests { if path.is_file() { let file_name = path.file_name().unwrap().to_str().unwrap(); if file_name.ends_with(".toml") { + debug!("Parsing config file: {file_name}"); let _config = ConfigFile::from_path(path.to_str().unwrap()).unwrap(); - debug!("Parsed config file: {}", file_name); + debug!("Parsed config file: {file_name}"); } } }